diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000..07c1c151ce --- /dev/null +++ b/.dockerignore @@ -0,0 +1,146 @@ +# Created by .ignore support plugin (hsz.mobi) +### Python template +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +.poetry/ +.github/ +vendor/bin/ +docs/ +website/ diff --git a/.gitignore b/.gitignore index ba8805e013..26bf7cf65f 100644 --- a/.gitignore +++ b/.gitignore @@ -64,7 +64,6 @@ coverage.xml .hypothesis/ .pytest_cache/ - # Node JS packages ################## node_modules @@ -92,4 +91,9 @@ website/i18n/* website/debug.log -website/.docusaurus \ No newline at end of file +website/.docusaurus + +# Poetry +######## + +.poetry/ \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 364555f8b2..b70f3f98f4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,61 @@ # Changelog + +## [2.17.1](https://github.com/pypeclub/openpype/tree/2.17.1) (2021-04-30) + +[Full Changelog](https://github.com/pypeclub/openpype/compare/2.17.0...2.17.1) + +**Enhancements:** + +- TVPaint frame range definition [\#1424](https://github.com/pypeclub/OpenPype/pull/1424) +- PS - group all published instances [\#1415](https://github.com/pypeclub/OpenPype/pull/1415) +- Nuke: deadline submission with gpu [\#1414](https://github.com/pypeclub/OpenPype/pull/1414) +- Add task name to context pop up. 
[\#1383](https://github.com/pypeclub/OpenPype/pull/1383) +- AE add duration validation [\#1363](https://github.com/pypeclub/OpenPype/pull/1363) +- Maya: Support for Redshift proxies [\#1360](https://github.com/pypeclub/OpenPype/pull/1360) + +**Fixed bugs:** + +- Nuke: fixing undo for loaded mov and sequence [\#1433](https://github.com/pypeclub/OpenPype/pull/1433) +- AE - validation for duration was 1 frame shorter [\#1426](https://github.com/pypeclub/OpenPype/pull/1426) +- Houdini menu filename [\#1417](https://github.com/pypeclub/OpenPype/pull/1417) +- Maya: Vray - problem getting all file nodes for look publishing [\#1399](https://github.com/pypeclub/OpenPype/pull/1399) + + +## [2.17.0](https://github.com/pypeclub/openpype/tree/2.17.0) (2021-04-20) + +[Full Changelog](https://github.com/pypeclub/openpype/compare/3.0.0-beta2...2.17.0) + +**Enhancements:** + +- Forward compatible ftrack group [\#1243](https://github.com/pypeclub/OpenPype/pull/1243) +- Maya: Make tx option configurable with presets [\#1328](https://github.com/pypeclub/OpenPype/pull/1328) +- TVPaint asset name validation [\#1302](https://github.com/pypeclub/OpenPype/pull/1302) +- TV Paint: Set initial project settings. [\#1299](https://github.com/pypeclub/OpenPype/pull/1299) +- TV Paint: Validate mark in and out. 
[\#1298](https://github.com/pypeclub/OpenPype/pull/1298) +- Validate project settings [\#1297](https://github.com/pypeclub/OpenPype/pull/1297) +- After Effects: added SubsetManager [\#1234](https://github.com/pypeclub/OpenPype/pull/1234) +- Show error message in pyblish UI [\#1206](https://github.com/pypeclub/OpenPype/pull/1206) + +**Fixed bugs:** + +- Hiero: fixing source frame from correct object [\#1362](https://github.com/pypeclub/OpenPype/pull/1362) +- Nuke: fix colourspace, prerenders and nuke panes opening [\#1308](https://github.com/pypeclub/OpenPype/pull/1308) +- AE remove orphaned instance from workfile - fix self.stub [\#1282](https://github.com/pypeclub/OpenPype/pull/1282) +- Nuke: deadline submission with search replaced env values from preset [\#1194](https://github.com/pypeclub/OpenPype/pull/1194) +- Ftrack custom attributes in bulks [\#1312](https://github.com/pypeclub/OpenPype/pull/1312) +- Ftrack optional pypclub role [\#1303](https://github.com/pypeclub/OpenPype/pull/1303) +- After Effects: remove orphaned instances [\#1275](https://github.com/pypeclub/OpenPype/pull/1275) +- Avalon schema names [\#1242](https://github.com/pypeclub/OpenPype/pull/1242) +- Handle duplication of Task name [\#1226](https://github.com/pypeclub/OpenPype/pull/1226) +- Modified path of plugin loads for Harmony and TVPaint [\#1217](https://github.com/pypeclub/OpenPype/pull/1217) +- Regex checks in profiles filtering [\#1214](https://github.com/pypeclub/OpenPype/pull/1214) +- Bulk mov strict task [\#1204](https://github.com/pypeclub/OpenPype/pull/1204) +- Update custom ftrack session attributes [\#1202](https://github.com/pypeclub/OpenPype/pull/1202) +- Nuke: write node colorspace ignore `default\(\)` label [\#1199](https://github.com/pypeclub/OpenPype/pull/1199) +- Nuke: reverse search to make it more versatile [\#1178](https://github.com/pypeclub/OpenPype/pull/1178) + + ## [2.16.1](https://github.com/pypeclub/pype/tree/2.16.1) (2021-04-13) [Full 
Changelog](https://github.com/pypeclub/pype/compare/2.16.0...2.16.1) diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000..2d8ed27b15 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,82 @@ +# Build Pype docker image +FROM centos:7 AS builder +ARG OPENPYPE_PYTHON_VERSION=3.7.10 + +LABEL org.opencontainers.image.name="pypeclub/openpype" +LABEL org.opencontainers.image.title="OpenPype Docker Image" +LABEL org.opencontainers.image.url="https://openpype.io/" +LABEL org.opencontainers.image.source="https://github.com/pypeclub/pype" + +USER root + +# update base +RUN yum -y install deltarpm \ + && yum -y update \ + && yum clean all + +# add tools we need +RUN yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm \ + && yum -y install centos-release-scl \ + && yum -y install \ + bash \ + which \ + git \ + devtoolset-7-gcc* \ + make \ + cmake \ + curl \ + wget \ + gcc \ + zlib-devel \ + bzip2 \ + bzip2-devel \ + readline-devel \ + sqlite sqlite-devel \ + openssl-devel \ + tk-devel libffi-devel \ + qt5-qtbase-devel \ + patchelf \ + && yum clean all + +RUN mkdir /opt/openpype +# RUN useradd -m pype +# RUN chown pype /opt/openpype +# USER pype + +RUN curl https://pyenv.run | bash +ENV PYTHON_CONFIGURE_OPTS --enable-shared + +RUN echo 'export PATH="$HOME/.pyenv/bin:$PATH"'>> $HOME/.bashrc \ + && echo 'eval "$(pyenv init -)"' >> $HOME/.bashrc \ + && echo 'eval "$(pyenv virtualenv-init -)"' >> $HOME/.bashrc \ + && echo 'eval "$(pyenv init --path)"' >> $HOME/.bashrc +RUN source $HOME/.bashrc && pyenv install ${OPENPYPE_PYTHON_VERSION} + +COPY . /opt/openpype/ +RUN rm -rf /openpype/.poetry || echo "No Poetry installed yet." 
+# USER root +# RUN chown -R pype /opt/openpype +RUN chmod +x /opt/openpype/tools/create_env.sh && chmod +x /opt/openpype/tools/build.sh + +# USER pype + +WORKDIR /opt/openpype + +RUN cd /opt/openpype \ + && source $HOME/.bashrc \ + && pyenv local ${OPENPYPE_PYTHON_VERSION} + +RUN source $HOME/.bashrc \ + && ./tools/create_env.sh + +RUN source $HOME/.bashrc \ + && ./tools/fetch_thirdparty_libs.sh + +RUN source $HOME/.bashrc \ + && bash ./tools/build.sh \ + && cp /usr/lib64/libffi* ./build/exe.linux-x86_64-3.7/lib \ + && cp /usr/lib64/libssl* ./build/exe.linux-x86_64-3.7/lib \ + && cp /usr/lib64/libcrypto* ./build/exe.linux-x86_64-3.7/lib + +RUN cd /opt/openpype \ + rm -rf ./vendor/bin diff --git a/igniter/Poppins/OFL.txt b/igniter/Poppins/OFL.txt new file mode 100644 index 0000000000..76df3b5656 --- /dev/null +++ b/igniter/Poppins/OFL.txt @@ -0,0 +1,93 @@ +Copyright 2020 The Poppins Project Authors (https://github.com/itfoundry/Poppins) + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +http://scripts.sil.org/OFL + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. 
The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. 
+ +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. 
diff --git a/igniter/Poppins/Poppins-Black.ttf b/igniter/Poppins/Poppins-Black.ttf new file mode 100644 index 0000000000..a9520b78ac Binary files /dev/null and b/igniter/Poppins/Poppins-Black.ttf differ diff --git a/igniter/Poppins/Poppins-BlackItalic.ttf b/igniter/Poppins/Poppins-BlackItalic.ttf new file mode 100644 index 0000000000..ebfdd707e5 Binary files /dev/null and b/igniter/Poppins/Poppins-BlackItalic.ttf differ diff --git a/igniter/Poppins/Poppins-Bold.ttf b/igniter/Poppins/Poppins-Bold.ttf new file mode 100644 index 0000000000..b94d47f3af Binary files /dev/null and b/igniter/Poppins/Poppins-Bold.ttf differ diff --git a/igniter/Poppins/Poppins-BoldItalic.ttf b/igniter/Poppins/Poppins-BoldItalic.ttf new file mode 100644 index 0000000000..e2e64456c7 Binary files /dev/null and b/igniter/Poppins/Poppins-BoldItalic.ttf differ diff --git a/igniter/Poppins/Poppins-ExtraBold.ttf b/igniter/Poppins/Poppins-ExtraBold.ttf new file mode 100644 index 0000000000..8f008c3684 Binary files /dev/null and b/igniter/Poppins/Poppins-ExtraBold.ttf differ diff --git a/igniter/Poppins/Poppins-ExtraBoldItalic.ttf b/igniter/Poppins/Poppins-ExtraBoldItalic.ttf new file mode 100644 index 0000000000..b2a9bf557a Binary files /dev/null and b/igniter/Poppins/Poppins-ExtraBoldItalic.ttf differ diff --git a/igniter/Poppins/Poppins-ExtraLight.ttf b/igniter/Poppins/Poppins-ExtraLight.ttf new file mode 100644 index 0000000000..ee6238251f Binary files /dev/null and b/igniter/Poppins/Poppins-ExtraLight.ttf differ diff --git a/igniter/Poppins/Poppins-ExtraLightItalic.ttf b/igniter/Poppins/Poppins-ExtraLightItalic.ttf new file mode 100644 index 0000000000..e392492abd Binary files /dev/null and b/igniter/Poppins/Poppins-ExtraLightItalic.ttf differ diff --git a/igniter/Poppins/Poppins-Italic.ttf b/igniter/Poppins/Poppins-Italic.ttf new file mode 100644 index 0000000000..46203996d3 Binary files /dev/null and b/igniter/Poppins/Poppins-Italic.ttf differ diff --git a/igniter/Poppins/Poppins-Light.ttf 
b/igniter/Poppins/Poppins-Light.ttf new file mode 100644 index 0000000000..2ab022196b Binary files /dev/null and b/igniter/Poppins/Poppins-Light.ttf differ diff --git a/igniter/Poppins/Poppins-LightItalic.ttf b/igniter/Poppins/Poppins-LightItalic.ttf new file mode 100644 index 0000000000..6f9279daef Binary files /dev/null and b/igniter/Poppins/Poppins-LightItalic.ttf differ diff --git a/igniter/Poppins/Poppins-Medium.ttf b/igniter/Poppins/Poppins-Medium.ttf new file mode 100644 index 0000000000..e90e87ed69 Binary files /dev/null and b/igniter/Poppins/Poppins-Medium.ttf differ diff --git a/igniter/Poppins/Poppins-MediumItalic.ttf b/igniter/Poppins/Poppins-MediumItalic.ttf new file mode 100644 index 0000000000..d8a251c7c4 Binary files /dev/null and b/igniter/Poppins/Poppins-MediumItalic.ttf differ diff --git a/igniter/Poppins/Poppins-Regular.ttf b/igniter/Poppins/Poppins-Regular.ttf new file mode 100644 index 0000000000..be06e7fdca Binary files /dev/null and b/igniter/Poppins/Poppins-Regular.ttf differ diff --git a/igniter/Poppins/Poppins-SemiBold.ttf b/igniter/Poppins/Poppins-SemiBold.ttf new file mode 100644 index 0000000000..dabf7c242e Binary files /dev/null and b/igniter/Poppins/Poppins-SemiBold.ttf differ diff --git a/igniter/Poppins/Poppins-SemiBoldItalic.ttf b/igniter/Poppins/Poppins-SemiBoldItalic.ttf new file mode 100644 index 0000000000..29d5f7419b Binary files /dev/null and b/igniter/Poppins/Poppins-SemiBoldItalic.ttf differ diff --git a/igniter/Poppins/Poppins-Thin.ttf b/igniter/Poppins/Poppins-Thin.ttf new file mode 100644 index 0000000000..f5c0fdd531 Binary files /dev/null and b/igniter/Poppins/Poppins-Thin.ttf differ diff --git a/igniter/Poppins/Poppins-ThinItalic.ttf b/igniter/Poppins/Poppins-ThinItalic.ttf new file mode 100644 index 0000000000..b910089316 Binary files /dev/null and b/igniter/Poppins/Poppins-ThinItalic.ttf differ diff --git a/igniter/__init__.py b/igniter/__init__.py index c2442ad57f..20bf9be106 100644 --- a/igniter/__init__.py +++ 
b/igniter/__init__.py @@ -10,29 +10,22 @@ from .bootstrap_repos import BootstrapRepos from .version import __version__ as version -RESULT = 0 - - -def get_result(res: int): - """Sets result returned from dialog.""" - global RESULT - RESULT = res - - def open_dialog(): """Show Igniter dialog.""" - from Qt import QtWidgets + from Qt import QtWidgets, QtCore from .install_dialog import InstallDialog + scale_attr = getattr(QtCore.Qt, "AA_EnableHighDpiScaling", None) + if scale_attr is not None: + QtWidgets.QApplication.setAttribute(scale_attr) + app = QtWidgets.QApplication(sys.argv) d = InstallDialog() - d.finished.connect(get_result) d.open() - app.exec() - - return RESULT + app.exec_() + return d.result() __all__ = [ diff --git a/igniter/bootstrap_repos.py b/igniter/bootstrap_repos.py index f624b96125..b44689ba89 100644 --- a/igniter/bootstrap_repos.py +++ b/igniter/bootstrap_repos.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- """Bootstrap OpenPype repositories.""" -import functools +from __future__ import annotations import logging as log import os import re @@ -9,10 +9,12 @@ import sys import tempfile from pathlib import Path from typing import Union, Callable, List, Tuple + from zipfile import ZipFile, BadZipFile from appdirs import user_data_dir from speedcopy import copyfile +import semver from .user_settings import ( OpenPypeSecureRegistry, @@ -26,159 +28,138 @@ LOG_WARNING = 1 LOG_ERROR = 3 -@functools.total_ordering -class OpenPypeVersion: +class OpenPypeVersion(semver.VersionInfo): """Class for storing information about OpenPype version. 
Attributes: - major (int): [1].2.3-client-variant - minor (int): 1.[2].3-client-variant - subversion (int): 1.2.[3]-client-variant - client (str): 1.2.3-[client]-variant - variant (str): 1.2.3-client-[variant] + staging (bool): True if it is staging version path (str): path to OpenPype """ - major = 0 - minor = 0 - subversion = 0 - variant = "" - client = None + staging = False path = None + _VERSION_REGEX = re.compile(r"(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)(?:-(?P(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$") # noqa: E501 - _version_regex = re.compile( - r"(?P\d+)\.(?P\d+)\.(?P\d+)(-(?Pstaging)|-(?P.+)(-(?Pstaging)))?") # noqa: E501 + def __init__(self, *args, **kwargs): + """Create OpenPype version. - @property - def version(self): - """return formatted version string.""" - return self._compose_version() + .. deprecated:: 3.0.0-rc.2 + `client` and `variant` are removed. - @version.setter - def version(self, val): - decomposed = self._decompose_version(val) - self.major = decomposed[0] - self.minor = decomposed[1] - self.subversion = decomposed[2] - self.variant = decomposed[3] - self.client = decomposed[4] - def __init__(self, major: int = None, minor: int = None, - subversion: int = None, version: str = None, - variant: str = "", client: str = None, - path: Path = None): - self.path = path + Args: + major (int): version when you make incompatible API changes. + minor (int): version when you add functionality in a + backwards-compatible manner. + patch (int): version when you make backwards-compatible bug fixes. + prerelease (str): an optional prerelease string + build (str): an optional build string + version (str): if set, it will be parsed and will override + parameters like `major`, `minor` and so on. + staging (bool): set to True if version is staging. + path (Path): path to version location. 
- if ( - major is None or minor is None or subversion is None - ) and version is None: - raise ValueError("Need version specified in some way.") - if version: - values = self._decompose_version(version) - self.major = values[0] - self.minor = values[1] - self.subversion = values[2] - self.variant = values[3] - self.client = values[4] - else: - self.major = major - self.minor = minor - self.subversion = subversion - # variant is set only if it is "staging", otherwise "production" is - # implied and no need to mention it in version string. - if variant == "staging": - self.variant = variant - self.client = client + """ + self.path = None + self.staging = False - def _compose_version(self): - version = "{}.{}.{}".format(self.major, self.minor, self.subversion) + if "version" in kwargs.keys(): + if not kwargs.get("version"): + raise ValueError("Invalid version specified") + v = OpenPypeVersion.parse(kwargs.get("version")) + kwargs["major"] = v.major + kwargs["minor"] = v.minor + kwargs["patch"] = v.patch + kwargs["prerelease"] = v.prerelease + kwargs["build"] = v.build + kwargs.pop("version") - if self.client: - version = "{}-{}".format(version, self.client) + if kwargs.get("path"): + if isinstance(kwargs.get("path"), str): + self.path = Path(kwargs.get("path")) + elif isinstance(kwargs.get("path"), Path): + self.path = kwargs.get("path") + else: + raise TypeError("Path must be str or Path") + kwargs.pop("path") - if self.variant == "staging": - version = "{}-{}".format(version, self.variant) + if "path" in kwargs.keys(): + kwargs.pop("path") - return version + if kwargs.get("staging"): + self.staging = kwargs.get("staging", False) + kwargs.pop("staging") - @classmethod - def _decompose_version(cls, version_string: str) -> tuple: - m = re.search(cls._version_regex, version_string) - if not m: - raise ValueError( - "Cannot parse version string: {}".format(version_string)) + if "staging" in kwargs.keys(): + kwargs.pop("staging") - variant = None - if m.group("var1") == 
"staging" or m.group("var2") == "staging": - variant = "staging" + if self.staging: + if kwargs.get("build"): + if "staging" not in kwargs.get("build"): + kwargs["build"] = "{}-staging".format(kwargs.get("build")) + else: + kwargs["build"] = "staging" - client = m.group("client") + if kwargs.get("build") and "staging" in kwargs.get("build", ""): + self.staging = True - return (int(m.group("major")), int(m.group("minor")), - int(m.group("sub")), variant, client) + super().__init__(*args, **kwargs) def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - return self.version == other.version - - def __str__(self): - return self.version + result = super().__eq__(other) + return bool(result and self.staging == other.staging) def __repr__(self): - return "{}, {}: {}".format( - self.__class__.__name__, self.version, self.path) - - def __hash__(self): - return hash(self.version) - - def __lt__(self, other): - if (self.major, self.minor, self.subversion) < \ - (other.major, other.minor, other.subversion): - return True - - # 1.2.3-staging < 1.2.3-client-staging - if self.get_main_version() == other.get_main_version() and \ - not self.client and self.variant and \ - other.client and other.variant: - return True - - # 1.2.3 < 1.2.3-staging - if self.get_main_version() == other.get_main_version() and \ - not self.client and self.variant and \ - not other.client and not other.variant: - return True - - # 1.2.3 < 1.2.3-client - if self.get_main_version() == other.get_main_version() and \ - not self.client and not self.variant and \ - other.client and not other.variant: - return True - - # 1.2.3 < 1.2.3-client-staging - if self.get_main_version() == other.get_main_version() and \ - not self.client and not self.variant and other.client: - return True - - # 1.2.3-client-staging < 1.2.3-client - if self.get_main_version() == other.get_main_version() and \ - self.client and self.variant and \ - other.client and not other.variant: - return True + return 
"<{}: {} - path={}>".format( + self.__class__.__name__, str(self), self.path) + def __lt__(self, other: OpenPypeVersion): + result = super().__lt__(other) # prefer path over no path - if self.version == other.version and \ - not self.path and other.path: + if self == other and not self.path and other.path: return True - # prefer path with dir over path with file - return self.version == other.version and self.path and \ - other.path and self.path.is_file() and \ - other.path.is_dir() + if self == other and self.path and other.path and \ + other.path.is_dir() and self.path.is_file(): + return True + + if self.finalize_version() == other.finalize_version() and \ + self.prerelease == other.prerelease and \ + self.is_staging() and not other.is_staging(): + return True + + return result + + def set_staging(self) -> OpenPypeVersion: + """Set version as staging and return it. + + This will preserve current one. + + Returns: + OpenPypeVersion: Set as staging. + + """ + if self.staging: + return self + return self.replace(parts={"build": f"{self.build}-staging"}) + + def set_production(self) -> OpenPypeVersion: + """Set version as production and return it. + + This will preserve current one. + + Returns: + OpenPypeVersion: Set as production. + + """ + if not self.staging: + return self + return self.replace( + parts={"build": self.build.replace("-staging", "")}) def is_staging(self) -> bool: """Test if current version is staging one.""" - return self.variant == "staging" + return self.staging def get_main_version(self) -> str: """Return main version component. @@ -186,11 +167,13 @@ class OpenPypeVersion: This returns x.x.x part of version from possibly more complex one like x.x.x-foo-bar. + .. deprecated:: 3.0.0-rc.2 + use `finalize_version()` instead. 
Returns: str: main version component """ - return "{}.{}.{}".format(self.major, self.minor, self.subversion) + return str(self.finalize_version()) @staticmethod def version_in_str(string: str) -> Tuple: @@ -203,15 +186,22 @@ class OpenPypeVersion: tuple: True/False and OpenPypeVersion if found. """ - try: - result = OpenPypeVersion._decompose_version(string) - except ValueError: + m = re.search(OpenPypeVersion._VERSION_REGEX, string) + if not m: return False, None - return True, OpenPypeVersion(major=result[0], - minor=result[1], - subversion=result[2], - variant=result[3], - client=result[4]) + version = OpenPypeVersion.parse(string[m.start():m.end()]) + return True, version + + @classmethod + def parse(cls, version): + """Extends parse to handle ta handle staging variant.""" + v = super().parse(version) + openpype_version = cls(major=v.major, minor=v.minor, + patch=v.patch, prerelease=v.prerelease, + build=v.build) + if v.build and "staging" in v.build: + openpype_version.staging = True + return openpype_version class BootstrapRepos: @@ -223,7 +213,7 @@ class BootstrapRepos: otherwise `None`. registry (OpenPypeSettingsRegistry): OpenPype registry object. zip_filter (list): List of files to exclude from zip - openpype_filter (list): list of top level directories not to + openpype_filter (list): list of top level directories to include in zip in OpenPype repository. """ @@ -246,7 +236,7 @@ class BootstrapRepos: self.registry = OpenPypeSettingsRegistry() self.zip_filter = [".pyc", "__pycache__"] self.openpype_filter = [ - "build", "docs", "tests", "tools", "venv", "coverage" + "openpype", "repos", "schema", "LICENSE" ] self._message = message @@ -269,7 +259,7 @@ class BootstrapRepos: """Get path for specific version in list of OpenPype versions. Args: - version (str): Version string to look for (1.2.4-staging) + version (str): Version string to look for (1.2.4+staging) version_list (list of OpenPypeVersion): list of version to search. 
Returns: @@ -285,7 +275,7 @@ class BootstrapRepos: """Get version of local OpenPype.""" version = {} - path = Path(os.path.dirname(__file__)).parent / "openpype" / "version.py" + path = Path(os.environ["OPENPYPE_ROOT"]) / "openpype" / "version.py" with open(path, "r") as fp: exec(fp.read(), version) return version["__version__"] @@ -423,18 +413,13 @@ class BootstrapRepos: """ frozen_root = Path(sys.executable).parent - # from frozen code we need igniter, openpype, schema vendor - openpype_list = self._filter_dir( - frozen_root / "openpype", self.zip_filter) - openpype_list += self._filter_dir( - frozen_root / "igniter", self.zip_filter) - openpype_list += self._filter_dir( - frozen_root / "repos", self.zip_filter) - openpype_list += self._filter_dir( - frozen_root / "schema", self.zip_filter) - openpype_list += self._filter_dir( - frozen_root / "vendor", self.zip_filter) - openpype_list.append(frozen_root / "LICENSE") + openpype_list = [] + for f in self.openpype_filter: + if (frozen_root / f).is_dir(): + openpype_list += self._filter_dir( + frozen_root / f, self.zip_filter) + else: + openpype_list.append(frozen_root / f) version = self.get_version(frozen_root) @@ -477,11 +462,16 @@ class BootstrapRepos: openpype_path (Path): Path to OpenPype sources. 
""" - openpype_list = [] - openpype_inc = 0 - # get filtered list of file in Pype repository - openpype_list = self._filter_dir(openpype_path, self.zip_filter) + # openpype_list = self._filter_dir(openpype_path, self.zip_filter) + openpype_list = [] + for f in self.openpype_filter: + if (openpype_path / f).is_dir(): + openpype_list += self._filter_dir( + openpype_path / f, self.zip_filter) + else: + openpype_list.append(openpype_path / f) + openpype_files = len(openpype_list) openpype_inc = 98.0 / float(openpype_files) @@ -506,7 +496,7 @@ class BootstrapRepos: except ValueError: pass - if is_inside: + if not is_inside: continue processed_path = file @@ -575,7 +565,7 @@ class BootstrapRepos: """ sys.path.insert(0, directory.as_posix()) - directory = directory / "repos" + directory /= "repos" if not directory.exists() and not directory.is_dir(): raise ValueError("directory is invalid") @@ -632,7 +622,7 @@ class BootstrapRepos: " not implemented yet.")) dir_to_search = self.data_dir - + user_versions = self.get_openpype_versions(self.data_dir, staging) # if we have openpype_path specified, search only there. if openpype_path: dir_to_search = openpype_path @@ -652,6 +642,7 @@ class BootstrapRepos: pass openpype_versions = self.get_openpype_versions(dir_to_search, staging) + openpype_versions += user_versions # remove zip file version if needed. if not include_zips: @@ -681,7 +672,7 @@ class BootstrapRepos: openpype_path = None # try to get OpenPype path from mongo. if location.startswith("mongodb"): - pype_path = get_openpype_path_from_db(location) + openpype_path = get_openpype_path_from_db(location) if not openpype_path: self._print("cannot find OPENPYPE_PATH in settings.") return None @@ -764,12 +755,13 @@ class BootstrapRepos: destination = self.data_dir / version.path.stem if destination.exists(): + assert destination.is_dir() try: - destination.unlink() - except OSError: + shutil.rmtree(destination) + except OSError as e: msg = f"!!! 
Cannot remove already existing {destination}" self._print(msg, LOG_ERROR, exc_info=True) - return None + raise e destination.mkdir(parents=True) @@ -808,7 +800,7 @@ class BootstrapRepos: """Install OpenPype version to user data directory. Args: - oepnpype_version (OpenPypeVersion): OpenPype version to install. + openpype_version (OpenPypeVersion): OpenPype version to install. force (bool, optional): Force overwrite existing version. Returns: @@ -821,7 +813,6 @@ class BootstrapRepos: OpenPypeVersionIOError: If copying or zipping fail. """ - if self.is_inside_user_data(openpype_version.path) and not openpype_version.path.is_file(): # noqa raise OpenPypeVersionExists( "OpenPype already inside user data dir") @@ -868,26 +859,20 @@ class BootstrapRepos: # set zip as version source openpype_version.path = temp_zip + if self.is_inside_user_data(openpype_version.path): + raise OpenPypeVersionInvalid( + "Version is in user data dir.") + openpype_version.path = self._copy_zip( + openpype_version.path, destination) + elif openpype_version.path.is_file(): # check if file is zip (by extension) if openpype_version.path.suffix.lower() != ".zip": raise OpenPypeVersionInvalid("Invalid file format") - if not self.is_inside_user_data(openpype_version.path): - try: - # copy file to destination - self._print("Copying zip to destination ...") - _destination_zip = destination.parent / openpype_version.path.name # noqa: E501 - copyfile( - openpype_version.path.as_posix(), - _destination_zip.as_posix()) - except OSError as e: - self._print( - "cannot copy version to user data directory", LOG_ERROR, - exc_info=True) - raise OpenPypeVersionIOError(( - f"can't copy version {openpype_version.path.as_posix()} " - f"to destination {destination.parent.as_posix()}")) from e + if not self.is_inside_user_data(openpype_version.path): + openpype_version.path = self._copy_zip( + openpype_version.path, destination) # extract zip there self._print("extracting zip to destination ...") @@ -896,6 +881,23 @@ 
class BootstrapRepos: return destination + def _copy_zip(self, source: Path, destination: Path) -> Path: + try: + # copy file to destination + self._print("Copying zip to destination ...") + _destination_zip = destination.parent / source.name # noqa: E501 + copyfile( + source.as_posix(), + _destination_zip.as_posix()) + except OSError as e: + self._print( + "cannot copy version to user data directory", LOG_ERROR, + exc_info=True) + raise OpenPypeVersionIOError(( + f"can't copy version {source.as_posix()} " + f"to destination {destination.parent.as_posix()}")) from e + return _destination_zip + def _is_openpype_in_dir(self, dir_item: Path, detected_version: OpenPypeVersion) -> bool: diff --git a/igniter/install_dialog.py b/igniter/install_dialog.py index 27b2d1fe37..1ec8cc6768 100644 --- a/igniter/install_dialog.py +++ b/igniter/install_dialog.py @@ -2,14 +2,15 @@ """Show dialog for choosing central pype repository.""" import os import sys +import re +import collections from Qt import QtCore, QtGui, QtWidgets # noqa from Qt.QtGui import QValidator # noqa from Qt.QtCore import QTimer # noqa -from .install_thread import InstallThread, InstallResult +from .install_thread import InstallThread from .tools import ( - validate_path_string, validate_mongo_connection, get_openpype_path_from_db ) @@ -17,504 +18,483 @@ from .user_settings import OpenPypeSecureRegistry from .version import __version__ -class FocusHandlingLineEdit(QtWidgets.QLineEdit): - """Handling focus in/out on QLineEdit.""" - focusIn = QtCore.Signal() - focusOut = QtCore.Signal() +def load_stylesheet(): + stylesheet_path = os.path.join( + os.path.dirname(__file__), + "stylesheet.css" + ) + with open(stylesheet_path, "r") as file_stream: + stylesheet = file_stream.read() - def focusOutEvent(self, event): # noqa - """For emitting signal on focus out.""" - self.focusOut.emit() - super().focusOutEvent(event) + return stylesheet - def focusInEvent(self, event): # noqa - """For emitting signal on focus in.""" - 
self.focusIn.emit() - super().focusInEvent(event) + +class ButtonWithOptions(QtWidgets.QFrame): + option_clicked = QtCore.Signal(str) + + def __init__(self, commands, parent=None): + super(ButtonWithOptions, self).__init__(parent) + + self.setObjectName("ButtonWithOptions") + + options_btn = QtWidgets.QToolButton(self) + options_btn.setArrowType(QtCore.Qt.DownArrow) + options_btn.setIconSize(QtCore.QSize(12, 12)) + + default = None + default_label = None + options_menu = QtWidgets.QMenu(self) + for option, option_label in commands.items(): + if default is None: + default = option + default_label = option_label + continue + action = QtWidgets.QAction(option_label, options_menu) + action.setData(option) + options_menu.addAction(action) + + main_btn = QtWidgets.QPushButton(default_label, self) + main_btn.setFlat(True) + + main_layout = QtWidgets.QHBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.setSpacing(1) + + main_layout.addWidget(main_btn, 1, QtCore.Qt.AlignVCenter) + main_layout.addWidget(options_btn, 0, QtCore.Qt.AlignVCenter) + + main_btn.clicked.connect(self._on_main_button) + options_btn.clicked.connect(self._on_options_click) + options_menu.triggered.connect(self._on_trigger) + + self.main_btn = main_btn + self.options_btn = options_btn + self.options_menu = options_menu + + options_btn.setEnabled(not options_menu.isEmpty()) + + self._default_value = default + + def resizeEvent(self, event): + super(ButtonWithOptions, self).resizeEvent(event) + self.options_btn.setFixedHeight(self.main_btn.height()) + + def _on_options_click(self): + pos = self.main_btn.rect().bottomLeft() + point = self.main_btn.mapToGlobal(pos) + self.options_menu.popup(point) + + def _on_trigger(self, action): + self.option_clicked.emit(action.data()) + + def _on_main_button(self): + self.option_clicked.emit(self._default_value) + + +class NiceProgressBar(QtWidgets.QProgressBar): + def __init__(self, parent=None): + super(NiceProgressBar, self).__init__(parent) 
+ self._real_value = 0 + + def setValue(self, value): + self._real_value = value + if value != 0 and value < 11: + value = 11 + + super(NiceProgressBar, self).setValue(value) + + def value(self): + return self._real_value + + def text(self): + return "{} %".format(self._real_value) + + +class ConsoleWidget(QtWidgets.QWidget): + def __init__(self, parent=None): + super(ConsoleWidget, self).__init__(parent) + + # style for normal and error console text + default_console_style = QtGui.QTextCharFormat() + error_console_style = QtGui.QTextCharFormat() + default_console_style.setForeground( + QtGui.QColor.fromRgb(72, 200, 150) + ) + error_console_style.setForeground( + QtGui.QColor.fromRgb(184, 54, 19) + ) + + label = QtWidgets.QLabel("Console:", self) + + console_output = QtWidgets.QPlainTextEdit(self) + console_output.setMinimumSize(QtCore.QSize(300, 200)) + console_output.setReadOnly(True) + console_output.setCurrentCharFormat(default_console_style) + console_output.setObjectName("Console") + + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.addWidget(label, 0) + main_layout.addWidget(console_output, 1) + + self.default_console_style = default_console_style + self.error_console_style = error_console_style + + self.label = label + self.console_output = console_output + + self.hide_console() + + def hide_console(self): + self.label.setVisible(False) + self.console_output.setVisible(False) + + self.updateGeometry() + + def show_console(self): + self.label.setVisible(True) + self.console_output.setVisible(True) + + self.updateGeometry() + + def update_console(self, msg: str, error: bool = False) -> None: + if not error: + self.console_output.setCurrentCharFormat( + self.default_console_style + ) + else: + self.console_output.setCurrentCharFormat( + self.error_console_style + ) + self.console_output.appendPlainText(msg) + + +class MongoUrlInput(QtWidgets.QLineEdit): + """Widget to input mongodb URL.""" + + def 
set_valid(self): + """Set valid state on mongo url input.""" + self.setProperty("state", "valid") + self.style().polish(self) + + def remove_state(self): + """Set invalid state on mongo url input.""" + self.setProperty("state", "") + self.style().polish(self) + + def set_invalid(self): + """Set invalid state on mongo url input.""" + self.setProperty("state", "invalid") + self.style().polish(self) class InstallDialog(QtWidgets.QDialog): """Main Igniter dialog window.""" - _size_w = 400 - _size_h = 600 - path = "" - _controls_disabled = False + + mongo_url_regex = re.compile(r"^(mongodb|mongodb\+srv)://.*?") + + _width = 500 + _height = 200 + commands = collections.OrderedDict([ + ("run", "Start"), + ("run_from_code", "Run from code") + ]) def __init__(self, parent=None): super(InstallDialog, self).__init__(parent) - self.secure_registry = OpenPypeSecureRegistry("mongodb") - self.mongo_url = "" + self.setWindowTitle( + f"OpenPype Igniter {__version__}" + ) + self.setWindowFlags( + QtCore.Qt.WindowCloseButtonHint + | QtCore.Qt.WindowMinimizeButtonHint + ) + + current_dir = os.path.dirname(os.path.abspath(__file__)) + roboto_font_path = os.path.join(current_dir, "RobotoMono-Regular.ttf") + poppins_font_path = os.path.join(current_dir, "Poppins") + icon_path = os.path.join(current_dir, "openpype_icon.png") + + # Install roboto font + QtGui.QFontDatabase.addApplicationFont(roboto_font_path) + for filename in os.listdir(poppins_font_path): + if os.path.splitext(filename)[1] == ".ttf": + QtGui.QFontDatabase.addApplicationFont(filename) + + # Load logo + pixmap_openpype_logo = QtGui.QPixmap(icon_path) + # Set logo as icon of window + self.setWindowIcon(QtGui.QIcon(pixmap_openpype_logo)) + + secure_registry = OpenPypeSecureRegistry("mongodb") + mongo_url = "" try: - self.mongo_url = ( + mongo_url = ( os.getenv("OPENPYPE_MONGO", "") - or self.secure_registry.get_item("openPypeMongo") + or secure_registry.get_item("openPypeMongo") ) except ValueError: pass - 
self.setWindowTitle( - f"OpenPype Igniter {__version__} - OpenPype installation") - self._icon_path = os.path.join( - os.path.dirname(__file__), 'openpype_icon.png') - icon = QtGui.QIcon(self._icon_path) - self.setWindowIcon(icon) - self.setWindowFlags( - QtCore.Qt.WindowCloseButtonHint | - QtCore.Qt.WindowMinimizeButtonHint - ) + self.mongo_url = mongo_url + self._pixmap_openpype_logo = pixmap_openpype_logo - self.setMinimumSize( - QtCore.QSize(self._size_w, self._size_h)) - self.setMaximumSize( - QtCore.QSize(self._size_w + 100, self._size_h + 500)) - - # style for normal console text - self.default_console_style = QtGui.QTextCharFormat() - # self.default_console_style.setFontPointSize(0.1) - self.default_console_style.setForeground( - QtGui.QColor.fromRgb(72, 200, 150)) - - # style for error console text - self.error_console_style = QtGui.QTextCharFormat() - # self.error_console_style.setFontPointSize(0.1) - self.error_console_style.setForeground( - QtGui.QColor.fromRgb(184, 54, 19)) - - QtGui.QFontDatabase.addApplicationFont( - os.path.join( - os.path.dirname(__file__), 'RobotoMono-Regular.ttf') - ) - self._openpype_run_ready = False + self._secure_registry = secure_registry + self._controls_disabled = False + self._install_thread = None + self.resize(QtCore.QSize(self._width, self._height)) self._init_ui() + # Set stylesheet + self.setStyleSheet(load_stylesheet()) + + # Trigger Mongo URL validation + self._mongo_input.setText(self.mongo_url) + def _init_ui(self): # basic visual style - dark background, light text - self.setStyleSheet(""" - color: rgb(200, 200, 200); - background-color: rgb(23, 23, 23); - """) - - main = QtWidgets.QVBoxLayout(self) # Main info # -------------------------------------------------------------------- - self.main_label = QtWidgets.QLabel( - """Welcome to OpenPype -

- We've detected OpenPype is not configured yet. But don't worry, - this is as easy as setting one or two things. -

- """) - self.main_label.setWordWrap(True) - self.main_label.setStyleSheet("color: rgb(200, 200, 200);") - - # OpenPype path info - # -------------------------------------------------------------------- - - self.openpype_path_label = QtWidgets.QLabel( - """This is Path to studio location where OpenPype versions - are stored. It will be pre-filled if your MongoDB connection is - already set and your studio defined this location. -

- Leave it empty if you want to install OpenPype version that - comes with this installation. -

-

- If you want to just try OpenPype without installing, hit the - middle button that states "run without installation". -

- """ - ) - - self.openpype_path_label.setWordWrap(True) - self.openpype_path_label.setStyleSheet("color: rgb(150, 150, 150);") - - # Path/Url box | Select button - # -------------------------------------------------------------------- - - input_layout = QtWidgets.QHBoxLayout() - - input_layout.setContentsMargins(0, 10, 0, 10) - self.user_input = FocusHandlingLineEdit() - - self.user_input.setPlaceholderText("Path to OpenPype versions") - self.user_input.textChanged.connect(self._path_changed) - self.user_input.setStyleSheet( - ("color: rgb(233, 233, 233);" - "background-color: rgb(64, 64, 64);" - "padding: 0.5em;" - "border: 1px solid rgb(32, 32, 32);") - ) - - self.user_input.setValidator(PathValidator(self.user_input)) - - self._btn_select = QtWidgets.QPushButton("Select") - self._btn_select.setToolTip( - "Select OpenPype repository" - ) - self._btn_select.setStyleSheet( - ("color: rgb(64, 64, 64);" - "background-color: rgb(72, 200, 150);" - "padding: 0.5em;") - ) - self._btn_select.setMaximumSize(100, 140) - self._btn_select.clicked.connect(self._on_select_clicked) - - input_layout.addWidget(self.user_input) - input_layout.addWidget(self._btn_select) + main_label = QtWidgets.QLabel("Welcome to OpenPype", self) + main_label.setWordWrap(True) + main_label.setObjectName("MainLabel") # Mongo box | OK button # -------------------------------------------------------------------- - - self.mongo_label = QtWidgets.QLabel( - """Enter URL for running MongoDB instance:""" + mongo_input = MongoUrlInput(self) + mongo_input.setPlaceholderText( + "Enter your database Address. 
Example: mongodb://192.168.1.10:2707" ) - self.mongo_label.setWordWrap(True) - self.mongo_label.setStyleSheet("color: rgb(150, 150, 150);") + mongo_messages_widget = QtWidgets.QWidget(self) - class MongoWidget(QtWidgets.QWidget): - """Widget to input mongodb URL.""" - - def __init__(self, parent=None): - self._btn_mongo = None - super(MongoWidget, self).__init__(parent) - mongo_layout = QtWidgets.QHBoxLayout() - mongo_layout.setContentsMargins(0, 0, 0, 0) - self._mongo_input = FocusHandlingLineEdit() - self._mongo_input.setPlaceholderText("Mongo URL") - self._mongo_input.textChanged.connect(self._mongo_changed) - self._mongo_input.focusIn.connect(self._focus_in) - self._mongo_input.focusOut.connect(self._focus_out) - self._mongo_input.setValidator( - MongoValidator(self._mongo_input)) - self._mongo_input.setStyleSheet( - ("color: rgb(233, 233, 233);" - "background-color: rgb(64, 64, 64);" - "padding: 0.5em;" - "border: 1px solid rgb(32, 32, 32);") - ) - - mongo_layout.addWidget(self._mongo_input) - self.setLayout(mongo_layout) - - def _focus_out(self): - self.validate_url() - - def _focus_in(self): - self._mongo_input.setStyleSheet( - """ - background-color: rgb(32, 32, 19); - color: rgb(255, 190, 15); - padding: 0.5em; - border: 1px solid rgb(64, 64, 32); - """ - ) - - def _mongo_changed(self, mongo: str): - self.parent().mongo_url = mongo - - def get_mongo_url(self) -> str: - """Helper to get url from parent.""" - return self.parent().mongo_url - - def set_mongo_url(self, mongo: str): - """Helper to set url to parent. - - Args: - mongo (str): mongodb url string. 
- - """ - self._mongo_input.setText(mongo) - - def set_valid(self): - """Set valid state on mongo url input.""" - self._mongo_input.setStyleSheet( - """ - background-color: rgb(19, 19, 19); - color: rgb(64, 230, 132); - padding: 0.5em; - border: 1px solid rgb(32, 64, 32); - """ - ) - self.parent().install_button.setEnabled(True) - - def set_invalid(self): - """Set invalid state on mongo url input.""" - self._mongo_input.setStyleSheet( - """ - background-color: rgb(32, 19, 19); - color: rgb(255, 69, 0); - padding: 0.5em; - border: 1px solid rgb(64, 32, 32); - """ - ) - self.parent().install_button.setEnabled(False) - - def set_read_only(self, state: bool): - """Set input read-only.""" - self._mongo_input.setReadOnly(state) - - def validate_url(self) -> bool: - """Validate if entered url is ok. - - Returns: - True if url is valid monogo string. - - """ - if self.parent().mongo_url == "": - return False - - is_valid, reason_str = validate_mongo_connection( - self.parent().mongo_url - ) - if not is_valid: - self.set_invalid() - self.parent().update_console(f"!!! 
{reason_str}", True) - return False - else: - self.set_valid() - return True - - self._mongo = MongoWidget(self) - if self.mongo_url: - self._mongo.set_mongo_url(self.mongo_url) - - # Bottom button bar - # -------------------------------------------------------------------- - bottom_widget = QtWidgets.QWidget() - bottom_layout = QtWidgets.QHBoxLayout() - openpype_logo_label = QtWidgets.QLabel("openpype logo") - openpype_logo = QtGui.QPixmap(self._icon_path) - # openpype_logo.scaled( - # openpype_logo_label.width(), - # openpype_logo_label.height(), QtCore.Qt.KeepAspectRatio) - openpype_logo_label.setPixmap(openpype_logo) - openpype_logo_label.setContentsMargins(10, 0, 0, 10) - - # install button - - - - - - - - - - - - - - - - - - - - - - - - - - - - self.install_button = QtWidgets.QPushButton("Install") - self.install_button.setStyleSheet( - ("color: rgb(64, 64, 64);" - "background-color: rgb(72, 200, 150);" - "padding: 0.5em;") + mongo_connection_msg = QtWidgets.QLabel(mongo_messages_widget) + mongo_connection_msg.setVisible(True) + mongo_connection_msg.setTextInteractionFlags( + QtCore.Qt.TextSelectableByMouse ) - self.install_button.setMinimumSize(64, 24) - self.install_button.setToolTip("Install OpenPype") - self.install_button.clicked.connect(self._on_ok_clicked) - # run from current button - - - - - - - - - - - - - - - - - - - - - - - self.run_button = QtWidgets.QPushButton("Run without installation") - self.run_button.setStyleSheet( - ("color: rgb(64, 64, 64);" - "background-color: rgb(200, 164, 64);" - "padding: 0.5em;") - ) - self.run_button.setMinimumSize(64, 24) - self.run_button.setToolTip("Run without installing Pype") - self.run_button.clicked.connect(self._on_run_clicked) - - # install button - - - - - - - - - - - - - - - - - - - - - - - - - - - - self._exit_button = QtWidgets.QPushButton("Exit") - self._exit_button.setStyleSheet( - ("color: rgb(64, 64, 64);" - "background-color: rgb(128, 128, 128);" - "padding: 0.5em;") - ) - 
self._exit_button.setMinimumSize(64, 24) - self._exit_button.setToolTip("Exit") - self._exit_button.clicked.connect(self._on_exit_clicked) - - bottom_layout.setContentsMargins(0, 10, 10, 0) - bottom_layout.setAlignment(QtCore.Qt.AlignVCenter) - bottom_layout.addWidget(openpype_logo_label, 0, QtCore.Qt.AlignVCenter) - bottom_layout.addStretch(1) - bottom_layout.addWidget(self.install_button, 0, QtCore.Qt.AlignVCenter) - bottom_layout.addWidget(self.run_button, 0, QtCore.Qt.AlignVCenter) - bottom_layout.addWidget(self._exit_button, 0, QtCore.Qt.AlignVCenter) - - bottom_widget.setLayout(bottom_layout) - bottom_widget.setStyleSheet("background-color: rgb(32, 32, 32);") - - # Console label - # -------------------------------------------------------------------- - self._status_label = QtWidgets.QLabel("Console:") - self._status_label.setContentsMargins(0, 10, 0, 10) - self._status_label.setStyleSheet("color: rgb(61, 115, 97);") - - # Console - # -------------------------------------------------------------------- - self._status_box = QtWidgets.QPlainTextEdit() - self._status_box.setReadOnly(True) - self._status_box.setCurrentCharFormat(self.default_console_style) - self._status_box.setStyleSheet( - """QPlainTextEdit { - background-color: rgb(32, 32, 32); - color: rgb(72, 200, 150); - font-family: "Roboto Mono"; - font-size: 0.5em; - border: 1px solid rgb(48, 48, 48); - } - QScrollBar:vertical { - border: 1px solid rgb(61, 115, 97); - background: #000; - width:5px; - margin: 0px 0px 0px 0px; - } - QScrollBar::handle:vertical { - background: rgb(72, 200, 150); - min-height: 0px; - } - QScrollBar::sub-page:vertical { - background: rgb(31, 62, 50); - } - QScrollBar::add-page:vertical { - background: rgb(31, 62, 50); - } - QScrollBar::add-line:vertical { - background: rgb(72, 200, 150); - height: 0px; - subcontrol-position: bottom; - subcontrol-origin: margin; - } - QScrollBar::sub-line:vertical { - background: rgb(72, 200, 150); - height: 0 px; - subcontrol-position: top; - 
subcontrol-origin: margin; - } - """ - ) + mongo_messages_layout = QtWidgets.QVBoxLayout(mongo_messages_widget) + mongo_messages_layout.setContentsMargins(0, 0, 0, 0) + mongo_messages_layout.addWidget(mongo_connection_msg) # Progress bar # -------------------------------------------------------------------- - self._progress_bar = QtWidgets.QProgressBar() - self._progress_bar.setValue(0) - self._progress_bar.setAlignment(QtCore.Qt.AlignCenter) - self._progress_bar.setTextVisible(False) - # setting font and the size - self._progress_bar.setFont(QtGui.QFont('Arial', 7)) - self._progress_bar.setStyleSheet( - """QProgressBar:horizontal { - height: 5px; - border: 1px solid rgb(31, 62, 50); - color: rgb(72, 200, 150); - } - QProgressBar::chunk:horizontal { - background-color: rgb(72, 200, 150); - } - """ + progress_bar = NiceProgressBar(self) + progress_bar.setAlignment(QtCore.Qt.AlignCenter) + progress_bar.setTextVisible(False) + + # Console + # -------------------------------------------------------------------- + console_widget = ConsoleWidget(self) + + # Bottom button bar + # -------------------------------------------------------------------- + bottom_widget = QtWidgets.QWidget(self) + + btns_widget = QtWidgets.QWidget(bottom_widget) + + openpype_logo_label = QtWidgets.QLabel("openpype logo", bottom_widget) + openpype_logo_label.setPixmap(self._pixmap_openpype_logo) + + run_button = ButtonWithOptions( + self.commands, + btns_widget ) + run_button.setMinimumSize(64, 24) + run_button.setToolTip("Run OpenPype") + + # install button - - - - - - - - - - - - - - - - - - - - - - - - - - - + exit_button = QtWidgets.QPushButton("Exit", btns_widget) + exit_button.setObjectName("ExitBtn") + exit_button.setFlat(True) + exit_button.setMinimumSize(64, 24) + exit_button.setToolTip("Exit") + + btns_layout = QtWidgets.QHBoxLayout(btns_widget) + btns_layout.setContentsMargins(0, 0, 0, 0) + btns_layout.addWidget(run_button, 0) + btns_layout.addWidget(exit_button, 0) + + bottom_layout = 
QtWidgets.QHBoxLayout(bottom_widget) + bottom_layout.setContentsMargins(0, 0, 0, 0) + bottom_layout.setAlignment(QtCore.Qt.AlignHCenter) + bottom_layout.addWidget(openpype_logo_label, 0) + bottom_layout.addStretch(1) + bottom_layout.addWidget(btns_widget, 0) + # add all to main - main.addWidget(self.main_label, 0) - main.addWidget(self.openpype_path_label, 0) - main.addLayout(input_layout, 0) - main.addWidget(self.mongo_label, 0) - main.addWidget(self._mongo, 0) + main = QtWidgets.QVBoxLayout(self) + main.addSpacing(15) + main.addWidget(main_label, 0) + main.addSpacing(15) + main.addWidget(mongo_input, 0) + main.addWidget(mongo_messages_widget, 0) - main.addWidget(self._status_label, 0) - main.addWidget(self._status_box, 1) + main.addWidget(progress_bar, 0) + main.addSpacing(15) + + main.addWidget(console_widget, 1) - main.addWidget(self._progress_bar, 0) main.addWidget(bottom_widget, 0) - self.setLayout(main) + run_button.option_clicked.connect(self._on_run_btn_click) + exit_button.clicked.connect(self._on_exit_clicked) + mongo_input.textChanged.connect(self._on_mongo_url_change) - # if mongo url is ok, try to get openpype path from there - if self._mongo.validate_url() and len(self.path) == 0: - self.path = get_openpype_path_from_db(self.mongo_url) - self.user_input.setText(self.path) + self._console_widget = console_widget - def _on_select_clicked(self): - """Show directory dialog.""" - options = QtWidgets.QFileDialog.Options() - options |= QtWidgets.QFileDialog.DontUseNativeDialog - options |= QtWidgets.QFileDialog.ShowDirsOnly + self.main_label = main_label - result = QtWidgets.QFileDialog.getExistingDirectory( - parent=self, - caption='Select path', - directory=os.getcwd(), - options=options) + self._mongo_input = mongo_input - if not result: + self._mongo_connection_msg = mongo_connection_msg + + self._run_button = run_button + self._exit_button = exit_button + self._progress_bar = progress_bar + + def _on_run_btn_click(self, option): + # Disable buttons + 
self._disable_buttons() + # Set progress to any value + self._update_progress(1) + self._progress_bar.repaint() + # Add label to show that is connecting to mongo + self.set_invalid_mongo_connection(self.mongo_url, True) + + # Process events to repaint changes + QtWidgets.QApplication.processEvents() + + if not self.validate_url(): + self._enable_buttons() + self._update_progress(0) + # Update any messages + self._mongo_input.setText(self.mongo_url) return - filename = QtCore.QDir.toNativeSeparators(result) - - if os.path.isdir(filename): - self.path = filename - self.user_input.setText(filename) - - def _on_run_clicked(self): - valid, reason = validate_mongo_connection( - self._mongo.get_mongo_url() - ) - if not valid: - self._mongo.set_invalid() - self.update_console(f"!!! {reason}", True) - return + if option == "run": + self._run_openpype() + elif option == "run_from_code": + self._run_openpype_from_code() else: - self._mongo.set_valid() + raise AssertionError("BUG: Unknown variant \"{}\"".format(option)) + + def _run_openpype_from_code(self): + os.environ["OPENPYPE_MONGO"] = self.mongo_url + try: + self._secure_registry.set_item("openPypeMongo", self.mongo_url) + except ValueError: + print("Couldn't save Mongo URL to keyring") self.done(2) - def _on_ok_clicked(self): + def _run_openpype(self): """Start install process. This will once again validate entered path and mongo if ok, start working thread that will do actual job. """ - valid, reason = validate_mongo_connection( - self._mongo.get_mongo_url() - ) - if not valid: - self._mongo.set_invalid() - self.update_console(f"!!! 
{reason}", True) - return - else: - self._mongo.set_valid() - - if self._openpype_run_ready: - self.done(3) + # Check if install thread is not already running + if self._install_thread and self._install_thread.isRunning(): return - if self.path and len(self.path) > 0: - valid, reason = validate_path_string(self.path) + self._mongo_input.set_valid() - if not valid: - self.update_console(f"!!! {reason}", True) - return + install_thread = InstallThread(self) + install_thread.message.connect(self.update_console) + install_thread.progress.connect(self._update_progress) + install_thread.finished.connect(self._installation_finished) + install_thread.set_mongo(self.mongo_url) - self._disable_buttons() - self._install_thread = InstallThread( - self.install_result_callback_handler, self) - self._install_thread.message.connect(self.update_console) - self._install_thread.progress.connect(self._update_progress) - self._install_thread.finished.connect(self._enable_buttons) - self._install_thread.set_path(self.path) - self._install_thread.set_mongo(self._mongo.get_mongo_url()) - self._install_thread.start() + self._install_thread = install_thread - def install_result_callback_handler(self, result: InstallResult): - """Change button behaviour based on installation outcome.""" - status = result.status + install_thread.start() + + def _installation_finished(self): + status = self._install_thread.result() if status >= 0: - self.install_button.setText("Run installed OpenPype") - self._openpype_run_ready = True + self._update_progress(100) + QtWidgets.QApplication.processEvents() + self.done(3) + else: + self._enable_buttons() + self._show_console() def _update_progress(self, progress: int): self._progress_bar.setValue(progress) + text_visible = self._progress_bar.isTextVisible() + if progress == 0: + if text_visible: + self._progress_bar.setTextVisible(False) + elif not text_visible: + self._progress_bar.setTextVisible(True) def _on_exit_clicked(self): self.reject() - def 
_path_changed(self, path: str) -> str: - """Set path.""" - self.path = path - return path + def _on_mongo_url_change(self, new_value): + # Strip the value + new_value = new_value.strip() + # Store new mongo url to variable + self.mongo_url = new_value + + msg = None + # Change style of input + if not new_value: + self._mongo_input.remove_state() + elif not self.mongo_url_regex.match(new_value): + self._mongo_input.set_invalid() + msg = ( + "Mongo URL should start with" + " \"mongodb://\" or \"mongodb+srv://\"" + ) + else: + self._mongo_input.set_valid() + + self.set_invalid_mongo_url(msg) + + def validate_url(self): + """Validate if entered url is ok. + + Returns: + True if url is valid monogo string. + + """ + if self.mongo_url == "": + return False + + is_valid, reason_str = validate_mongo_connection(self.mongo_url) + if not is_valid: + self.set_invalid_mongo_connection(self.mongo_url) + self._mongo_input.set_invalid() + self.update_console(f"!!! {reason_str}", True) + return False + + self.set_invalid_mongo_connection(None) + self._mongo_input.set_valid() + return True + + def set_invalid_mongo_url(self, reason): + if reason is None: + self._mongo_connection_msg.setText("") + else: + self._mongo_connection_msg.setText("- {}".format(reason)) + + def set_invalid_mongo_connection(self, mongo_url, connecting=False): + if mongo_url is None: + self.set_invalid_mongo_url(mongo_url) + return + + if connecting: + msg = "Connecting to: {}".format(mongo_url) + else: + msg = "Can't connect to: {}".format(mongo_url) + + self.set_invalid_mongo_url(msg) def update_console(self, msg: str, error: bool = False) -> None: """Display message in console. @@ -523,26 +503,22 @@ class InstallDialog(QtWidgets.QDialog): msg (str): message. error (bool): if True, print it red. 
""" - if not error: - self._status_box.setCurrentCharFormat(self.default_console_style) - else: - self._status_box.setCurrentCharFormat(self.error_console_style) - self._status_box.appendPlainText(msg) + self._console_widget.update_console(msg, error) + + def _show_console(self): + self._console_widget.show_console() + self.updateGeometry() def _disable_buttons(self): """Disable buttons so user interaction doesn't interfere.""" - self._btn_select.setEnabled(False) - self.run_button.setEnabled(False) self._exit_button.setEnabled(False) - self.install_button.setEnabled(False) + self._run_button.setEnabled(False) self._controls_disabled = True def _enable_buttons(self): """Enable buttons after operation is complete.""" - self._btn_select.setEnabled(True) - self.run_button.setEnabled(True) self._exit_button.setEnabled(True) - self.install_button.setEnabled(True) + self._run_button.setEnabled(True) self._controls_disabled = False def closeEvent(self, event): # noqa @@ -552,212 +528,6 @@ class InstallDialog(QtWidgets.QDialog): return super(InstallDialog, self).closeEvent(event) -class MongoValidator(QValidator): - """Validate mongodb url for Qt widgets.""" - - def __init__(self, parent=None, intermediate=False): - self.parent = parent - self.intermediate = intermediate - self._validate_lock = False - self.timer = QTimer() - self.timer.timeout.connect(self._unlock_validator) - super().__init__(parent) - - def _unlock_validator(self): - self._validate_lock = False - - def _return_state( - self, state: QValidator.State, reason: str, mongo: str): - """Set stylesheets and actions on parent based on state. 
- - Warning: - This will always return `QValidator.State.Acceptable` as - anything different will stop input to `QLineEdit` - - """ - - if state == QValidator.State.Invalid: - self.parent.setToolTip(reason) - self.parent.setStyleSheet( - """ - background-color: rgb(32, 19, 19); - color: rgb(255, 69, 0); - padding: 0.5em; - border: 1px solid rgb(64, 32, 32); - """ - ) - elif state == QValidator.State.Intermediate and self.intermediate: - self.parent.setToolTip(reason) - self.parent.setStyleSheet( - """ - background-color: rgb(32, 32, 19); - color: rgb(255, 190, 15); - padding: 0.5em; - border: 1px solid rgb(64, 64, 32); - """ - ) - else: - self.parent.setToolTip(reason) - self.parent.setStyleSheet( - """ - background-color: rgb(19, 19, 19); - color: rgb(64, 230, 132); - padding: 0.5em; - border: 1px solid rgb(32, 64, 32); - """ - ) - - return QValidator.State.Acceptable, mongo, len(mongo) - - def validate(self, mongo: str, pos: int) -> (QValidator.State, str, int): # noqa - """Validate entered mongodb connection string. - - As url (it should start with `mongodb://` or - `mongodb+srv:// url schema. - - Args: - mongo (str): connection string url. - pos (int): current position. - - Returns: - (QValidator.State.Acceptable, str, int): - Indicate input state with color and always return - Acceptable state as we need to be able to edit input further. - - """ - if not mongo.startswith("mongodb"): - return self._return_state( - QValidator.State.Invalid, "need mongodb schema", mongo) - - return self._return_state( - QValidator.State.Intermediate, "", mongo) - - -class PathValidator(MongoValidator): - """Validate mongodb url for Qt widgets.""" - - def validate(self, path: str, pos: int) -> (QValidator.State, str, int): # noqa - """Validate path to be accepted by Igniter. - - Args: - path (str): path to OpenPype. - pos (int): current position. 
- - Returns: - (QValidator.State.Acceptable, str, int): - Indicate input state with color and always return - Acceptable state as we need to be able to edit input further. - - """ - # allow empty path as that will use current version coming with - # OpenPype Igniter - if len(path) == 0: - return self._return_state( - QValidator.State.Acceptable, "Use version with Igniter", path) - - if len(path) > 3: - valid, reason = validate_path_string(path) - if not valid: - return self._return_state( - QValidator.State.Invalid, reason, path) - else: - return self._return_state( - QValidator.State.Acceptable, reason, path) - - -class CollapsibleWidget(QtWidgets.QWidget): - """Collapsible widget to hide mongo url in necessary.""" - - def __init__(self, parent=None, title: str = "", animation: int = 300): - self._mainLayout = QtWidgets.QGridLayout(parent) - self._toggleButton = QtWidgets.QToolButton(parent) - self._headerLine = QtWidgets.QFrame(parent) - self._toggleAnimation = QtCore.QParallelAnimationGroup(parent) - self._contentArea = QtWidgets.QScrollArea(parent) - self._animation = animation - self._title = title - super(CollapsibleWidget, self).__init__(parent) - self._init_ui() - - def _init_ui(self): - self._toggleButton.setStyleSheet( - """QToolButton { - border: none; - } - """) - self._toggleButton.setToolButtonStyle( - QtCore.Qt.ToolButtonTextBesideIcon) - - self._toggleButton.setArrowType(QtCore.Qt.ArrowType.RightArrow) - self._toggleButton.setText(self._title) - self._toggleButton.setCheckable(True) - self._toggleButton.setChecked(False) - - self._headerLine.setFrameShape(QtWidgets.QFrame.HLine) - self._headerLine.setFrameShadow(QtWidgets.QFrame.Sunken) - self._headerLine.setSizePolicy(QtWidgets.QSizePolicy.Expanding, - QtWidgets.QSizePolicy.Maximum) - - self._contentArea.setStyleSheet( - """QScrollArea { - background-color: rgb(32, 32, 32); - border: none; - } - """) - self._contentArea.setSizePolicy(QtWidgets.QSizePolicy.Expanding, - QtWidgets.QSizePolicy.Fixed) - 
self._contentArea.setMaximumHeight(0) - self._contentArea.setMinimumHeight(0) - - self._toggleAnimation.addAnimation( - QtCore.QPropertyAnimation(self, b"minimumHeight")) - self._toggleAnimation.addAnimation( - QtCore.QPropertyAnimation(self, b"maximumHeight")) - self._toggleAnimation.addAnimation( - QtCore.QPropertyAnimation(self._contentArea, b"maximumHeight")) - - self._mainLayout.setVerticalSpacing(0) - self._mainLayout.setContentsMargins(0, 0, 0, 0) - - row = 0 - - self._mainLayout.addWidget( - self._toggleButton, row, 0, 1, 1, QtCore.Qt.AlignCenter) - self._mainLayout.addWidget( - self._headerLine, row, 2, 1, 1) - row += row - self._mainLayout.addWidget(self._contentArea, row, 0, 1, 3) - self.setLayout(self._mainLayout) - - self._toggleButton.toggled.connect(self._toggle_action) - - def _toggle_action(self, collapsed: bool): - arrow = QtCore.Qt.ArrowType.DownArrow if collapsed else QtCore.Qt.ArrowType.RightArrow # noqa: E501 - direction = QtCore.QAbstractAnimation.Forward if collapsed else QtCore.QAbstractAnimation.Backward # noqa: E501 - self._toggleButton.setArrowType(arrow) - self._toggleAnimation.setDirection(direction) - self._toggleAnimation.start() - - def setContentLayout(self, content_layout: QtWidgets.QLayout): # noqa - self._contentArea.setLayout(content_layout) - collapsed_height = \ - self.sizeHint().height() - self._contentArea.maximumHeight() - content_height = self._contentArea.sizeHint().height() - - for i in range(self._toggleAnimation.animationCount() - 1): - sec_anim = self._toggleAnimation.animationAt(i) - sec_anim.setDuration(self._animation) - sec_anim.setStartValue(collapsed_height) - sec_anim.setEndValue(collapsed_height + content_height) - - con_anim = self._toggleAnimation.animationAt( - self._toggleAnimation.animationCount() - 1) - - con_anim.setDuration(self._animation) - con_anim.setStartValue(0) - con_anim.setEndValue(collapsed_height + content_height) - - if __name__ == "__main__": app = QtWidgets.QApplication(sys.argv) d = 
InstallDialog() diff --git a/igniter/install_thread.py b/igniter/install_thread.py index df8b830209..383012b88b 100644 --- a/igniter/install_thread.py +++ b/igniter/install_thread.py @@ -17,12 +17,6 @@ from .bootstrap_repos import ( from .tools import validate_mongo_connection -class InstallResult(QObject): - """Used to pass results back.""" - def __init__(self, value): - self.status = value - - class InstallThread(QThread): """Install Worker thread. @@ -36,15 +30,22 @@ class InstallThread(QThread): """ progress = Signal(int) message = Signal((str, bool)) - finished = Signal(object) - def __init__(self, callback, parent=None,): + def __init__(self, parent=None,): self._mongo = None self._path = None - self.result_callback = callback + self._result = None QThread.__init__(self, parent) - self.finished.connect(callback) + + def result(self): + """Result of finished installation.""" + return self._result + + def _set_result(self, value): + if self._result is not None: + raise AssertionError("BUG: Result was set more than once!") + self._result = value def run(self): """Thread entry point. @@ -76,7 +77,7 @@ class InstallThread(QThread): except ValueError: self.message.emit( "!!! 
We need MongoDB URL to proceed.", True) - self.finished.emit(InstallResult(-1)) + self._set_result(-1) return else: self._mongo = os.getenv("OPENPYPE_MONGO") @@ -101,7 +102,7 @@ class InstallThread(QThread): self.message.emit("Skipping OpenPype install ...", False) if detected[-1].path.suffix.lower() == ".zip": bs.extract_openpype(detected[-1]) - self.finished.emit(InstallResult(0)) + self._set_result(0) return if OpenPypeVersion(version=local_version).get_main_version() == detected[-1].get_main_version(): # noqa @@ -110,7 +111,7 @@ class InstallThread(QThread): f"currently running {local_version}" ), False) self.message.emit("Skipping OpenPype install ...", False) - self.finished.emit(InstallResult(0)) + self._set_result(0) return self.message.emit(( @@ -126,13 +127,13 @@ class InstallThread(QThread): if not openpype_version: self.message.emit( f"!!! Install failed - {openpype_version}", True) - self.finished.emit(InstallResult(-1)) + self._set_result(-1) return self.message.emit(f"Using: {openpype_version}", False) bs.install_version(openpype_version) self.message.emit(f"Installed as {openpype_version}", False) self.progress.emit(100) - self.finished.emit(InstallResult(1)) + self._set_result(1) return else: self.message.emit("None detected.", False) @@ -144,7 +145,7 @@ class InstallThread(QThread): if not local_openpype: self.message.emit( f"!!! 
Install failed - {local_openpype}", True) - self.finished.emit(InstallResult(-1)) + self._set_result(-1) return try: @@ -154,11 +155,12 @@ class InstallThread(QThread): OpenPypeVersionIOError) as e: self.message.emit(f"Installed failed: ", True) self.message.emit(str(e), True) - self.finished.emit(InstallResult(-1)) + self._set_result(-1) return self.message.emit(f"Installed as {local_openpype}", False) self.progress.emit(100) + self._set_result(1) return else: # if we have mongo connection string, validate it, set it to @@ -167,7 +169,7 @@ class InstallThread(QThread): if not validate_mongo_connection(self._mongo): self.message.emit( f"!!! invalid mongo url {self._mongo}", True) - self.finished.emit(InstallResult(-1)) + self._set_result(-1) return bs.secure_registry.set_item("openPypeMongo", self._mongo) os.environ["OPENPYPE_MONGO"] = self._mongo @@ -177,11 +179,11 @@ class InstallThread(QThread): if not repo_file: self.message.emit("!!! Cannot install", True) - self.finished.emit(InstallResult(-1)) + self._set_result(-1) return self.progress.emit(100) - self.finished.emit(InstallResult(1)) + self._set_result(1) return def set_path(self, path: str) -> None: diff --git a/igniter/openpype.icns b/igniter/openpype.icns new file mode 100644 index 0000000000..792f819ad9 Binary files /dev/null and b/igniter/openpype.icns differ diff --git a/igniter/stylesheet.css b/igniter/stylesheet.css new file mode 100644 index 0000000000..8df2621d83 --- /dev/null +++ b/igniter/stylesheet.css @@ -0,0 +1,280 @@ +*{ + font-size: 10pt; + font-family: "Poppins"; +} + +QWidget { + color: #bfccd6; + background-color: #282C34; + border-radius: 0px; +} + +QMenu { + border: 1px solid #555555; + background-color: #21252B; +} + +QMenu::item { + padding: 5px 10px 5px 10px; + border-left: 5px solid #313741;; +} + +QMenu::item:selected { + border-left-color: rgb(84, 209, 178); + background-color: #222d37; +} + +QLineEdit, QPlainTextEdit { + border: 1px solid #464b54; + border-radius: 3px; + 
background-color: #21252B; + padding: 0.5em; +} + +QLineEdit[state="valid"] { + background-color: rgb(19, 19, 19); + color: rgb(64, 230, 132); + border-color: rgb(32, 64, 32); +} + +QLineEdit[state="invalid"] { + background-color: rgb(32, 19, 19); + color: rgb(255, 69, 0); + border-color: rgb(64, 32, 32); +} + +QLabel { + background: transparent; + color: #969b9e; +} + +QLabel:hover {color: #b8c1c5;} + +QPushButton { + border: 1px solid #aaaaaa; + border-radius: 3px; + padding: 5px; +} + +QPushButton:hover { + background-color: #333840; + border: 1px solid #fff; + color: #fff; +} + +QTableView { + border: 1px solid #444; + gridline-color: #6c6c6c; + background-color: #201F1F; + alternate-background-color:#21252B; +} + +QTableView::item:pressed, QListView::item:pressed, QTreeView::item:pressed { + background: #78879b; + color: #FFFFFF; +} + +QTableView::item:selected:active, QTreeView::item:selected:active, QListView::item:selected:active { + background: #3d8ec9; +} + +QProgressBar { + border: 1px solid grey; + border-radius: 10px; + color: #222222; + font-weight: bold; +} +QProgressBar:horizontal { + height: 20px; +} + +QProgressBar::chunk { + border-radius: 10px; + background-color: qlineargradient( + x1: 0, + y1: 0.5, + x2: 1, + y2: 0.5, + stop: 0 rgb(72, 200, 150), + stop: 1 rgb(82, 172, 215) + ); +} + + +QScrollBar:horizontal { + height: 15px; + margin: 3px 15px 3px 15px; + border: 1px transparent #21252B; + border-radius: 4px; + background-color: #21252B; +} + +QScrollBar::handle:horizontal { + background-color: #4B5362; + min-width: 5px; + border-radius: 4px; +} + +QScrollBar::add-line:horizontal { + margin: 0px 3px 0px 3px; + border-image: url(:/qss_icons/rc/right_arrow_disabled.png); + width: 10px; + height: 10px; + subcontrol-position: right; + subcontrol-origin: margin; +} + +QScrollBar::sub-line:horizontal { + margin: 0px 3px 0px 3px; + border-image: url(:/qss_icons/rc/left_arrow_disabled.png); + height: 10px; + width: 10px; + subcontrol-position: left; 
+ subcontrol-origin: margin; +} + +QScrollBar::add-line:horizontal:hover,QScrollBar::add-line:horizontal:on { + border-image: url(:/qss_icons/rc/right_arrow.png); + height: 10px; + width: 10px; + subcontrol-position: right; + subcontrol-origin: margin; +} + +QScrollBar::sub-line:horizontal:hover, QScrollBar::sub-line:horizontal:on { + border-image: url(:/qss_icons/rc/left_arrow.png); + height: 10px; + width: 10px; + subcontrol-position: left; + subcontrol-origin: margin; +} + +QScrollBar::up-arrow:horizontal, QScrollBar::down-arrow:horizontal { + background: none; +} + +QScrollBar::add-page:horizontal, QScrollBar::sub-page:horizontal { + background: none; +} + +QScrollBar:vertical { + background-color: #21252B; + width: 15px; + margin: 15px 3px 15px 3px; + border: 1px transparent #21252B; + border-radius: 4px; +} + +QScrollBar::handle:vertical { + background-color: #4B5362; + min-height: 5px; + border-radius: 4px; +} + +QScrollBar::sub-line:vertical { + margin: 3px 0px 3px 0px; + border-image: url(:/qss_icons/rc/up_arrow_disabled.png); + height: 10px; + width: 10px; + subcontrol-position: top; + subcontrol-origin: margin; +} + +QScrollBar::add-line:vertical { + margin: 3px 0px 3px 0px; + border-image: url(:/qss_icons/rc/down_arrow_disabled.png); + height: 10px; + width: 10px; + subcontrol-position: bottom; + subcontrol-origin: margin; +} + +QScrollBar::sub-line:vertical:hover,QScrollBar::sub-line:vertical:on { + + border-image: url(:/qss_icons/rc/up_arrow.png); + height: 10px; + width: 10px; + subcontrol-position: top; + subcontrol-origin: margin; +} + + +QScrollBar::add-line:vertical:hover, QScrollBar::add-line:vertical:on { + border-image: url(:/qss_icons/rc/down_arrow.png); + height: 10px; + width: 10px; + subcontrol-position: bottom; + subcontrol-origin: margin; +} + +QScrollBar::up-arrow:vertical, QScrollBar::down-arrow:vertical { + background: none; +} + + +QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { + background: none; +} + +#MainLabel { + 
color: rgb(200, 200, 200); + font-size: 12pt; +} + +#Console { + background-color: #21252B; + color: rgb(72, 200, 150); + font-family: "Roboto Mono"; + font-size: 8pt; +} + +#ExitBtn { + /* `border` must be set to background of flat button is painted .*/ + border: none; + color: rgb(39, 39, 39); + background-color: #828a97; + padding: 0.5em; + font-weight: 400; +} + +#ExitBtn:hover{ + background-color: #b2bece +} +#ExitBtn:disabled { + background-color: rgba(185, 185, 185, 31); + color: rgba(64, 64, 64, 63); +} + +#ButtonWithOptions QPushButton{ + border-top-right-radius: 0px; + border-bottom-right-radius: 0px; + border: none; + background-color: rgb(84, 209, 178); + color: rgb(39, 39, 39); + font-weight: 400; + padding: 0.5em; +} +#ButtonWithOptions QPushButton:hover{ + background-color: rgb(85, 224, 189) +} +#ButtonWithOptions QPushButton:disabled { + background-color: rgba(72, 200, 150, 31); + color: rgba(64, 64, 64, 63); +} + +#ButtonWithOptions QToolButton{ + border: none; + border-top-left-radius: 0px; + border-bottom-left-radius: 0px; + border-top-right-radius: 3px; + border-bottom-right-radius: 3px; + background-color: rgb(84, 209, 178); + color: rgb(39, 39, 39); +} +#ButtonWithOptions QToolButton:hover{ + background-color: rgb(85, 224, 189) +} +#ButtonWithOptions QToolButton:disabled { + background-color: rgba(72, 200, 150, 31); + color: rgba(64, 64, 64, 63); +} diff --git a/igniter/tools.py b/igniter/tools.py index ff2db6bc7e..529d535c25 100644 --- a/igniter/tools.py +++ b/igniter/tools.py @@ -14,7 +14,12 @@ from pathlib import Path import platform from pymongo import MongoClient -from pymongo.errors import ServerSelectionTimeoutError, InvalidURI +from pymongo.errors import ( + ServerSelectionTimeoutError, + InvalidURI, + ConfigurationError, + OperationFailure +) def decompose_url(url: str) -> Dict: @@ -115,30 +120,20 @@ def validate_mongo_connection(cnx: str) -> (bool, str): parsed = urlparse(cnx) if parsed.scheme not in ["mongodb", "mongodb+srv"]: 
return False, "Not mongodb schema" - # we have mongo connection string. Let's try if we can connect. - try: - components = decompose_url(cnx) - except RuntimeError: - return False, f"Invalid port specified." - - mongo_args = { - "host": compose_url(**components), - "serverSelectionTimeoutMS": 2000 - } - port = components.get("port") - if port is not None: - mongo_args["port"] = int(port) try: - client = MongoClient(**mongo_args) + client = MongoClient( + cnx, + serverSelectionTimeoutMS=2000 + ) client.server_info() client.close() except ServerSelectionTimeoutError as e: return False, f"Cannot connect to server {cnx} - {e}" except ValueError: return False, f"Invalid port specified {parsed.port}" - except InvalidURI as e: - return False, str(e) + except (ConfigurationError, OperationFailure, InvalidURI) as exc: + return False, str(exc) else: return True, "Connection is successful" diff --git a/igniter/version.py b/igniter/version.py index 8c8ffdccb7..4f8f0907e9 100644 --- a/igniter/version.py +++ b/igniter/version.py @@ -1,4 +1,4 @@ # -*- coding: utf-8 -*- """Definition of Igniter version.""" -__version__ = "1.0.0-beta" +__version__ = "1.0.0-rc1" diff --git a/inno_setup.iss b/inno_setup.iss new file mode 100644 index 0000000000..ead9907955 --- /dev/null +++ b/inno_setup.iss @@ -0,0 +1,50 @@ +; Script generated by the Inno Setup Script Wizard. +; SEE THE DOCUMENTATION FOR DETAILS ON CREATING INNO SETUP SCRIPT FILES! + + +#define MyAppName "OpenPype" +#define Build GetEnv("BUILD_DIR") +#define AppVer GetEnv("BUILD_VERSION") + + +[Setup] +; NOTE: The value of AppId uniquely identifies this application. Do not use the same AppId value in installers for other applications. +; (To generate a new GUID, click Tools | Generate GUID inside the IDE.) 
+AppId={{B9E9DF6A-5BDA-42DD-9F35-C09D564C4D93} +AppName={#MyAppName} +AppVersion={#AppVer} +AppVerName={#MyAppName} version {#AppVer} +AppPublisher=Orbi Tools s.r.o +AppPublisherURL=http://pype.club +AppSupportURL=http://pype.club +AppUpdatesURL=http://pype.club +DefaultDirName={autopf}\{#MyAppName} +DisableProgramGroupPage=yes +OutputBaseFilename={#MyAppName}-{#AppVer}-install +AllowCancelDuringInstall=yes +; Uncomment the following line to run in non administrative install mode (install for current user only.) +;PrivilegesRequired=lowest +PrivilegesRequiredOverridesAllowed=dialog +SetupIconFile=igniter\openpype.ico +OutputDir=build\ +Compression=lzma +SolidCompression=yes +WizardStyle=modern + +[Languages] +Name: "english"; MessagesFile: "compiler:Default.isl" + +[Tasks] +Name: "desktopicon"; Description: "{cm:CreateDesktopIcon}"; GroupDescription: "{cm:AdditionalIcons}"; Flags: unchecked + +[Files] +Source: "build\{#build}\*"; DestDir: "{app}"; Flags: ignoreversion recursesubdirs createallsubdirs +; NOTE: Don't use "Flags: ignoreversion" on any shared system files + +[Icons] +Name: "{autoprograms}\{#MyAppName}"; Filename: "{app}\openpype_gui.exe" +Name: "{autodesktop}\{#MyAppName}"; Filename: "{app}\openpype_gui.exe"; Tasks: desktopicon + +[Run] +Filename: "{app}\openpype_gui.exe"; Description: "{cm:LaunchProgram,OpenPype}"; Flags: nowait postinstall skipifsilent + diff --git a/openpype/__init__.py b/openpype/__init__.py index edd48a018d..a86d2bc2be 100644 --- a/openpype/__init__.py +++ b/openpype/__init__.py @@ -9,6 +9,7 @@ from .settings import get_project_settings from .lib import ( Anatomy, filter_pyblish_plugins, + set_plugin_attributes_from_settings, change_timer_to_current_context ) @@ -58,44 +59,23 @@ def patched_discover(superclass): # run original discover and get plugins plugins = _original_discover(superclass) - # determine host application to use for finding presets - if avalon.registered_host() is None: - return plugins - host = 
avalon.registered_host().__name__.split(".")[-1] + set_plugin_attributes_from_settings(plugins, superclass) - # map plugin superclass to preset json. Currenly suppoted is load and - # create (avalon.api.Loader and avalon.api.Creator) - plugin_type = "undefined" - if superclass.__name__.split(".")[-1] == "Loader": - plugin_type = "load" - elif superclass.__name__.split(".")[-1] == "Creator": - plugin_type = "create" - - print(">>> Finding presets for {}:{} ...".format(host, plugin_type)) - try: - settings = ( - get_project_settings(os.environ['AVALON_PROJECT']) - [host][plugin_type] - ) - except KeyError: - print("*** no presets found.") - else: - for plugin in plugins: - if plugin.__name__ in settings: - print(">>> We have preset for {}".format(plugin.__name__)) - for option, value in settings[plugin.__name__].items(): - if option == "enabled" and value is False: - setattr(plugin, "active", False) - print(" - is disabled by preset") - else: - setattr(plugin, option, value) - print(" - setting `{}`: `{}`".format(option, value)) return plugins @import_wrapper def install(): """Install Pype to Avalon.""" + from pyblish.lib import MessageHandler + + def modified_emit(obj, record): + """Method replacing `emit` in Pyblish's MessageHandler.""" + record.msg = record.getMessage() + obj.records.append(record) + + MessageHandler.emit = modified_emit + log.info("Registering global plug-ins..") pyblish.register_plugin_path(PUBLISH_PATH) pyblish.register_discovery_filter(filter_pyblish_plugins) diff --git a/openpype/cli.py b/openpype/cli.py index c6da88cbc1..9c49825721 100644 --- a/openpype/cli.py +++ b/openpype/cli.py @@ -224,17 +224,6 @@ def launch(app, project, asset, task, PypeCommands().run_application(app, project, asset, task, tools, arguments) -@main.command() -@click.option("-p", "--path", help="Path to zip file", default=None) -def generate_zip(path): - """Generate Pype zip from current sources. - - If PATH is not provided, it will create zip file in user data dir. 
- - """ - PypeCommands().generate_zip(path) - - @main.command( context_settings=dict( ignore_unknown_options=True, diff --git a/openpype/hosts/aftereffects/api/__init__.py b/openpype/hosts/aftereffects/api/__init__.py index 9a80801652..5f6a64a6d0 100644 --- a/openpype/hosts/aftereffects/api/__init__.py +++ b/openpype/hosts/aftereffects/api/__init__.py @@ -5,7 +5,7 @@ import logging from avalon import io from avalon import api as avalon from avalon.vendor import Qt -from openpype import lib +from openpype import lib, api import pyblish.api as pyblish import openpype.hosts.aftereffects @@ -81,3 +81,35 @@ def uninstall(): def on_pyblish_instance_toggled(instance, old_value, new_value): """Toggle layer visibility on instance toggles.""" instance[0].Visible = new_value + + +def get_asset_settings(): + """Get settings on current asset from database. + + Returns: + dict: Scene data. + + """ + asset_data = lib.get_asset()["data"] + fps = asset_data.get("fps") + frame_start = asset_data.get("frameStart") + frame_end = asset_data.get("frameEnd") + handle_start = asset_data.get("handleStart") + handle_end = asset_data.get("handleEnd") + resolution_width = asset_data.get("resolutionWidth") + resolution_height = asset_data.get("resolutionHeight") + duration = (frame_end - frame_start + 1) + handle_start + handle_end + entity_type = asset_data.get("entityType") + + scene_data = { + "fps": fps, + "frameStart": frame_start, + "frameEnd": frame_end, + "handleStart": handle_start, + "handleEnd": handle_end, + "resolutionWidth": resolution_width, + "resolutionHeight": resolution_height, + "duration": duration + } + + return scene_data diff --git a/openpype/hosts/aftereffects/plugins/create/create_render.py b/openpype/hosts/aftereffects/plugins/create/create_render.py index bb78e89a89..4234ee0f0c 100644 --- a/openpype/hosts/aftereffects/plugins/create/create_render.py +++ b/openpype/hosts/aftereffects/plugins/create/create_render.py @@ -47,6 +47,10 @@ class 
CreateRender(openpype.api.Creator): self.data["members"] = [item.id] self.data["uuid"] = item.id # for SubsetManager + self.data["subset"] = self.data["subset"]\ + .replace(stub.PUBLISH_ICON, '')\ + .replace(stub.LOADED_ICON, '') + stub.imprint(item, self.data) stub.set_label_color(item.id, 14) # Cyan options 0 - 16 stub.rename_item(item.id, stub.PUBLISH_ICON + self.data["subset"]) diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_render.py b/openpype/hosts/aftereffects/plugins/publish/collect_render.py index ba64551283..baac64ed0c 100644 --- a/openpype/hosts/aftereffects/plugins/publish/collect_render.py +++ b/openpype/hosts/aftereffects/plugins/publish/collect_render.py @@ -12,6 +12,7 @@ class AERenderInstance(RenderInstance): # extend generic, composition name is needed comp_name = attr.ib(default=None) comp_id = attr.ib(default=None) + fps = attr.ib(default=None) class CollectAERender(abstract_collect_render.AbstractCollectRender): @@ -45,6 +46,7 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender): raise ValueError("Couldn't find id, unable to publish. 
" + "Please recreate instance.") item_id = inst["members"][0] + work_area_info = self.stub.get_work_area(int(item_id)) if not work_area_info: @@ -57,6 +59,8 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender): frameEnd = round(work_area_info.workAreaStart + float(work_area_info.workAreaDuration) * float(work_area_info.frameRate)) - 1 + fps = work_area_info.frameRate + # TODO add resolution when supported by extension if inst["family"] == "render" and inst["active"]: instance = AERenderInstance( @@ -86,7 +90,8 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender): frameStart=frameStart, frameEnd=frameEnd, frameStep=1, - toBeRenderedOn='deadline' + toBeRenderedOn='deadline', + fps=fps ) comp = compositions_by_id.get(int(item_id)) @@ -102,7 +107,6 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender): instances.append(instance) - self.log.debug("instances::{}".format(instances)) return instances def get_expected_files(self, render_instance): diff --git a/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py b/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py new file mode 100644 index 0000000000..5301a2f3ea --- /dev/null +++ b/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py @@ -0,0 +1,126 @@ +# -*- coding: utf-8 -*- +"""Validate scene settings.""" +import os +import re + +import pyblish.api + +from avalon import aftereffects + +import openpype.hosts.aftereffects.api as api + +stub = aftereffects.stub() + + +class ValidateSceneSettings(pyblish.api.InstancePlugin): + """ + Ensures that Composition Settings (right mouse on comp) are same as + in FTrack on task. + + By default checks only duration - how many frames should be rendered. + Compares: + Frame start - Frame end + 1 from FTrack + against + Duration in Composition Settings. + + If this complains: + Check error message where is discrepancy. 
+ Check FTrack task 'pype' section of task attributes for expected + values. + Check/modify rendered Composition Settings. + + If you know what you are doing run publishing again, uncheck this + validation before Validation phase. + """ + + """ + Dev docu: + Could be configured by 'presets/plugins/aftereffects/publish' + + skip_timelines_check - fill task name for which skip validation of + frameStart + frameEnd + fps + handleStart + handleEnd + skip_resolution_check - fill entity type ('asset') to skip validation + resolutionWidth + resolutionHeight + TODO support in extension is missing for now + + By defaults validates duration (how many frames should be published) + """ + + order = pyblish.api.ValidatorOrder + label = "Validate Scene Settings" + families = ["render.farm"] + hosts = ["aftereffects"] + optional = True + + skip_timelines_check = [".*"] # * >> skip for all + skip_resolution_check = [".*"] + + def process(self, instance): + """Plugin entry point.""" + expected_settings = api.get_asset_settings() + self.log.info("config from DB::{}".format(expected_settings)) + + if any(re.search(pattern, os.getenv('AVALON_TASK')) + for pattern in self.skip_resolution_check): + expected_settings.pop("resolutionWidth") + expected_settings.pop("resolutionHeight") + + if any(re.search(pattern, os.getenv('AVALON_TASK')) + for pattern in self.skip_timelines_check): + expected_settings.pop('fps', None) + expected_settings.pop('frameStart', None) + expected_settings.pop('frameEnd', None) + expected_settings.pop('handleStart', None) + expected_settings.pop('handleEnd', None) + + # handle case where ftrack uses only two decimal places + # 23.976023976023978 vs. 
23.98 + fps = instance.data.get("fps") + if fps: + if isinstance(fps, float): + fps = float( + "{:.2f}".format(fps)) + expected_settings["fps"] = fps + + duration = instance.data.get("frameEndHandle") - \ + instance.data.get("frameStartHandle") + 1 + + self.log.debug("filtered config::{}".format(expected_settings)) + + current_settings = { + "fps": fps, + "frameStartHandle": instance.data.get("frameStartHandle"), + "frameEndHandle": instance.data.get("frameEndHandle"), + "resolutionWidth": instance.data.get("resolutionWidth"), + "resolutionHeight": instance.data.get("resolutionHeight"), + "duration": duration + } + self.log.info("current_settings:: {}".format(current_settings)) + + invalid_settings = [] + for key, value in expected_settings.items(): + if value != current_settings[key]: + invalid_settings.append( + "{} expected: {} found: {}".format(key, value, + current_settings[key]) + ) + + if ((expected_settings.get("handleStart") + or expected_settings.get("handleEnd")) + and invalid_settings): + msg = "Handles included in calculation. Remove handles in DB " +\ + "or extend frame range in Composition Setting." 
+ invalid_settings[-1]["reason"] = msg + + msg = "Found invalid settings:\n{}".format( + "\n".join(invalid_settings) + ) + assert not invalid_settings, msg + assert os.path.exists(instance.data.get("source")), ( + "Scene file not found (saved under wrong name)" + ) diff --git a/openpype/hosts/blender/api/plugin.py b/openpype/hosts/blender/api/plugin.py index eb88e7af63..de30da3319 100644 --- a/openpype/hosts/blender/api/plugin.py +++ b/openpype/hosts/blender/api/plugin.py @@ -9,7 +9,7 @@ from avalon import api import avalon.blender from openpype.api import PypeCreatorMixin -VALID_EXTENSIONS = [".blend", ".json"] +VALID_EXTENSIONS = [".blend", ".json", ".abc"] def asset_name( diff --git a/openpype/hosts/blender/hooks/pre_pyside_install.py b/openpype/hosts/blender/hooks/pre_pyside_install.py index 088a27566d..6d253300d9 100644 --- a/openpype/hosts/blender/hooks/pre_pyside_install.py +++ b/openpype/hosts/blender/hooks/pre_pyside_install.py @@ -1,4 +1,5 @@ import os +import re import subprocess from openpype.lib import PreLaunchHook @@ -31,10 +32,46 @@ class InstallPySideToBlender(PreLaunchHook): def inner_execute(self): # Get blender's python directory + version_regex = re.compile(r"^2\.[0-9]{2}$") + executable = self.launch_context.executable.executable_path - # Blender installation contain subfolder named with it's version where - # python binaries are stored. - version_subfolder = self.launch_context.app_name.split("_")[1] + if os.path.basename(executable).lower() != "blender.exe": + self.log.info(( + "Executable does not lead to blender.exe file. Can't determine" + " blender's python to check/install PySide2." 
+ )) + return + + executable_dir = os.path.dirname(executable) + version_subfolders = [] + for name in os.listdir(executable_dir): + fullpath = os.path.join(name, executable_dir) + if not os.path.isdir(fullpath): + continue + + if not version_regex.match(name): + continue + + version_subfolders.append(name) + + if not version_subfolders: + self.log.info( + "Didn't find version subfolder next to Blender executable" + ) + return + + if len(version_subfolders) > 1: + self.log.info(( + "Found more than one version subfolder next" + " to blender executable. {}" + ).format(", ".join([ + '"./{}"'.format(name) + for name in version_subfolders + ]))) + return + + version_subfolder = version_subfolders[0] + pythond_dir = os.path.join( os.path.dirname(executable), version_subfolder, @@ -65,6 +102,7 @@ class InstallPySideToBlender(PreLaunchHook): # Check if PySide2 is installed and skip if yes if self.is_pyside_installed(python_executable): + self.log.debug("Blender has already installed PySide2.") return # Install PySide2 in blender's python diff --git a/openpype/hosts/blender/plugins/create/create_pointcache.py b/openpype/hosts/blender/plugins/create/create_pointcache.py new file mode 100644 index 0000000000..03a468f82e --- /dev/null +++ b/openpype/hosts/blender/plugins/create/create_pointcache.py @@ -0,0 +1,35 @@ +"""Create a pointcache asset.""" + +import bpy + +from avalon import api +from avalon.blender import lib +import openpype.hosts.blender.api.plugin + + +class CreatePointcache(openpype.hosts.blender.api.plugin.Creator): + """Polygonal static geometry""" + + name = "pointcacheMain" + label = "Point Cache" + family = "pointcache" + icon = "gears" + + def process(self): + + asset = self.data["asset"] + subset = self.data["subset"] + name = openpype.hosts.blender.api.plugin.asset_name(asset, subset) + collection = bpy.data.collections.new(name=name) + bpy.context.scene.collection.children.link(collection) + self.data['task'] = api.Session.get('AVALON_TASK') + 
lib.imprint(collection, self.data) + + if (self.options or {}).get("useSelection"): + objects = lib.get_selection() + for obj in objects: + collection.objects.link(obj) + if obj.type == 'EMPTY': + objects.extend(obj.children) + + return collection diff --git a/openpype/hosts/blender/plugins/load/load_abc.py b/openpype/hosts/blender/plugins/load/load_abc.py new file mode 100644 index 0000000000..4248cffd69 --- /dev/null +++ b/openpype/hosts/blender/plugins/load/load_abc.py @@ -0,0 +1,246 @@ +"""Load an asset in Blender from an Alembic file.""" + +from pathlib import Path +from pprint import pformat +from typing import Dict, List, Optional + +from avalon import api, blender +import bpy +import openpype.hosts.blender.api.plugin as plugin + + +class CacheModelLoader(plugin.AssetLoader): + """Load cache models. + + Stores the imported asset in a collection named after the asset. + + Note: + At least for now it only supports Alembic files. + """ + + families = ["model", "pointcache"] + representations = ["abc"] + + label = "Link Alembic" + icon = "code-fork" + color = "orange" + + def _remove(self, objects, container): + for obj in list(objects): + if obj.type == 'MESH': + bpy.data.meshes.remove(obj.data) + elif obj.type == 'EMPTY': + bpy.data.objects.remove(obj) + + bpy.data.collections.remove(container) + + def _process(self, libpath, container_name, parent_collection): + bpy.ops.object.select_all(action='DESELECT') + + view_layer = bpy.context.view_layer + view_layer_collection = view_layer.active_layer_collection.collection + + relative = bpy.context.preferences.filepaths.use_relative_paths + bpy.ops.wm.alembic_import( + filepath=libpath, + relative_path=relative + ) + + parent = parent_collection + + if parent is None: + parent = bpy.context.scene.collection + + model_container = bpy.data.collections.new(container_name) + parent.children.link(model_container) + for obj in bpy.context.selected_objects: + model_container.objects.link(obj) + 
view_layer_collection.objects.unlink(obj) + + name = obj.name + obj.name = f"{name}:{container_name}" + + # Groups are imported as Empty objects in Blender + if obj.type == 'MESH': + data_name = obj.data.name + obj.data.name = f"{data_name}:{container_name}" + + if not obj.get(blender.pipeline.AVALON_PROPERTY): + obj[blender.pipeline.AVALON_PROPERTY] = dict() + + avalon_info = obj[blender.pipeline.AVALON_PROPERTY] + avalon_info.update({"container_name": container_name}) + + bpy.ops.object.select_all(action='DESELECT') + + return model_container + + def process_asset( + self, context: dict, name: str, namespace: Optional[str] = None, + options: Optional[Dict] = None + ) -> Optional[List]: + """ + Arguments: + name: Use pre-defined name + namespace: Use pre-defined namespace + context: Full parenthood of representation to load + options: Additional settings dictionary + """ + + libpath = self.fname + asset = context["asset"]["name"] + subset = context["subset"]["name"] + + lib_container = plugin.asset_name( + asset, subset + ) + unique_number = plugin.get_unique_number( + asset, subset + ) + namespace = namespace or f"{asset}_{unique_number}" + container_name = plugin.asset_name( + asset, subset, unique_number + ) + + container = bpy.data.collections.new(lib_container) + container.name = container_name + blender.pipeline.containerise_existing( + container, + name, + namespace, + context, + self.__class__.__name__, + ) + + container_metadata = container.get( + blender.pipeline.AVALON_PROPERTY) + + container_metadata["libpath"] = libpath + container_metadata["lib_container"] = lib_container + + obj_container = self._process( + libpath, container_name, None) + + container_metadata["obj_container"] = obj_container + + # Save the list of objects in the metadata container + container_metadata["objects"] = obj_container.all_objects + + nodes = list(container.objects) + nodes.append(container) + self[:] = nodes + return nodes + + def update(self, container: Dict, 
representation: Dict): + """Update the loaded asset. + + This will remove all objects of the current collection, load the new + ones and add them to the collection. + If the objects of the collection are used in another collection they + will not be removed, only unlinked. Normally this should not be the + case though. + + Warning: + No nested collections are supported at the moment! + """ + collection = bpy.data.collections.get( + container["objectName"] + ) + libpath = Path(api.get_representation_path(representation)) + extension = libpath.suffix.lower() + + self.log.info( + "Container: %s\nRepresentation: %s", + pformat(container, indent=2), + pformat(representation, indent=2), + ) + + assert collection, ( + f"The asset is not loaded: {container['objectName']}" + ) + assert not (collection.children), ( + "Nested collections are not supported." + ) + assert libpath, ( + "No existing library file found for {container['objectName']}" + ) + assert libpath.is_file(), ( + f"The file doesn't exist: {libpath}" + ) + assert extension in plugin.VALID_EXTENSIONS, ( + f"Unsupported file: {libpath}" + ) + + collection_metadata = collection.get( + blender.pipeline.AVALON_PROPERTY) + collection_libpath = collection_metadata["libpath"] + + obj_container = plugin.get_local_collection_with_name( + collection_metadata["obj_container"].name + ) + objects = obj_container.all_objects + + container_name = obj_container.name + + normalized_collection_libpath = ( + str(Path(bpy.path.abspath(collection_libpath)).resolve()) + ) + normalized_libpath = ( + str(Path(bpy.path.abspath(str(libpath))).resolve()) + ) + self.log.debug( + "normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s", + normalized_collection_libpath, + normalized_libpath, + ) + if normalized_collection_libpath == normalized_libpath: + self.log.info("Library already loaded, not updating...") + return + + parent = plugin.get_parent_collection(obj_container) + + self._remove(objects, obj_container) + + obj_container 
= self._process( + str(libpath), container_name, parent) + + collection_metadata["obj_container"] = obj_container + collection_metadata["objects"] = obj_container.all_objects + collection_metadata["libpath"] = str(libpath) + collection_metadata["representation"] = str(representation["_id"]) + + def remove(self, container: Dict) -> bool: + """Remove an existing container from a Blender scene. + + Arguments: + container (openpype:container-1.0): Container to remove, + from `host.ls()`. + + Returns: + bool: Whether the container was deleted. + + Warning: + No nested collections are supported at the moment! + """ + collection = bpy.data.collections.get( + container["objectName"] + ) + if not collection: + return False + assert not (collection.children), ( + "Nested collections are not supported." + ) + + collection_metadata = collection.get( + blender.pipeline.AVALON_PROPERTY) + + obj_container = plugin.get_local_collection_with_name( + collection_metadata["obj_container"].name + ) + objects = obj_container.all_objects + + self._remove(objects, obj_container) + + bpy.data.collections.remove(collection) + + return True diff --git a/openpype/hosts/blender/plugins/load/load_model.py b/openpype/hosts/blender/plugins/load/load_model.py index ed0f2faf17..35a241b98e 100644 --- a/openpype/hosts/blender/plugins/load/load_model.py +++ b/openpype/hosts/blender/plugins/load/load_model.py @@ -244,65 +244,3 @@ class BlendModelLoader(plugin.AssetLoader): bpy.data.collections.remove(collection) return True - - -class CacheModelLoader(plugin.AssetLoader): - """Load cache models. - - Stores the imported asset in a collection named after the asset. - - Note: - At least for now it only supports Alembic files. 
- """ - - families = ["model"] - representations = ["abc"] - - label = "Link Model" - icon = "code-fork" - color = "orange" - - def process_asset( - self, context: dict, name: str, namespace: Optional[str] = None, - options: Optional[Dict] = None - ) -> Optional[List]: - """ - Arguments: - name: Use pre-defined name - namespace: Use pre-defined namespace - context: Full parenthood of representation to load - options: Additional settings dictionary - """ - raise NotImplementedError( - "Loading of Alembic files is not yet implemented.") - # TODO (jasper): implement Alembic import. - - libpath = self.fname - asset = context["asset"]["name"] - subset = context["subset"]["name"] - # TODO (jasper): evaluate use of namespace which is 'alien' to Blender. - lib_container = container_name = ( - plugin.asset_name(asset, subset, namespace) - ) - relative = bpy.context.preferences.filepaths.use_relative_paths - - with bpy.data.libraries.load( - libpath, link=True, relative=relative - ) as (data_from, data_to): - data_to.collections = [lib_container] - - scene = bpy.context.scene - instance_empty = bpy.data.objects.new( - container_name, None - ) - scene.collection.objects.link(instance_empty) - instance_empty.instance_type = 'COLLECTION' - collection = bpy.data.collections[lib_container] - collection.name = container_name - instance_empty.instance_collection = collection - - nodes = list(collection.objects) - nodes.append(collection) - nodes.append(instance_empty) - self[:] = nodes - return nodes diff --git a/openpype/hosts/blender/plugins/publish/extract_abc.py b/openpype/hosts/blender/plugins/publish/extract_abc.py index 6a89c6019b..a6315908fc 100644 --- a/openpype/hosts/blender/plugins/publish/extract_abc.py +++ b/openpype/hosts/blender/plugins/publish/extract_abc.py @@ -11,14 +11,14 @@ class ExtractABC(openpype.api.Extractor): label = "Extract ABC" hosts = ["blender"] - families = ["model"] + families = ["model", "pointcache"] optional = True def process(self, instance): # 
Define extract output file path stagingdir = self.staging_dir(instance) - filename = f"{instance.name}.fbx" + filename = f"{instance.name}.abc" filepath = os.path.join(stagingdir, filename) context = bpy.context @@ -52,6 +52,8 @@ class ExtractABC(openpype.api.Extractor): old_scale = scene.unit_settings.scale_length + bpy.ops.object.select_all(action='DESELECT') + selected = list() for obj in instance: @@ -67,14 +69,11 @@ class ExtractABC(openpype.api.Extractor): # We set the scale of the scene for the export scene.unit_settings.scale_length = 0.01 - self.log.info(new_context) - # We export the abc bpy.ops.wm.alembic_export( new_context, filepath=filepath, - start=1, - end=1 + selected=True ) view_layer.active_layer_collection = old_active_layer_collection diff --git a/openpype/hosts/harmony/api/__init__.py b/openpype/hosts/harmony/api/__init__.py index 705ccef892..fd21725bd5 100644 --- a/openpype/hosts/harmony/api/__init__.py +++ b/openpype/hosts/harmony/api/__init__.py @@ -3,6 +3,7 @@ import os from pathlib import Path import logging +import re from openpype import lib from openpype.api import (get_current_project_settings) @@ -63,26 +64,9 @@ def get_asset_settings(): "handleStart": handle_start, "handleEnd": handle_end, "resolutionWidth": resolution_width, - "resolutionHeight": resolution_height + "resolutionHeight": resolution_height, + "entityType": entity_type } - settings = get_current_project_settings() - - try: - skip_resolution_check = \ - settings["harmony"]["general"]["skip_resolution_check"] - skip_timelines_check = \ - settings["harmony"]["general"]["skip_timelines_check"] - except KeyError: - skip_resolution_check = [] - skip_timelines_check = [] - - if os.getenv('AVALON_TASK') in skip_resolution_check: - scene_data.pop("resolutionWidth") - scene_data.pop("resolutionHeight") - - if entity_type in skip_timelines_check: - scene_data.pop('frameStart', None) - scene_data.pop('frameEnd', None) return scene_data diff --git 
a/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py b/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py index b3e7f49268..0371e80095 100644 --- a/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py +++ b/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py @@ -2,6 +2,7 @@ """Validate scene settings.""" import os import json +import re import pyblish.api @@ -41,22 +42,42 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin): families = ["workfile"] hosts = ["harmony"] actions = [ValidateSceneSettingsRepair] + optional = True - frame_check_filter = ["_ch_", "_pr_", "_intd_", "_extd_"] - # used for skipping resolution validation for render tasks - render_check_filter = ["render", "Render"] + # skip frameEnd check if asset contains any of: + frame_check_filter = ["_ch_", "_pr_", "_intd_", "_extd_"] # regex + + # skip resolution check if Task name matches any of regex patterns + skip_resolution_check = ["render", "Render"] # regex + + # skip frameStart, frameEnd check if Task name matches any of regex patt. 
+ skip_timelines_check = [] # regex def process(self, instance): """Plugin entry point.""" expected_settings = openpype.hosts.harmony.api.get_asset_settings() - self.log.info(expected_settings) + self.log.info("scene settings from DB:".format(expected_settings)) expected_settings = _update_frames(dict.copy(expected_settings)) expected_settings["frameEndHandle"] = expected_settings["frameEnd"] +\ expected_settings["handleEnd"] - if any(string in instance.context.data['anatomyData']['asset'] - for string in self.frame_check_filter): + if (any(re.search(pattern, os.getenv('AVALON_TASK')) + for pattern in self.skip_resolution_check)): + expected_settings.pop("resolutionWidth") + expected_settings.pop("resolutionHeight") + + entity_type = expected_settings.get("entityType") + if (any(re.search(pattern, entity_type) + for pattern in self.skip_timelines_check)): + expected_settings.pop('frameStart', None) + expected_settings.pop('frameEnd', None) + + expected_settings.pop("entityType") # not useful after the check + + asset_name = instance.context.data['anatomyData']['asset'] + if any(re.search(pattern, asset_name) + for pattern in self.frame_check_filter): expected_settings.pop("frameEnd") # handle case where ftrack uses only two decimal places @@ -66,13 +87,7 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin): fps = float( "{:.2f}".format(instance.context.data.get("frameRate"))) - if any(string in instance.context.data['anatomyData']['task'] - for string in self.render_check_filter): - self.log.debug("Render task detected, resolution check skipped") - expected_settings.pop("resolutionWidth") - expected_settings.pop("resolutionHeight") - - self.log.debug(expected_settings) + self.log.debug("filtered settings: {}".format(expected_settings)) current_settings = { "fps": fps, @@ -84,7 +99,7 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin): "resolutionWidth": instance.context.data.get("resolutionWidth"), "resolutionHeight": 
instance.context.data.get("resolutionHeight"), } - self.log.debug("curr:: {}".format(current_settings)) + self.log.debug("current scene settings {}".format(current_settings)) invalid_settings = [] for key, value in expected_settings.items(): diff --git a/openpype/hosts/hiero/api/__init__.py b/openpype/hosts/hiero/api/__init__.py index fcb1d50ea8..8d0105ae5f 100644 --- a/openpype/hosts/hiero/api/__init__.py +++ b/openpype/hosts/hiero/api/__init__.py @@ -22,6 +22,7 @@ from .pipeline import ( ) from .lib import ( + pype_tag_name, get_track_items, get_current_project, get_current_sequence, @@ -73,6 +74,7 @@ __all__ = [ "work_root", # Lib functions + "pype_tag_name", "get_track_items", "get_current_project", "get_current_sequence", diff --git a/openpype/hosts/hiero/api/events.py b/openpype/hosts/hiero/api/events.py index c02e3e2ac4..3df095f9e4 100644 --- a/openpype/hosts/hiero/api/events.py +++ b/openpype/hosts/hiero/api/events.py @@ -2,7 +2,12 @@ import os import hiero.core.events import avalon.api as avalon from openpype.api import Logger -from .lib import sync_avalon_data_to_workfile, launch_workfiles_app +from .lib import ( + sync_avalon_data_to_workfile, + launch_workfiles_app, + selection_changed_timeline, + before_project_save +) from .tags import add_tags_to_workfile from .menu import update_menu_task_label @@ -78,7 +83,7 @@ def register_hiero_events(): "Registering events for: kBeforeNewProjectCreated, " "kAfterNewProjectCreated, kBeforeProjectLoad, kAfterProjectLoad, " "kBeforeProjectSave, kAfterProjectSave, kBeforeProjectClose, " - "kAfterProjectClose, kShutdown, kStartup" + "kAfterProjectClose, kShutdown, kStartup, kSelectionChanged" ) # hiero.core.events.registerInterest( @@ -91,8 +96,8 @@ def register_hiero_events(): hiero.core.events.registerInterest( "kAfterProjectLoad", afterProjectLoad) - # hiero.core.events.registerInterest( - # "kBeforeProjectSave", beforeProjectSaved) + hiero.core.events.registerInterest( + "kBeforeProjectSave", before_project_save) 
# hiero.core.events.registerInterest( # "kAfterProjectSave", afterProjectSaved) # @@ -104,10 +109,16 @@ def register_hiero_events(): # hiero.core.events.registerInterest("kShutdown", shutDown) # hiero.core.events.registerInterest("kStartup", startupCompleted) - # workfiles - hiero.core.events.registerEventType("kStartWorkfiles") - hiero.core.events.registerInterest("kStartWorkfiles", launch_workfiles_app) + hiero.core.events.registerInterest( + ("kSelectionChanged", "kTimeline"), selection_changed_timeline) + # workfiles + try: + hiero.core.events.registerEventType("kStartWorkfiles") + hiero.core.events.registerInterest( + "kStartWorkfiles", launch_workfiles_app) + except RuntimeError: + pass def register_events(): """ diff --git a/openpype/hosts/hiero/api/lib.py b/openpype/hosts/hiero/api/lib.py index b74e70cae3..a9982d96c4 100644 --- a/openpype/hosts/hiero/api/lib.py +++ b/openpype/hosts/hiero/api/lib.py @@ -9,7 +9,7 @@ import hiero import avalon.api as avalon import avalon.io from avalon.vendor.Qt import QtWidgets -from openpype.api import (Logger, Anatomy, config) +from openpype.api import (Logger, Anatomy, get_anatomy_settings) from . 
import tags import shutil from compiler.ast import flatten @@ -30,9 +30,9 @@ self = sys.modules[__name__] self._has_been_setup = False self._has_menu = False self._registered_gui = None -self.pype_tag_name = "Pype Data" -self.default_sequence_name = "PypeSequence" -self.default_bin_name = "PypeBin" +self.pype_tag_name = "openpypeData" +self.default_sequence_name = "openpypeSequence" +self.default_bin_name = "openpypeBin" AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype") @@ -150,15 +150,27 @@ def get_track_items( # get selected track items or all in active sequence if selected: - selected_items = list(hiero.selection) - for item in selected_items: - if track_name and track_name in item.parent().name(): - # filter only items fitting input track name - track_items.append(item) - elif not track_name: - # or add all if no track_name was defined - track_items.append(item) - else: + try: + selected_items = list(hiero.selection) + for item in selected_items: + if track_name and track_name in item.parent().name(): + # filter only items fitting input track name + track_items.append(item) + elif not track_name: + # or add all if no track_name was defined + track_items.append(item) + except AttributeError: + pass + + # check if any collected track items are + # `core.Hiero.Python.TrackItem` instance + if track_items: + any_track_item = track_items[0] + if not isinstance(any_track_item, hiero.core.TrackItem): + selected_items = [] + + # collect all available active sequence track items + if not track_items: sequence = get_current_sequence(name=sequence_name) # get all available tracks from sequence tracks = list(sequence.audioTracks()) + list(sequence.videoTracks()) @@ -240,7 +252,7 @@ def set_track_item_pype_tag(track_item, data=None): # basic Tag's attribute tag_data = { "editable": "0", - "note": "Pype data holder", + "note": "OpenPype data container", "icon": "openpype_icon.png", "metadata": {k: v for k, v in data.items()} } @@ -744,10 +756,13 @@ def 
_set_hrox_project_knobs(doc, **knobs): # set attributes to Project Tag proj_elem = doc.documentElement().firstChildElement("Project") for k, v in knobs.items(): - proj_elem.setAttribute(k, v) + if isinstance(v, dict): + continue + proj_elem.setAttribute(str(k), v) def apply_colorspace_project(): + project_name = os.getenv("AVALON_PROJECT") # get path the the active projects project = get_current_project(remove_untitled=True) current_file = project.path() @@ -756,9 +771,9 @@ def apply_colorspace_project(): project.close() # get presets for hiero - presets = config.get_init_presets() - colorspace = presets["colorspace"] - hiero_project_clrs = colorspace.get("hiero", {}).get("project", {}) + imageio = get_anatomy_settings( + project_name)["imageio"].get("hiero", None) + presets = imageio.get("workfile") # save the workfile as subversion "comment:_colorspaceChange" split_current_file = os.path.splitext(current_file) @@ -789,13 +804,13 @@ def apply_colorspace_project(): os.remove(copy_current_file_tmp) # use the code from bellow for changing xml hrox Attributes - hiero_project_clrs.update({"name": os.path.basename(copy_current_file)}) + presets.update({"name": os.path.basename(copy_current_file)}) # read HROX in as QDomSocument doc = _read_doc_from_path(copy_current_file) # apply project colorspace properties - _set_hrox_project_knobs(doc, **hiero_project_clrs) + _set_hrox_project_knobs(doc, **presets) # write QDomSocument back as HROX _write_doc_to_path(doc, copy_current_file) @@ -805,14 +820,17 @@ def apply_colorspace_project(): def apply_colorspace_clips(): + project_name = os.getenv("AVALON_PROJECT") project = get_current_project(remove_untitled=True) clips = project.clips() # get presets for hiero - presets = config.get_init_presets() - colorspace = presets["colorspace"] - hiero_clips_clrs = colorspace.get("hiero", {}).get("clips", {}) + imageio = get_anatomy_settings( + project_name)["imageio"].get("hiero", None) + from pprint import pprint + presets = 
imageio.get("regexInputs", {}).get("inputs", {}) + pprint(presets) for clip in clips: clip_media_source_path = clip.mediaSource().firstpath() clip_name = clip.name() @@ -822,10 +840,11 @@ def apply_colorspace_clips(): continue # check if any colorspace presets for read is mathing - preset_clrsp = next((hiero_clips_clrs[k] - for k in hiero_clips_clrs - if bool(re.search(k, clip_media_source_path))), - None) + preset_clrsp = None + for k in presets: + if not bool(re.search(k["regex"], clip_media_source_path)): + continue + preset_clrsp = k["colorspace"] if preset_clrsp: log.debug("Changing clip.path: {}".format(clip_media_source_path)) @@ -893,3 +912,61 @@ def get_sequence_pattern_and_padding(file): return found, padding else: return None, None + + +def sync_clip_name_to_data_asset(track_items_list): + # loop trough all selected clips + for track_item in track_items_list: + # ignore if parent track is locked or disabled + if track_item.parent().isLocked(): + continue + if not track_item.parent().isEnabled(): + continue + # ignore if the track item is disabled + if not track_item.isEnabled(): + continue + + # get name and data + ti_name = track_item.name() + data = get_track_item_pype_data(track_item) + + # ignore if no data on the clip or not publish instance + if not data: + continue + if data.get("id") != "pyblish.avalon.instance": + continue + + # fix data if wrong name + if data["asset"] != ti_name: + data["asset"] = ti_name + # remove the original tag + tag = get_track_item_pype_tag(track_item) + track_item.removeTag(tag) + # create new tag with updated data + set_track_item_pype_tag(track_item, data) + print("asset was changed in clip: {}".format(ti_name)) + + +def selection_changed_timeline(event): + """Callback on timeline to check if asset in data is the same as clip name. 
+ + Args: + event (hiero.core.Event): timeline event + """ + timeline_editor = event.sender + selection = timeline_editor.selection() + + # run checking function + sync_clip_name_to_data_asset(selection) + + +def before_project_save(event): + track_items = get_track_items( + selected=False, + track_type="video", + check_enabled=True, + check_locked=True, + check_tagged=True) + + # run checking function + sync_clip_name_to_data_asset(track_items) diff --git a/openpype/hosts/hiero/api/menu.py b/openpype/hosts/hiero/api/menu.py index 9ccf5e39d1..ab49251093 100644 --- a/openpype/hosts/hiero/api/menu.py +++ b/openpype/hosts/hiero/api/menu.py @@ -68,50 +68,45 @@ def menu_install(): menu.addSeparator() - workfiles_action = menu.addAction("Work Files...") + workfiles_action = menu.addAction("Work Files ...") workfiles_action.setIcon(QtGui.QIcon("icons:Position.png")) workfiles_action.triggered.connect(launch_workfiles_app) - default_tags_action = menu.addAction("Create Default Tags...") + default_tags_action = menu.addAction("Create Default Tags") default_tags_action.setIcon(QtGui.QIcon("icons:Position.png")) default_tags_action.triggered.connect(tags.add_tags_to_workfile) menu.addSeparator() - publish_action = menu.addAction("Publish...") + publish_action = menu.addAction("Publish ...") publish_action.setIcon(QtGui.QIcon("icons:Output.png")) publish_action.triggered.connect( lambda *args: publish(hiero.ui.mainWindow()) ) - creator_action = menu.addAction("Create...") + creator_action = menu.addAction("Create ...") creator_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png")) creator_action.triggered.connect(creator.show) - loader_action = menu.addAction("Load...") + loader_action = menu.addAction("Load ...") loader_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png")) loader_action.triggered.connect(cbloader.show) - sceneinventory_action = menu.addAction("Manage...") + sceneinventory_action = menu.addAction("Manage ...") 
sceneinventory_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png")) sceneinventory_action.triggered.connect(sceneinventory.show) menu.addSeparator() - reload_action = menu.addAction("Reload pipeline...") - reload_action.setIcon(QtGui.QIcon("icons:ColorAdd.png")) - reload_action.triggered.connect(reload_config) + if os.getenv("OPENPYPE_DEVELOP"): + reload_action = menu.addAction("Reload pipeline") + reload_action.setIcon(QtGui.QIcon("icons:ColorAdd.png")) + reload_action.triggered.connect(reload_config) menu.addSeparator() - apply_colorspace_p_action = menu.addAction("Apply Colorspace Project...") + apply_colorspace_p_action = menu.addAction("Apply Colorspace Project") apply_colorspace_p_action.setIcon(QtGui.QIcon("icons:ColorAdd.png")) apply_colorspace_p_action.triggered.connect(apply_colorspace_project) - apply_colorspace_c_action = menu.addAction("Apply Colorspace Clips...") + apply_colorspace_c_action = menu.addAction("Apply Colorspace Clips") apply_colorspace_c_action.setIcon(QtGui.QIcon("icons:ColorAdd.png")) apply_colorspace_c_action.triggered.connect(apply_colorspace_clips) - - self.context_label_action = context_label_action - self.workfile_actions = workfiles_action - self.default_tags_action = default_tags_action - self.publish_action = publish_action - self.reload_action = reload_action diff --git a/openpype/hosts/hiero/api/plugin.py b/openpype/hosts/hiero/api/plugin.py index 92e15cfae4..c46ef9abfa 100644 --- a/openpype/hosts/hiero/api/plugin.py +++ b/openpype/hosts/hiero/api/plugin.py @@ -4,10 +4,10 @@ import hiero from Qt import QtWidgets, QtCore from avalon.vendor import qargparse import avalon.api as avalon -import openpype.api as pype +import openpype.api as openpype from . 
import lib -log = pype.Logger().get_logger(__name__) +log = openpype.Logger().get_logger(__name__) def load_stylesheet(): @@ -266,7 +266,8 @@ class CreatorWidget(QtWidgets.QDialog): elif v["type"] == "QSpinBox": data[k]["value"] = self.create_row( content_layout, "QSpinBox", v["label"], - setValue=v["value"], setMaximum=10000, setToolTip=tool_tip) + setValue=v["value"], setMinimum=0, + setMaximum=100000, setToolTip=tool_tip) return data @@ -387,7 +388,8 @@ class ClipLoader: # try to get value from options or evaluate key value for `load_to` self.new_sequence = options.get("newSequence") or bool( "New timeline" in options.get("load_to", "")) - + self.clip_name_template = options.get( + "clipNameTemplate") or "{asset}_{subset}_{representation}" assert self._populate_data(), str( "Cannot Load selected data, look into database " "or call your supervisor") @@ -432,7 +434,7 @@ class ClipLoader: asset = str(repr_cntx["asset"]) subset = str(repr_cntx["subset"]) representation = str(repr_cntx["representation"]) - self.data["clip_name"] = "_".join([asset, subset, representation]) + self.data["clip_name"] = self.clip_name_template.format(**repr_cntx) self.data["track_name"] = "_".join([subset, representation]) self.data["versionData"] = self.context["version"]["data"] # gets file path @@ -476,7 +478,7 @@ class ClipLoader: """ asset_name = self.context["representation"]["context"]["asset"] - self.data["assetData"] = pype.get_asset(asset_name)["data"] + self.data["assetData"] = openpype.get_asset(asset_name)["data"] def _make_track_item(self, source_bin_item, audio=False): """ Create track item with """ @@ -543,15 +545,9 @@ class ClipLoader: if "slate" in f), # if nothing was found then use default None # so other bool could be used - None) or bool((( - # put together duration of clip attributes - self.timeline_out - self.timeline_in + 1) \ - + self.handle_start \ - + self.handle_end - # and compare it with meda duration - ) > self.media_duration) - - print("__ slate_on: 
`{}`".format(slate_on)) + None) or bool(int( + (self.timeline_out - self.timeline_in + 1) + + self.handle_start + self.handle_end) < self.media_duration) # if slate is on then remove the slate frame from begining if slate_on: @@ -592,7 +588,7 @@ class ClipLoader: return track_item -class Creator(pype.Creator): +class Creator(openpype.Creator): """Creator class wrapper """ clip_color = "Purple" @@ -601,7 +597,7 @@ class Creator(pype.Creator): def __init__(self, *args, **kwargs): import openpype.hosts.hiero.api as phiero super(Creator, self).__init__(*args, **kwargs) - self.presets = pype.get_current_project_settings()[ + self.presets = openpype.get_current_project_settings()[ "hiero"]["create"].get(self.__class__.__name__, {}) # adding basic current context resolve objects @@ -674,6 +670,9 @@ class PublishClip: if kwargs.get("avalon"): self.tag_data.update(kwargs["avalon"]) + # add publish attribute to tag data + self.tag_data.update({"publish": True}) + # adding ui inputs if any self.ui_inputs = kwargs.get("ui_inputs", {}) @@ -687,6 +686,7 @@ class PublishClip: self._create_parents() def convert(self): + # solve track item data and add them to tag data self._convert_to_tag_data() @@ -705,6 +705,12 @@ class PublishClip: self.tag_data["asset"] = new_name else: self.tag_data["asset"] = self.ti_name + self.tag_data["hierarchyData"]["shot"] = self.ti_name + + if self.tag_data["heroTrack"] and self.review_layer: + self.tag_data.update({"reviewTrack": self.review_layer}) + else: + self.tag_data.update({"reviewTrack": None}) # create pype tag on track_item and add data lib.imprint(self.track_item, self.tag_data) @@ -773,8 +779,8 @@ class PublishClip: _spl = text.split("#") _len = (len(_spl) - 1) _repl = "{{{0}:0>{1}}}".format(name, _len) - new_text = text.replace(("#" * _len), _repl) - return new_text + return text.replace(("#" * _len), _repl) + def _convert_to_tag_data(self): """ Convert internal data to tag data. 
@@ -782,13 +788,13 @@ class PublishClip: Populating the tag data into internal variable self.tag_data """ # define vertical sync attributes - master_layer = True + hero_track = True self.review_layer = "" if self.vertical_sync: # check if track name is not in driving layer if self.track_name not in self.driving_layer: # if it is not then define vertical sync as None - master_layer = False + hero_track = False # increasing steps by index of rename iteration self.count_steps *= self.rename_index @@ -802,7 +808,7 @@ class PublishClip: self.tag_data[_k] = _v["value"] # driving layer is set as positive match - if master_layer or self.vertical_sync: + if hero_track or self.vertical_sync: # mark review layer if self.review_track and ( self.review_track not in self.review_track_default): @@ -836,40 +842,40 @@ class PublishClip: hierarchy_formating_data ) - tag_hierarchy_data.update({"masterLayer": True}) - if master_layer and self.vertical_sync: + tag_hierarchy_data.update({"heroTrack": True}) + if hero_track and self.vertical_sync: self.vertical_clip_match.update({ (self.clip_in, self.clip_out): tag_hierarchy_data }) - if not master_layer and self.vertical_sync: + if not hero_track and self.vertical_sync: # driving layer is set as negative match - for (_in, _out), master_data in self.vertical_clip_match.items(): - master_data.update({"masterLayer": False}) + for (_in, _out), hero_data in self.vertical_clip_match.items(): + hero_data.update({"heroTrack": False}) if _in == self.clip_in and _out == self.clip_out: - data_subset = master_data["subset"] - # add track index in case duplicity of names in master data + data_subset = hero_data["subset"] + # add track index in case duplicity of names in hero data if self.subset in data_subset: - master_data["subset"] = self.subset + str( + hero_data["subset"] = self.subset + str( self.track_index) # in case track name and subset name is the same then add if self.subset_name == self.track_name: - master_data["subset"] = self.subset + 
hero_data["subset"] = self.subset # assing data to return hierarchy data to tag - tag_hierarchy_data = master_data + tag_hierarchy_data = hero_data # add data to return data dict self.tag_data.update(tag_hierarchy_data) - if master_layer and self.review_layer: - self.tag_data.update({"reviewTrack": self.review_layer}) - def _solve_tag_hierarchy_data(self, hierarchy_formating_data): """ Solve tag data from hierarchy data and templates. """ # fill up clip name and hierarchy keys hierarchy_filled = self.hierarchy.format(**hierarchy_formating_data) clip_name_filled = self.clip_name.format(**hierarchy_formating_data) + # remove shot from hierarchy data: is not needed anymore + hierarchy_formating_data.pop("shot") + return { "newClipName": clip_name_filled, "hierarchy": hierarchy_filled, diff --git a/openpype/hosts/hiero/api/tags.py b/openpype/hosts/hiero/api/tags.py index 06fa655a2e..d2502f3c71 100644 --- a/openpype/hosts/hiero/api/tags.py +++ b/openpype/hosts/hiero/api/tags.py @@ -84,6 +84,13 @@ def update_tag(tag, data): mtd = tag.metadata() # get metadata key from data data_mtd = data.get("metadata", {}) + + # due to hiero bug we have to make sure keys which are not existent in + # data are cleared of value by `None` + for _mk in mtd.keys(): + if _mk.replace("tag.", "") not in data_mtd.keys(): + mtd.setValue(_mk, str(None)) + # set all data metadata to tag metadata for k, v in data_mtd.items(): mtd.setValue( diff --git a/openpype/hosts/hiero/otio/__init__.py b/openpype/hosts/hiero/otio/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/hosts/hiero/otio/hiero_export.py b/openpype/hosts/hiero/otio/hiero_export.py new file mode 100644 index 0000000000..6e751d3aa4 --- /dev/null +++ b/openpype/hosts/hiero/otio/hiero_export.py @@ -0,0 +1,366 @@ +""" compatibility OpenTimelineIO 0.12.0 and newer +""" + +import os +import re +import sys +import ast +from compiler.ast import flatten +import opentimelineio as otio +from . 
import utils +import hiero.core +import hiero.ui + +self = sys.modules[__name__] +self.track_types = { + hiero.core.VideoTrack: otio.schema.TrackKind.Video, + hiero.core.AudioTrack: otio.schema.TrackKind.Audio +} +self.project_fps = None +self.marker_color_map = { + "magenta": otio.schema.MarkerColor.MAGENTA, + "red": otio.schema.MarkerColor.RED, + "yellow": otio.schema.MarkerColor.YELLOW, + "green": otio.schema.MarkerColor.GREEN, + "cyan": otio.schema.MarkerColor.CYAN, + "blue": otio.schema.MarkerColor.BLUE, +} +self.timeline = None +self.include_tags = True + + +def get_current_hiero_project(remove_untitled=False): + projects = flatten(hiero.core.projects()) + if not remove_untitled: + return next(iter(projects)) + + # if remove_untitled + for proj in projects: + if "Untitled" in proj.name(): + proj.close() + else: + return proj + + +def create_otio_rational_time(frame, fps): + return otio.opentime.RationalTime( + float(frame), + float(fps) + ) + + +def create_otio_time_range(start_frame, frame_duration, fps): + return otio.opentime.TimeRange( + start_time=create_otio_rational_time(start_frame, fps), + duration=create_otio_rational_time(frame_duration, fps) + ) + + +def _get_metadata(item): + if hasattr(item, 'metadata'): + return {key: value for key, value in dict(item.metadata()).items()} + return {} + + +def create_otio_reference(clip): + metadata = _get_metadata(clip) + media_source = clip.mediaSource() + + # get file info for path and start frame + file_info = media_source.fileinfos().pop() + frame_start = file_info.startFrame() + path = file_info.filename() + + # get padding and other file infos + padding = media_source.filenamePadding() + file_head = media_source.filenameHead() + is_sequence = not media_source.singleFile() + frame_duration = media_source.duration() + fps = utils.get_rate(clip) or self.project_fps + extension = os.path.splitext(path)[-1] + + if is_sequence: + metadata.update({ + "isSequence": True, + "padding": padding + }) + + # add 
resolution metadata + metadata.update({ + "openpype.source.colourtransform": clip.sourceMediaColourTransform(), + "openpype.source.width": int(media_source.width()), + "openpype.source.height": int(media_source.height()), + "openpype.source.pixelAspect": float(media_source.pixelAspect()) + }) + + otio_ex_ref_item = None + + if is_sequence: + # if it is file sequence try to create `ImageSequenceReference` + # the OTIO might not be compatible so return nothing and do it old way + try: + dirname = os.path.dirname(path) + otio_ex_ref_item = otio.schema.ImageSequenceReference( + target_url_base=dirname + os.sep, + name_prefix=file_head, + name_suffix=extension, + start_frame=frame_start, + frame_zero_padding=padding, + rate=fps, + available_range=create_otio_time_range( + frame_start, + frame_duration, + fps + ) + ) + except AttributeError: + pass + + if not otio_ex_ref_item: + reformat_path = utils.get_reformated_path(path, padded=False) + # in case old OTIO or video file create `ExternalReference` + otio_ex_ref_item = otio.schema.ExternalReference( + target_url=reformat_path, + available_range=create_otio_time_range( + frame_start, + frame_duration, + fps + ) + ) + + # add metadata to otio item + add_otio_metadata(otio_ex_ref_item, media_source, **metadata) + + return otio_ex_ref_item + + +def get_marker_color(tag): + icon = tag.icon() + pat = r'icons:Tag(?P\w+)\.\w+' + + res = re.search(pat, icon) + if res: + color = res.groupdict().get('color') + if color.lower() in self.marker_color_map: + return self.marker_color_map[color.lower()] + + return otio.schema.MarkerColor.RED + + +def create_otio_markers(otio_item, item): + for tag in item.tags(): + if not tag.visible(): + continue + + if tag.name() == 'Copy': + # Hiero adds this tag to a lot of clips + continue + + frame_rate = utils.get_rate(item) or self.project_fps + + marked_range = otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime( + tag.inTime(), + frame_rate + ), + 
duration=otio.opentime.RationalTime( + int(tag.metadata().dict().get('tag.length', '0')), + frame_rate + ) + ) + # add tag metadata but remove "tag." string + metadata = {} + + for key, value in tag.metadata().dict().items(): + _key = key.replace("tag.", "") + + try: + # capture exceptions which are related to strings only + _value = ast.literal_eval(value) + except (ValueError, SyntaxError): + _value = value + + metadata.update({_key: _value}) + + # Store the source item for future import assignment + metadata['hiero_source_type'] = item.__class__.__name__ + + marker = otio.schema.Marker( + name=tag.name(), + color=get_marker_color(tag), + marked_range=marked_range, + metadata=metadata + ) + + otio_item.markers.append(marker) + + +def create_otio_clip(track_item): + clip = track_item.source() + source_in = track_item.sourceIn() + duration = track_item.sourceDuration() + fps = utils.get_rate(track_item) or self.project_fps + name = track_item.name() + + media_reference = create_otio_reference(clip) + source_range = create_otio_time_range( + int(source_in), + int(duration), + fps + ) + + otio_clip = otio.schema.Clip( + name=name, + source_range=source_range, + media_reference=media_reference + ) + + # Add tags as markers + if self.include_tags: + create_otio_markers(otio_clip, track_item) + create_otio_markers(otio_clip, track_item.source()) + + return otio_clip + + +def create_otio_gap(gap_start, clip_start, tl_start_frame, fps): + return otio.schema.Gap( + source_range=create_otio_time_range( + gap_start, + (clip_start - tl_start_frame) - gap_start, + fps + ) + ) + + +def _create_otio_timeline(): + project = get_current_hiero_project(remove_untitled=False) + metadata = _get_metadata(self.timeline) + + metadata.update({ + "openpype.timeline.width": int(self.timeline.format().width()), + "openpype.timeline.height": int(self.timeline.format().height()), + "openpype.timeline.pixelAspect": int(self.timeline.format().pixelAspect()), # noqa + 
"openpype.project.useOCIOEnvironmentOverride": project.useOCIOEnvironmentOverride(), # noqa + "openpype.project.lutSetting16Bit": project.lutSetting16Bit(), + "openpype.project.lutSetting8Bit": project.lutSetting8Bit(), + "openpype.project.lutSettingFloat": project.lutSettingFloat(), + "openpype.project.lutSettingLog": project.lutSettingLog(), + "openpype.project.lutSettingViewer": project.lutSettingViewer(), + "openpype.project.lutSettingWorkingSpace": project.lutSettingWorkingSpace(), # noqa + "openpype.project.lutUseOCIOForExport": project.lutUseOCIOForExport(), + "openpype.project.ocioConfigName": project.ocioConfigName(), + "openpype.project.ocioConfigPath": project.ocioConfigPath() + }) + + start_time = create_otio_rational_time( + self.timeline.timecodeStart(), self.project_fps) + + return otio.schema.Timeline( + name=self.timeline.name(), + global_start_time=start_time, + metadata=metadata + ) + + +def create_otio_track(track_type, track_name): + return otio.schema.Track( + name=track_name, + kind=self.track_types[track_type] + ) + + +def add_otio_gap(track_item, otio_track, prev_out): + gap_length = track_item.timelineIn() - prev_out + if prev_out != 0: + gap_length -= 1 + + gap = otio.opentime.TimeRange( + duration=otio.opentime.RationalTime( + gap_length, + self.project_fps + ) + ) + otio_gap = otio.schema.Gap(source_range=gap) + otio_track.append(otio_gap) + + +def add_otio_metadata(otio_item, media_source, **kwargs): + metadata = _get_metadata(media_source) + + # add additional metadata from kwargs + if kwargs: + metadata.update(kwargs) + + # add metadata to otio item metadata + for key, value in metadata.items(): + otio_item.metadata.update({key: value}) + + +def create_otio_timeline(): + + # get current timeline + self.timeline = hiero.ui.activeSequence() + self.project_fps = self.timeline.framerate().toFloat() + + # convert timeline to otio + otio_timeline = _create_otio_timeline() + + # loop all defined track types + for track in 
self.timeline.items(): + # skip if track is disabled + if not track.isEnabled(): + continue + + # convert track to otio + otio_track = create_otio_track( + type(track), track.name()) + + for itemindex, track_item in enumerate(track): + # skip offline track items + if not track_item.isMediaPresent(): + continue + + # skip if track item is disabled + if not track_item.isEnabled(): + continue + + # Add Gap if needed + if itemindex == 0: + # if it is first track item at track then add + # it to previouse item + prev_item = track_item + + else: + # get previouse item + prev_item = track_item.parent().items()[itemindex - 1] + + # calculate clip frame range difference from each other + clip_diff = track_item.timelineIn() - prev_item.timelineOut() + + # add gap if first track item is not starting + # at first timeline frame + if itemindex == 0 and track_item.timelineIn() > 0: + add_otio_gap(track_item, otio_track, 0) + + # or add gap if following track items are having + # frame range differences from each other + elif itemindex and clip_diff != 1: + add_otio_gap(track_item, otio_track, prev_item.timelineOut()) + + # create otio clip and add it to track + otio_clip = create_otio_clip(track_item) + otio_track.append(otio_clip) + + # Add tags as markers + if self.include_tags: + create_otio_markers(otio_track, track) + + # add track to otio timeline + otio_timeline.tracks.append(otio_track) + + return otio_timeline + + +def write_to_file(otio_timeline, path): + otio.adapters.write_to_file(otio_timeline, path) diff --git a/openpype/hosts/hiero/otio/hiero_import.py b/openpype/hosts/hiero/otio/hiero_import.py new file mode 100644 index 0000000000..257c434011 --- /dev/null +++ b/openpype/hosts/hiero/otio/hiero_import.py @@ -0,0 +1,545 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = "Daniel Flehner Heen" +__credits__ = ["Jakub Jezek", "Daniel Flehner Heen"] + + +import os +import hiero.core +import hiero.ui + +import PySide2.QtWidgets as qw + +try: + from 
urllib import unquote + +except ImportError: + from urllib.parse import unquote # lint:ok + +import opentimelineio as otio + +_otio_old = False + + +def inform(messages): + if isinstance(messages, type('')): + messages = [messages] + + qw.QMessageBox.information( + hiero.ui.mainWindow(), + 'OTIO Import', + '\n'.join(messages), + qw.QMessageBox.StandardButton.Ok + ) + + +def get_transition_type(otio_item, otio_track): + _in, _out = otio_track.neighbors_of(otio_item) + + if isinstance(_in, otio.schema.Gap): + _in = None + + if isinstance(_out, otio.schema.Gap): + _out = None + + if _in and _out: + return 'dissolve' + + elif _in and not _out: + return 'fade_out' + + elif not _in and _out: + return 'fade_in' + + else: + return 'unknown' + + +def find_trackitem(otio_clip, hiero_track): + for item in hiero_track.items(): + if item.timelineIn() == otio_clip.range_in_parent().start_time.value: + if item.name() == otio_clip.name: + return item + + return None + + +def get_neighboring_trackitems(otio_item, otio_track, hiero_track): + _in, _out = otio_track.neighbors_of(otio_item) + trackitem_in = None + trackitem_out = None + + if _in: + trackitem_in = find_trackitem(_in, hiero_track) + + if _out: + trackitem_out = find_trackitem(_out, hiero_track) + + return trackitem_in, trackitem_out + + +def apply_transition(otio_track, otio_item, track): + warning = None + + # Figure out type of transition + transition_type = get_transition_type(otio_item, otio_track) + + # Figure out track kind for getattr below + kind = '' + if isinstance(track, hiero.core.AudioTrack): + kind = 'Audio' + + # Gather TrackItems involved in trasition + item_in, item_out = get_neighboring_trackitems( + otio_item, + otio_track, + track + ) + + # Create transition object + if transition_type == 'dissolve': + transition_func = getattr( + hiero.core.Transition, + 'create{kind}DissolveTransition'.format(kind=kind) + ) + + try: + transition = transition_func( + item_in, + item_out, + otio_item.in_offset.value, 
+ otio_item.out_offset.value + ) + + # Catch error raised if transition is bigger than TrackItem source + except RuntimeError as e: + transition = None + warning = ( + "Unable to apply transition \"{t.name}\": {e} " + "Ignoring the transition.").format(t=otio_item, e=str(e)) + + elif transition_type == 'fade_in': + transition_func = getattr( + hiero.core.Transition, + 'create{kind}FadeInTransition'.format(kind=kind) + ) + + # Warn user if part of fade is outside of clip + if otio_item.in_offset.value: + warning = \ + 'Fist half of transition "{t.name}" is outside of clip and ' \ + 'not valid in Hiero. Only applied second half.' \ + .format(t=otio_item) + + transition = transition_func( + item_out, + otio_item.out_offset.value + ) + + elif transition_type == 'fade_out': + transition_func = getattr( + hiero.core.Transition, + 'create{kind}FadeOutTransition'.format(kind=kind) + ) + transition = transition_func( + item_in, + otio_item.in_offset.value + ) + + # Warn user if part of fade is outside of clip + if otio_item.out_offset.value: + warning = \ + 'Second half of transition "{t.name}" is outside of clip ' \ + 'and not valid in Hiero. Only applied first half.' 
\ + .format(t=otio_item) + + else: + # Unknown transition + return + + # Apply transition to track + if transition: + track.addTransition(transition) + + # Inform user about missing or adjusted transitions + return warning + + +def prep_url(url_in): + url = unquote(url_in) + + if url.startswith('file://localhost/'): + return url + + url = 'file://localhost{sep}{url}'.format( + sep=url.startswith(os.sep) and '' or os.sep, + url=url.startswith(os.sep) and url[1:] or url + ) + + return url + + +def create_offline_mediasource(otio_clip, path=None): + global _otio_old + + hiero_rate = hiero.core.TimeBase( + otio_clip.source_range.start_time.rate + ) + + try: + legal_media_refs = ( + otio.schema.ExternalReference, + otio.schema.ImageSequenceReference + ) + except AttributeError: + _otio_old = True + legal_media_refs = ( + otio.schema.ExternalReference + ) + + if isinstance(otio_clip.media_reference, legal_media_refs): + source_range = otio_clip.available_range() + + else: + source_range = otio_clip.source_range + + if path is None: + path = otio_clip.name + + media = hiero.core.MediaSource.createOfflineVideoMediaSource( + prep_url(path), + source_range.start_time.value, + source_range.duration.value, + hiero_rate, + source_range.start_time.value + ) + + return media + + +def load_otio(otio_file, project=None, sequence=None): + otio_timeline = otio.adapters.read_from_file(otio_file) + build_sequence(otio_timeline, project=project, sequence=sequence) + + +marker_color_map = { + "PINK": "Magenta", + "RED": "Red", + "ORANGE": "Yellow", + "YELLOW": "Yellow", + "GREEN": "Green", + "CYAN": "Cyan", + "BLUE": "Blue", + "PURPLE": "Magenta", + "MAGENTA": "Magenta", + "BLACK": "Blue", + "WHITE": "Green" +} + + +def get_tag(tagname, tagsbin): + for tag in tagsbin.items(): + if tag.name() == tagname: + return tag + + if isinstance(tag, hiero.core.Bin): + tag = get_tag(tagname, tag) + + if tag is not None: + return tag + + return None + + +def add_metadata(metadata, hiero_item): + for 
key, value in metadata.get('Hiero', dict()).items(): + if key == 'source_type': + # Only used internally to reassign tag to correct Hiero item + continue + + if isinstance(value, dict): + add_metadata(value, hiero_item) + continue + + if value is not None: + if not key.startswith('tag.'): + key = 'tag.' + key + + hiero_item.metadata().setValue(key, str(value)) + + +def add_markers(otio_item, hiero_item, tagsbin): + if isinstance(otio_item, (otio.schema.Stack, otio.schema.Clip)): + markers = otio_item.markers + + elif isinstance(otio_item, otio.schema.Timeline): + markers = otio_item.tracks.markers + + else: + markers = [] + + for marker in markers: + meta = marker.metadata.get('Hiero', dict()) + if 'source_type' in meta: + if hiero_item.__class__.__name__ != meta.get('source_type'): + continue + + marker_color = marker.color + + _tag = get_tag(marker.name, tagsbin) + if _tag is None: + _tag = get_tag(marker_color_map[marker_color], tagsbin) + + if _tag is None: + _tag = hiero.core.Tag(marker_color_map[marker.color]) + + start = marker.marked_range.start_time.value + end = ( + marker.marked_range.start_time.value + + marker.marked_range.duration.value + ) + + if hasattr(hiero_item, 'addTagToRange'): + tag = hiero_item.addTagToRange(_tag, start, end) + + else: + tag = hiero_item.addTag(_tag) + + tag.setName(marker.name or marker_color_map[marker_color]) + # tag.setNote(meta.get('tag.note', '')) + + # Add metadata + add_metadata(marker.metadata, tag) + + +def create_track(otio_track, tracknum, track_kind): + if track_kind is None and hasattr(otio_track, 'kind'): + track_kind = otio_track.kind + + # Create a Track + if track_kind == otio.schema.TrackKind.Video: + track = hiero.core.VideoTrack( + otio_track.name or 'Video{n}'.format(n=tracknum) + ) + + else: + track = hiero.core.AudioTrack( + otio_track.name or 'Audio{n}'.format(n=tracknum) + ) + + return track + + +def create_clip(otio_clip, tagsbin, sequencebin): + # Create MediaSource + url = None + media = None + 
otio_media = otio_clip.media_reference + + if isinstance(otio_media, otio.schema.ExternalReference): + url = prep_url(otio_media.target_url) + media = hiero.core.MediaSource(url) + + elif not _otio_old: + if isinstance(otio_media, otio.schema.ImageSequenceReference): + url = prep_url(otio_media.abstract_target_url('#')) + media = hiero.core.MediaSource(url) + + if media is None or media.isOffline(): + media = create_offline_mediasource(otio_clip, url) + + # Reuse previous clip if possible + clip = None + for item in sequencebin.clips(): + if item.activeItem().mediaSource() == media: + clip = item.activeItem() + break + + if not clip: + # Create new Clip + clip = hiero.core.Clip(media) + + # Add Clip to a Bin + sequencebin.addItem(hiero.core.BinItem(clip)) + + # Add markers + add_markers(otio_clip, clip, tagsbin) + + return clip + + +def create_trackitem(playhead, track, otio_clip, clip): + source_range = otio_clip.source_range + + trackitem = track.createTrackItem(otio_clip.name) + trackitem.setPlaybackSpeed(source_range.start_time.rate) + trackitem.setSource(clip) + + time_scalar = 1. + + # Check for speed effects and adjust playback speed accordingly + for effect in otio_clip.effects: + if isinstance(effect, otio.schema.LinearTimeWarp): + time_scalar = effect.time_scalar + # Only reverse effect can be applied here + if abs(time_scalar) == 1.: + trackitem.setPlaybackSpeed( + trackitem.playbackSpeed() * time_scalar) + + elif isinstance(effect, otio.schema.FreezeFrame): + # For freeze frame, playback speed must be set after range + time_scalar = 0. 
+ + # If reverse playback speed swap source in and out + if trackitem.playbackSpeed() < 0: + source_out = source_range.start_time.value + source_in = source_range.end_time_inclusive().value + + timeline_in = playhead + source_out + timeline_out = ( + timeline_in + + source_range.duration.value + ) - 1 + else: + # Normal playback speed + source_in = source_range.start_time.value + source_out = source_range.end_time_inclusive().value + + timeline_in = playhead + timeline_out = ( + timeline_in + + source_range.duration.value + ) - 1 + + # Set source and timeline in/out points + trackitem.setTimes( + timeline_in, + timeline_out, + source_in, + source_out + + ) + + # Apply playback speed for freeze frames + if abs(time_scalar) != 1.: + trackitem.setPlaybackSpeed(trackitem.playbackSpeed() * time_scalar) + + # Link audio to video when possible + if isinstance(track, hiero.core.AudioTrack): + for other in track.parent().trackItemsAt(playhead): + if other.source() == clip: + trackitem.link(other) + + return trackitem + + +def build_sequence( + otio_timeline, project=None, sequence=None, track_kind=None): + if project is None: + if sequence: + project = sequence.project() + + else: + # Per version 12.1v2 there is no way of getting active project + project = hiero.core.projects(hiero.core.Project.kUserProjects)[-1] + + projectbin = project.clipsBin() + + if not sequence: + # Create a Sequence + sequence = hiero.core.Sequence(otio_timeline.name or 'OTIOSequence') + + # Set sequence settings from otio timeline if available + if ( + hasattr(otio_timeline, 'global_start_time') + and otio_timeline.global_start_time + ): + start_time = otio_timeline.global_start_time + sequence.setFramerate(start_time.rate) + sequence.setTimecodeStart(start_time.value) + + # Create a Bin to hold clips + projectbin.addItem(hiero.core.BinItem(sequence)) + + sequencebin = hiero.core.Bin(sequence.name()) + projectbin.addItem(sequencebin) + + else: + sequencebin = projectbin + + # Get tagsBin + tagsbin 
= hiero.core.project("Tag Presets").tagsBin() + + # Add timeline markers + add_markers(otio_timeline, sequence, tagsbin) + + if isinstance(otio_timeline, otio.schema.Timeline): + tracks = otio_timeline.tracks + + else: + tracks = [otio_timeline] + + for tracknum, otio_track in enumerate(tracks): + playhead = 0 + _transitions = [] + + # Add track to sequence + track = create_track(otio_track, tracknum, track_kind) + sequence.addTrack(track) + + # iterate over items in track + for _itemnum, otio_clip in enumerate(otio_track): + if isinstance(otio_clip, (otio.schema.Track, otio.schema.Stack)): + inform('Nested sequences/tracks are created separately.') + + # Add gap where the nested sequence would have been + playhead += otio_clip.source_range.duration.value + + # Process nested sequence + build_sequence( + otio_clip, + project=project, + track_kind=otio_track.kind + ) + + elif isinstance(otio_clip, otio.schema.Clip): + # Create a Clip + clip = create_clip(otio_clip, tagsbin, sequencebin) + + # Create TrackItem + trackitem = create_trackitem( + playhead, + track, + otio_clip, + clip + ) + + # Add markers + add_markers(otio_clip, trackitem, tagsbin) + + # Add trackitem to track + track.addTrackItem(trackitem) + + # Update playhead + playhead = trackitem.timelineOut() + 1 + + elif isinstance(otio_clip, otio.schema.Transition): + # Store transitions for when all clips in the track are created + _transitions.append((otio_track, otio_clip)) + + elif isinstance(otio_clip, otio.schema.Gap): + # Hiero has no fillers, slugs or blanks at the moment + playhead += otio_clip.source_range.duration.value + + # Apply transitions we stored earlier now that all clips are present + warnings = [] + for otio_track, otio_item in _transitions: + # Catch warnings form transitions in case + # of unsupported transitions + warning = apply_transition(otio_track, otio_item, track) + if warning: + warnings.append(warning) + + if warnings: + inform(warnings) diff --git 
a/openpype/hosts/hiero/otio/utils.py b/openpype/hosts/hiero/otio/utils.py new file mode 100644 index 0000000000..4c5d46bd51 --- /dev/null +++ b/openpype/hosts/hiero/otio/utils.py @@ -0,0 +1,80 @@ +import re +import opentimelineio as otio + + +def timecode_to_frames(timecode, framerate): + rt = otio.opentime.from_timecode(timecode, 24) + return int(otio.opentime.to_frames(rt)) + + +def frames_to_timecode(frames, framerate): + rt = otio.opentime.from_frames(frames, framerate) + return otio.opentime.to_timecode(rt) + + +def frames_to_secons(frames, framerate): + rt = otio.opentime.from_frames(frames, framerate) + return otio.opentime.to_seconds(rt) + + +def get_reformated_path(path, padded=True): + """ + Return fixed python expression path + + Args: + path (str): path url or simple file name + + Returns: + type: string with reformated path + + Example: + get_reformated_path("plate.[0001-1008].exr") > plate.%04d.exr + + """ + if "%" in path: + padding_pattern = r"(\d+)" + padding = int(re.findall(padding_pattern, path).pop()) + num_pattern = r"(%\d+d)" + if padded: + path = re.sub(num_pattern, "%0{}d".format(padding), path) + else: + path = re.sub(num_pattern, "%d", path) + return path + + +def get_padding_from_path(path): + """ + Return padding number from DaVinci Resolve sequence path style + + Args: + path (str): path url or simple file name + + Returns: + int: padding number + + Example: + get_padding_from_path("plate.[0001-1008].exr") > 4 + + """ + padding_pattern = "(\\d+)(?=-)" + if "[" in path: + return len(re.findall(padding_pattern, path).pop()) + + return None + + +def get_rate(item): + if not hasattr(item, 'framerate'): + return None + + num, den = item.framerate().toRational() + + try: + rate = float(num) / float(den) + except ZeroDivisionError: + return None + + if rate.is_integer(): + return rate + + return round(rate, 4) diff --git a/openpype/hosts/hiero/plugins/create/create_shot_clip.py b/openpype/hosts/hiero/plugins/create/create_shot_clip.py index 
07b7a62b2a..25be9f090b 100644 --- a/openpype/hosts/hiero/plugins/create/create_shot_clip.py +++ b/openpype/hosts/hiero/plugins/create/create_shot_clip.py @@ -120,9 +120,9 @@ class CreateShotClip(phiero.Creator): "vSyncTrack": { "value": gui_tracks, # noqa "type": "QComboBox", - "label": "Master track", + "label": "Hero track", "target": "ui", - "toolTip": "Select driving track name which should be mastering all others", # noqa + "toolTip": "Select driving track name which should be hero for all others", # noqa "order": 1} } }, diff --git a/openpype/hosts/hiero/plugins/load/load_clip.py b/openpype/hosts/hiero/plugins/load/load_clip.py index 4eadf28956..9e12fa360e 100644 --- a/openpype/hosts/hiero/plugins/load/load_clip.py +++ b/openpype/hosts/hiero/plugins/load/load_clip.py @@ -29,13 +29,19 @@ class LoadClip(phiero.SequenceLoader): clip_color_last = "green" clip_color = "red" - def load(self, context, name, namespace, options): + clip_name_template = "{asset}_{subset}_{representation}" + def load(self, context, name, namespace, options): + # add clip name template to options + options.update({ + "clipNameTemplate": self.clip_name_template + }) # in case loader uses multiselection if self.track and self.sequence: options.update({ "sequence": self.sequence, - "track": self.track + "track": self.track, + "clipNameTemplate": self.clip_name_template }) # load clip to timeline and get main variables @@ -45,7 +51,8 @@ class LoadClip(phiero.SequenceLoader): version_data = version.get("data", {}) version_name = version.get("name", None) colorspace = version_data.get("colorspace", None) - object_name = "{}_{}".format(name, namespace) + object_name = self.clip_name_template.format( + **context["representation"]["context"]) # add additional metadata from the version to imprint Avalon knob add_keys = [ diff --git a/openpype/hosts/hiero/plugins/publish/extract_thumbnail.py b/openpype/hosts/hiero/plugins/publish/extract_thumbnail.py new file mode 100644 index 
0000000000..d12e7665bf --- /dev/null +++ b/openpype/hosts/hiero/plugins/publish/extract_thumbnail.py @@ -0,0 +1,59 @@ +import os +import pyblish.api +import openpype.api + + +class ExtractThumnail(openpype.api.Extractor): + """ + Extractor for track item's tumnails + """ + + label = "Extract Thumnail" + order = pyblish.api.ExtractorOrder + families = ["plate", "take"] + hosts = ["hiero"] + + def process(self, instance): + # create representation data + if "representations" not in instance.data: + instance.data["representations"] = [] + + staging_dir = self.staging_dir(instance) + + self.create_thumbnail(staging_dir, instance) + + def create_thumbnail(self, staging_dir, instance): + track_item = instance.data["item"] + track_item_name = track_item.name() + + # frames + duration = track_item.sourceDuration() + frame_start = track_item.sourceIn() + self.log.debug( + "__ frame_start: `{}`, duration: `{}`".format( + frame_start, duration)) + + # get thumbnail frame from the middle + thumb_frame = int(frame_start + (duration / 2)) + + thumb_file = "{}thumbnail{}{}".format( + track_item_name, thumb_frame, ".png") + thumb_path = os.path.join(staging_dir, thumb_file) + + thumbnail = track_item.thumbnail(thumb_frame).save( + thumb_path, + format='png' + ) + self.log.debug( + "__ thumb_path: `{}`, frame: `{}`".format(thumbnail, thumb_frame)) + + self.log.info("Thumnail was generated to: {}".format(thumb_path)) + thumb_representation = { + 'files': thumb_file, + 'stagingDir': staging_dir, + 'name': "thumbnail", + 'thumbnail': True, + 'ext': "png" + } + instance.data["representations"].append( + thumb_representation) diff --git a/openpype/hosts/hiero/plugins/publish/version_up_workfile.py b/openpype/hosts/hiero/plugins/publish/integrate_version_up_workfile.py similarity index 90% rename from openpype/hosts/hiero/plugins/publish/version_up_workfile.py rename to openpype/hosts/hiero/plugins/publish/integrate_version_up_workfile.py index ae03513d78..934e7112fa 100644 --- 
a/openpype/hosts/hiero/plugins/publish/version_up_workfile.py +++ b/openpype/hosts/hiero/plugins/publish/integrate_version_up_workfile.py @@ -2,7 +2,7 @@ from pyblish import api import openpype.api as pype -class VersionUpWorkfile(api.ContextPlugin): +class IntegrateVersionUpWorkfile(api.ContextPlugin): """Save as new workfile version""" order = api.IntegratorOrder + 10.1 diff --git a/openpype/hosts/hiero/plugins/publish/precollect_instances.py b/openpype/hosts/hiero/plugins/publish/precollect_instances.py index bdf007de06..8cccdec99a 100644 --- a/openpype/hosts/hiero/plugins/publish/precollect_instances.py +++ b/openpype/hosts/hiero/plugins/publish/precollect_instances.py @@ -1,221 +1,260 @@ -from compiler.ast import flatten -from pyblish import api +import pyblish +import openpype from openpype.hosts.hiero import api as phiero -import hiero -# from openpype.hosts.hiero.api import lib -# reload(lib) -# reload(phiero) +from openpype.hosts.hiero.otio import hiero_export + +# # developer reload modules +from pprint import pformat -class PreCollectInstances(api.ContextPlugin): +class PrecollectInstances(pyblish.api.ContextPlugin): """Collect all Track items selection.""" - order = api.CollectorOrder - 0.509 - label = "Pre-collect Instances" + order = pyblish.api.CollectorOrder - 0.59 + label = "Precollect Instances" hosts = ["hiero"] def process(self, context): - track_items = phiero.get_track_items( - selected=True, check_tagged=True, check_enabled=True) - # only return enabled track items - if not track_items: - track_items = phiero.get_track_items( - check_enabled=True, check_tagged=True) - # get sequence and video tracks - sequence = context.data["activeSequence"] - tracks = sequence.videoTracks() - - # add collection to context - tracks_effect_items = self.collect_sub_track_items(tracks) - - context.data["tracksEffectItems"] = tracks_effect_items - + otio_timeline = context.data["otioTimeline"] + selected_timeline_items = phiero.get_track_items( + selected=True, 
check_enabled=True, check_tagged=True) self.log.info( - "Processing enabled track items: {}".format(len(track_items))) + "Processing enabled track items: {}".format( + selected_timeline_items)) - for _ti in track_items: - data = dict() - clip = _ti.source() + for track_item in selected_timeline_items: - # get clips subtracks and anotations - annotations = self.clip_annotations(clip) - subtracks = self.clip_subtrack(_ti) - self.log.debug("Annotations: {}".format(annotations)) - self.log.debug(">> Subtracks: {}".format(subtracks)) + data = {} + clip_name = track_item.name() - # get pype tag data - tag_parsed_data = phiero.get_track_item_pype_data(_ti) - # self.log.debug(pformat(tag_parsed_data)) + # get openpype tag data + tag_data = phiero.get_track_item_pype_data(track_item) + self.log.debug("__ tag_data: {}".format(pformat(tag_data))) - if not tag_parsed_data: + if not tag_data: continue - if tag_parsed_data.get("id") != "pyblish.avalon.instance": + if tag_data.get("id") != "pyblish.avalon.instance": continue + + # solve handles length + tag_data["handleStart"] = min( + tag_data["handleStart"], int(track_item.handleInLength())) + tag_data["handleEnd"] = min( + tag_data["handleEnd"], int(track_item.handleOutLength())) + + # add audio to families + with_audio = False + if tag_data.pop("audio"): + with_audio = True + # add tag data to instance data data.update({ - k: v for k, v in tag_parsed_data.items() + k: v for k, v in tag_data.items() if k not in ("id", "applieswhole", "label") }) - asset = tag_parsed_data["asset"] - subset = tag_parsed_data["subset"] - review = tag_parsed_data.get("review") - audio = tag_parsed_data.get("audio") - - # remove audio attribute from data - data.pop("audio") + asset = tag_data["asset"] + subset = tag_data["subset"] # insert family into families - family = tag_parsed_data["family"] - families = [str(f) for f in tag_parsed_data["families"]] + family = tag_data["family"] + families = [str(f) for f in tag_data["families"]] 
families.insert(0, str(family)) - track = _ti.parent() - media_source = _ti.source().mediaSource() - source_path = media_source.firstpath() - file_head = media_source.filenameHead() - file_info = media_source.fileinfos().pop() - source_first_frame = int(file_info.startFrame()) - - # apply only for feview and master track instance - if review: - families += ["review", "ftrack"] + # form label + label = asset + if asset != clip_name: + label += " ({})".format(clip_name) + label += " {}".format(subset) + label += " {}".format("[" + ", ".join(families) + "]") data.update({ - "name": "{} {} {}".format(asset, subset, families), + "name": "{}_{}".format(asset, subset), + "label": label, "asset": asset, - "item": _ti, + "item": track_item, "families": families, - - # tags - "tags": _ti.tags(), - - # track item attributes - "track": track.name(), - "trackItem": track, - - # version data - "versionData": { - "colorspace": _ti.sourceMediaColourTransform() - }, - - # source attribute - "source": source_path, - "sourceMedia": media_source, - "sourcePath": source_path, - "sourceFileHead": file_head, - "sourceFirst": source_first_frame, - - # clip's effect - "clipEffectItems": subtracks + "publish": tag_data["publish"], + "fps": context.data["fps"] }) + # otio clip data + otio_data = self.get_otio_clip_instance_data( + otio_timeline, track_item) or {} + self.log.debug("__ otio_data: {}".format(pformat(otio_data))) + data.update(otio_data) + self.log.debug("__ data: {}".format(pformat(data))) + + # add resolution + self.get_resolution_to_data(data, context) + + # create instance instance = context.create_instance(**data) + # create shot instance for shot attributes create/update + self.create_shot_instance(context, **data) + self.log.info("Creating instance: {}".format(instance)) + self.log.debug( + "_ instance.data: {}".format(pformat(instance.data))) - if audio: - a_data = dict() + if not with_audio: + return - # add tag data to instance data - a_data.update({ - k: v for k, v in 
tag_parsed_data.items() - if k not in ("id", "applieswhole", "label") - }) + # create audio subset instance + self.create_audio_instance(context, **data) - # create main attributes - subset = "audioMain" - family = "audio" - families = ["clip", "ftrack"] - families.insert(0, str(family)) + # add audioReview attribute to plate instance data + # if reviewTrack is on + if tag_data.get("reviewTrack") is not None: + instance.data["reviewAudio"] = True - name = "{} {} {}".format(asset, subset, families) + def get_resolution_to_data(self, data, context): + assert data.get("otioClip"), "Missing `otioClip` data" - a_data.update({ - "name": name, - "subset": subset, - "asset": asset, - "family": family, - "families": families, - "item": _ti, + # solve source resolution option + if data.get("sourceResolution", None): + otio_clip_metadata = data[ + "otioClip"].media_reference.metadata + data.update({ + "resolutionWidth": otio_clip_metadata[ + "openpype.source.width"], + "resolutionHeight": otio_clip_metadata[ + "openpype.source.height"], + "pixelAspect": otio_clip_metadata[ + "openpype.source.pixelAspect"] + }) + else: + otio_tl_metadata = context.data["otioTimeline"].metadata + data.update({ + "resolutionWidth": otio_tl_metadata["openpype.timeline.width"], + "resolutionHeight": otio_tl_metadata[ + "openpype.timeline.height"], + "pixelAspect": otio_tl_metadata[ + "openpype.timeline.pixelAspect"] + }) - # tags - "tags": _ti.tags(), - }) + def create_shot_instance(self, context, **data): + master_layer = data.get("heroTrack") + hierarchy_data = data.get("hierarchyData") + asset = data.get("asset") + item = data.get("item") + clip_name = item.name() - a_instance = context.create_instance(**a_data) - self.log.info("Creating audio instance: {}".format(a_instance)) + if not master_layer: + return + + if not hierarchy_data: + return + + asset = data["asset"] + subset = "shotMain" + + # insert family into families + family = "shot" + + # form label + label = asset + if asset != 
clip_name: + label += " ({}) ".format(clip_name) + label += " {}".format(subset) + label += " [{}]".format(family) + + data.update({ + "name": "{}_{}".format(asset, subset), + "label": label, + "subset": subset, + "asset": asset, + "family": family, + "families": [] + }) + + instance = context.create_instance(**data) + self.log.info("Creating instance: {}".format(instance)) + self.log.debug( + "_ instance.data: {}".format(pformat(instance.data))) + + def create_audio_instance(self, context, **data): + master_layer = data.get("heroTrack") + + if not master_layer: + return + + asset = data.get("asset") + item = data.get("item") + clip_name = item.name() + + asset = data["asset"] + subset = "audioMain" + + # insert family into families + family = "audio" + + # form label + label = asset + if asset != clip_name: + label += " ({}) ".format(clip_name) + label += " {}".format(subset) + label += " [{}]".format(family) + + data.update({ + "name": "{}_{}".format(asset, subset), + "label": label, + "subset": subset, + "asset": asset, + "family": family, + "families": ["clip"] + }) + # remove review track attr if any + data.pop("reviewTrack") + + # create instance + instance = context.create_instance(**data) + self.log.info("Creating instance: {}".format(instance)) + self.log.debug( + "_ instance.data: {}".format(pformat(instance.data))) + + def get_otio_clip_instance_data(self, otio_timeline, track_item): + """ + Return otio objects for timeline, track and clip + + Args: + timeline_item_data (dict): timeline_item_data from list returned by + resolve.get_current_timeline_items() + otio_timeline (otio.schema.Timeline): otio object + + Returns: + dict: otio clip object + + """ + ti_track_name = track_item.parent().name() + timeline_range = self.create_otio_time_range_from_timeline_item_data( + track_item) + for otio_clip in otio_timeline.each_clip(): + track_name = otio_clip.parent().name + parent_range = otio_clip.range_in_parent() + if ti_track_name not in track_name: + 
continue + if otio_clip.name not in track_item.name(): + continue + if openpype.lib.is_overlapping_otio_ranges( + parent_range, timeline_range, strict=True): + + # add pypedata marker to otio_clip metadata + for marker in otio_clip.markers: + if phiero.pype_tag_name in marker.name: + otio_clip.metadata.update(marker.metadata) + return {"otioClip": otio_clip} + + return None @staticmethod - def clip_annotations(clip): - """ - Returns list of Clip's hiero.core.Annotation - """ - annotations = [] - subTrackItems = flatten(clip.subTrackItems()) - annotations += [item for item in subTrackItems if isinstance( - item, hiero.core.Annotation)] - return annotations + def create_otio_time_range_from_timeline_item_data(track_item): + timeline = phiero.get_current_sequence() + frame_start = int(track_item.timelineIn()) + frame_duration = int(track_item.sourceDuration()) + fps = timeline.framerate().toFloat() - @staticmethod - def clip_subtrack(clip): - """ - Returns list of Clip's hiero.core.SubTrackItem - """ - subtracks = [] - subTrackItems = flatten(clip.parent().subTrackItems()) - for item in subTrackItems: - # avoid all anotation - if isinstance(item, hiero.core.Annotation): - continue - # # avoid all not anaibled - if not item.isEnabled(): - continue - subtracks.append(item) - return subtracks - - @staticmethod - def collect_sub_track_items(tracks): - """ - Returns dictionary with track index as key and list of subtracks - """ - # collect all subtrack items - sub_track_items = dict() - for track in tracks: - items = track.items() - - # skip if no clips on track > need track with effect only - if items: - continue - - # skip all disabled tracks - if not track.isEnabled(): - continue - - track_index = track.trackIndex() - _sub_track_items = flatten(track.subTrackItems()) - - # continue only if any subtrack items are collected - if len(_sub_track_items) < 1: - continue - - enabled_sti = list() - # loop all found subtrack items and check if they are enabled - for _sti in 
_sub_track_items: - # checking if not enabled - if not _sti.isEnabled(): - continue - if isinstance(_sti, hiero.core.Annotation): - continue - # collect the subtrack item - enabled_sti.append(_sti) - - # continue only if any subtrack items are collected - if len(enabled_sti) < 1: - continue - - # add collection of subtrackitems to dict - sub_track_items[track_index] = enabled_sti - - return sub_track_items + return hiero_export.create_otio_time_range( + frame_start, frame_duration, fps) diff --git a/openpype/hosts/hiero/plugins/publish/precollect_workfile.py b/openpype/hosts/hiero/plugins/publish/precollect_workfile.py index ef7d07421b..bc4ef7e150 100644 --- a/openpype/hosts/hiero/plugins/publish/precollect_workfile.py +++ b/openpype/hosts/hiero/plugins/publish/precollect_workfile.py @@ -1,52 +1,57 @@ import os import pyblish.api +import hiero.ui from openpype.hosts.hiero import api as phiero from avalon import api as avalon +from pprint import pformat +from openpype.hosts.hiero.otio import hiero_export +from Qt.QtGui import QPixmap +import tempfile - -class PreCollectWorkfile(pyblish.api.ContextPlugin): +class PrecollectWorkfile(pyblish.api.ContextPlugin): """Inject the current working file into context""" - label = "Pre-collect Workfile" - order = pyblish.api.CollectorOrder - 0.51 + label = "Precollect Workfile" + order = pyblish.api.CollectorOrder - 0.6 def process(self, context): + asset = avalon.Session["AVALON_ASSET"] subset = "workfile" - project = phiero.get_current_project() - active_sequence = phiero.get_current_sequence() - video_tracks = active_sequence.videoTracks() - audio_tracks = active_sequence.audioTracks() - current_file = project.path() - staging_dir = os.path.dirname(current_file) - base_name = os.path.basename(current_file) + active_timeline = hiero.ui.activeSequence() + fps = active_timeline.framerate().toFloat() - # get workfile's colorspace properties - _clrs = {} - _clrs["useOCIOEnvironmentOverride"] = project.useOCIOEnvironmentOverride() 
# noqa - _clrs["lutSetting16Bit"] = project.lutSetting16Bit() - _clrs["lutSetting8Bit"] = project.lutSetting8Bit() - _clrs["lutSettingFloat"] = project.lutSettingFloat() - _clrs["lutSettingLog"] = project.lutSettingLog() - _clrs["lutSettingViewer"] = project.lutSettingViewer() - _clrs["lutSettingWorkingSpace"] = project.lutSettingWorkingSpace() - _clrs["lutUseOCIOForExport"] = project.lutUseOCIOForExport() - _clrs["ocioConfigName"] = project.ocioConfigName() - _clrs["ocioConfigPath"] = project.ocioConfigPath() + # adding otio timeline to context + otio_timeline = hiero_export.create_otio_timeline() - # set main project attributes to context - context.data["activeProject"] = project - context.data["activeSequence"] = active_sequence - context.data["videoTracks"] = video_tracks - context.data["audioTracks"] = audio_tracks - context.data["currentFile"] = current_file - context.data["colorspace"] = _clrs + # get workfile thumnail paths + tmp_staging = tempfile.mkdtemp(prefix="pyblish_tmp_") + thumbnail_name = "workfile_thumbnail.png" + thumbnail_path = os.path.join(tmp_staging, thumbnail_name) - self.log.info("currentFile: {}".format(current_file)) + # search for all windows with name of actual sequence + _windows = [w for w in hiero.ui.windowManager().windows() + if active_timeline.name() in w.windowTitle()] + + # export window to thumb path + QPixmap.grabWidget(_windows[-1]).save(thumbnail_path, 'png') + + # thumbnail + thumb_representation = { + 'files': thumbnail_name, + 'stagingDir': tmp_staging, + 'name': "thumbnail", + 'thumbnail': True, + 'ext': "png" + } + + # get workfile paths + curent_file = project.path() + staging_dir, base_name = os.path.split(curent_file) # creating workfile representation - representation = { + workfile_representation = { 'name': 'hrox', 'ext': 'hrox', 'files': base_name, @@ -59,16 +64,21 @@ class PreCollectWorkfile(pyblish.api.ContextPlugin): "subset": "{}{}".format(asset, subset.capitalize()), "item": project, "family": "workfile", - 
- # version data - "versionData": { - "colorspace": _clrs - }, - - # source attribute - "sourcePath": current_file, - "representations": [representation] + "representations": [workfile_representation, thumb_representation] } + # create instance with workfile instance = context.create_instance(**instance_data) + + # update context with main project attributes + context_data = { + "activeProject": project, + "otioTimeline": otio_timeline, + "currentFile": curent_file, + "fps": fps, + } + context.data.update(context_data) + self.log.info("Creating instance: {}".format(instance)) + self.log.debug("__ instance.data: {}".format(pformat(instance.data))) + self.log.debug("__ context_data: {}".format(pformat(context_data))) diff --git a/openpype/hosts/hiero/plugins/publish/collect_assetbuilds.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/collect_assetbuilds.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py diff --git a/openpype/hosts/hiero/plugins/publish/collect_clip_resolution.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_clip_resolution.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/collect_clip_resolution.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/collect_clip_resolution.py diff --git a/openpype/hosts/hiero/plugins/publish/collect_frame_ranges.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_frame_ranges.py similarity index 97% rename from openpype/hosts/hiero/plugins/publish/collect_frame_ranges.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/collect_frame_ranges.py index 39387578d2..21e12e89fa 100644 --- a/openpype/hosts/hiero/plugins/publish/collect_frame_ranges.py +++ b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_frame_ranges.py @@ -5,7 +5,7 @@ class CollectFrameRanges(pyblish.api.InstancePlugin): """ Collect all framranges. 
""" - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder - 0.1 label = "Collect Frame Ranges" hosts = ["hiero"] families = ["clip", "effect"] diff --git a/openpype/hosts/hiero/plugins/publish/collect_hierarchy_context.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_hierarchy_context.py similarity index 97% rename from openpype/hosts/hiero/plugins/publish/collect_hierarchy_context.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/collect_hierarchy_context.py index ba3e388c53..0696a58e39 100644 --- a/openpype/hosts/hiero/plugins/publish/collect_hierarchy_context.py +++ b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_hierarchy_context.py @@ -39,8 +39,8 @@ class CollectHierarchy(pyblish.api.ContextPlugin): if not set(self.families).intersection(families): continue - # exclude if not masterLayer True - if not instance.data.get("masterLayer"): + # exclude if not heroTrack True + if not instance.data.get("heroTrack"): continue # update families to include `shot` for hierarchy integration diff --git a/openpype/hosts/hiero/plugins/publish/collect_host_version.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_host_version.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/collect_host_version.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/collect_host_version.py diff --git a/openpype/hosts/hiero/plugins/publish/collect_plates.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_plates.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/collect_plates.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/collect_plates.py diff --git a/openpype/hosts/hiero/plugins/publish/collect_review.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_review.py similarity index 99% rename from openpype/hosts/hiero/plugins/publish/collect_review.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/collect_review.py 
index a0ab00b355..b1d97a71d7 100644 --- a/openpype/hosts/hiero/plugins/publish/collect_review.py +++ b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_review.py @@ -29,7 +29,7 @@ class CollectReview(api.InstancePlugin): Exception: description """ - review_track = instance.data.get("review") + review_track = instance.data.get("reviewTrack") video_tracks = instance.context.data["videoTracks"] for track in video_tracks: if review_track not in track.name(): diff --git a/openpype/hosts/hiero/plugins/publish/collect_tag_tasks.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_tag_tasks.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/collect_tag_tasks.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/collect_tag_tasks.py diff --git a/openpype/hosts/hiero/plugins/publish/extract_audio.py b/openpype/hosts/hiero/plugins/publish_old_workflow/extract_audio.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/extract_audio.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/extract_audio.py diff --git a/openpype/hosts/hiero/plugins/publish/extract_clip_effects.py b/openpype/hosts/hiero/plugins/publish_old_workflow/extract_clip_effects.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/extract_clip_effects.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/extract_clip_effects.py diff --git a/openpype/hosts/hiero/plugins/publish/extract_review_preparation.py b/openpype/hosts/hiero/plugins/publish_old_workflow/extract_review_preparation.py similarity index 98% rename from openpype/hosts/hiero/plugins/publish/extract_review_preparation.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/extract_review_preparation.py index 5456ddc3c4..aac476e27a 100644 --- a/openpype/hosts/hiero/plugins/publish/extract_review_preparation.py +++ b/openpype/hosts/hiero/plugins/publish_old_workflow/extract_review_preparation.py @@ -132,7 +132,7 @@ class 
ExtractReviewPreparation(openpype.api.Extractor): ).format(**locals()) self.log.debug("ffprob_cmd: {}".format(ffprob_cmd)) - audio_check_output = openpype.api.subprocess(ffprob_cmd) + audio_check_output = openpype.api.run_subprocess(ffprob_cmd) self.log.debug( "audio_check_output: {}".format(audio_check_output)) @@ -167,7 +167,7 @@ class ExtractReviewPreparation(openpype.api.Extractor): # try to get video native resolution data try: - resolution_output = openpype.api.subprocess(( + resolution_output = openpype.api.run_subprocess(( "\"{ffprobe_path}\" -i \"{full_input_path}\"" " -v error " "-select_streams v:0 -show_entries " @@ -280,7 +280,7 @@ class ExtractReviewPreparation(openpype.api.Extractor): # run subprocess self.log.debug("Executing: {}".format(subprcs_cmd)) - output = openpype.api.subprocess(subprcs_cmd) + output = openpype.api.run_subprocess(subprcs_cmd) self.log.debug("Output: {}".format(output)) repre_new = { diff --git a/openpype/hosts/hiero/plugins/publish/precollect_clip_effects.py b/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_clip_effects.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/precollect_clip_effects.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/precollect_clip_effects.py diff --git a/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_instances.py b/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_instances.py new file mode 100644 index 0000000000..f9cc158e79 --- /dev/null +++ b/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_instances.py @@ -0,0 +1,223 @@ +from compiler.ast import flatten +from pyblish import api +from openpype.hosts.hiero import api as phiero +import hiero +# from openpype.hosts.hiero.api import lib +# reload(lib) +# reload(phiero) + + +class PreCollectInstances(api.ContextPlugin): + """Collect all Track items selection.""" + + order = api.CollectorOrder - 0.509 + label = "Pre-collect Instances" + hosts = ["hiero"] + + def 
process(self, context): + track_items = phiero.get_track_items( + selected=True, check_tagged=True, check_enabled=True) + # only return enabled track items + if not track_items: + track_items = phiero.get_track_items( + check_enabled=True, check_tagged=True) + # get sequence and video tracks + sequence = context.data["activeSequence"] + tracks = sequence.videoTracks() + + # add collection to context + tracks_effect_items = self.collect_sub_track_items(tracks) + + context.data["tracksEffectItems"] = tracks_effect_items + + self.log.info( + "Processing enabled track items: {}".format(len(track_items))) + + for _ti in track_items: + data = {} + clip = _ti.source() + + # get clips subtracks and anotations + annotations = self.clip_annotations(clip) + subtracks = self.clip_subtrack(_ti) + self.log.debug("Annotations: {}".format(annotations)) + self.log.debug(">> Subtracks: {}".format(subtracks)) + + # get pype tag data + tag_parsed_data = phiero.get_track_item_pype_data(_ti) + # self.log.debug(pformat(tag_parsed_data)) + + if not tag_parsed_data: + continue + + if tag_parsed_data.get("id") != "pyblish.avalon.instance": + continue + # add tag data to instance data + data.update({ + k: v for k, v in tag_parsed_data.items() + if k not in ("id", "applieswhole", "label") + }) + + asset = tag_parsed_data["asset"] + subset = tag_parsed_data["subset"] + review_track = tag_parsed_data.get("reviewTrack") + hiero_track = tag_parsed_data.get("heroTrack") + audio = tag_parsed_data.get("audio") + + # remove audio attribute from data + data.pop("audio") + + # insert family into families + family = tag_parsed_data["family"] + families = [str(f) for f in tag_parsed_data["families"]] + families.insert(0, str(family)) + + track = _ti.parent() + media_source = _ti.source().mediaSource() + source_path = media_source.firstpath() + file_head = media_source.filenameHead() + file_info = media_source.fileinfos().pop() + source_first_frame = int(file_info.startFrame()) + + # apply only for review 
and master track instance + if review_track and hiero_track: + families += ["review", "ftrack"] + + data.update({ + "name": "{} {} {}".format(asset, subset, families), + "asset": asset, + "item": _ti, + "families": families, + + # tags + "tags": _ti.tags(), + + # track item attributes + "track": track.name(), + "trackItem": track, + "reviewTrack": review_track, + + # version data + "versionData": { + "colorspace": _ti.sourceMediaColourTransform() + }, + + # source attribute + "source": source_path, + "sourceMedia": media_source, + "sourcePath": source_path, + "sourceFileHead": file_head, + "sourceFirst": source_first_frame, + + # clip's effect + "clipEffectItems": subtracks + }) + + instance = context.create_instance(**data) + + self.log.info("Creating instance.data: {}".format(instance.data)) + + if audio: + a_data = dict() + + # add tag data to instance data + a_data.update({ + k: v for k, v in tag_parsed_data.items() + if k not in ("id", "applieswhole", "label") + }) + + # create main attributes + subset = "audioMain" + family = "audio" + families = ["clip", "ftrack"] + families.insert(0, str(family)) + + name = "{} {} {}".format(asset, subset, families) + + a_data.update({ + "name": name, + "subset": subset, + "asset": asset, + "family": family, + "families": families, + "item": _ti, + + # tags + "tags": _ti.tags(), + }) + + a_instance = context.create_instance(**a_data) + self.log.info("Creating audio instance: {}".format(a_instance)) + + @staticmethod + def clip_annotations(clip): + """ + Returns list of Clip's hiero.core.Annotation + """ + annotations = [] + subTrackItems = flatten(clip.subTrackItems()) + annotations += [item for item in subTrackItems if isinstance( + item, hiero.core.Annotation)] + return annotations + + @staticmethod + def clip_subtrack(clip): + """ + Returns list of Clip's hiero.core.SubTrackItem + """ + subtracks = [] + subTrackItems = flatten(clip.parent().subTrackItems()) + for item in subTrackItems: + # avoid all anotation + if 
isinstance(item, hiero.core.Annotation): + continue + # # avoid all not anaibled + if not item.isEnabled(): + continue + subtracks.append(item) + return subtracks + + @staticmethod + def collect_sub_track_items(tracks): + """ + Returns dictionary with track index as key and list of subtracks + """ + # collect all subtrack items + sub_track_items = dict() + for track in tracks: + items = track.items() + + # skip if no clips on track > need track with effect only + if items: + continue + + # skip all disabled tracks + if not track.isEnabled(): + continue + + track_index = track.trackIndex() + _sub_track_items = flatten(track.subTrackItems()) + + # continue only if any subtrack items are collected + if len(_sub_track_items) < 1: + continue + + enabled_sti = list() + # loop all found subtrack items and check if they are enabled + for _sti in _sub_track_items: + # checking if not enabled + if not _sti.isEnabled(): + continue + if isinstance(_sti, hiero.core.Annotation): + continue + # collect the subtrack item + enabled_sti.append(_sti) + + # continue only if any subtrack items are collected + if len(enabled_sti) < 1: + continue + + # add collection of subtrackitems to dict + sub_track_items[track_index] = enabled_sti + + return sub_track_items diff --git a/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_workfile.py b/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_workfile.py new file mode 100644 index 0000000000..ef7d07421b --- /dev/null +++ b/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_workfile.py @@ -0,0 +1,74 @@ +import os +import pyblish.api +from openpype.hosts.hiero import api as phiero +from avalon import api as avalon + + +class PreCollectWorkfile(pyblish.api.ContextPlugin): + """Inject the current working file into context""" + + label = "Pre-collect Workfile" + order = pyblish.api.CollectorOrder - 0.51 + + def process(self, context): + asset = avalon.Session["AVALON_ASSET"] + subset = "workfile" + + project = 
phiero.get_current_project() + active_sequence = phiero.get_current_sequence() + video_tracks = active_sequence.videoTracks() + audio_tracks = active_sequence.audioTracks() + current_file = project.path() + staging_dir = os.path.dirname(current_file) + base_name = os.path.basename(current_file) + + # get workfile's colorspace properties + _clrs = {} + _clrs["useOCIOEnvironmentOverride"] = project.useOCIOEnvironmentOverride() # noqa + _clrs["lutSetting16Bit"] = project.lutSetting16Bit() + _clrs["lutSetting8Bit"] = project.lutSetting8Bit() + _clrs["lutSettingFloat"] = project.lutSettingFloat() + _clrs["lutSettingLog"] = project.lutSettingLog() + _clrs["lutSettingViewer"] = project.lutSettingViewer() + _clrs["lutSettingWorkingSpace"] = project.lutSettingWorkingSpace() + _clrs["lutUseOCIOForExport"] = project.lutUseOCIOForExport() + _clrs["ocioConfigName"] = project.ocioConfigName() + _clrs["ocioConfigPath"] = project.ocioConfigPath() + + # set main project attributes to context + context.data["activeProject"] = project + context.data["activeSequence"] = active_sequence + context.data["videoTracks"] = video_tracks + context.data["audioTracks"] = audio_tracks + context.data["currentFile"] = current_file + context.data["colorspace"] = _clrs + + self.log.info("currentFile: {}".format(current_file)) + + # creating workfile representation + representation = { + 'name': 'hrox', + 'ext': 'hrox', + 'files': base_name, + "stagingDir": staging_dir, + } + + instance_data = { + "name": "{}_{}".format(asset, subset), + "asset": asset, + "subset": "{}{}".format(asset, subset.capitalize()), + "item": project, + "family": "workfile", + + # version data + "versionData": { + "colorspace": _clrs + }, + + # source attribute + "sourcePath": current_file, + "representations": [representation] + } + + instance = context.create_instance(**instance_data) + self.log.info("Creating instance: {}".format(instance)) diff --git a/openpype/hosts/hiero/plugins/publish/validate_audio.py 
b/openpype/hosts/hiero/plugins/publish_old_workflow/validate_audio.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/validate_audio.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/validate_audio.py diff --git a/openpype/hosts/hiero/plugins/publish/validate_hierarchy.py b/openpype/hosts/hiero/plugins/publish_old_workflow/validate_hierarchy.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/validate_hierarchy.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/validate_hierarchy.py diff --git a/openpype/hosts/hiero/plugins/publish/validate_names.py b/openpype/hosts/hiero/plugins/publish_old_workflow/validate_names.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/validate_names.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/validate_names.py diff --git a/openpype/hosts/hiero/startup/Python/Startup/otioexporter/OTIOExportTask.py b/openpype/hosts/hiero/startup/Python/Startup/otioexporter/OTIOExportTask.py index 90504ccd18..7e1a8df2dc 100644 --- a/openpype/hosts/hiero/startup/Python/Startup/otioexporter/OTIOExportTask.py +++ b/openpype/hosts/hiero/startup/Python/Startup/otioexporter/OTIOExportTask.py @@ -1,338 +1,28 @@ -# MIT License -# -# Copyright (c) 2018 Daniel Flehner Heen (Storm Studios) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. 
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. +#!/usr/bin/env python +# -*- coding: utf-8 -*- +__author__ = "Daniel Flehner Heen" +__credits__ = ["Jakub Jezek", "Daniel Flehner Heen"] import os -import re import hiero.core from hiero.core import util import opentimelineio as otio - - -marker_color_map = { - "magenta": otio.schema.MarkerColor.MAGENTA, - "red": otio.schema.MarkerColor.RED, - "yellow": otio.schema.MarkerColor.YELLOW, - "green": otio.schema.MarkerColor.GREEN, - "cyan": otio.schema.MarkerColor.CYAN, - "blue": otio.schema.MarkerColor.BLUE, -} - +from openpype.hosts.hiero.otio import hiero_export class OTIOExportTask(hiero.core.TaskBase): def __init__(self, initDict): """Initialize""" hiero.core.TaskBase.__init__(self, initDict) + self.otio_timeline = None def name(self): return str(type(self)) - def get_rate(self, item): - if not hasattr(item, 'framerate'): - item = item.sequence() - - num, den = item.framerate().toRational() - rate = float(num) / float(den) - - if rate.is_integer(): - return rate - - return round(rate, 2) - - def get_clip_ranges(self, trackitem): - # Get rate from source or sequence - if trackitem.source().mediaSource().hasVideo(): - rate_item = trackitem.source() - - else: - rate_item = trackitem.sequence() - - source_rate = self.get_rate(rate_item) - - # Reversed video/audio - if trackitem.playbackSpeed() < 0: - start = trackitem.sourceOut() - - else: - start = trackitem.sourceIn() - - source_start_time = otio.opentime.RationalTime( - start, - source_rate - ) - source_duration = 
otio.opentime.RationalTime( - trackitem.duration(), - source_rate - ) - - source_range = otio.opentime.TimeRange( - start_time=source_start_time, - duration=source_duration - ) - - hiero_clip = trackitem.source() - - available_range = None - if hiero_clip.mediaSource().isMediaPresent(): - start_time = otio.opentime.RationalTime( - hiero_clip.mediaSource().startTime(), - source_rate - ) - duration = otio.opentime.RationalTime( - hiero_clip.mediaSource().duration(), - source_rate - ) - available_range = otio.opentime.TimeRange( - start_time=start_time, - duration=duration - ) - - return source_range, available_range - - def add_gap(self, trackitem, otio_track, prev_out): - gap_length = trackitem.timelineIn() - prev_out - if prev_out != 0: - gap_length -= 1 - - rate = self.get_rate(trackitem.sequence()) - gap = otio.opentime.TimeRange( - duration=otio.opentime.RationalTime( - gap_length, - rate - ) - ) - otio_gap = otio.schema.Gap(source_range=gap) - otio_track.append(otio_gap) - - def get_marker_color(self, tag): - icon = tag.icon() - pat = r'icons:Tag(?P\w+)\.\w+' - - res = re.search(pat, icon) - if res: - color = res.groupdict().get('color') - if color.lower() in marker_color_map: - return marker_color_map[color.lower()] - - return otio.schema.MarkerColor.RED - - def add_markers(self, hiero_item, otio_item): - for tag in hiero_item.tags(): - if not tag.visible(): - continue - - if tag.name() == 'Copy': - # Hiero adds this tag to a lot of clips - continue - - frame_rate = self.get_rate(hiero_item) - - marked_range = otio.opentime.TimeRange( - start_time=otio.opentime.RationalTime( - tag.inTime(), - frame_rate - ), - duration=otio.opentime.RationalTime( - int(tag.metadata().dict().get('tag.length', '0')), - frame_rate - ) - ) - - metadata = dict( - Hiero=tag.metadata().dict() - ) - # Store the source item for future import assignment - metadata['Hiero']['source_type'] = hiero_item.__class__.__name__ - - marker = otio.schema.Marker( - name=tag.name(), - 
color=self.get_marker_color(tag), - marked_range=marked_range, - metadata=metadata - ) - - otio_item.markers.append(marker) - - def add_clip(self, trackitem, otio_track, itemindex): - hiero_clip = trackitem.source() - - # Add Gap if needed - if itemindex == 0: - prev_item = trackitem - - else: - prev_item = trackitem.parent().items()[itemindex - 1] - - clip_diff = trackitem.timelineIn() - prev_item.timelineOut() - - if itemindex == 0 and trackitem.timelineIn() > 0: - self.add_gap(trackitem, otio_track, 0) - - elif itemindex and clip_diff != 1: - self.add_gap(trackitem, otio_track, prev_item.timelineOut()) - - # Create Clip - source_range, available_range = self.get_clip_ranges(trackitem) - - otio_clip = otio.schema.Clip( - name=trackitem.name(), - source_range=source_range - ) - - # Add media reference - media_reference = otio.schema.MissingReference() - if hiero_clip.mediaSource().isMediaPresent(): - source = hiero_clip.mediaSource() - first_file = source.fileinfos()[0] - path = first_file.filename() - - if "%" in path: - path = re.sub(r"%\d+d", "%d", path) - if "#" in path: - path = re.sub(r"#+", "%d", path) - - media_reference = otio.schema.ExternalReference( - target_url=u'{}'.format(path), - available_range=available_range - ) - - otio_clip.media_reference = media_reference - - # Add Time Effects - playbackspeed = trackitem.playbackSpeed() - if playbackspeed != 1: - if playbackspeed == 0: - time_effect = otio.schema.FreezeFrame() - - else: - time_effect = otio.schema.LinearTimeWarp( - time_scalar=playbackspeed - ) - otio_clip.effects.append(time_effect) - - # Add tags as markers - if self._preset.properties()["includeTags"]: - self.add_markers(trackitem, otio_clip) - self.add_markers(trackitem.source(), otio_clip) - - otio_track.append(otio_clip) - - # Add Transition if needed - if trackitem.inTransition() or trackitem.outTransition(): - self.add_transition(trackitem, otio_track) - - def add_transition(self, trackitem, otio_track): - transitions = [] - - if 
trackitem.inTransition(): - if trackitem.inTransition().alignment().name == 'kFadeIn': - transitions.append(trackitem.inTransition()) - - if trackitem.outTransition(): - transitions.append(trackitem.outTransition()) - - for transition in transitions: - alignment = transition.alignment().name - - if alignment == 'kFadeIn': - in_offset_frames = 0 - out_offset_frames = ( - transition.timelineOut() - transition.timelineIn() - ) + 1 - - elif alignment == 'kFadeOut': - in_offset_frames = ( - trackitem.timelineOut() - transition.timelineIn() - ) + 1 - out_offset_frames = 0 - - elif alignment == 'kDissolve': - in_offset_frames = ( - transition.inTrackItem().timelineOut() - - transition.timelineIn() - ) - out_offset_frames = ( - transition.timelineOut() - - transition.outTrackItem().timelineIn() - ) - - else: - # kUnknown transition is ignored - continue - - rate = trackitem.source().framerate().toFloat() - in_time = otio.opentime.RationalTime(in_offset_frames, rate) - out_time = otio.opentime.RationalTime(out_offset_frames, rate) - - otio_transition = otio.schema.Transition( - name=alignment, # Consider placing Hiero name in metadata - transition_type=otio.schema.TransitionTypes.SMPTE_Dissolve, - in_offset=in_time, - out_offset=out_time - ) - - if alignment == 'kFadeIn': - otio_track.insert(-1, otio_transition) - - else: - otio_track.append(otio_transition) - - - def add_tracks(self): - for track in self._sequence.items(): - if isinstance(track, hiero.core.AudioTrack): - kind = otio.schema.TrackKind.Audio - - else: - kind = otio.schema.TrackKind.Video - - otio_track = otio.schema.Track(name=track.name(), kind=kind) - - for itemindex, trackitem in enumerate(track): - if isinstance(trackitem.source(), hiero.core.Clip): - self.add_clip(trackitem, otio_track, itemindex) - - self.otio_timeline.tracks.append(otio_track) - - # Add tags as markers - if self._preset.properties()["includeTags"]: - self.add_markers(self._sequence, self.otio_timeline.tracks) - - def create_OTIO(self): 
- self.otio_timeline = otio.schema.Timeline() - - # Set global start time based on sequence - self.otio_timeline.global_start_time = otio.opentime.RationalTime( - self._sequence.timecodeStart(), - self._sequence.framerate().toFloat() - ) - self.otio_timeline.name = self._sequence.name() - - self.add_tracks() - def startTask(self): - self.create_OTIO() + self.otio_timeline = hiero_export.create_otio_timeline() def taskStep(self): return False @@ -350,7 +40,7 @@ class OTIOExportTask(hiero.core.TaskBase): util.filesystem.makeDirs(dirname) # write otio file - otio.adapters.write_to_file(self.otio_timeline, exportPath) + hiero_export.write_to_file(self.otio_timeline, exportPath) # Catch all exceptions and log error except Exception as e: @@ -370,7 +60,7 @@ class OTIOExportPreset(hiero.core.TaskPresetBase): """Initialise presets to default values""" hiero.core.TaskPresetBase.__init__(self, OTIOExportTask, name) - self.properties()["includeTags"] = True + self.properties()["includeTags"] = hiero_export.include_tags = True self.properties().update(properties) def supportedItems(self): diff --git a/openpype/hosts/hiero/startup/Python/Startup/otioexporter/OTIOExportUI.py b/openpype/hosts/hiero/startup/Python/Startup/otioexporter/OTIOExportUI.py index 887ff05ec8..9b83eefedf 100644 --- a/openpype/hosts/hiero/startup/Python/Startup/otioexporter/OTIOExportUI.py +++ b/openpype/hosts/hiero/startup/Python/Startup/otioexporter/OTIOExportUI.py @@ -1,3 +1,9 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = "Daniel Flehner Heen" +__credits__ = ["Jakub Jezek", "Daniel Flehner Heen"] + import hiero.ui import OTIOExportTask @@ -14,6 +20,7 @@ except ImportError: FormLayout = QFormLayout # lint:ok +from openpype.hosts.hiero.otio import hiero_export class OTIOExportUI(hiero.ui.TaskUIBase): def __init__(self, preset): @@ -27,7 +34,7 @@ class OTIOExportUI(hiero.ui.TaskUIBase): def includeMarkersCheckboxChanged(self, state): # Slot to handle change of checkbox state - 
self._preset.properties()["includeTags"] = state == QtCore.Qt.Checked + hiero_export.include_tags = state == QtCore.Qt.Checked def populateUI(self, widget, exportTemplate): layout = widget.layout() diff --git a/openpype/hosts/hiero/startup/Python/Startup/otioexporter/__init__.py b/openpype/hosts/hiero/startup/Python/Startup/otioexporter/__init__.py index 67e6e78d35..3c09655f01 100644 --- a/openpype/hosts/hiero/startup/Python/Startup/otioexporter/__init__.py +++ b/openpype/hosts/hiero/startup/Python/Startup/otioexporter/__init__.py @@ -1,25 +1,3 @@ -# MIT License -# -# Copyright (c) 2018 Daniel Flehner Heen (Storm Studios) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
- from OTIOExportTask import OTIOExportTask from OTIOExportUI import OTIOExportUI diff --git a/openpype/hosts/hiero/startup/Python/StartupUI/otioimporter/__init__.py b/openpype/hosts/hiero/startup/Python/StartupUI/otioimporter/__init__.py index 1503a9e9ac..0f0a643909 100644 --- a/openpype/hosts/hiero/startup/Python/StartupUI/otioimporter/__init__.py +++ b/openpype/hosts/hiero/startup/Python/StartupUI/otioimporter/__init__.py @@ -1,42 +1,91 @@ -# MIT License -# -# Copyright (c) 2018 Daniel Flehner Heen (Storm Studios) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
+#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = "Daniel Flehner Heen" +__credits__ = ["Jakub Jezek", "Daniel Flehner Heen"] import hiero.ui import hiero.core -from otioimporter.OTIOImport import load_otio +import PySide2.QtWidgets as qw + +from openpype.hosts.hiero.otio.hiero_import import load_otio + + +class OTIOProjectSelect(qw.QDialog): + + def __init__(self, projects, *args, **kwargs): + super(OTIOProjectSelect, self).__init__(*args, **kwargs) + self.setWindowTitle('Please select active project') + self.layout = qw.QVBoxLayout() + + self.label = qw.QLabel( + 'Unable to determine which project to import sequence to.\n' + 'Please select one.' + ) + self.layout.addWidget(self.label) + + self.projects = qw.QComboBox() + self.projects.addItems(map(lambda p: p.name(), projects)) + self.layout.addWidget(self.projects) + + QBtn = qw.QDialogButtonBox.Ok | qw.QDialogButtonBox.Cancel + self.buttonBox = qw.QDialogButtonBox(QBtn) + self.buttonBox.accepted.connect(self.accept) + self.buttonBox.rejected.connect(self.reject) + + self.layout.addWidget(self.buttonBox) + self.setLayout(self.layout) + + +def get_sequence(view): + sequence = None + if isinstance(view, hiero.ui.TimelineEditor): + sequence = view.sequence() + + elif isinstance(view, hiero.ui.BinView): + for item in view.selection(): + if not hasattr(item, 'acitveItem'): + continue + + if isinstance(item.activeItem(), hiero.core.Sequence): + sequence = item.activeItem() + + return sequence def OTIO_menu_action(event): - otio_action = hiero.ui.createMenuAction( - 'Import OTIO', + # Menu actions + otio_import_action = hiero.ui.createMenuAction( + 'Import OTIO...', open_otio_file, icon=None ) - hiero.ui.registerAction(otio_action) + + otio_add_track_action = hiero.ui.createMenuAction( + 'New Track(s) from OTIO...', + open_otio_file, + icon=None + ) + otio_add_track_action.setEnabled(False) + + hiero.ui.registerAction(otio_import_action) + hiero.ui.registerAction(otio_add_track_action) + + view = 
hiero.ui.currentContextMenuView() + + if view: + sequence = get_sequence(view) + if sequence: + otio_add_track_action.setEnabled(True) + for action in event.menu.actions(): if action.text() == 'Import': - action.menu().addAction(otio_action) - break + action.menu().addAction(otio_import_action) + action.menu().addAction(otio_add_track_action) + + elif action.text() == 'New Track': + action.menu().addAction(otio_add_track_action) def open_otio_file(): @@ -45,8 +94,39 @@ def open_otio_file(): pattern='*.otio', requiredExtension='.otio' ) + + selection = None + sequence = None + + view = hiero.ui.currentContextMenuView() + if view: + sequence = get_sequence(view) + selection = view.selection() + + if sequence: + project = sequence.project() + + elif selection: + project = selection[0].project() + + elif len(hiero.core.projects()) > 1: + dialog = OTIOProjectSelect(hiero.core.projects()) + if dialog.exec_(): + project = hiero.core.projects()[dialog.projects.currentIndex()] + + else: + bar = hiero.ui.mainWindow().statusBar() + bar.showMessage( + 'OTIO Import aborted by user', + timeout=3000 + ) + return + + else: + project = hiero.core.projects()[-1] + for otio_file in files: - load_otio(otio_file) + load_otio(otio_file, project, sequence) # HieroPlayer is quite limited and can't create transitions etc. 
@@ -55,3 +135,7 @@ if not hiero.core.isHieroPlayer(): "kShowContextMenu/kBin", OTIO_menu_action ) + hiero.core.events.registerInterest( + "kShowContextMenu/kTimeline", + OTIO_menu_action + ) diff --git a/openpype/hosts/houdini/api/lib.py b/openpype/hosts/houdini/api/lib.py index dd586ca02d..1f0f90811f 100644 --- a/openpype/hosts/houdini/api/lib.py +++ b/openpype/hosts/houdini/api/lib.py @@ -210,7 +210,7 @@ def validate_fps(): if current_fps != fps: - from ...widgets import popup + from openpype.widgets import popup # Find main window parent = hou.ui.mainQtWindow() @@ -219,8 +219,8 @@ def validate_fps(): else: dialog = popup.Popup2(parent=parent) dialog.setModal(True) - dialog.setWindowTitle("Maya scene not in line with project") - dialog.setMessage("The FPS is out of sync, please fix") + dialog.setWindowTitle("Houdini scene not in line with project") + dialog.setMessage("The FPS is out of sync, please fix it") # Set new text for button (add optional argument for the popup?) toggle = dialog.widgets["toggle"] diff --git a/openpype/hosts/houdini/startup/MainMenuCommon.XML b/openpype/hosts/houdini/startup/MainMenuCommon.xml similarity index 100% rename from openpype/hosts/houdini/startup/MainMenuCommon.XML rename to openpype/hosts/houdini/startup/MainMenuCommon.xml diff --git a/openpype/hosts/maya/api/expected_files.py b/openpype/hosts/maya/api/expected_files.py index 186b199796..c6232f6ca4 100644 --- a/openpype/hosts/maya/api/expected_files.py +++ b/openpype/hosts/maya/api/expected_files.py @@ -184,7 +184,7 @@ class AExpectedFiles: (str): sanitized camera name Example: - >>> sanizite_camera_name('test:camera_01') + >>> AExpectedFiles.sanizite_camera_name('test:camera_01') test_camera_01 """ @@ -230,7 +230,7 @@ class AExpectedFiles: if self.layer.startswith("rs_"): layer_name = self.layer[3:] - scene_data = { + return { "frameStart": int(self.get_render_attribute("startFrame")), "frameEnd": int(self.get_render_attribute("endFrame")), "frameStep": 
int(self.get_render_attribute("byFrameStep")), @@ -245,7 +245,6 @@ class AExpectedFiles: "filePrefix": file_prefix, "enabledAOVs": self.get_aovs(), } - return scene_data def _generate_single_file_sequence( self, layer_data, force_aov_name=None): @@ -685,8 +684,6 @@ class ExpectedFilesRedshift(AExpectedFiles): """Expected files for Redshift renderer. Attributes: - ext_mapping (list): Mapping redshift extension dropdown values - to strings. unmerged_aovs (list): Name of aovs that are not merged into resulting exr and we need them specified in expectedFiles output. @@ -695,8 +692,6 @@ class ExpectedFilesRedshift(AExpectedFiles): unmerged_aovs = ["Cryptomatte"] - ext_mapping = ["iff", "exr", "tif", "png", "tga", "jpg"] - def __init__(self, layer, render_instance): """Construtor.""" super(ExpectedFilesRedshift, self).__init__(layer, render_instance) @@ -785,12 +780,10 @@ class ExpectedFilesRedshift(AExpectedFiles): # anyway. return enabled_aovs - default_ext = self.ext_mapping[ - cmds.getAttr("redshiftOptions.imageFormat") - ] + default_ext = cmds.getAttr( + "redshiftOptions.imageFormat", asString=True) rs_aovs = cmds.ls(type="RedshiftAOV", referencedNodes=False) - # todo: find out how to detect multichannel exr for redshift for aov in rs_aovs: enabled = self.maya_is_true(cmds.getAttr("{}.enabled".format(aov))) for override in self.get_layer_overrides( diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py index ae2d329a97..909993a173 100644 --- a/openpype/hosts/maya/api/lib.py +++ b/openpype/hosts/maya/api/lib.py @@ -1124,16 +1124,14 @@ def get_id_required_nodes(referenced_nodes=False, nodes=None): def get_id(node): - """ - Get the `cbId` attribute of the given node + """Get the `cbId` attribute of the given node. 
+ Args: node (str): the name of the node to retrieve the attribute from - Returns: str """ - if node is None: return @@ -1872,7 +1870,7 @@ def set_context_settings(): # Set project fps fps = asset_data.get("fps", project_data.get("fps", 25)) - api.Session["AVALON_FPS"] = fps + api.Session["AVALON_FPS"] = str(fps) set_scene_fps(fps) # Set project resolution @@ -2688,3 +2686,69 @@ def show_message(title, msg): pass else: message_window.message(title=title, message=msg, parent=parent) + + +def iter_shader_edits(relationships, shader_nodes, nodes_by_id, label=None): + """Yield edits as a set of actions.""" + + attributes = relationships.get("attributes", []) + shader_data = relationships.get("relationships", {}) + + shading_engines = cmds.ls(shader_nodes, type="objectSet", long=True) + assert shading_engines, "Error in retrieving objectSets from reference" + + # region compute lookup + shading_engines_by_id = defaultdict(list) + for shad in shading_engines: + shading_engines_by_id[get_id(shad)].append(shad) + # endregion + + # region assign shading engines and other sets + for data in shader_data.values(): + # collect all unique IDs of the set members + shader_uuid = data["uuid"] + member_uuids = [ + (member["uuid"], member.get("components")) + for member in data["members"]] + + filtered_nodes = list() + for _uuid, components in member_uuids: + nodes = nodes_by_id.get(_uuid, None) + if nodes is None: + continue + + if components: + # Assign to the components + nodes = [".".join([node, components]) for node in nodes] + + filtered_nodes.extend(nodes) + + id_shading_engines = shading_engines_by_id[shader_uuid] + if not id_shading_engines: + log.error("{} - No shader found with cbId " + "'{}'".format(label, shader_uuid)) + continue + elif len(id_shading_engines) > 1: + log.error("{} - Skipping shader assignment. " + "More than one shader found with cbId " + "'{}'. 
(found: {})".format(label, shader_uuid, + id_shading_engines)) + continue + + if not filtered_nodes: + log.warning("{} - No nodes found for shading engine " + "'{}'".format(label, id_shading_engines[0])) + continue + + yield {"action": "assign", + "uuid": data["uuid"], + "nodes": filtered_nodes, + "shader": id_shading_engines[0]} + + for data in attributes: + nodes = nodes_by_id.get(data["uuid"], []) + attr_value = data["attributes"] + yield {"action": "setattr", + "uuid": data["uuid"], + "nodes": nodes, + "attributes": attr_value} diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 907f9cf781..cbca091365 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -12,7 +12,7 @@ from openpype.hosts.maya.api import ( lib, plugin ) -from openpype.api import get_system_settings +from openpype.api import (get_system_settings, get_asset) class CreateRender(plugin.Creator): @@ -104,7 +104,7 @@ class CreateRender(plugin.Creator): # namespace is not empty, so we leave it untouched pass - while(cmds.namespace(exists=namespace_name)): + while cmds.namespace(exists=namespace_name): namespace_name = "_{}{}".format(str(instance), index) index += 1 @@ -125,7 +125,7 @@ class CreateRender(plugin.Creator): cmds.sets(sets, forceElement=instance) # if no render layers are present, create default one with - # asterix selector + # asterisk selector if not layers: render_layer = self._rs.createRenderLayer('Main') collection = render_layer.createCollection("defaultCollection") @@ -137,9 +137,7 @@ class CreateRender(plugin.Creator): if renderer.startswith('renderman'): renderer = 'renderman' - cmds.setAttr(self._image_prefix_nodes[renderer], - self._image_prefixes[renderer], - type="string") + self._set_default_renderer_settings(renderer) def _create_render_settings(self): # get pools @@ -318,3 +316,86 @@ class CreateRender(plugin.Creator): False if 
os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True ) # noqa return requests.get(*args, **kwargs) + + def _set_default_renderer_settings(self, renderer): + """Set basic settings based on renderer. + + Args: + renderer (str): Renderer name. + + """ + cmds.setAttr(self._image_prefix_nodes[renderer], + self._image_prefixes[renderer], + type="string") + + asset = get_asset() + + if renderer == "arnold": + # set format to exr + cmds.setAttr( + "defaultArnoldDriver.ai_translator", "exr", type="string") + # enable animation + cmds.setAttr("defaultRenderGlobals.outFormatControl", 0) + cmds.setAttr("defaultRenderGlobals.animation", 1) + cmds.setAttr("defaultRenderGlobals.putFrameBeforeExt", 1) + cmds.setAttr("defaultRenderGlobals.extensionPadding", 4) + + # resolution + cmds.setAttr( + "defaultResolution.width", + asset["data"].get("resolutionWidth")) + cmds.setAttr( + "defaultResolution.height", + asset["data"].get("resolutionHeight")) + + if renderer == "vray": + vray_settings = cmds.ls(type="VRaySettingsNode") + if not vray_settings: + node = cmds.createNode("VRaySettingsNode") + else: + node = vray_settings[0] + + # set underscore as element separator instead of default `.` + cmds.setAttr( + "{}.fileNameRenderElementSeparator".format( + node), + "_" + ) + # set format to exr + cmds.setAttr( + "{}.imageFormatStr".format(node), 5) + + # animType + cmds.setAttr( + "{}.animType".format(node), 1) + + # resolution + cmds.setAttr( + "{}.width".format(node), + asset["data"].get("resolutionWidth")) + cmds.setAttr( + "{}.height".format(node), + asset["data"].get("resolutionHeight")) + + if renderer == "redshift": + redshift_settings = cmds.ls(type="RedshiftOptions") + if not redshift_settings: + node = cmds.createNode("RedshiftOptions") + else: + node = redshift_settings[0] + + # set exr + cmds.setAttr("{}.imageFormat".format(node), 1) + # resolution + cmds.setAttr( + "defaultResolution.width", + asset["data"].get("resolutionWidth")) + cmds.setAttr( + 
"defaultResolution.height", + asset["data"].get("resolutionHeight")) + + # enable animation + cmds.setAttr("defaultRenderGlobals.outFormatControl", 0) + cmds.setAttr("defaultRenderGlobals.animation", 1) + cmds.setAttr("defaultRenderGlobals.putFrameBeforeExt", 1) + cmds.setAttr("defaultRenderGlobals.extensionPadding", 4) diff --git a/openpype/hosts/maya/plugins/load/load_look.py b/openpype/hosts/maya/plugins/load/load_look.py index c39bbc497e..fca612eff4 100644 --- a/openpype/hosts/maya/plugins/load/load_look.py +++ b/openpype/hosts/maya/plugins/load/load_look.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- +"""Look loader.""" import openpype.hosts.maya.api.plugin from avalon import api, io import json diff --git a/openpype/hosts/maya/plugins/load/load_reference.py b/openpype/hosts/maya/plugins/load/load_reference.py index 61e0290296..37a2b145d4 100644 --- a/openpype/hosts/maya/plugins/load/load_reference.py +++ b/openpype/hosts/maya/plugins/load/load_reference.py @@ -19,7 +19,6 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): "rig", "camerarig"] representations = ["ma", "abc", "fbx", "mb"] - tool_names = ["loader"] label = "Reference" order = -10 diff --git a/openpype/hosts/maya/plugins/load/load_vrayproxy.py b/openpype/hosts/maya/plugins/load/load_vrayproxy.py index 2bff6e0a77..d5d4a941e3 100644 --- a/openpype/hosts/maya/plugins/load/load_vrayproxy.py +++ b/openpype/hosts/maya/plugins/load/load_vrayproxy.py @@ -1,12 +1,21 @@ -from avalon.maya import lib -from avalon import api -from openpype.api import get_project_settings +# -*- coding: utf-8 -*- +"""Loader for Vray Proxy files. + +If there are Alembics published along vray proxy (in the same version), +loader will use them instead of native vray vrmesh format. 
+ +""" import os + import maya.cmds as cmds +from avalon.maya import lib +from avalon import api, io +from openpype.api import get_project_settings + class VRayProxyLoader(api.Loader): - """Load VRayMesh proxy""" + """Load VRay Proxy with Alembic or VrayMesh.""" families = ["vrayproxy"] representations = ["vrmesh"] @@ -16,8 +25,17 @@ class VRayProxyLoader(api.Loader): icon = "code-fork" color = "orange" - def load(self, context, name, namespace, data): + def load(self, context, name=None, namespace=None, options=None): + # type: (dict, str, str, dict) -> None + """Loader entry point. + Args: + context (dict): Loaded representation context. + name (str): Name of container. + namespace (str): Optional namespace name. + options (dict): Optional loader options. + + """ from avalon.maya.pipeline import containerise from openpype.hosts.maya.api.lib import namespaced @@ -26,6 +44,9 @@ class VRayProxyLoader(api.Loader): except ValueError: family = "vrayproxy" + # get all representations for this version + self.fname = self._get_abc(context["version"]["_id"]) or self.fname + asset_name = context['asset']["name"] namespace = namespace or lib.unique_namespace( asset_name + "_", @@ -39,8 +60,8 @@ class VRayProxyLoader(api.Loader): with lib.maintained_selection(): cmds.namespace(addNamespace=namespace) with namespaced(namespace, new=False): - nodes, group_node = self.create_vray_proxy(name, - filename=self.fname) + nodes, group_node = self.create_vray_proxy( + name, filename=self.fname) self[:] = nodes if not nodes: @@ -63,7 +84,8 @@ class VRayProxyLoader(api.Loader): loader=self.__class__.__name__) def update(self, container, representation): - + # type: (dict, dict) -> None + """Update container with specified representation.""" node = container['objectName'] assert cmds.objExists(node), "Missing container" @@ -71,7 +93,8 @@ class VRayProxyLoader(api.Loader): vraymeshes = cmds.ls(members, type="VRayMesh") assert vraymeshes, "Cannot find VRayMesh in container" - filename = 
api.get_representation_path(representation) + # get all representations for this version + filename = self._get_abc(representation["parent"]) or api.get_representation_path(representation) # noqa: E501 for vray_mesh in vraymeshes: cmds.setAttr("{}.fileName".format(vray_mesh), @@ -84,7 +107,8 @@ class VRayProxyLoader(api.Loader): type="string") def remove(self, container): - + # type: (dict) -> None + """Remove loaded container.""" # Delete container and its contents if cmds.objExists(container['objectName']): members = cmds.sets(container['objectName'], query=True) or [] @@ -101,61 +125,62 @@ class VRayProxyLoader(api.Loader): "still has members: %s", namespace) def switch(self, container, representation): + # type: (dict, dict) -> None + """Switch loaded representation.""" self.update(container, representation) def create_vray_proxy(self, name, filename): + # type: (str, str) -> (list, str) """Re-create the structure created by VRay to support vrmeshes Args: - name(str): name of the asset + name (str): Name of the asset. + filename (str): File name of vrmesh. 
Returns: nodes(list) + """ - # Create nodes - vray_mesh = cmds.createNode('VRayMesh', name="{}_VRMS".format(name)) - mesh_shape = cmds.createNode("mesh", name="{}_GEOShape".format(name)) - vray_mat = cmds.shadingNode("VRayMeshMaterial", asShader=True, - name="{}_VRMM".format(name)) - vray_mat_sg = cmds.sets(name="{}_VRSG".format(name), - empty=True, - renderable=True, - noSurfaceShader=True) + if name is None: + name = os.path.splitext(os.path.basename(filename))[0] - cmds.setAttr("{}.fileName".format(vray_mesh), - filename, - type="string") + parent = cmds.createNode("transform", name=name) + proxy = cmds.createNode( + "VRayProxy", name="{}Shape".format(name), parent=parent) + cmds.setAttr(proxy + ".fileName", filename, type="string") + cmds.connectAttr("time1.outTime", proxy + ".currentFrame") - # Create important connections - cmds.connectAttr("time1.outTime", - "{0}.currentFrame".format(vray_mesh)) - cmds.connectAttr("{}.fileName2".format(vray_mesh), - "{}.fileName".format(vray_mat)) - cmds.connectAttr("{}.instancing".format(vray_mesh), - "{}.instancing".format(vray_mat)) - cmds.connectAttr("{}.output".format(vray_mesh), - "{}.inMesh".format(mesh_shape)) - cmds.connectAttr("{}.overrideFileName".format(vray_mesh), - "{}.overrideFileName".format(vray_mat)) - cmds.connectAttr("{}.currentFrame".format(vray_mesh), - "{}.currentFrame".format(vray_mat)) + return [parent, proxy], parent - # Set surface shader input - cmds.connectAttr("{}.outColor".format(vray_mat), - "{}.surfaceShader".format(vray_mat_sg)) + def _get_abc(self, version_id): + # type: (str) -> str + """Get abc representation file path if present. - # Connect mesh to shader - cmds.sets([mesh_shape], addElement=vray_mat_sg) + If here is published Alembic (abc) representation published along + vray proxy, get is file path. 
- group_node = cmds.group(empty=True, name="{}_GRP".format(name)) - mesh_transform = cmds.listRelatives(mesh_shape, - parent=True, fullPath=True) - cmds.parent(mesh_transform, group_node) - nodes = [vray_mesh, mesh_shape, vray_mat, vray_mat_sg, group_node] + Args: + version_id (str): Version hash id. - # Fix: Force refresh so the mesh shows correctly after creation - cmds.refresh() - cmds.setAttr("{}.geomType".format(vray_mesh), 2) + Returns: + str: Path to file. + None: If abc not found. - return nodes, group_node + """ + self.log.debug( + "Looking for abc in published representations of this version.") + abc_rep = io.find_one( + { + "type": "representation", + "parent": io.ObjectId(version_id), + "name": "abc" + }) + + if abc_rep: + self.log.debug("Found, we'll link alembic to vray proxy.") + file_name = api.get_representation_path(abc_rep) + self.log.debug("File: {}".format(self.fname)) + return file_name + + return "" diff --git a/openpype/hosts/maya/plugins/publish/collect_look.py b/openpype/hosts/maya/plugins/publish/collect_look.py index 238213c000..bf24b463ac 100644 --- a/openpype/hosts/maya/plugins/publish/collect_look.py +++ b/openpype/hosts/maya/plugins/publish/collect_look.py @@ -348,6 +348,13 @@ class CollectLook(pyblish.api.InstancePlugin): history = [] for material in materials: history.extend(cmds.listHistory(material)) + + # handle VrayPluginNodeMtl node - see #1397 + vray_plugin_nodes = cmds.ls( + history, type="VRayPluginNodeMtl", long=True) + for vray_node in vray_plugin_nodes: + history.extend(cmds.listHistory(vray_node)) + files = cmds.ls(history, type="file", long=True) files.extend(cmds.ls(history, type="aiImage", long=True)) diff --git a/openpype/hosts/maya/plugins/publish/collect_remove_marked.py b/openpype/hosts/maya/plugins/publish/collect_remove_marked.py index a45c8e45a7..69e69f6630 100644 --- a/openpype/hosts/maya/plugins/publish/collect_remove_marked.py +++ b/openpype/hosts/maya/plugins/publish/collect_remove_marked.py @@ -2,14 +2,9 
@@ import pyblish.api class CollectRemoveMarked(pyblish.api.ContextPlugin): - """Collect model data + """Remove marked data - Ensures always only a single frame is extracted (current frame). - - Note: - This is a workaround so that the `pype.model` family can use the - same pointcache extractor implementation as animation and pointcaches. - This always enforces the "current" frame to be published. + Remove instances that have 'remove' in their instance.data """ diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index 75749a952e..647a46e240 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -358,9 +358,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin): options["extendFrames"] = extend_frames options["overrideExistingFrame"] = override_frames - maya_render_plugin = "MayaPype" - if attributes.get("useMayaBatch", True): - maya_render_plugin = "MayaBatch" + maya_render_plugin = "MayaBatch" options["mayaRenderPlugin"] = maya_render_plugin diff --git a/openpype/hosts/maya/plugins/publish/collect_vrayproxy.py b/openpype/hosts/maya/plugins/publish/collect_vrayproxy.py new file mode 100644 index 0000000000..236797ca3c --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/collect_vrayproxy.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +"""Collect Vray Proxy.""" +import pyblish.api + + +class CollectVrayProxy(pyblish.api.InstancePlugin): + """Collect Vray Proxy instance. + + Add `pointcache` family for it. 
+ """ + order = pyblish.api.CollectorOrder + 0.01 + label = 'Collect Vray Proxy' + families = ["vrayproxy"] + + def process(self, instance): + """Collector entry point.""" + if not instance.data.get('families'): + instance.data["families"] = [] diff --git a/openpype/hosts/maya/plugins/publish/extract_playblast.py b/openpype/hosts/maya/plugins/publish/extract_playblast.py index 358fca6c2a..0dc91d67a9 100644 --- a/openpype/hosts/maya/plugins/publish/extract_playblast.py +++ b/openpype/hosts/maya/plugins/publish/extract_playblast.py @@ -96,19 +96,25 @@ class ExtractPlayblast(openpype.api.Extractor): # Remove panel key since it's internal value to capture_gui preset.pop("panel", None) - self.log.info('using viewport preset: {}'.format(preset)) path = capture.capture(**preset) - playblast = self._fix_playblast_output_path(path) - self.log.info("file list {}".format(playblast)) + self.log.debug("playblast path {}".format(path)) - collected_frames = os.listdir(stagingdir) - collections, remainder = clique.assemble(collected_frames) - input_path = os.path.join( - stagingdir, collections[0].format('{head}{padding}{tail}')) - self.log.info("input {}".format(input_path)) + collected_files = os.listdir(stagingdir) + collections, remainder = clique.assemble(collected_files) + + self.log.debug("filename {}".format(filename)) + frame_collection = None + for collection in collections: + filebase = collection.format('{head}').rstrip(".") + self.log.debug("collection head {}".format(filebase)) + if filebase in filename: + frame_collection = collection + self.log.info( + "we found collection of interest {}".format( + str(frame_collection))) if "representations" not in instance.data: instance.data["representations"] = [] @@ -119,12 +125,11 @@ class ExtractPlayblast(openpype.api.Extractor): # Add camera node name to representation data camera_node_name = pm.ls(camera)[0].getTransform().name() - representation = { 'name': 'png', 'ext': 'png', - 'files': collected_frames, + 'files': 
list(frame_collection), "stagingDir": stagingdir, "frameStart": start, "frameEnd": end, @@ -135,44 +140,6 @@ class ExtractPlayblast(openpype.api.Extractor): } instance.data["representations"].append(representation) - def _fix_playblast_output_path(self, filepath): - """Workaround a bug in maya.cmds.playblast to return correct filepath. - - When the `viewer` argument is set to False and maya.cmds.playblast - does not automatically open the playblasted file the returned - filepath does not have the file's extension added correctly. - - To workaround this we just glob.glob() for any file extensions and - assume the latest modified file is the correct file and return it. - """ - # Catch cancelled playblast - if filepath is None: - self.log.warning("Playblast did not result in output path. " - "Playblast is probably interrupted.") - return None - - # Fix: playblast not returning correct filename (with extension) - # Lets assume the most recently modified file is the correct one. - if not os.path.exists(filepath): - directory = os.path.dirname(filepath) - filename = os.path.basename(filepath) - # check if the filepath is has frame based filename - # example : capture.####.png - parts = filename.split(".") - if len(parts) == 3: - query = os.path.join(directory, "{}.*.{}".format(parts[0], - parts[-1])) - files = glob.glob(query) - else: - files = glob.glob("{}.*".format(filepath)) - - if not files: - raise RuntimeError("Couldn't find playblast from: " - "{0}".format(filepath)) - filepath = max(files, key=os.path.getmtime) - - return filepath - @contextlib.contextmanager def maintained_time(): diff --git a/openpype/hosts/maya/plugins/publish/extract_pointcache.py b/openpype/hosts/maya/plugins/publish/extract_pointcache.py index dea52f2154..ba716c0d18 100644 --- a/openpype/hosts/maya/plugins/publish/extract_pointcache.py +++ b/openpype/hosts/maya/plugins/publish/extract_pointcache.py @@ -18,7 +18,8 @@ class ExtractAlembic(openpype.api.Extractor): label = "Extract Pointcache 
(Alembic)" hosts = ["maya"] families = ["pointcache", - "model"] + "model", + "vrayproxy"] def process(self, instance): diff --git a/openpype/hosts/maya/plugins/publish/extract_redshift_proxy.py b/openpype/hosts/maya/plugins/publish/extract_redshift_proxy.py index d0c6c4eb14..7c9e201986 100644 --- a/openpype/hosts/maya/plugins/publish/extract_redshift_proxy.py +++ b/openpype/hosts/maya/plugins/publish/extract_redshift_proxy.py @@ -74,6 +74,8 @@ class ExtractRedshiftProxy(openpype.api.Extractor): 'files': repr_files, "stagingDir": staging_dir, } + if anim_on: + representation["frameStart"] = instance.data["proxyFrameStart"] instance.data["representations"].append(representation) self.log.info("Extracted instance '%s' to: %s" diff --git a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py index 35d720726b..016efa6499 100644 --- a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py +++ b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py @@ -26,15 +26,11 @@ class ExtractThumbnail(openpype.api.Extractor): def process(self, instance): self.log.info("Extracting capture..") - start = cmds.currentTime(query=True) - end = cmds.currentTime(query=True) - self.log.info("start: {}, end: {}".format(start, end)) - camera = instance.data['review_camera'] capture_preset = "" capture_preset = ( - instance.context.data["project_settings"]['maya']['publish']['ExtractPlayblast'] + instance.context.data["project_settings"]['maya']['publish']['ExtractPlayblast']['capture_preset'] ) try: @@ -50,8 +46,8 @@ class ExtractThumbnail(openpype.api.Extractor): # preset['compression'] = "qt" preset['quality'] = 50 preset['compression'] = "jpg" - preset['start_frame'] = start - preset['end_frame'] = end + preset['start_frame'] = instance.data["frameStart"] + preset['end_frame'] = instance.data["frameStart"] preset['camera_options'] = { "displayGateMask": False, "displayResolution": False, diff --git 
a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py index ba676bee83..9aeaad7ff1 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py +++ b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py @@ -1,8 +1,9 @@ -import os +# -*- coding: utf-8 -*- +"""Maya validator for render settings.""" import re +from collections import OrderedDict from maya import cmds, mel -import pymel.core as pm import pyblish.api import openpype.api @@ -60,6 +61,8 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): 'renderman': '_..' } + redshift_AOV_prefix = "/_" + # WARNING: There is bug? in renderman, translating token # to something left behind mayas default image prefix. So instead # `SceneName_v01` it translates to: @@ -120,25 +123,59 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): "doesn't have: '' or " "'' token".format(prefix)) - if len(cameras) > 1: - if not re.search(cls.R_CAMERA_TOKEN, prefix): - invalid = True - cls.log.error("Wrong image prefix [ {} ] - " - "doesn't have: '' token".format(prefix)) + if len(cameras) > 1 and not re.search(cls.R_CAMERA_TOKEN, prefix): + invalid = True + cls.log.error("Wrong image prefix [ {} ] - " + "doesn't have: '' token".format(prefix)) # renderer specific checks if renderer == "vray": - # no vray checks implemented yet - pass - elif renderer == "redshift": + vray_settings = cmds.ls(type="VRaySettingsNode") + if not vray_settings: + node = cmds.createNode("VRaySettingsNode") + else: + node = vray_settings[0] + + if cmds.getAttr( + "{}.fileNameRenderElementSeparator".format(node)) != "_": + invalid = False + cls.log.error("AOV separator is not set correctly.") + + if renderer == "redshift": if re.search(cls.R_AOV_TOKEN, prefix): invalid = True - cls.log.error("Do not use AOV token [ {} ] - " - "Redshift automatically append AOV name and " - "it doesn't make much sense with " - "Multipart EXR".format(prefix)) + 
cls.log.error(("Do not use AOV token [ {} ] - " + "Redshift is using image prefixes per AOV so " + "it doesn't make much sense using it in global" + "image prefix").format(prefix)) + # get redshift AOVs + rs_aovs = cmds.ls(type="RedshiftAOV", referencedNodes=False) + for aov in rs_aovs: + aov_prefix = cmds.getAttr("{}.filePrefix".format(aov)) + # check their image prefix + if aov_prefix != cls.redshift_AOV_prefix: + cls.log.error(("AOV ({}) image prefix is not set " + "correctly {} != {}").format( + cmds.getAttr("{}.name".format(aov)), + cmds.getAttr("{}.filePrefix".format(aov)), + aov_prefix + )) + invalid = True + # get aov format + aov_ext = cmds.getAttr( + "{}.fileFormat".format(aov), asString=True) - elif renderer == "renderman": + default_ext = cmds.getAttr( + "redshiftOptions.imageFormat", asString=True) + + if default_ext != aov_ext: + cls.log.error(("AOV file format is not the same " + "as the one set globally " + "{} != {}").format(default_ext, + aov_ext)) + invalid = True + + if renderer == "renderman": file_prefix = cmds.getAttr("rmanGlobals.imageFileFormat") dir_prefix = cmds.getAttr("rmanGlobals.imageOutputDir") @@ -151,7 +188,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): cls.log.error("Wrong directory prefix [ {} ]".format( dir_prefix)) - else: + if renderer == "arnold": multipart = cmds.getAttr("defaultArnoldDriver.mergeAOVs") if multipart: if re.search(cls.R_AOV_TOKEN, prefix): @@ -177,6 +214,43 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): cls.log.error("Expecting padding of {} ( {} )".format( cls.DEFAULT_PADDING, "0" * cls.DEFAULT_PADDING)) + # load validation definitions from settings + validation_settings = ( + instance.context.data["project_settings"]["maya"]["publish"]["ValidateRenderSettings"].get( # noqa: E501 + "{}_render_attributes".format(renderer)) + ) + + # go through definitions and test if such node.attribute exists. + # if so, compare its value from the one required. 
+ for attr, value in OrderedDict(validation_settings).items(): + # first get node of that type + cls.log.debug("{}: {}".format(attr, value)) + node_type = attr.split(".")[0] + attribute_name = ".".join(attr.split(".")[1:]) + nodes = cmds.ls(type=node_type) + + if not isinstance(nodes, list): + cls.log.warning("No nodes of '{}' found.".format(node_type)) + continue + + for node in nodes: + try: + render_value = cmds.getAttr( + "{}.{}".format(node, attribute_name)) + except RuntimeError: + invalid = True + cls.log.error( + "Cannot get value of {}.{}".format( + node, attribute_name)) + else: + if value != render_value: + invalid = True + cls.log.error( + ("Invalid value {} set on {}.{}. " + "Expecting {}").format( + render_value, node, attribute_name, value) + ) + return invalid @classmethod @@ -210,3 +284,29 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): cmds.setAttr("rmanGlobals.imageOutputDir", cls.RendermanDirPrefix, type="string") + + if renderer == "vray": + vray_settings = cmds.ls(type="VRaySettingsNode") + if not vray_settings: + node = cmds.createNode("VRaySettingsNode") + else: + node = vray_settings[0] + + cmds.setAttr( + "{}.fileNameRenderElementSeparator".format( + node), + "_" + ) + + if renderer == "redshift": + # get redshift AOVs + rs_aovs = cmds.ls(type="RedshiftAOV", referencedNodes=False) + for aov in rs_aovs: + # fix AOV prefixes + cmds.setAttr( + "{}.filePrefix".format(aov), cls.redshift_AOV_prefix) + # fix AOV file format + default_ext = cmds.getAttr( + "redshiftOptions.imageFormat", asString=True) + cmds.setAttr( + "{}.fileFormat".format(aov), default_ext) diff --git a/openpype/hosts/maya/startup/userSetup.py b/openpype/hosts/maya/startup/userSetup.py index d556a89fa3..6d27c66882 100644 --- a/openpype/hosts/maya/startup/userSetup.py +++ b/openpype/hosts/maya/startup/userSetup.py @@ -10,7 +10,6 @@ print("starting OpenPype usersetup") settings = get_project_settings(os.environ['AVALON_PROJECT']) shelf_preset = 
settings['maya'].get('project_shelf') - if shelf_preset: project = os.environ["AVALON_PROJECT"] @@ -23,7 +22,7 @@ if shelf_preset: print(import_string) exec(import_string) -cmds.evalDeferred("mlib.shelf(name=shelf_preset['name'], iconPath=icon_path, preset=shelf_preset)") + cmds.evalDeferred("mlib.shelf(name=shelf_preset['name'], iconPath=icon_path, preset=shelf_preset)") print("finished OpenPype usersetup") diff --git a/openpype/hosts/nuke/api/__init__.py b/openpype/hosts/nuke/api/__init__.py index c80507e7ea..bd7a95f916 100644 --- a/openpype/hosts/nuke/api/__init__.py +++ b/openpype/hosts/nuke/api/__init__.py @@ -106,7 +106,7 @@ def on_pyblish_instance_toggled(instance, old_value, new_value): log.info("instance toggle: {}, old_value: {}, new_value:{} ".format( instance, old_value, new_value)) - from avalon.api.nuke import ( + from avalon.nuke import ( viewer_update_and_undo_stop, add_publish_knob ) diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py index 34337f726f..ea6476485b 100644 --- a/openpype/hosts/nuke/api/lib.py +++ b/openpype/hosts/nuke/api/lib.py @@ -1,6 +1,8 @@ import os import re import sys +import six +import platform from collections import OrderedDict @@ -19,7 +21,6 @@ from openpype.api import ( get_hierarchy, get_asset, get_current_project_settings, - config, ApplicationManager ) @@ -29,36 +30,34 @@ from .utils import set_context_favorites log = Logger().get_logger(__name__) -self = sys.modules[__name__] -self._project = None -self.workfiles_launched = False -self._node_tab_name = "{}".format(os.getenv("AVALON_LABEL") or "Avalon") +opnl = sys.modules[__name__] +opnl._project = None +opnl.project_name = os.getenv("AVALON_PROJECT") +opnl.workfiles_launched = False +opnl._node_tab_name = "{}".format(os.getenv("AVALON_LABEL") or "Avalon") -def get_node_imageio_setting(**kwarg): +def get_created_node_imageio_setting(**kwarg): ''' Get preset data for dataflow (fileType, compression, bitDepth) ''' - log.info(kwarg) - host = 
str(kwarg.get("host", "nuke")) + log.debug(kwarg) nodeclass = kwarg.get("nodeclass", None) creator = kwarg.get("creator", None) - project_name = os.getenv("AVALON_PROJECT") - assert any([host, nodeclass]), nuke.message( + assert any([creator, nodeclass]), nuke.message( "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__)) - imageio_nodes = (get_anatomy_settings(project_name) - ["imageio"] - .get(host, None) - ["nodes"] - ["requiredNodes"] - ) + imageio = get_anatomy_settings(opnl.project_name)["imageio"] + imageio_nodes = imageio["nuke"]["nodes"]["requiredNodes"] + imageio_node = None for node in imageio_nodes: log.info(node) - if node["nukeNodeClass"] == nodeclass: - if creator in node["plugins"]: - imageio_node = node + if (node["nukeNodeClass"] != nodeclass) and ( + creator not in node["plugins"]): + continue + + imageio_node = node log.info("ImageIO node: {}".format(imageio_node)) return imageio_node @@ -67,12 +66,9 @@ def get_node_imageio_setting(**kwarg): def get_imageio_input_colorspace(filename): ''' Get input file colorspace based on regex in settings. 
''' - imageio_regex_inputs = (get_anatomy_settings(os.getenv("AVALON_PROJECT")) - ["imageio"] - ["nuke"] - ["regexInputs"] - ["inputs"] - ) + imageio_regex_inputs = ( + get_anatomy_settings(opnl.project_name) + ["imageio"]["nuke"]["regexInputs"]["inputs"]) preset_clrsp = None for regexInput in imageio_regex_inputs: @@ -104,40 +100,39 @@ def check_inventory_versions(): """ # get all Loader nodes by avalon attribute metadata for each in nuke.allNodes(): - if each.Class() == 'Read': - container = avalon.nuke.parse_container(each) + container = avalon.nuke.parse_container(each) - if container: - node = nuke.toNode(container["objectName"]) - avalon_knob_data = avalon.nuke.read( - node) + if container: + node = nuke.toNode(container["objectName"]) + avalon_knob_data = avalon.nuke.read( + node) - # get representation from io - representation = io.find_one({ - "type": "representation", - "_id": io.ObjectId(avalon_knob_data["representation"]) - }) + # get representation from io + representation = io.find_one({ + "type": "representation", + "_id": io.ObjectId(avalon_knob_data["representation"]) + }) - # Get start frame from version data - version = io.find_one({ - "type": "version", - "_id": representation["parent"] - }) + # Get start frame from version data + version = io.find_one({ + "type": "version", + "_id": representation["parent"] + }) - # get all versions in list - versions = io.find({ - "type": "version", - "parent": version["parent"] - }).distinct('name') + # get all versions in list + versions = io.find({ + "type": "version", + "parent": version["parent"] + }).distinct('name') - max_version = max(versions) + max_version = max(versions) - # check the available version and do match - # change color of node if not max verion - if version.get("name") not in [max_version]: - node["tile_color"].setValue(int("0xd84f20ff", 16)) - else: - node["tile_color"].setValue(int("0x4ecd25ff", 16)) + # check the available version and do match + # change color of node if not max 
verion + if version.get("name") not in [max_version]: + node["tile_color"].setValue(int("0xd84f20ff", 16)) + else: + node["tile_color"].setValue(int("0x4ecd25ff", 16)) def writes_version_sync(): @@ -153,34 +148,33 @@ def writes_version_sync(): except Exception: return - for each in nuke.allNodes(): - if each.Class() == 'Write': - # check if the node is avalon tracked - if self._node_tab_name not in each.knobs(): + for each in nuke.allNodes(filter="Write"): + # check if the node is avalon tracked + if opnl._node_tab_name not in each.knobs(): + continue + + avalon_knob_data = avalon.nuke.read( + each) + + try: + if avalon_knob_data['families'] not in ["render"]: + log.debug(avalon_knob_data['families']) continue - avalon_knob_data = avalon.nuke.read( - each) + node_file = each['file'].value() - try: - if avalon_knob_data['families'] not in ["render"]: - log.debug(avalon_knob_data['families']) - continue + node_version = "v" + get_version_from_path(node_file) + log.debug("node_version: {}".format(node_version)) - node_file = each['file'].value() - - node_version = "v" + get_version_from_path(node_file) - log.debug("node_version: {}".format(node_version)) - - node_new_file = node_file.replace(node_version, new_version) - each['file'].setValue(node_new_file) - if not os.path.isdir(os.path.dirname(node_new_file)): - log.warning("Path does not exist! I am creating it.") - os.makedirs(os.path.dirname(node_new_file)) - except Exception as e: - log.warning( - "Write node: `{}` has no version in path: {}".format( - each.name(), e)) + node_new_file = node_file.replace(node_version, new_version) + each['file'].setValue(node_new_file) + if not os.path.isdir(os.path.dirname(node_new_file)): + log.warning("Path does not exist! 
I am creating it.") + os.makedirs(os.path.dirname(node_new_file)) + except Exception as e: + log.warning( + "Write node: `{}` has no version in path: {}".format( + each.name(), e)) def version_up_script(): @@ -201,24 +195,22 @@ def check_subsetname_exists(nodes, subset_name): Returns: bool: True of False """ - result = next((True for n in nodes - if subset_name in avalon.nuke.read(n).get("subset", "")), False) - return result + return next((True for n in nodes + if subset_name in avalon.nuke.read(n).get("subset", "")), + False) def get_render_path(node): ''' Generate Render path from presets regarding avalon knob data ''' - data = dict() - data['avalon'] = avalon.nuke.read( - node) - + data = {'avalon': avalon.nuke.read(node)} data_preset = { - "class": data['avalon']['family'], - "preset": data['avalon']['families'] + "nodeclass": data['avalon']['family'], + "families": [data['avalon']['families']], + "creator": data['avalon']['creator'] } - nuke_imageio_writes = get_node_imageio_setting(**data_preset) + nuke_imageio_writes = get_created_node_imageio_setting(**data_preset) application = lib.get_application(os.environ["AVALON_APP_NAME"]) data.update({ @@ -324,7 +316,7 @@ def create_write_node(name, data, input=None, prenodes=None, review=True): node (obj): group node with avalon data as Knobs ''' - imageio_writes = get_node_imageio_setting(**data) + imageio_writes = get_created_node_imageio_setting(**data) app_manager = ApplicationManager() app_name = os.environ.get("AVALON_APP_NAME") if app_name: @@ -367,8 +359,7 @@ def create_write_node(name, data, input=None, prenodes=None, review=True): # adding dataflow template log.debug("imageio_writes: `{}`".format(imageio_writes)) for knob in imageio_writes["knobs"]: - if knob["name"] not in ["_id", "_previous"]: - _data.update({knob["name"]: knob["value"]}) + _data.update({knob["name"]: knob["value"]}) _data = anlib.fix_data_for_node_create(_data) @@ -382,21 +373,16 @@ def create_write_node(name, data, input=None, 
prenodes=None, review=True): prev_node = None with GN: - connections = list() if input: + input_name = str(input.name()).replace(" ", "") # if connected input node was defined - connections.append({ - "node": input, - "inputName": input.name()}) prev_node = nuke.createNode( - "Input", "name {}".format(input.name())) - prev_node.hideControlPanel() + "Input", "name {}".format(input_name)) else: # generic input node connected to nothing prev_node = nuke.createNode( "Input", "name {}".format("rgba")) - prev_node.hideControlPanel() - + prev_node.hideControlPanel() # creating pre-write nodes `prenodes` if prenodes: for name, klass, properties, set_output_to in prenodes: @@ -425,18 +411,12 @@ def create_write_node(name, data, input=None, prenodes=None, review=True): input_node = nuke.createNode( "Input", "name {}".format(node_name)) input_node.hideControlPanel() - connections.append({ - "node": nuke.toNode(node_name), - "inputName": node_name}) now_node.setInput(1, input_node) elif isinstance(set_output_to, str): input_node = nuke.createNode( "Input", "name {}".format(node_name)) input_node.hideControlPanel() - connections.append({ - "node": nuke.toNode(set_output_to), - "inputName": set_output_to}) now_node.setInput(0, input_node) else: @@ -506,7 +486,7 @@ def create_write_node(name, data, input=None, prenodes=None, review=True): add_deadline_tab(GN) # open the our Tab as default - GN[self._node_tab_name].setFlag(0) + GN[opnl._node_tab_name].setFlag(0) # set tile color tile_color = _data.get("tile_color", "0xff0000ff") @@ -629,7 +609,7 @@ class WorkfileSettings(object): root_node=None, nodes=None, **kwargs): - self._project = kwargs.get( + opnl._project = kwargs.get( "project") or io.find_one({"type": "project"}) self._asset = kwargs.get("asset_name") or api.Session["AVALON_ASSET"] self._asset_entity = get_asset(self._asset) @@ -672,7 +652,7 @@ class WorkfileSettings(object): ] erased_viewers = [] - for v in [n for n in nuke.allNodes(filter="Viewer")]: + for v in 
nuke.allNodes(filter="Viewer"): v['viewerProcess'].setValue(str(viewer_dict["viewerProcess"])) if str(viewer_dict["viewerProcess"]) \ not in v['viewerProcess'].value(): @@ -716,7 +696,7 @@ class WorkfileSettings(object): log.error(msg) nuke.message(msg) - log.debug(">> root_dict: {}".format(root_dict)) + log.warning(">> root_dict: {}".format(root_dict)) # first set OCIO if self._root_node["colorManagement"].value() \ @@ -738,41 +718,41 @@ class WorkfileSettings(object): # third set ocio custom path if root_dict.get("customOCIOConfigPath"): - self._root_node["customOCIOConfigPath"].setValue( - str(root_dict["customOCIOConfigPath"]).format( - **os.environ - ).replace("\\", "/") - ) - log.debug("nuke.root()['{}'] changed to: {}".format( - "customOCIOConfigPath", root_dict["customOCIOConfigPath"])) - root_dict.pop("customOCIOConfigPath") + unresolved_path = root_dict["customOCIOConfigPath"] + ocio_paths = unresolved_path[platform.system().lower()] + + resolved_path = None + for ocio_p in ocio_paths: + resolved_path = str(ocio_p).format(**os.environ) + if not os.path.exists(resolved_path): + continue + + if resolved_path: + self._root_node["customOCIOConfigPath"].setValue( + str(resolved_path).replace("\\", "/") + ) + log.debug("nuke.root()['{}'] changed to: {}".format( + "customOCIOConfigPath", resolved_path)) + root_dict.pop("customOCIOConfigPath") # then set the rest for knob, value in root_dict.items(): + # skip unfilled ocio config path + # it will be dict in value + if isinstance(value, dict): + continue if self._root_node[knob].value() not in value: self._root_node[knob].setValue(str(value)) log.debug("nuke.root()['{}'] changed to: {}".format( knob, value)) - def set_writes_colorspace(self, write_dict): + def set_writes_colorspace(self): ''' Adds correct colorspace to write node dict - Arguments: - write_dict (dict): nuke write node as dictionary - ''' - # scene will have fixed colorspace following presets for the project - if not isinstance(write_dict, dict): - 
msg = "set_root_colorspace(): argument should be dictionary" - log.error(msg) - return - from avalon.nuke import read - for node in nuke.allNodes(): - - if node.Class() in ["Viewer", "Dot"]: - continue + for node in nuke.allNodes(filter="Group"): # get data from avalon knob avalon_knob_data = read(node) @@ -788,49 +768,63 @@ class WorkfileSettings(object): if avalon_knob_data.get("families"): families.append(avalon_knob_data.get("families")) - # except disabled nodes but exclude backdrops in test - for fmly, knob in write_dict.items(): - write = None - if (fmly in families): - # Add all nodes in group instances. - if node.Class() == "Group": - node.begin() - for x in nuke.allNodes(): - if x.Class() == "Write": - write = x - node.end() - elif node.Class() == "Write": - write = node - else: - log.warning("Wrong write node Class") + data_preset = { + "nodeclass": avalon_knob_data["family"], + "families": families, + "creator": avalon_knob_data['creator'] + } - write["colorspace"].setValue(str(knob["colorspace"])) - log.info( - "Setting `{0}` to `{1}`".format( - write.name(), - knob["colorspace"])) + nuke_imageio_writes = get_created_node_imageio_setting( + **data_preset) - def set_reads_colorspace(self, reads): + log.debug("nuke_imageio_writes: `{}`".format(nuke_imageio_writes)) + + if not nuke_imageio_writes: + return + + write_node = None + + # get into the group node + node.begin() + for x in nuke.allNodes(): + if x.Class() == "Write": + write_node = x + node.end() + + if not write_node: + return + + # write all knobs to node + for knob in nuke_imageio_writes["knobs"]: + value = knob["value"] + if isinstance(value, six.text_type): + value = str(value) + if str(value).startswith("0x"): + value = int(value, 16) + + write_node[knob["name"]].setValue(value) + + + def set_reads_colorspace(self, read_clrs_inputs): """ Setting colorspace to Read nodes Looping trought all read nodes and tries to set colorspace based on regex rules in presets """ - changes = dict() + 
changes = {} for n in nuke.allNodes(): file = nuke.filename(n) - if not n.Class() == "Read": + if n.Class() != "Read": continue - # load nuke presets for Read's colorspace - read_clrs_presets = config.get_init_presets()["colorspace"].get( - "nuke", {}).get("read", {}) - # check if any colorspace presets for read is mathing - preset_clrsp = next((read_clrs_presets[k] - for k in read_clrs_presets - if bool(re.search(k, file))), - None) + preset_clrsp = None + + for input in read_clrs_inputs: + if not bool(re.search(input["regex"], file)): + continue + preset_clrsp = input["colorspace"] + log.debug(preset_clrsp) if preset_clrsp is not None: current = n["colorspace"].value() @@ -864,13 +858,15 @@ class WorkfileSettings(object): def set_colorspace(self): ''' Setting colorpace following presets ''' - nuke_colorspace = config.get_init_presets( - )["colorspace"].get("nuke", None) + # get imageio + imageio = get_anatomy_settings(opnl.project_name)["imageio"] + nuke_colorspace = imageio["nuke"] try: - self.set_root_colorspace(nuke_colorspace["root"]) + self.set_root_colorspace(nuke_colorspace["workfile"]) except AttributeError: - msg = "set_colorspace(): missing `root` settings in template" + msg = "set_colorspace(): missing `workfile` settings in template" + nuke.message(msg) try: self.set_viewers_colorspace(nuke_colorspace["viewer"]) @@ -880,15 +876,14 @@ class WorkfileSettings(object): log.error(msg) try: - self.set_writes_colorspace(nuke_colorspace["write"]) - except AttributeError: - msg = "set_colorspace(): missing `write` settings in template" - nuke.message(msg) - log.error(msg) + self.set_writes_colorspace() + except AttributeError as _error: + nuke.message(_error) + log.error(_error) - reads = nuke_colorspace.get("read") - if reads: - self.set_reads_colorspace(reads) + read_clrs_inputs = nuke_colorspace["regexInputs"].get("inputs", []) + if read_clrs_inputs: + self.set_reads_colorspace(read_clrs_inputs) try: for key in nuke_colorspace: @@ -1070,15 +1065,14 @@ class 
WorkfileSettings(object): def set_favorites(self): work_dir = os.getenv("AVALON_WORKDIR") asset = os.getenv("AVALON_ASSET") - project = os.getenv("AVALON_PROJECT") favorite_items = OrderedDict() # project # get project's root and split to parts projects_root = os.path.normpath(work_dir.split( - project)[0]) + opnl.project_name)[0]) # add project name - project_dir = os.path.join(projects_root, project) + "/" + project_dir = os.path.join(projects_root, opnl.project_name) + "/" # add to favorites favorite_items.update({"Project dir": project_dir.replace("\\", "/")}) @@ -1128,13 +1122,13 @@ def get_write_node_template_attr(node): data['avalon'] = avalon.nuke.read( node) data_preset = { - "class": data['avalon']['family'], - "families": data['avalon']['families'], - "preset": data['avalon']['families'] # omit < 2.0.0v + "nodeclass": data['avalon']['family'], + "families": [data['avalon']['families']], + "creator": data['avalon']['creator'] } # get template data - nuke_imageio_writes = get_node_imageio_setting(**data_preset) + nuke_imageio_writes = get_created_node_imageio_setting(**data_preset) # collecting correct data correct_data = OrderedDict({ @@ -1230,8 +1224,7 @@ class ExporterReview: """ anlib.reset_selection() ipn_orig = None - for v in [n for n in nuke.allNodes() - if "Viewer" == n.Class()]: + for v in nuke.allNodes(filter="Viewer"): ip = v['input_process'].getValue() ipn = v['input_process_node'].getValue() if "VIEWER_INPUT" not in ipn and ip: @@ -1644,8 +1637,8 @@ def launch_workfiles_app(): if not open_at_start: return - if not self.workfiles_launched: - self.workfiles_launched = True + if not opnl.workfiles_launched: + opnl.workfiles_launched = True workfiles.show(os.environ["AVALON_WORKDIR"]) diff --git a/openpype/hosts/nuke/api/menu.py b/openpype/hosts/nuke/api/menu.py index 2317066528..021ea04159 100644 --- a/openpype/hosts/nuke/api/menu.py +++ b/openpype/hosts/nuke/api/menu.py @@ -26,9 +26,9 @@ def install(): menu.addCommand( name, workfiles.show, - 
index=(rm_item[0]) + index=2 ) - + menu.addSeparator(index=3) # replace reset resolution from avalon core to pype's name = "Reset Resolution" new_name = "Set Resolution" @@ -63,16 +63,7 @@ def install(): # add colorspace menu item name = "Set Colorspace" menu.addCommand( - name, lambda: WorkfileSettings().set_colorspace(), - index=(rm_item[0] + 2) - ) - log.debug("Adding menu item: {}".format(name)) - - # add workfile builder menu item - name = "Build Workfile" - menu.addCommand( - name, lambda: BuildWorkfile().process(), - index=(rm_item[0] + 7) + name, lambda: WorkfileSettings().set_colorspace() ) log.debug("Adding menu item: {}".format(name)) @@ -80,11 +71,20 @@ def install(): name = "Apply All Settings" menu.addCommand( name, - lambda: WorkfileSettings().set_context_settings(), - index=(rm_item[0] + 3) + lambda: WorkfileSettings().set_context_settings() ) log.debug("Adding menu item: {}".format(name)) + menu.addSeparator() + + # add workfile builder menu item + name = "Build Workfile" + menu.addCommand( + name, lambda: BuildWorkfile().process() + ) + log.debug("Adding menu item: {}".format(name)) + + # adding shortcuts add_shortcuts_from_presets() diff --git a/openpype/hosts/nuke/plugins/create/create_write_prerender.py b/openpype/hosts/nuke/plugins/create/create_write_prerender.py index 38d1a0c2ed..6e1a2ddd96 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_prerender.py +++ b/openpype/hosts/nuke/plugins/create/create_write_prerender.py @@ -77,10 +77,14 @@ class CreateWritePrerender(plugin.PypeCreator): write_data = { "nodeclass": self.n_class, "families": [self.family], - "avalon": self.data, - "creator": self.__class__.__name__ + "avalon": self.data } + # add creator data + creator_data = {"creator": self.__class__.__name__} + self.data.update(creator_data) + write_data.update(creator_data) + if self.presets.get('fpath_template'): self.log.info("Adding template path from preset") write_data.update( diff --git 
a/openpype/hosts/nuke/plugins/create/create_write_render.py b/openpype/hosts/nuke/plugins/create/create_write_render.py index 72f851f19c..04983e9c75 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_render.py +++ b/openpype/hosts/nuke/plugins/create/create_write_render.py @@ -80,10 +80,14 @@ class CreateWriteRender(plugin.PypeCreator): write_data = { "nodeclass": self.n_class, "families": [self.family], - "avalon": self.data, - "creator": self.__class__.__name__ + "avalon": self.data } + # add creator data + creator_data = {"creator": self.__class__.__name__} + self.data.update(creator_data) + write_data.update(creator_data) + if self.presets.get('fpath_template'): self.log.info("Adding template path from preset") write_data.update( diff --git a/openpype/hosts/nuke/plugins/load/load_mov.py b/openpype/hosts/nuke/plugins/load/load_mov.py index 92726913af..e2c9acaa9c 100644 --- a/openpype/hosts/nuke/plugins/load/load_mov.py +++ b/openpype/hosts/nuke/plugins/load/load_mov.py @@ -1,6 +1,5 @@ import nuke -import contextlib - +from avalon.vendor import qargparse from avalon import api, io from openpype.api import get_current_project_settings from openpype.hosts.nuke.api.lib import ( @@ -8,41 +7,6 @@ from openpype.hosts.nuke.api.lib import ( ) -@contextlib.contextmanager -def preserve_trim(node): - """Preserve the relative trim of the Loader tool. - - This tries to preserve the loader's trim (trim in and trim out) after - the context by reapplying the "amount" it trims on the clip's length at - start and end. 
- - """ - # working script frame range - script_start = nuke.root()["first_frame"].value() - - start_at_frame = None - offset_frame = None - if node['frame_mode'].value() == "start at": - start_at_frame = node['frame'].value() - if node['frame_mode'].value() == "offset": - offset_frame = node['frame'].value() - - try: - yield - finally: - if start_at_frame: - node['frame_mode'].setValue("start at") - node['frame'].setValue(str(script_start)) - print("start frame of Read was set to" - "{}".format(script_start)) - - if offset_frame: - node['frame_mode'].setValue("offset") - node['frame'].setValue(str((script_start + offset_frame))) - print("start frame of Read was set to" - "{}".format(script_start)) - - def add_review_presets_config(): returning = { "families": list(), @@ -79,14 +43,30 @@ class LoadMov(api.Loader): script_start = nuke.root()["first_frame"].value() + # options gui + defaults = { + "start_at_workfile": True + } + + options = [ + qargparse.Boolean( + "start_at_workfile", + help="Load at workfile start frame", + default=True + ) + ] + node_name_template = "{class_name}_{ext}" - def load(self, context, name, namespace, data): + def load(self, context, name, namespace, options): from avalon.nuke import ( containerise, viewer_update_and_undo_stop ) + start_at_workfile = options.get( + "start_at_workfile", self.defaults["start_at_workfile"]) + version = context['version'] version_data = version.get("data", {}) repr_id = context["representation"]["_id"] @@ -135,22 +115,28 @@ class LoadMov(api.Loader): read_name = self.node_name_template.format(**name_data) - # Create the Loader with the filename path set + read_node = nuke.createNode( + "Read", + "name {}".format(read_name) + ) + + # to avoid multiple undo steps for rest of process + # we will switch off undo-ing with viewer_update_and_undo_stop(): - read_node = nuke.createNode( - "Read", - "name {}".format(read_name) - ) read_node["file"].setValue(file) read_node["origfirst"].setValue(first) 
read_node["first"].setValue(first) read_node["origlast"].setValue(last) read_node["last"].setValue(last) - - # start at script start read_node['frame_mode'].setValue("start at") - read_node['frame'].setValue(str(self.script_start)) + + if start_at_workfile: + # start at workfile start + read_node['frame'].setValue(str(self.script_start)) + else: + # start at version frame start + read_node['frame'].setValue(str(orig_first - handle_start)) if colorspace: read_node["colorspace"].setValue(str(colorspace)) @@ -264,29 +250,29 @@ class LoadMov(api.Loader): # create handles offset (only to last, because of mov) last += handle_start + handle_end - # Update the loader's path whilst preserving some values - with preserve_trim(read_node): - read_node["file"].setValue(file) - self.log.info("__ node['file']: {}".format( - read_node["file"].value())) + read_node["file"].setValue(file) - # Set the global in to the start frame of the sequence - read_node["origfirst"].setValue(first) - read_node["first"].setValue(first) - read_node["origlast"].setValue(last) - read_node["last"].setValue(last) + # Set the global in to the start frame of the sequence + read_node["origfirst"].setValue(first) + read_node["first"].setValue(first) + read_node["origlast"].setValue(last) + read_node["last"].setValue(last) + read_node['frame_mode'].setValue("start at") - # start at script start - read_node['frame_mode'].setValue("start at") + if int(self.script_start) == int(read_node['frame'].value()): + # start at workfile start read_node['frame'].setValue(str(self.script_start)) + else: + # start at version frame start + read_node['frame'].setValue(str(orig_first - handle_start)) - if colorspace: - read_node["colorspace"].setValue(str(colorspace)) + if colorspace: + read_node["colorspace"].setValue(str(colorspace)) - preset_clrsp = get_imageio_input_colorspace(file) + preset_clrsp = get_imageio_input_colorspace(file) - if preset_clrsp is not None: - read_node["colorspace"].setValue(preset_clrsp) + if 
preset_clrsp is not None: + read_node["colorspace"].setValue(preset_clrsp) updated_dict = {} updated_dict.update({ @@ -319,8 +305,8 @@ class LoadMov(api.Loader): from avalon.nuke import viewer_update_and_undo_stop - node = nuke.toNode(container['objectName']) - assert node.Class() == "Read", "Must be Read" + read_node = nuke.toNode(container['objectName']) + assert read_node.Class() == "Read", "Must be Read" with viewer_update_and_undo_stop(): - nuke.delete(node) + nuke.delete(read_node) diff --git a/openpype/hosts/nuke/plugins/load/load_sequence.py b/openpype/hosts/nuke/plugins/load/load_sequence.py index df7aa55cd1..9cbd1d4466 100644 --- a/openpype/hosts/nuke/plugins/load/load_sequence.py +++ b/openpype/hosts/nuke/plugins/load/load_sequence.py @@ -1,74 +1,11 @@ import nuke -import contextlib - +from avalon.vendor import qargparse from avalon import api, io from openpype.hosts.nuke.api.lib import ( get_imageio_input_colorspace ) -@contextlib.contextmanager -def preserve_trim(node): - """Preserve the relative trim of the Loader tool. - - This tries to preserve the loader's trim (trim in and trim out) after - the context by reapplying the "amount" it trims on the clip's length at - start and end. 
- - """ - # working script frame range - script_start = nuke.root()["first_frame"].value() - - start_at_frame = None - offset_frame = None - if node['frame_mode'].value() == "start at": - start_at_frame = node['frame'].value() - if node['frame_mode'].value() == "offset": - offset_frame = node['frame'].value() - - try: - yield - finally: - if start_at_frame: - node['frame_mode'].setValue("start at") - node['frame'].setValue(str(script_start)) - print("start frame of Read was set to" - "{}".format(script_start)) - - if offset_frame: - node['frame_mode'].setValue("offset") - node['frame'].setValue(str((script_start + offset_frame))) - print("start frame of Read was set to" - "{}".format(script_start)) - - -def loader_shift(node, frame, relative=False): - """Shift global in time by i preserving duration - - This moves the loader by i frames preserving global duration. When relative - is False it will shift the global in to the start frame. - - Args: - loader (tool): The fusion loader tool. - frame (int): The amount of frames to move. - relative (bool): When True the shift is relative, else the shift will - change the global in to frame. 
- - Returns: - int: The resulting relative frame change (how much it moved) - - """ - # working script frame range - script_start = nuke.root()["first_frame"].value() - - if relative: - node['frame_mode'].setValue("start at") - node['frame'].setValue(str(script_start)) - else: - node['frame_mode'].setValue("start at") - node['frame'].setValue(str(frame)) - - class LoadSequence(api.Loader): """Load image sequence into Nuke""" @@ -80,14 +17,32 @@ class LoadSequence(api.Loader): icon = "file-video-o" color = "white" + script_start = nuke.root()["first_frame"].value() + + # option gui + defaults = { + "start_at_workfile": True + } + + options = [ + qargparse.Boolean( + "start_at_workfile", + help="Load at workfile start frame", + default=True + ) + ] + node_name_template = "{class_name}_{ext}" - def load(self, context, name, namespace, data): + def load(self, context, name, namespace, options): from avalon.nuke import ( containerise, viewer_update_and_undo_stop ) + start_at_workfile = options.get( + "start_at_workfile", self.defaults["start_at_workfile"]) + version = context['version'] version_data = version.get("data", {}) repr_id = context["representation"]["_id"] @@ -139,28 +94,31 @@ class LoadSequence(api.Loader): read_name = self.node_name_template.format(**name_data) # Create the Loader with the filename path set + read_node = nuke.createNode( + "Read", + "name {}".format(read_name)) + + # to avoid multiple undo steps for rest of process + # we will switch off undo-ing with viewer_update_and_undo_stop(): - # TODO: it might be universal read to img/geo/camera - r = nuke.createNode( - "Read", - "name {}".format(read_name)) - r["file"].setValue(file) + read_node["file"].setValue(file) # Set colorspace defined in version data colorspace = context["version"]["data"].get("colorspace") if colorspace: - r["colorspace"].setValue(str(colorspace)) + read_node["colorspace"].setValue(str(colorspace)) preset_clrsp = get_imageio_input_colorspace(file) if preset_clrsp is not 
None: - r["colorspace"].setValue(preset_clrsp) + read_node["colorspace"].setValue(preset_clrsp) - loader_shift(r, first, relative=True) - r["origfirst"].setValue(int(first)) - r["first"].setValue(int(first)) - r["origlast"].setValue(int(last)) - r["last"].setValue(int(last)) + # set start frame depending on workfile or version + self.loader_shift(read_node, start_at_workfile) + read_node["origfirst"].setValue(int(first)) + read_node["first"].setValue(int(first)) + read_node["origlast"].setValue(int(last)) + read_node["last"].setValue(int(last)) # add additional metadata from the version to imprint Avalon knob add_keys = ["frameStart", "frameEnd", @@ -177,48 +135,20 @@ class LoadSequence(api.Loader): data_imprint.update({"objectName": read_name}) - r["tile_color"].setValue(int("0x4ecd25ff", 16)) + read_node["tile_color"].setValue(int("0x4ecd25ff", 16)) if version_data.get("retime", None): speed = version_data.get("speed", 1) time_warp_nodes = version_data.get("timewarps", []) - self.make_retimes(r, speed, time_warp_nodes) + self.make_retimes(read_node, speed, time_warp_nodes) - return containerise(r, + return containerise(read_node, name=name, namespace=namespace, context=context, loader=self.__class__.__name__, data=data_imprint) - def make_retimes(self, node, speed, time_warp_nodes): - ''' Create all retime and timewarping nodes with coppied animation ''' - if speed != 1: - rtn = nuke.createNode( - "Retime", - "speed {}".format(speed)) - rtn["before"].setValue("continue") - rtn["after"].setValue("continue") - rtn["input.first_lock"].setValue(True) - rtn["input.first"].setValue( - self.handle_start + self.first_frame - ) - - if time_warp_nodes != []: - for timewarp in time_warp_nodes: - twn = nuke.createNode(timewarp["Class"], - "name {}".format(timewarp["name"])) - if isinstance(timewarp["lookup"], list): - # if array for animation - twn["lookup"].setAnimated() - for i, value in enumerate(timewarp["lookup"]): - twn["lookup"].setValueAt( - (self.first_frame + i) + 
value, - (self.first_frame + i)) - else: - # if static value `int` - twn["lookup"].setValue(timewarp["lookup"]) - def switch(self, container, representation): self.update(container, representation) @@ -235,9 +165,9 @@ class LoadSequence(api.Loader): update_container ) - node = nuke.toNode(container['objectName']) + read_node = nuke.toNode(container['objectName']) - assert node.Class() == "Read", "Must be Read" + assert read_node.Class() == "Read", "Must be Read" repr_cont = representation["context"] @@ -284,23 +214,23 @@ class LoadSequence(api.Loader): self.log.warning( "Missing start frame for updated version" "assuming starts at frame 0 for: " - "{} ({})".format(node['name'].value(), representation)) + "{} ({})".format(read_node['name'].value(), representation)) first = 0 first -= self.handle_start last += self.handle_end - # Update the loader's path whilst preserving some values - with preserve_trim(node): - node["file"].setValue(file) - self.log.info("__ node['file']: {}".format(node["file"].value())) + read_node["file"].setValue(file) - # Set the global in to the start frame of the sequence - loader_shift(node, first, relative=True) - node["origfirst"].setValue(int(first)) - node["first"].setValue(int(first)) - node["origlast"].setValue(int(last)) - node["last"].setValue(int(last)) + # set start frame depending on workfile or version + self.loader_shift( + read_node, + bool("start at" in read_node['frame_mode'].value())) + + read_node["origfirst"].setValue(int(first)) + read_node["first"].setValue(int(first)) + read_node["origlast"].setValue(int(last)) + read_node["last"].setValue(int(last)) updated_dict = {} updated_dict.update({ @@ -317,20 +247,20 @@ class LoadSequence(api.Loader): "outputDir": version_data.get("outputDir"), }) - # change color of node + # change color of read_node if version.get("name") not in [max_version]: - node["tile_color"].setValue(int("0xd84f20ff", 16)) + read_node["tile_color"].setValue(int("0xd84f20ff", 16)) else: - 
node["tile_color"].setValue(int("0x4ecd25ff", 16)) + read_node["tile_color"].setValue(int("0x4ecd25ff", 16)) if version_data.get("retime", None): speed = version_data.get("speed", 1) time_warp_nodes = version_data.get("timewarps", []) - self.make_retimes(node, speed, time_warp_nodes) + self.make_retimes(read_node, speed, time_warp_nodes) # Update the imprinted representation update_container( - node, + read_node, updated_dict ) self.log.info("udated to version: {}".format(version.get("name"))) @@ -339,8 +269,48 @@ class LoadSequence(api.Loader): from avalon.nuke import viewer_update_and_undo_stop - node = nuke.toNode(container['objectName']) - assert node.Class() == "Read", "Must be Read" + read_node = nuke.toNode(container['objectName']) + assert read_node.Class() == "Read", "Must be Read" with viewer_update_and_undo_stop(): - nuke.delete(node) + nuke.delete(read_node) + + def make_retimes(self, speed, time_warp_nodes): + ''' Create all retime and timewarping nodes with coppied animation ''' + if speed != 1: + rtn = nuke.createNode( + "Retime", + "speed {}".format(speed)) + rtn["before"].setValue("continue") + rtn["after"].setValue("continue") + rtn["input.first_lock"].setValue(True) + rtn["input.first"].setValue( + self.handle_start + self.first_frame + ) + + if time_warp_nodes != []: + for timewarp in time_warp_nodes: + twn = nuke.createNode(timewarp["Class"], + "name {}".format(timewarp["name"])) + if isinstance(timewarp["lookup"], list): + # if array for animation + twn["lookup"].setAnimated() + for i, value in enumerate(timewarp["lookup"]): + twn["lookup"].setValueAt( + (self.first_frame + i) + value, + (self.first_frame + i)) + else: + # if static value `int` + twn["lookup"].setValue(timewarp["lookup"]) + + def loader_shift(self, read_node, workfile_start=False): + """ Set start frame of read node to a workfile start + + Args: + read_node (nuke.Node): The nuke's read node + workfile_start (bool): set workfile start frame if true + + """ + if workfile_start: 
+ read_node['frame_mode'].setValue("start at") + read_node['frame'].setValue(str(self.script_start)) diff --git a/openpype/hosts/nuke/plugins/publish/collect_slate_node.py b/openpype/hosts/nuke/plugins/publish/collect_slate_node.py index 9c7f1b5e95..4257ed3131 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_slate_node.py +++ b/openpype/hosts/nuke/plugins/publish/collect_slate_node.py @@ -34,7 +34,8 @@ class CollectSlate(pyblish.api.InstancePlugin): if slate_node: instance.data["slateNode"] = slate_node instance.data["families"].append("slate") + instance.data["versionData"]["families"].append("slate") self.log.info( "Slate node is in node graph: `{}`".format(slate.name())) self.log.debug( - "__ instance: `{}`".format(instance)) + "__ instance.data: `{}`".format(instance.data)) diff --git a/openpype/hosts/photoshop/plugins/create/create_image.py b/openpype/hosts/photoshop/plugins/create/create_image.py index 21b5ea9243..1df8502959 100644 --- a/openpype/hosts/photoshop/plugins/create/create_image.py +++ b/openpype/hosts/photoshop/plugins/create/create_image.py @@ -74,6 +74,9 @@ class CreateImage(openpype.api.Creator): for group in groups: long_names = [] + group.name = group.name.replace(stub.PUBLISH_ICON, ''). 
\ + replace(stub.LOADED_ICON, '') + if group.long_name: for directory in group.long_name[::-1]: name = directory.replace(stub.PUBLISH_ICON, '').\ diff --git a/openpype/hosts/resolve/api/lib.py b/openpype/hosts/resolve/api/lib.py index 101e7bb572..aa4b2e7219 100644 --- a/openpype/hosts/resolve/api/lib.py +++ b/openpype/hosts/resolve/api/lib.py @@ -164,14 +164,26 @@ def create_media_pool_item(fpath: str, # try to search in bin if the clip does not exist existing_mpi = get_media_pool_item(fpath, root_bin) - if not existing_mpi: - media_pool_item = media_storage.AddItemsToMediaPool(fpath) - print(media_pool_item) - # pop the returned dict on first item as resolve data object is such - return media_pool_item.pop(1.0) - else: + if existing_mpi: return existing_mpi + dirname, file = os.path.split(fpath) + _name, ext = os.path.splitext(file) + + # add all data in folder to mediapool + media_pool_items = media_storage.AddItemListToMediaPool( + os.path.normpath(dirname)) + + if not media_pool_items: + return False + + # if any are added then look into them for the right extension + media_pool_item = [mpi for mpi in media_pool_items + if ext in mpi.GetClipProperty("File Path")] + + # return only first found + return media_pool_item.pop() + def get_media_pool_item(fpath, root: object = None) -> object: """ @@ -189,7 +201,7 @@ def get_media_pool_item(fpath, root: object = None) -> object: fname = os.path.basename(fpath) for _mpi in root.GetClipList(): - _mpi_name = _mpi.GetClipProperty("File Name")["File Name"] + _mpi_name = _mpi.GetClipProperty("File Name") _mpi_name = get_reformated_path(_mpi_name, first=True) if fname in _mpi_name: return _mpi @@ -215,8 +227,8 @@ def create_timeline_item(media_pool_item: object, # get all variables project = get_current_project() media_pool = project.GetMediaPool() - clip_property = media_pool_item.GetClipProperty() - clip_name = clip_property["File Name"] + _clip_property = media_pool_item.GetClipProperty + clip_name = _clip_property("File 
Name") timeline = timeline or get_current_timeline() # if timeline was used then switch it to current timeline @@ -231,7 +243,6 @@ def create_timeline_item(media_pool_item: object, clip_data.update({"endFrame": source_end}) print(clip_data) - print(clip_property) # add to timeline media_pool.AppendToTimeline([clip_data]) @@ -257,8 +268,8 @@ def get_timeline_item(media_pool_item: object, Returns: object: resolve.TimelineItem """ - clip_property = media_pool_item.GetClipProperty() - clip_name = clip_property["File Name"] + _clip_property = media_pool_item.GetClipProperty + clip_name = _clip_property("File Name") output_timeline_item = None timeline = timeline or get_current_timeline() @@ -267,8 +278,8 @@ def get_timeline_item(media_pool_item: object, for _ti_data in get_current_timeline_items(): _ti_clip = _ti_data["clip"]["item"] - _ti_clip_property = _ti_clip.GetMediaPoolItem().GetClipProperty() - if clip_name in _ti_clip_property["File Name"]: + _ti_clip_property = _ti_clip.GetMediaPoolItem().GetClipProperty + if clip_name in _ti_clip_property("File Name"): output_timeline_item = _ti_clip return output_timeline_item @@ -302,7 +313,7 @@ def get_current_timeline_items( selecting_color = selecting_color or "Chocolate" project = get_current_project() timeline = get_current_timeline() - selected_clips = list() + selected_clips = [] # get all tracks count filtered by track type selected_track_count = timeline.GetTrackCount(track_type) @@ -541,15 +552,15 @@ def create_compound_clip(clip_data, name, folder): clip_attributes = get_clip_attributes(clip_item) mp_item = clip_item.GetMediaPoolItem() - mp_props = mp_item.GetClipProperty() + _mp_props = mp_item.GetClipProperty - mp_first_frame = int(mp_props["Start"]) - mp_last_frame = int(mp_props["End"]) + mp_first_frame = int(_mp_props("Start")) + mp_last_frame = int(_mp_props("End")) # initialize basic source timing for otio ci_l_offset = clip_item.GetLeftOffset() ci_duration = clip_item.GetDuration() - rate = 
float(mp_props["FPS"]) + rate = float(_mp_props("FPS")) # source rational times mp_in_rc = opentime.RationalTime((ci_l_offset), rate) @@ -606,7 +617,7 @@ def create_compound_clip(clip_data, name, folder): cct.SetMetadata(self.pype_tag_name, clip_attributes) # reset start timecode of the compound clip - cct.SetClipProperty("Start TC", mp_props["Start TC"]) + cct.SetClipProperty("Start TC", _mp_props("Start TC")) # swap clips on timeline swap_clips(clip_item, cct, in_frame, out_frame) @@ -632,8 +643,8 @@ def swap_clips(from_clip, to_clip, to_in_frame, to_out_frame): bool: True if successfully replaced """ - clip_prop = to_clip.GetClipProperty() - to_clip_name = clip_prop["File Name"] + _clip_prop = to_clip.GetClipProperty + to_clip_name = _clip_prop("File Name") # add clip item as take to timeline take = from_clip.AddTake( to_clip, @@ -698,7 +709,7 @@ def get_clip_attributes(clip): """ mp_item = clip.GetMediaPoolItem() - data = { + return { "clipIn": clip.GetStart(), "clipOut": clip.GetEnd(), "clipLeftOffset": clip.GetLeftOffset(), @@ -708,7 +719,6 @@ def get_clip_attributes(clip): "sourceId": mp_item.GetMediaId(), "sourceProperties": mp_item.GetClipProperty() } - return data def set_project_manager_to_folder_name(folder_name): @@ -840,12 +850,12 @@ def get_reformated_path(path, padded=False, first=False): get_reformated_path("plate.[0001-1008].exr") > plate.%04d.exr """ - num_pattern = r"(\[\d+\-\d+\])" - padding_pattern = r"(\d+)(?=-)" first_frame_pattern = re.compile(r"\[(\d+)\-\d+\]") if "[" in path: + padding_pattern = r"(\d+)(?=-)" padding = len(re.findall(padding_pattern, path).pop()) + num_pattern = r"(\[\d+\-\d+\])" if padded: path = re.sub(num_pattern, f"%0{padding}d", path) elif first: diff --git a/openpype/hosts/resolve/api/plugin.py b/openpype/hosts/resolve/api/plugin.py index 4712d0a8b9..f1c55a6180 100644 --- a/openpype/hosts/resolve/api/plugin.py +++ b/openpype/hosts/resolve/api/plugin.py @@ -379,9 +379,10 @@ class ClipLoader: # create mediaItem in 
active project bin # create clip media + media_pool_item = lib.create_media_pool_item( self.data["path"], self.active_bin) - clip_property = media_pool_item.GetClipProperty() + _clip_property = media_pool_item.GetClipProperty # get handles handle_start = self.data["versionData"].get("handleStart") @@ -391,10 +392,10 @@ class ClipLoader: if handle_end is None: handle_end = int(self.data["assetData"]["handleEnd"]) - source_in = int(clip_property["Start"]) - source_out = int(clip_property["End"]) + source_in = int(_clip_property("Start")) + source_out = int(_clip_property("End")) - if clip_property["Type"] == "Video": + if _clip_property("Type") == "Video": source_in += handle_start source_out -= handle_end @@ -420,8 +421,7 @@ class ClipLoader: # create clip media media_pool_item = lib.create_media_pool_item( self.data["path"], self.active_bin) - clip_property = media_pool_item.GetClipProperty() - clip_name = clip_property["File Name"] + _clip_property = media_pool_item.GetClipProperty # get handles handle_start = self.data["versionData"].get("handleStart") @@ -431,8 +431,8 @@ class ClipLoader: if handle_end is None: handle_end = int(self.data["assetData"]["handleEnd"]) - source_in = int(clip_property["Start"]) - source_out = int(clip_property["End"]) + source_in = int(_clip_property("Start")) + source_out = int(_clip_property("End")) resolve.swap_clips( timeline_item, @@ -783,6 +783,8 @@ class PublishClip: # add review track only to hero track if hero_track and self.review_layer: self.tag_data.update({"reviewTrack": self.review_layer}) + else: + self.tag_data.update({"reviewTrack": None}) def _solve_tag_hierarchy_data(self, hierarchy_formating_data): @@ -819,7 +821,7 @@ class PublishClip: def _create_parents(self): """ Create parents and return it in list. 
""" - self.parents = list() + self.parents = [] patern = re.compile(self.parents_search_patern) par_split = [patern.findall(t).pop() diff --git a/openpype/hosts/resolve/otio/davinci_export.py b/openpype/hosts/resolve/otio/davinci_export.py index 7912b1abd8..2c276d9888 100644 --- a/openpype/hosts/resolve/otio/davinci_export.py +++ b/openpype/hosts/resolve/otio/davinci_export.py @@ -33,8 +33,11 @@ def create_otio_time_range(start_frame, frame_duration, fps): def create_otio_reference(media_pool_item): metadata = _get_metadata_media_pool_item(media_pool_item) - mp_clip_property = media_pool_item.GetClipProperty() - path = mp_clip_property["File Path"] + print("media pool item: {}".format(media_pool_item.GetName())) + + _mp_clip_property = media_pool_item.GetClipProperty + + path = _mp_clip_property("File Path") reformat_path = utils.get_reformated_path(path, padded=True) padding = utils.get_padding_from_path(path) @@ -45,13 +48,12 @@ def create_otio_reference(media_pool_item): }) # get clip property regarding to type - mp_clip_property = media_pool_item.GetClipProperty() - fps = float(mp_clip_property["FPS"]) - if mp_clip_property["Type"] == "Video": - frame_start = int(mp_clip_property["Start"]) - frame_duration = int(mp_clip_property["Frames"]) + fps = float(_mp_clip_property("FPS")) + if _mp_clip_property("Type") == "Video": + frame_start = int(_mp_clip_property("Start")) + frame_duration = int(_mp_clip_property("Frames")) else: - audio_duration = str(mp_clip_property["Duration"]) + audio_duration = str(_mp_clip_property("Duration")) frame_start = 0 frame_duration = int(utils.timecode_to_frames( audio_duration, float(fps))) @@ -124,10 +126,10 @@ def create_otio_markers(track_item, fps): def create_otio_clip(track_item): media_pool_item = track_item.GetMediaPoolItem() - mp_clip_property = media_pool_item.GetClipProperty() + _mp_clip_property = media_pool_item.GetClipProperty if not self.project_fps: - fps = mp_clip_property["FPS"] + fps = 
float(_mp_clip_property("FPS")) else: fps = self.project_fps @@ -140,9 +142,9 @@ def create_otio_clip(track_item): fps ) - if mp_clip_property["Type"] == "Audio": + if _mp_clip_property("Type") == "Audio": return_clips = list() - audio_chanels = mp_clip_property["Audio Ch"] + audio_chanels = _mp_clip_property("Audio Ch") for channel in range(0, int(audio_chanels)): clip = otio.schema.Clip( name=f"{name}_{channel}", diff --git a/openpype/hosts/resolve/plugins/load/load_clip.py b/openpype/hosts/resolve/plugins/load/load_clip.py index e2e1c50365..e20384ee6c 100644 --- a/openpype/hosts/resolve/plugins/load/load_clip.py +++ b/openpype/hosts/resolve/plugins/load/load_clip.py @@ -1,7 +1,10 @@ from avalon import io, api from openpype.hosts import resolve from copy import deepcopy - +from importlib import reload +from openpype.hosts.resolve.api import lib, plugin +reload(plugin) +reload(lib) class LoadClip(resolve.TimelineItemLoader): """Load a subset to timeline as clip diff --git a/openpype/hosts/resolve/plugins/publish/precollect_instances.py b/openpype/hosts/resolve/plugins/publish/precollect_instances.py index c38cbc4f73..95b891d95a 100644 --- a/openpype/hosts/resolve/plugins/publish/precollect_instances.py +++ b/openpype/hosts/resolve/plugins/publish/precollect_instances.py @@ -37,8 +37,16 @@ class PrecollectInstances(pyblish.api.ContextPlugin): continue media_pool_item = timeline_item.GetMediaPoolItem() - clip_property = media_pool_item.GetClipProperty() - self.log.debug(f"clip_property: {clip_property}") + source_duration = int(media_pool_item.GetClipProperty("Frames")) + + # solve handles length + handle_start = min( + tag_data["handleStart"], int(timeline_item.GetLeftOffset())) + handle_end = min( + tag_data["handleEnd"], int( + source_duration - timeline_item.GetRightOffset())) + + self.log.debug("Handles: <{}, {}>".format(handle_start, handle_end)) # add tag data to instance data data.update({ @@ -60,7 +68,9 @@ class 
PrecollectInstances(pyblish.api.ContextPlugin): "item": timeline_item, "families": families, "publish": resolve.get_publish_attribute(timeline_item), - "fps": context.data["fps"] + "fps": context.data["fps"], + "handleStart": handle_start, + "handleEnd": handle_end }) # otio clip data diff --git a/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py b/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py new file mode 100644 index 0000000000..cfdbe890e5 --- /dev/null +++ b/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py @@ -0,0 +1,22 @@ +#! python3 +import avalon.api as avalon +import openpype +import openpype.hosts.resolve as bmdvr + + +def file_processing(fpath): + media_pool_item = bmdvr.create_media_pool_item(fpath) + print(media_pool_item) + + track_item = bmdvr.create_timeline_item(media_pool_item) + print(track_item) + + +if __name__ == "__main__": + path = "C:/CODE/__openpype_projects/jtest03dev/shots/sq01/mainsq01sh030/publish/plate/plateMain/v006/jt3d_mainsq01sh030_plateMain_v006.0996.exr" + + openpype.install() + # activate resolve from openpype + avalon.install(bmdvr) + + file_processing(path) \ No newline at end of file diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_clear_instances.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_clear_instances.py deleted file mode 100644 index 097e730251..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_clear_instances.py +++ /dev/null @@ -1,21 +0,0 @@ -""" -Optional: - instance.data["remove"] -> mareker for removing -""" -import pyblish.api - - -class CollectClearInstances(pyblish.api.InstancePlugin): - """Clear all marked instances""" - - order = pyblish.api.CollectorOrder + 0.4999 - label = "Clear Instances" - hosts = ["standalonepublisher"] - - def process(self, instance): - self.log.debug( - f"Instance: `{instance}` | " - f"families: `{instance.data['families']}`") - if 
instance.data.get("remove"): - self.log.info(f"Removing: {instance}") - instance.context.remove(instance) diff --git a/openpype/hosts/tvpaint/api/lib.py b/openpype/hosts/tvpaint/api/lib.py index cbc86f7b03..539cebe646 100644 --- a/openpype/hosts/tvpaint/api/lib.py +++ b/openpype/hosts/tvpaint/api/lib.py @@ -77,8 +77,9 @@ def set_context_settings(asset_doc=None): handle_start = handles handle_end = handles - frame_start -= int(handle_start) - frame_end += int(handle_end) + # Always start from 0 Mark In and set only Mark Out + mark_in = 0 + mark_out = mark_in + (frame_end - frame_start) + handle_start + handle_end - execute_george("tv_markin {} set".format(frame_start - 1)) - execute_george("tv_markout {} set".format(frame_end - 1)) + execute_george("tv_markin {} set".format(mark_in)) + execute_george("tv_markout {} set".format(mark_out)) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py b/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py new file mode 100644 index 0000000000..f291c363b8 --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py @@ -0,0 +1,37 @@ +import pyblish.api + + +class CollectOutputFrameRange(pyblish.api.ContextPlugin): + """Collect frame start/end from context. + + When instances are collected context does not contain `frameStart` and + `frameEnd` keys yet. They are collected in global plugin + `CollectAvalonEntities`. 
+ """ + label = "Collect output frame range" + order = pyblish.api.CollectorOrder + hosts = ["tvpaint"] + + def process(self, context): + for instance in context: + frame_start = instance.data.get("frameStart") + frame_end = instance.data.get("frameEnd") + if frame_start is not None and frame_end is not None: + self.log.debug( + "Instance {} already has set frames {}-{}".format( + str(instance), frame_start, frame_end + ) + ) + return + + frame_start = context.data.get("frameStart") + frame_end = context.data.get("frameEnd") + + instance.data["frameStart"] = frame_start + instance.data["frameEnd"] = frame_end + + self.log.info( + "Set frames {}-{} on instance {} ".format( + frame_start, frame_end, str(instance) + ) + ) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py index cc236734e5..27bd8e9ede 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py @@ -86,9 +86,6 @@ class CollectInstances(pyblish.api.ContextPlugin): instance.data["publish"] = any_visible - instance.data["frameStart"] = context.data["sceneMarkIn"] + 1 - instance.data["frameEnd"] = context.data["sceneMarkOut"] + 1 - self.log.debug("Created instance: {}\n{}".format( instance, json.dumps(instance.data, indent=4) )) diff --git a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py index 0d125a1a50..007b5c41f1 100644 --- a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py +++ b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py @@ -1,8 +1,6 @@ import os import shutil -import time import tempfile -import multiprocessing import pyblish.api from avalon.tvpaint import lib @@ -45,10 +43,64 @@ class ExtractSequence(pyblish.api.Extractor): ) family_lowered = instance.data["family"].lower() - frame_start = instance.data["frameStart"] - frame_end = 
instance.data["frameEnd"] + mark_in = instance.context.data["sceneMarkIn"] + mark_out = instance.context.data["sceneMarkOut"] + # Frame start/end may be stored as float + frame_start = int(instance.data["frameStart"]) + frame_end = int(instance.data["frameEnd"]) - filename_template = self._get_filename_template(frame_end) + # Handles are not stored per instance but on Context + handle_start = instance.context.data["handleStart"] + handle_end = instance.context.data["handleEnd"] + + # --- Fallbacks ---------------------------------------------------- + # This is required if validations of ranges are ignored. + # - all of this code won't change processing if range to render + # match to range of expected output + + # Prepare output frames + output_frame_start = frame_start - handle_start + output_frame_end = frame_end + handle_end + + # Change output frame start to 0 if handles cause it's negative number + if output_frame_start < 0: + self.log.warning(( + "Frame start with handles has negative value." + " Changed to \"0\". Frames start: {}, Handle Start: {}" + ).format(frame_start, handle_start)) + output_frame_start = 0 + + # Check Marks range and output range + output_range = output_frame_end - output_frame_start + marks_range = mark_out - mark_in + + # Lower Mark Out if mark range is bigger than output + # - do not rendered not used frames + if output_range < marks_range: + new_mark_out = mark_out - (marks_range - output_range) + self.log.warning(( + "Lowering render range to {} frames. Changed Mark Out {} -> {}" + ).format(marks_range + 1, mark_out, new_mark_out)) + # Assign new mark out to variable + mark_out = new_mark_out + + # Lower output frame end so representation has right `frameEnd` value + elif output_range > marks_range: + new_output_frame_end = ( + output_frame_end - (output_range - marks_range) + ) + self.log.warning(( + "Lowering representation range to {} frames." 
+ " Changed frame end {} -> {}" + ).format(output_range + 1, mark_out, new_mark_out)) + output_frame_end = new_output_frame_end + + # ------------------------------------------------------------------- + + filename_template = self._get_filename_template( + # Use the biggest number + max(mark_out, frame_end) + ) ext = os.path.splitext(filename_template)[1].replace(".", "") self.log.debug("Using file template \"{}\"".format(filename_template)) @@ -57,7 +109,9 @@ class ExtractSequence(pyblish.api.Extractor): output_dir = instance.data.get("stagingDir") if not output_dir: # Create temp folder if staging dir is not set - output_dir = tempfile.mkdtemp().replace("\\", "/") + output_dir = ( + tempfile.mkdtemp(prefix="tvpaint_render_") + ).replace("\\", "/") instance.data["stagingDir"] = output_dir self.log.debug( @@ -65,23 +119,36 @@ class ExtractSequence(pyblish.api.Extractor): ) if instance.data["family"] == "review": - repre_files, thumbnail_fullpath = self.render_review( - filename_template, output_dir, frame_start, frame_end + output_filenames, thumbnail_fullpath = self.render_review( + filename_template, output_dir, mark_in, mark_out ) else: # Render output - repre_files, thumbnail_fullpath = self.render( - filename_template, output_dir, frame_start, frame_end, + output_filenames, thumbnail_fullpath = self.render( + filename_template, output_dir, + mark_in, mark_out, filtered_layers ) + # Sequence of one frame + if not output_filenames: + self.log.warning("Extractor did not create any output.") + return + + repre_files = self._rename_output_files( + filename_template, output_dir, + mark_in, mark_out, + output_frame_start, output_frame_end + ) + # Fill tags and new families tags = [] if family_lowered in ("review", "renderlayer"): tags.append("review") # Sequence of one frame - if len(repre_files) == 1: + single_file = len(repre_files) == 1 + if single_file: repre_files = repre_files[0] new_repre = { @@ -89,10 +156,13 @@ class ExtractSequence(pyblish.api.Extractor): 
"ext": ext, "files": repre_files, "stagingDir": output_dir, - "frameStart": frame_start, - "frameEnd": frame_end, "tags": tags } + + if not single_file: + new_repre["frameStart"] = output_frame_start + new_repre["frameEnd"] = output_frame_end + self.log.debug("Creating new representation: {}".format(new_repre)) instance.data["representations"].append(new_repre) @@ -133,9 +203,45 @@ class ExtractSequence(pyblish.api.Extractor): return "{{frame:0>{}}}".format(frame_padding) + ".png" - def render_review( - self, filename_template, output_dir, frame_start, frame_end + def _rename_output_files( + self, filename_template, output_dir, + mark_in, mark_out, output_frame_start, output_frame_end ): + # Use differnet ranges based on Mark In and output Frame Start values + # - this is to make sure that filename renaming won't affect files that + # are not renamed yet + mark_start_is_less = bool(mark_in < output_frame_start) + if mark_start_is_less: + marks_range = range(mark_out, mark_in - 1, -1) + frames_range = range(output_frame_end, output_frame_start - 1, -1) + else: + # This is less possible situation as frame start will be in most + # cases higher than Mark In. 
+ marks_range = range(mark_in, mark_out + 1) + frames_range = range(output_frame_start, output_frame_end + 1) + + repre_filepaths = [] + for mark, frame in zip(marks_range, frames_range): + new_filename = filename_template.format(frame=frame) + new_filepath = os.path.join(output_dir, new_filename) + + repre_filepaths.append(new_filepath) + + if mark != frame: + old_filename = filename_template.format(frame=mark) + old_filepath = os.path.join(output_dir, old_filename) + os.rename(old_filepath, new_filepath) + + # Reverse repre files order if output + if mark_start_is_less: + repre_filepaths = list(reversed(repre_filepaths)) + + return [ + os.path.basename(path) + for path in repre_filepaths + ] + + def render_review(self, filename_template, output_dir, mark_in, mark_out): """ Export images from TVPaint using `tv_savesequence` command. Args: @@ -144,8 +250,8 @@ class ExtractSequence(pyblish.api.Extractor): keyword argument `{frame}` or index argument (for same value). Extension in template must match `save_mode`. output_dir (str): Directory where files will be stored. - first_frame (int): Starting frame from which export will begin. - last_frame (int): On which frame export will end. + mark_in (int): Starting frame index from which export will begin. + mark_out (int): On which frame index export will end. 
Retruns: tuple: With 2 items first is list of filenames second is path to @@ -154,10 +260,8 @@ class ExtractSequence(pyblish.api.Extractor): self.log.debug("Preparing data for rendering.") first_frame_filepath = os.path.join( output_dir, - filename_template.format(frame=frame_start) + filename_template.format(frame=mark_in) ) - mark_in = frame_start - 1 - mark_out = frame_end - 1 george_script_lines = [ "tv_SaveMode \"PNG\"", @@ -170,13 +274,22 @@ class ExtractSequence(pyblish.api.Extractor): ] lib.execute_george_through_file("\n".join(george_script_lines)) - output = [] first_frame_filepath = None - for frame in range(frame_start, frame_end + 1): + output_filenames = [] + for frame in range(mark_in, mark_out + 1): filename = filename_template.format(frame=frame) - output.append(filename) + output_filenames.append(filename) + + filepath = os.path.join(output_dir, filename) + if not os.path.exists(filepath): + raise AssertionError( + "Output was not rendered. File was not found {}".format( + filepath + ) + ) + if first_frame_filepath is None: - first_frame_filepath = os.path.join(output_dir, filename) + first_frame_filepath = filepath thumbnail_filepath = os.path.join(output_dir, "thumbnail.jpg") if first_frame_filepath and os.path.exists(first_frame_filepath): @@ -184,11 +297,10 @@ class ExtractSequence(pyblish.api.Extractor): thumbnail_obj = Image.new("RGB", source_img.size, (255, 255, 255)) thumbnail_obj.paste(source_img) thumbnail_obj.save(thumbnail_filepath) - return output, thumbnail_filepath - def render( - self, filename_template, output_dir, frame_start, frame_end, layers - ): + return output_filenames, thumbnail_filepath + + def render(self, filename_template, output_dir, mark_in, mark_out, layers): """ Export images from TVPaint. Args: @@ -197,8 +309,8 @@ class ExtractSequence(pyblish.api.Extractor): keyword argument `{frame}` or index argument (for same value). Extension in template must match `save_mode`. 
output_dir (str): Directory where files will be stored. - first_frame (int): Starting frame from which export will begin. - last_frame (int): On which frame export will end. + mark_in (int): Starting frame index from which export will begin. + mark_out (int): On which frame index export will end. layers (list): List of layers to be exported. Retruns: @@ -219,14 +331,11 @@ class ExtractSequence(pyblish.api.Extractor): # Sort layer positions in reverse order sorted_positions = list(reversed(sorted(layers_by_position.keys()))) if not sorted_positions: - return + return [], None self.log.debug("Collecting pre/post behavior of individual layers.") behavior_by_layer_id = lib.get_layers_pre_post_behavior(layer_ids) - mark_in_index = frame_start - 1 - mark_out_index = frame_end - 1 - tmp_filename_template = "pos_{pos}." + filename_template files_by_position = {} @@ -239,25 +348,47 @@ class ExtractSequence(pyblish.api.Extractor): tmp_filename_template, output_dir, behavior, - mark_in_index, - mark_out_index + mark_in, + mark_out ) - files_by_position[position] = files_by_frames + if files_by_frames: + files_by_position[position] = files_by_frames + else: + self.log.warning(( + "Skipped layer \"{}\". Probably out of Mark In/Out range." 
+ ).format(layer["name"])) + + if not files_by_position: + layer_names = set(layer["name"] for layer in layers) + joined_names = ", ".join( + ["\"{}\"".format(name) for name in layer_names] + ) + self.log.warning( + "Layers {} do not have content in range {} - {}".format( + joined_names, mark_in, mark_out + ) + ) + return [], None output_filepaths = self._composite_files( files_by_position, - mark_in_index, - mark_out_index, + mark_in, + mark_out, filename_template, output_dir ) self._cleanup_tmp_files(files_by_position) - thumbnail_src_filepath = None - thumbnail_filepath = None - if output_filepaths: - thumbnail_src_filepath = tuple(sorted(output_filepaths))[0] + output_filenames = [ + os.path.basename(filepath) + for filepath in output_filepaths + ] + thumbnail_src_filepath = None + if output_filepaths: + thumbnail_src_filepath = output_filepaths[0] + + thumbnail_filepath = None if thumbnail_src_filepath and os.path.exists(thumbnail_src_filepath): source_img = Image.open(thumbnail_src_filepath) thumbnail_filepath = os.path.join(output_dir, "thumbnail.jpg") @@ -265,11 +396,7 @@ class ExtractSequence(pyblish.api.Extractor): thumbnail_obj.paste(source_img) thumbnail_obj.save(thumbnail_filepath) - repre_files = [ - os.path.basename(path) - for path in output_filepaths - ] - return repre_files, thumbnail_filepath + return output_filenames, thumbnail_filepath def _render_layer( self, @@ -283,6 +410,22 @@ class ExtractSequence(pyblish.api.Extractor): layer_id = layer["layer_id"] frame_start_index = layer["frame_start"] frame_end_index = layer["frame_end"] + + pre_behavior = behavior["pre"] + post_behavior = behavior["post"] + + # Check if layer is before mark in + if frame_end_index < mark_in_index: + # Skip layer if post behavior is "none" + if post_behavior == "none": + return {} + + # Check if layer is after mark out + elif frame_start_index > mark_out_index: + # Skip layer if pre behavior is "none" + if pre_behavior == "none": + return {} + exposure_frames = 
lib.get_exposure_frames( layer_id, frame_start_index, frame_end_index ) @@ -341,8 +484,6 @@ class ExtractSequence(pyblish.api.Extractor): self.log.debug("Filled frames {}".format(str(_debug_filled_frames))) # Fill frames by pre/post behavior of layer - pre_behavior = behavior["pre"] - post_behavior = behavior["post"] self.log.debug(( "Completing image sequence of layer by pre/post behavior." " PRE: {} | POST: {}" @@ -530,17 +671,12 @@ class ExtractSequence(pyblish.api.Extractor): filepath = position_data[frame_idx] images_by_frame[frame_idx].append(filepath) - process_count = os.cpu_count() - if process_count > 1: - process_count -= 1 - - processes = {} output_filepaths = [] missing_frame_paths = [] random_frame_path = None for frame_idx in sorted(images_by_frame.keys()): image_filepaths = images_by_frame[frame_idx] - output_filename = filename_template.format(frame=frame_idx + 1) + output_filename = filename_template.format(frame=frame_idx) output_filepath = os.path.join(output_dir, output_filename) output_filepaths.append(output_filepath) @@ -553,45 +689,15 @@ class ExtractSequence(pyblish.api.Extractor): if len(image_filepaths) == 1: os.rename(image_filepaths[0], output_filepath) - # Prepare process for compositing of images + # Composite images else: - processes[frame_idx] = multiprocessing.Process( - target=composite_images, - args=(image_filepaths, output_filepath) - ) + composite_images(image_filepaths, output_filepath) # Store path of random output image that will 100% exist after all # multiprocessing as mockup for missing frames if random_frame_path is None: random_frame_path = output_filepath - self.log.info( - "Running {} compositing processes - this mey take a while.".format( - len(processes) - ) - ) - # Wait until all compositing processes are done - running_processes = {} - while True: - for idx in tuple(running_processes.keys()): - process = running_processes[idx] - if not process.is_alive(): - running_processes.pop(idx).join() - - if processes and 
len(running_processes) != process_count: - indexes = list(processes.keys()) - for _ in range(process_count - len(running_processes)): - if not indexes: - break - idx = indexes.pop(0) - running_processes[idx] = processes.pop(idx) - running_processes[idx].start() - - if not running_processes and not processes: - break - - time.sleep(0.01) - self.log.debug( "Creating transparent images for frames without render {}.".format( str(missing_frame_paths) diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py index 73486d1005..e2ef81e4a4 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py @@ -14,37 +14,54 @@ class ValidateMarksRepair(pyblish.api.Action): def process(self, context, plugin): expected_data = ValidateMarks.get_expected_data(context) - expected_data["markIn"] -= 1 - expected_data["markOut"] -= 1 - - lib.execute_george("tv_markin {} set".format(expected_data["markIn"])) + lib.execute_george( + "tv_markin {} set".format(expected_data["markIn"]) + ) lib.execute_george( "tv_markout {} set".format(expected_data["markOut"]) ) class ValidateMarks(pyblish.api.ContextPlugin): - """Validate mark in and out are enabled.""" + """Validate mark in and out are enabled and it's duration. - label = "Validate Marks" + Mark In/Out does not have to match frameStart and frameEnd but duration is + important. 
+ """ + + label = "Validate Mark In/Out" order = pyblish.api.ValidatorOrder optional = True actions = [ValidateMarksRepair] @staticmethod def get_expected_data(context): + scene_mark_in = context.data["sceneMarkIn"] + + # Data collected in `CollectAvalonEntities` + frame_end = context.data["frameEnd"] + frame_start = context.data["frameStart"] + handle_start = context.data["handleStart"] + handle_end = context.data["handleEnd"] + + # Calculate expeted Mark out (Mark In + duration - 1) + expected_mark_out = ( + scene_mark_in + + (frame_end - frame_start) + + handle_start + handle_end + ) return { - "markIn": int(context.data["frameStart"]), + "markIn": scene_mark_in, "markInState": True, - "markOut": int(context.data["frameEnd"]), + "markOut": expected_mark_out, "markOutState": True } def process(self, context): current_data = { - "markIn": context.data["sceneMarkIn"] + 1, + "markIn": context.data["sceneMarkIn"], "markInState": context.data["sceneMarkInState"], - "markOut": context.data["sceneMarkOut"] + 1, + "markOut": context.data["sceneMarkOut"], "markOutState": context.data["sceneMarkOutState"] } expected_data = self.get_expected_data(context) diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py b/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py new file mode 100644 index 0000000000..a9279bf6e0 --- /dev/null +++ b/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py @@ -0,0 +1,162 @@ +import os + +from avalon import api, pipeline +from avalon.unreal import lib +from avalon.unreal import pipeline as unreal_pipeline +import unreal + + +class PointCacheAlembicLoader(api.Loader): + """Load Point Cache from Alembic""" + + families = ["model", "pointcache"] + label = "Import Alembic Point Cache" + representations = ["abc"] + icon = "cube" + color = "orange" + + def load(self, context, name, namespace, data): + """ + Load and containerise representation into Content Browser. + + This is two step process. 
First, import FBX to temporary path and + then call `containerise()` on it - this moves all content to new + directory and then it will create AssetContainer there and imprint it + with metadata. This will mark this path as container. + + Args: + context (dict): application context + name (str): subset name + namespace (str): in Unreal this is basically path to container. + This is not passed here, so namespace is set + by `containerise()` because only then we know + real path. + data (dict): Those would be data to be imprinted. This is not used + now, data are imprinted by `containerise()`. + + Returns: + list(str): list of container content + """ + + # Create directory for asset and avalon container + root = "/Game/Avalon/Assets" + asset = context.get('asset').get('name') + suffix = "_CON" + if asset: + asset_name = "{}_{}".format(asset, name) + else: + asset_name = "{}".format(name) + + tools = unreal.AssetToolsHelpers().get_asset_tools() + asset_dir, container_name = tools.create_unique_asset_name( + "{}/{}/{}".format(root, asset, name), suffix="") + + container_name += suffix + + unreal.EditorAssetLibrary.make_directory(asset_dir) + + task = unreal.AssetImportTask() + + task.set_editor_property('filename', self.fname) + task.set_editor_property('destination_path', asset_dir) + task.set_editor_property('destination_name', asset_name) + task.set_editor_property('replace_existing', False) + task.set_editor_property('automated', True) + task.set_editor_property('save', True) + + # set import options here + # Unreal 4.24 ignores the settings. 
It works with Unreal 4.26 + options = unreal.AbcImportSettings() + options.set_editor_property( + 'import_type', unreal.AlembicImportType.GEOMETRY_CACHE) + + options.geometry_cache_settings.set_editor_property( + 'flatten_tracks', False) + + task.options = options + unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 + + # Create Asset Container + lib.create_avalon_container( + container=container_name, path=asset_dir) + + data = { + "schema": "openpype:container-2.0", + "id": pipeline.AVALON_CONTAINER_ID, + "asset": asset, + "namespace": asset_dir, + "container_name": container_name, + "asset_name": asset_name, + "loader": str(self.__class__.__name__), + "representation": context["representation"]["_id"], + "parent": context["representation"]["parent"], + "family": context["representation"]["context"]["family"] + } + unreal_pipeline.imprint( + "{}/{}".format(asset_dir, container_name), data) + + asset_content = unreal.EditorAssetLibrary.list_assets( + asset_dir, recursive=True, include_folder=True + ) + + for a in asset_content: + unreal.EditorAssetLibrary.save_asset(a) + + return asset_content + + def update(self, container, representation): + name = container["asset_name"] + source_path = api.get_representation_path(representation) + destination_path = container["namespace"] + + task = unreal.AssetImportTask() + + task.set_editor_property('filename', source_path) + task.set_editor_property('destination_path', destination_path) + # strip suffix + task.set_editor_property('destination_name', name) + task.set_editor_property('replace_existing', True) + task.set_editor_property('automated', True) + task.set_editor_property('save', True) + + # set import options here + # Unreal 4.24 ignores the settings. 
It works with Unreal 4.26 + options = unreal.AbcImportSettings() + options.set_editor_property( + 'import_type', unreal.AlembicImportType.GEOMETRY_CACHE) + + options.geometry_cache_settings.set_editor_property( + 'flatten_tracks', False) + + task.options = options + # do import fbx and replace existing data + unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) + container_path = "{}/{}".format(container["namespace"], + container["objectName"]) + # update metadata + unreal_pipeline.imprint( + container_path, + { + "representation": str(representation["_id"]), + "parent": str(representation["parent"]) + }) + + asset_content = unreal.EditorAssetLibrary.list_assets( + destination_path, recursive=True, include_folder=True + ) + + for a in asset_content: + unreal.EditorAssetLibrary.save_asset(a) + + def remove(self, container): + path = container["namespace"] + parent_path = os.path.dirname(path) + + unreal.EditorAssetLibrary.delete_directory(path) + + asset_content = unreal.EditorAssetLibrary.list_assets( + parent_path, recursive=False + ) + + if len(asset_content) == 0: + unreal.EditorAssetLibrary.delete_directory(parent_path) diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py b/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py new file mode 100644 index 0000000000..b652af0b89 --- /dev/null +++ b/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py @@ -0,0 +1,156 @@ +import os + +from avalon import api, pipeline +from avalon.unreal import lib +from avalon.unreal import pipeline as unreal_pipeline +import unreal + + +class SkeletalMeshAlembicLoader(api.Loader): + """Load Unreal SkeletalMesh from Alembic""" + + families = ["pointcache"] + label = "Import Alembic Skeletal Mesh" + representations = ["abc"] + icon = "cube" + color = "orange" + + def load(self, context, name, namespace, data): + """ + Load and containerise representation into Content Browser. + + This is two step process. 
First, import FBX to temporary path and + then call `containerise()` on it - this moves all content to new + directory and then it will create AssetContainer there and imprint it + with metadata. This will mark this path as container. + + Args: + context (dict): application context + name (str): subset name + namespace (str): in Unreal this is basically path to container. + This is not passed here, so namespace is set + by `containerise()` because only then we know + real path. + data (dict): Those would be data to be imprinted. This is not used + now, data are imprinted by `containerise()`. + + Returns: + list(str): list of container content + """ + + # Create directory for asset and avalon container + root = "/Game/Avalon/Assets" + asset = context.get('asset').get('name') + suffix = "_CON" + if asset: + asset_name = "{}_{}".format(asset, name) + else: + asset_name = "{}".format(name) + + tools = unreal.AssetToolsHelpers().get_asset_tools() + asset_dir, container_name = tools.create_unique_asset_name( + "{}/{}/{}".format(root, asset, name), suffix="") + + container_name += suffix + + unreal.EditorAssetLibrary.make_directory(asset_dir) + + task = unreal.AssetImportTask() + + task.set_editor_property('filename', self.fname) + task.set_editor_property('destination_path', asset_dir) + task.set_editor_property('destination_name', asset_name) + task.set_editor_property('replace_existing', False) + task.set_editor_property('automated', True) + task.set_editor_property('save', True) + + # set import options here + # Unreal 4.24 ignores the settings. 
It works with Unreal 4.26 + options = unreal.AbcImportSettings() + options.set_editor_property( + 'import_type', unreal.AlembicImportType.SKELETAL) + + task.options = options + unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 + + # Create Asset Container + lib.create_avalon_container( + container=container_name, path=asset_dir) + + data = { + "schema": "openpype:container-2.0", + "id": pipeline.AVALON_CONTAINER_ID, + "asset": asset, + "namespace": asset_dir, + "container_name": container_name, + "asset_name": asset_name, + "loader": str(self.__class__.__name__), + "representation": context["representation"]["_id"], + "parent": context["representation"]["parent"], + "family": context["representation"]["context"]["family"] + } + unreal_pipeline.imprint( + "{}/{}".format(asset_dir, container_name), data) + + asset_content = unreal.EditorAssetLibrary.list_assets( + asset_dir, recursive=True, include_folder=True + ) + + for a in asset_content: + unreal.EditorAssetLibrary.save_asset(a) + + return asset_content + + def update(self, container, representation): + name = container["asset_name"] + source_path = api.get_representation_path(representation) + destination_path = container["namespace"] + + task = unreal.AssetImportTask() + + task.set_editor_property('filename', source_path) + task.set_editor_property('destination_path', destination_path) + # strip suffix + task.set_editor_property('destination_name', name) + task.set_editor_property('replace_existing', True) + task.set_editor_property('automated', True) + task.set_editor_property('save', True) + + # set import options here + # Unreal 4.24 ignores the settings. 
It works with Unreal 4.26 + options = unreal.AbcImportSettings() + options.set_editor_property( + 'import_type', unreal.AlembicImportType.SKELETAL) + + task.options = options + # do import fbx and replace existing data + unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) + container_path = "{}/{}".format(container["namespace"], + container["objectName"]) + # update metadata + unreal_pipeline.imprint( + container_path, + { + "representation": str(representation["_id"]), + "parent": str(representation["parent"]) + }) + + asset_content = unreal.EditorAssetLibrary.list_assets( + destination_path, recursive=True, include_folder=True + ) + + for a in asset_content: + unreal.EditorAssetLibrary.save_asset(a) + + def remove(self, container): + path = container["namespace"] + parent_path = os.path.dirname(path) + + unreal.EditorAssetLibrary.delete_directory(path) + + asset_content = unreal.EditorAssetLibrary.list_assets( + parent_path, recursive=False + ) + + if len(asset_content) == 0: + unreal.EditorAssetLibrary.delete_directory(parent_path) diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py b/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py new file mode 100644 index 0000000000..12b9320f72 --- /dev/null +++ b/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py @@ -0,0 +1,156 @@ +import os + +from avalon import api, pipeline +from avalon.unreal import lib +from avalon.unreal import pipeline as unreal_pipeline +import unreal + + +class StaticMeshAlembicLoader(api.Loader): + """Load Unreal StaticMesh from Alembic""" + + families = ["model"] + label = "Import Alembic Static Mesh" + representations = ["abc"] + icon = "cube" + color = "orange" + + def load(self, context, name, namespace, data): + """ + Load and containerise representation into Content Browser. + + This is two step process. 
First, import FBX to temporary path and + then call `containerise()` on it - this moves all content to new + directory and then it will create AssetContainer there and imprint it + with metadata. This will mark this path as container. + + Args: + context (dict): application context + name (str): subset name + namespace (str): in Unreal this is basically path to container. + This is not passed here, so namespace is set + by `containerise()` because only then we know + real path. + data (dict): Those would be data to be imprinted. This is not used + now, data are imprinted by `containerise()`. + + Returns: + list(str): list of container content + """ + + # Create directory for asset and avalon container + root = "/Game/Avalon/Assets" + asset = context.get('asset').get('name') + suffix = "_CON" + if asset: + asset_name = "{}_{}".format(asset, name) + else: + asset_name = "{}".format(name) + + tools = unreal.AssetToolsHelpers().get_asset_tools() + asset_dir, container_name = tools.create_unique_asset_name( + "{}/{}/{}".format(root, asset, name), suffix="") + + container_name += suffix + + unreal.EditorAssetLibrary.make_directory(asset_dir) + + task = unreal.AssetImportTask() + + task.set_editor_property('filename', self.fname) + task.set_editor_property('destination_path', asset_dir) + task.set_editor_property('destination_name', asset_name) + task.set_editor_property('replace_existing', False) + task.set_editor_property('automated', True) + task.set_editor_property('save', True) + + # set import options here + # Unreal 4.24 ignores the settings. 
It works with Unreal 4.26 + options = unreal.AbcImportSettings() + options.set_editor_property( + 'import_type', unreal.AlembicImportType.STATIC_MESH) + + task.options = options + unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 + + # Create Asset Container + lib.create_avalon_container( + container=container_name, path=asset_dir) + + data = { + "schema": "openpype:container-2.0", + "id": pipeline.AVALON_CONTAINER_ID, + "asset": asset, + "namespace": asset_dir, + "container_name": container_name, + "asset_name": asset_name, + "loader": str(self.__class__.__name__), + "representation": context["representation"]["_id"], + "parent": context["representation"]["parent"], + "family": context["representation"]["context"]["family"] + } + unreal_pipeline.imprint( + "{}/{}".format(asset_dir, container_name), data) + + asset_content = unreal.EditorAssetLibrary.list_assets( + asset_dir, recursive=True, include_folder=True + ) + + for a in asset_content: + unreal.EditorAssetLibrary.save_asset(a) + + return asset_content + + def update(self, container, representation): + name = container["asset_name"] + source_path = api.get_representation_path(representation) + destination_path = container["namespace"] + + task = unreal.AssetImportTask() + + task.set_editor_property('filename', source_path) + task.set_editor_property('destination_path', destination_path) + # strip suffix + task.set_editor_property('destination_name', name) + task.set_editor_property('replace_existing', True) + task.set_editor_property('automated', True) + task.set_editor_property('save', True) + + # set import options here + # Unreal 4.24 ignores the settings. 
It works with Unreal 4.26 + options = unreal.AbcImportSettings() + options.set_editor_property( + 'import_type', unreal.AlembicImportType.STATIC_MESH) + + task.options = options + # do import fbx and replace existing data + unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) + container_path = "{}/{}".format(container["namespace"], + container["objectName"]) + # update metadata + unreal_pipeline.imprint( + container_path, + { + "representation": str(representation["_id"]), + "parent": str(representation["parent"]) + }) + + asset_content = unreal.EditorAssetLibrary.list_assets( + destination_path, recursive=True, include_folder=True + ) + + for a in asset_content: + unreal.EditorAssetLibrary.save_asset(a) + + def remove(self, container): + path = container["namespace"] + parent_path = os.path.dirname(path) + + unreal.EditorAssetLibrary.delete_directory(path) + + asset_content = unreal.EditorAssetLibrary.list_assets( + parent_path, recursive=False + ) + + if len(asset_content) == 0: + unreal.EditorAssetLibrary.delete_directory(parent_path) diff --git a/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py b/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py index dbea1d5951..dcb566fa4c 100644 --- a/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py +++ b/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py @@ -1,7 +1,6 @@ import os from avalon import api, pipeline -from avalon import unreal as avalon_unreal from avalon.unreal import lib from avalon.unreal import pipeline as unreal_pipeline import unreal diff --git a/openpype/launcher_actions.py b/openpype/launcher_actions.py deleted file mode 100644 index cf68dfb5c1..0000000000 --- a/openpype/launcher_actions.py +++ /dev/null @@ -1,30 +0,0 @@ -import os -import sys - -from avalon import api, pipeline - -PACKAGE_DIR = os.path.dirname(__file__) -PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins", "launcher") -ACTIONS_DIR = os.path.join(PLUGINS_DIR, "actions") - - -def 
register_launcher_actions(): - """Register specific actions which should be accessible in the launcher""" - - actions = [] - ext = ".py" - sys.path.append(ACTIONS_DIR) - - for f in os.listdir(ACTIONS_DIR): - file, extention = os.path.splitext(f) - if ext in extention: - module = __import__(file) - klass = getattr(module, file) - actions.append(klass) - - if actions is []: - return - - for action in actions: - print("Using launcher action from config @ '{}'".format(action.name)) - pipeline.register_plugin(api.Action, action) diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py index f46c81bf7a..838c5aa7a1 100644 --- a/openpype/lib/__init__.py +++ b/openpype/lib/__init__.py @@ -26,7 +26,8 @@ from .terminal import Terminal from .execute import ( get_pype_execute_args, execute, - run_subprocess + run_subprocess, + CREATE_NO_WINDOW ) from .log import PypeLogger, timeit from .mongo import ( @@ -79,6 +80,16 @@ from .avalon_context import ( change_timer_to_current_context ) +from .local_settings import ( + IniSettingRegistry, + JSONSettingRegistry, + OpenPypeSecureRegistry, + OpenPypeSettingsRegistry, + get_local_site_id, + change_openpype_mongo_url, + get_openpype_username +) + from .applications import ( ApplicationLaunchFailed, ApplictionExecutableNotFound, @@ -102,7 +113,9 @@ from .profiles_filtering import filter_profiles from .plugin_tools import ( TaskNotSetError, get_subset_name, + prepare_template_data, filter_pyblish_plugins, + set_plugin_attributes_from_settings, source_hash, get_unique_layer_name, get_background_layers, @@ -112,15 +125,6 @@ from .plugin_tools import ( should_decompress ) -from .local_settings import ( - IniSettingRegistry, - JSONSettingRegistry, - OpenPypeSecureRegistry, - OpenPypeSettingsRegistry, - get_local_site_id, - change_openpype_mongo_url -) - from .path_tools import ( version_up, get_version_from_path, @@ -135,6 +139,7 @@ from .editorial import ( trim_media_range, range_from_frames, frames_to_secons, + frames_to_timecode, 
make_sequence_collection ) @@ -179,6 +184,14 @@ __all__ = [ "change_timer_to_current_context", + "IniSettingRegistry", + "JSONSettingRegistry", + "OpenPypeSecureRegistry", + "OpenPypeSettingsRegistry", + "get_local_site_id", + "change_openpype_mongo_url", + "get_openpype_username", + "ApplicationLaunchFailed", "ApplictionExecutableNotFound", "ApplicationNotFound", @@ -198,6 +211,7 @@ __all__ = [ "TaskNotSetError", "get_subset_name", "filter_pyblish_plugins", + "set_plugin_attributes_from_settings", "source_hash", "get_unique_layer_name", "get_background_layers", @@ -224,13 +238,6 @@ __all__ = [ "validate_mongo_connection", "OpenPypeMongoConnection", - "IniSettingRegistry", - "JSONSettingRegistry", - "OpenPypeSecureRegistry", - "OpenPypeSettingsRegistry", - "get_local_site_id", - "change_openpype_mongo_url", - "timeit", "is_overlapping_otio_ranges", @@ -240,5 +247,6 @@ __all__ = [ "trim_media_range", "range_from_frames", "frames_to_secons", + "frames_to_timecode", "make_sequence_collection" ] diff --git a/openpype/lib/applications.py b/openpype/lib/applications.py index 730d4230b6..c5c192f51b 100644 --- a/openpype/lib/applications.py +++ b/openpype/lib/applications.py @@ -25,6 +25,7 @@ from . import ( PypeLogger, Anatomy ) +from .local_settings import get_openpype_username from .avalon_context import ( get_workdir_data, get_workdir_with_workdir_data @@ -262,14 +263,32 @@ class Application: class ApplicationManager: - def __init__(self): - self.log = PypeLogger().get_logger(self.__class__.__name__) + """Load applications and tools and store them by their full name. + + Args: + system_settings (dict): Preloaded system settings. When passed manager + will always use these values. Gives ability to create manager + using different settings. 
+ """ + def __init__(self, system_settings=None): + self.log = PypeLogger.get_logger(self.__class__.__name__) self.app_groups = {} self.applications = {} self.tool_groups = {} self.tools = {} + self._system_settings = system_settings + + self.refresh() + + def set_system_settings(self, system_settings): + """Ability to change init system settings. + + This will trigger refresh of manager. + """ + self._system_settings = system_settings + self.refresh() def refresh(self): @@ -279,9 +298,12 @@ class ApplicationManager: self.tool_groups.clear() self.tools.clear() - settings = get_system_settings( - clear_metadata=False, exclude_locals=False - ) + if self._system_settings is not None: + settings = copy.deepcopy(self._system_settings) + else: + settings = get_system_settings( + clear_metadata=False, exclude_locals=False + ) app_defs = settings["applications"] for group_name, variant_defs in app_defs.items(): @@ -1225,7 +1247,7 @@ def _prepare_last_workfile(data, workdir): file_template = anatomy.templates["work"]["file"] workdir_data.update({ "version": 1, - "user": os.environ.get("OPENPYPE_USERNAME") or getpass.getuser(), + "user": get_openpype_username(), "ext": extensions[0] }) diff --git a/openpype/lib/editorial.py b/openpype/lib/editorial.py index 1dbc4d7954..bf9a0cb506 100644 --- a/openpype/lib/editorial.py +++ b/openpype/lib/editorial.py @@ -137,6 +137,11 @@ def frames_to_secons(frames, framerate): return _ot.to_seconds(rt) +def frames_to_timecode(frames, framerate): + rt = _ot.from_frames(frames, framerate) + return _ot.to_timecode(rt) + + def make_sequence_collection(path, otio_range, metadata): """ Make collection from path otio range and otio metadata. 
diff --git a/openpype/lib/execute.py b/openpype/lib/execute.py index 441dcfa754..12fba23e82 100644 --- a/openpype/lib/execute.py +++ b/openpype/lib/execute.py @@ -6,6 +6,9 @@ from .log import PypeLogger as Logger log = logging.getLogger(__name__) +# MSDN process creation flag (Windows only) +CREATE_NO_WINDOW = 0x08000000 + def execute(args, silent=False, diff --git a/openpype/lib/local_settings.py b/openpype/lib/local_settings.py index 56bdd047c9..67845c77cf 100644 --- a/openpype/lib/local_settings.py +++ b/openpype/lib/local_settings.py @@ -1,9 +1,11 @@ # -*- coding: utf-8 -*- """Package to deal with saving and retrieving user specific settings.""" import os +import json +import getpass +import platform from datetime import datetime from abc import ABCMeta, abstractmethod -import json # TODO Use pype igniter logic instead of using duplicated code # disable lru cache in Python 2 @@ -24,11 +26,11 @@ try: except ImportError: import ConfigParser as configparser -import platform - import six import appdirs +from openpype.settings import get_local_settings + from .import validate_mongo_connection _PLACEHOLDER = object() @@ -538,3 +540,25 @@ def change_openpype_mongo_url(new_mongo_url): if existing_value is not None: registry.delete_item(key) registry.set_item(key, new_mongo_url) + + +def get_openpype_username(): + """OpenPype username used for templates and publishing. + + May be different than machine's username. + + Always returns "OPENPYPE_USERNAME" environment if is set then tries local + settings and last option is to use `getpass.getuser()` which returns + machine username. 
+ """ + username = os.environ.get("OPENPYPE_USERNAME") + if not username: + local_settings = get_local_settings() + username = ( + local_settings + .get("general", {}) + .get("username") + ) + if not username: + username = getpass.getuser() + return username diff --git a/openpype/lib/log.py b/openpype/lib/log.py index 9745279e28..39b6c67080 100644 --- a/openpype/lib/log.py +++ b/openpype/lib/log.py @@ -123,6 +123,8 @@ class PypeFormatter(logging.Formatter): if record.exc_info is not None: line_len = len(str(record.exc_info[1])) + if line_len > 30: + line_len = 30 out = "{}\n{}\n{}\n{}\n{}".format( out, line_len * "=", diff --git a/openpype/lib/plugin_tools.py b/openpype/lib/plugin_tools.py index 9a2d30d1a7..a5254af0da 100644 --- a/openpype/lib/plugin_tools.py +++ b/openpype/lib/plugin_tools.py @@ -73,6 +73,23 @@ def get_subset_name( ("family", family), ("task", task_name) ) + return template.format(**prepare_template_data(fill_pairs)) + + +def prepare_template_data(fill_pairs): + """ + Prepares formatted data for filling template. + + It produces mutliple variants of keys (key, Key, KEY) to control + format of filled template. + + Args: + fill_pairs (iterable) of tuples (key, value) + Returns: + (dict) + ('host', 'maya') > {'host':'maya', 'Host': 'Maya', 'HOST': 'MAYA'} + + """ fill_data = {} for key, value in fill_pairs: # Handle cases when value is `None` (standalone publisher) @@ -94,7 +111,7 @@ def get_subset_name( capitalized += value[1:] fill_data[key.capitalize()] = capitalized - return template.format(**fill_data) + return fill_data def filter_pyblish_plugins(plugins): @@ -150,6 +167,95 @@ def filter_pyblish_plugins(plugins): setattr(plugin, option, value) +def set_plugin_attributes_from_settings( + plugins, superclass, host_name=None, project_name=None +): + """Change attribute values on Avalon plugins by project settings. + + This function should be used only in host context. Modify + behavior of plugins. 
+ + Args: + plugins (list): Plugins discovered by origin avalon discover method. + superclass (object): Superclass of plugin type (e.g. Cretor, Loader). + host_name (str): Name of host for which plugins are loaded and from. + Value from environment `AVALON_APP` is used if not entered. + project_name (str): Name of project for which settings will be loaded. + Value from environment `AVALON_PROJECT` is used if not entered. + """ + + # determine host application to use for finding presets + if host_name is None: + host_name = os.environ.get("AVALON_APP") + + if project_name is None: + project_name = os.environ.get("AVALON_PROJECT") + + # map plugin superclass to preset json. Currenly suppoted is load and + # create (avalon.api.Loader and avalon.api.Creator) + plugin_type = None + if superclass.__name__.split(".")[-1] in ("Loader", "SubsetLoader"): + plugin_type = "load" + elif superclass.__name__.split(".")[-1] == "Creator": + plugin_type = "create" + + if not host_name or not project_name or plugin_type is None: + msg = "Skipped attributes override from settings." + if not host_name: + msg += " Host name is not defined." + + if not project_name: + msg += " Project name is not defined." 
+ + if plugin_type is None: + msg += " Plugin type is unsupported for class {}.".format( + superclass.__name__ + ) + + print(msg) + return + + print(">>> Finding presets for {}:{} ...".format(host_name, plugin_type)) + + project_settings = get_project_settings(project_name) + plugin_type_settings = ( + project_settings + .get(host_name, {}) + .get(plugin_type, {}) + ) + global_type_settings = ( + project_settings + .get("global", {}) + .get(plugin_type, {}) + ) + if not global_type_settings and not plugin_type_settings: + return + + for plugin in plugins: + plugin_name = plugin.__name__ + + plugin_settings = None + # Look for plugin settings in host specific settings + if plugin_name in plugin_type_settings: + plugin_settings = plugin_type_settings[plugin_name] + + # Look for plugin settings in global settings + elif plugin_name in global_type_settings: + plugin_settings = global_type_settings[plugin_name] + + if not plugin_settings: + continue + + print(">>> We have preset for {}".format(plugin_name)) + for option, value in plugin_settings.items(): + if option == "enabled" and value is False: + setattr(plugin, "active", False) + print(" - is disabled by preset") + else: + setattr(plugin, option, value) + print(" - setting `{}`: `{}`".format(option, value)) + + def source_hash(filepath, *args): """Generate simple identifier for a source file. 
This is used to identify whether a source file has previously been diff --git a/openpype/modules/__init__.py b/openpype/modules/__init__.py index d7c6d99fe6..bae48c540b 100644 --- a/openpype/modules/__init__.py +++ b/openpype/modules/__init__.py @@ -18,10 +18,6 @@ from .webserver import ( WebServerModule, IWebServerRoutes ) -from .user import ( - UserModule, - IUserModule -) from .idle_manager import ( IdleManager, IIdleManager @@ -60,9 +56,6 @@ __all__ = ( "WebServerModule", "IWebServerRoutes", - "UserModule", - "IUserModule", - "IdleManager", "IIdleManager", diff --git a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py index 38a6b9b246..69159fda1a 100644 --- a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py @@ -64,7 +64,6 @@ class AfterEffectsSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline "AVALON_ASSET", "AVALON_TASK", "AVALON_APP_NAME", - "OPENPYPE_USERNAME", "OPENPYPE_DEV", "OPENPYPE_LOG_NO_COLORS" ] diff --git a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py index ba1ffdcf30..37041a84b1 100644 --- a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py @@ -273,7 +273,6 @@ class HarmonySubmitDeadline( "AVALON_ASSET", "AVALON_TASK", "AVALON_APP_NAME", - "OPENPYPE_USERNAME", "OPENPYPE_DEV", "OPENPYPE_LOG_NO_COLORS" ] diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py index 3aea837bb1..a5841f406c 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -47,7 +47,7 @@ 
payload_skeleton_template = { "BatchName": None, # Top-level group name "Name": None, # Job name, as seen in Monitor "UserName": None, - "Plugin": "MayaPype", + "Plugin": "MayaBatch", "Frames": "{start}-{end}x{step}", "Comment": None, "Priority": 50, @@ -396,7 +396,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): step=int(self._instance.data["byFrameStep"])) self.payload_skeleton["JobInfo"]["Plugin"] = self._instance.data.get( - "mayaRenderPlugin", "MayaPype") + "mayaRenderPlugin", "MayaBatch") self.payload_skeleton["JobInfo"]["BatchName"] = filename # Job name, as seen in Monitor @@ -441,7 +441,6 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): "AVALON_ASSET", "AVALON_TASK", "AVALON_APP_NAME", - "OPENPYPE_USERNAME", "OPENPYPE_DEV", "OPENPYPE_LOG_NO_COLORS" ] diff --git a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py index 2e30e624ef..7faa3393e5 100644 --- a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py @@ -31,6 +31,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): group = "" department = "" limit_groups = {} + use_gpu = False def process(self, instance): instance.data["toBeRenderedOn"] = "deadline" @@ -206,6 +207,10 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): # Resolve relative references "ProjectPath": script_path, "AWSAssetFile0": render_path, + + # using GPU by default + "UseGpu": self.use_gpu, + # Only the specific write node is rendered. 
"WriteNode": exe_node_name }, @@ -375,7 +380,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): list: captured groups list """ captured_groups = [] - for lg_name, list_node_class in self.deadline_limit_groups.items(): + for lg_name, list_node_class in self.limit_groups.items(): for node_class in list_node_class: for node in nuke.allNodes(recurseGroups=True): # ignore all nodes not member of defined class diff --git a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py index 8248bf532e..12d687bbf2 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py +++ b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py @@ -2,9 +2,9 @@ import json from openpype.api import ProjectSettings -from openpype.modules.ftrack.lib import ServerAction -from openpype.modules.ftrack.lib.avalon_sync import ( - get_pype_attr, +from openpype.modules.ftrack.lib import ( + ServerAction, + get_openpype_attr, CUST_ATTR_AUTO_SYNC ) @@ -159,7 +159,7 @@ class PrepareProjectServer(ServerAction): for key, entity in project_anatom_settings["attributes"].items(): attribute_values_by_key[key] = entity.value - cust_attrs, hier_cust_attrs = get_pype_attr(self.session, True) + cust_attrs, hier_cust_attrs = get_openpype_attr(self.session, True) for attr in hier_cust_attrs: key = attr["key"] diff --git a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index 347b227dd3..3bb01798e4 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -18,12 +18,15 @@ from avalon import schema from avalon.api import AvalonMongoDB from openpype.modules.ftrack.lib import ( + get_openpype_attr, + CUST_ATTR_ID_KEY, + CUST_ATTR_AUTO_SYNC, + avalon_sync, + BaseEvent ) from 
openpype.modules.ftrack.lib.avalon_sync import ( - CUST_ATTR_ID_KEY, - CUST_ATTR_AUTO_SYNC, EntitySchemas ) @@ -125,7 +128,7 @@ class SyncToAvalonEvent(BaseEvent): @property def avalon_cust_attrs(self): if self._avalon_cust_attrs is None: - self._avalon_cust_attrs = avalon_sync.get_pype_attr( + self._avalon_cust_attrs = get_openpype_attr( self.process_session, query_keys=self.cust_attr_query_keys ) return self._avalon_cust_attrs diff --git a/openpype/modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py b/openpype/modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py index c326c56a7c..45cc9adf55 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py +++ b/openpype/modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py @@ -1,7 +1,10 @@ import collections import ftrack_api -from openpype.modules.ftrack.lib import BaseAction, statics_icon -from openpype.modules.ftrack.lib.avalon_sync import get_pype_attr +from openpype.modules.ftrack.lib import ( + BaseAction, + statics_icon, + get_openpype_attr +) class CleanHierarchicalAttrsAction(BaseAction): @@ -52,7 +55,7 @@ class CleanHierarchicalAttrsAction(BaseAction): ) entity_ids_joined = ", ".join(all_entities_ids) - attrs, hier_attrs = get_pype_attr(session) + attrs, hier_attrs = get_openpype_attr(session) for attr in hier_attrs: configuration_key = attr["key"] diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py b/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py index 63025d35b3..63605eda5e 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py +++ b/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py @@ -2,10 +2,20 @@ import collections import json import arrow import ftrack_api -from openpype.modules.ftrack.lib import BaseAction, statics_icon -from openpype.modules.ftrack.lib.avalon_sync import ( - 
CUST_ATTR_ID_KEY, CUST_ATTR_GROUP, default_custom_attributes_definition +from openpype.modules.ftrack.lib import ( + BaseAction, + statics_icon, + + CUST_ATTR_ID_KEY, + CUST_ATTR_GROUP, + CUST_ATTR_TOOLS, + CUST_ATTR_APPLICATIONS, + + default_custom_attributes_definition, + app_definitions_from_app_manager, + tool_definitions_from_app_manager ) + from openpype.api import get_system_settings from openpype.lib import ApplicationManager @@ -370,24 +380,12 @@ class CustomAttributes(BaseAction): exc_info=True ) - def app_defs_from_app_manager(self): - app_definitions = [] - for app_name, app in self.app_manager.applications.items(): - if app.enabled and app.is_host: - app_definitions.append({ - app_name: app.full_label - }) - - if not app_definitions: - app_definitions.append({"empty": "< Empty >"}) - return app_definitions - def applications_attribute(self, event): - apps_data = self.app_defs_from_app_manager() + apps_data = app_definitions_from_app_manager(self.app_manager) applications_custom_attr_data = { "label": "Applications", - "key": "applications", + "key": CUST_ATTR_APPLICATIONS, "type": "enumerator", "entity_type": "show", "group": CUST_ATTR_GROUP, @@ -399,19 +397,11 @@ class CustomAttributes(BaseAction): self.process_attr_data(applications_custom_attr_data, event) def tools_attribute(self, event): - tools_data = [] - for tool_name, tool in self.app_manager.tools.items(): - tools_data.append({ - tool_name: tool.label - }) - - # Make sure there is at least one item - if not tools_data: - tools_data.append({"empty": "< Empty >"}) + tools_data = tool_definitions_from_app_manager(self.app_manager) tools_custom_attr_data = { "label": "Tools", - "key": "tools_env", + "key": CUST_ATTR_TOOLS, "type": "enumerator", "is_hierarchical": True, "group": CUST_ATTR_GROUP, diff --git a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py index bd25f995fe..5298c06371 100644 --- 
a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py +++ b/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py @@ -4,10 +4,8 @@ from openpype.api import ProjectSettings from openpype.modules.ftrack.lib import ( BaseAction, - statics_icon -) -from openpype.modules.ftrack.lib.avalon_sync import ( - get_pype_attr, + statics_icon, + get_openpype_attr, CUST_ATTR_AUTO_SYNC ) @@ -162,7 +160,7 @@ class PrepareProjectLocal(BaseAction): for key, entity in project_anatom_settings["attributes"].items(): attribute_values_by_key[key] = entity.value - cust_attrs, hier_cust_attrs = get_pype_attr(self.session, True) + cust_attrs, hier_cust_attrs = get_openpype_attr(self.session, True) for attr in hier_cust_attrs: key = attr["key"] diff --git a/openpype/modules/ftrack/ftrack_module.py b/openpype/modules/ftrack/ftrack_module.py index d242268048..ee139a500e 100644 --- a/openpype/modules/ftrack/ftrack_module.py +++ b/openpype/modules/ftrack/ftrack_module.py @@ -1,4 +1,5 @@ import os +import json import collections from abc import ABCMeta, abstractmethod import six @@ -8,10 +9,10 @@ from openpype.modules import ( ITrayModule, IPluginPaths, ITimersManager, - IUserModule, ILaunchHookPaths, ISettingsChangeListener ) +from openpype.settings import SaveWarningExc FTRACK_MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) @@ -32,7 +33,6 @@ class FtrackModule( ITrayModule, IPluginPaths, ITimersManager, - IUserModule, ILaunchHookPaths, ISettingsChangeListener ): @@ -123,15 +123,107 @@ class FtrackModule( if self.tray_module: self.tray_module.stop_timer_manager() - def on_pype_user_change(self, username): - """Implementation of IUserModule interface.""" - if self.tray_module: - self.tray_module.changed_user() - - def on_system_settings_save(self, *_args, **_kwargs): + def on_system_settings_save( + self, old_value, new_value, changes, new_value_metadata + ): """Implementation of ISettingsChangeListener interface.""" - # Ignore - return + if not 
self.ftrack_url: + raise SaveWarningExc(( + "Ftrack URL is not set." + " Can't propagate changes to Ftrack server." + )) + + ftrack_changes = changes.get("modules", {}).get("ftrack", {}) + url_change_msg = None + if "ftrack_server" in ftrack_changes: + url_change_msg = ( + "Ftrack URL was changed." + " This change may need to restart OpenPype to take affect." + ) + + try: + session = self.create_ftrack_session() + except Exception: + self.log.warning("Couldn't create ftrack session.", exc_info=True) + + if url_change_msg: + raise SaveWarningExc(url_change_msg) + + raise SaveWarningExc(( + "Saving of attributes to ftrack wasn't successful," + " try running Create/Update Avalon Attributes in ftrack." + )) + + from .lib import ( + get_openpype_attr, + CUST_ATTR_APPLICATIONS, + CUST_ATTR_TOOLS, + app_definitions_from_app_manager, + tool_definitions_from_app_manager + ) + from openpype.api import ApplicationManager + query_keys = [ + "id", + "key", + "config" + ] + custom_attributes = get_openpype_attr( + session, + split_hierarchical=False, + query_keys=query_keys + ) + app_attribute = None + tool_attribute = None + for custom_attribute in custom_attributes: + key = custom_attribute["key"] + if key == CUST_ATTR_APPLICATIONS: + app_attribute = custom_attribute + elif key == CUST_ATTR_TOOLS: + tool_attribute = custom_attribute + + app_manager = ApplicationManager(new_value_metadata) + missing_attributes = [] + if not app_attribute: + missing_attributes.append(CUST_ATTR_APPLICATIONS) + else: + config = json.loads(app_attribute["config"]) + new_data = app_definitions_from_app_manager(app_manager) + prepared_data = [] + for item in new_data: + for key, label in item.items(): + prepared_data.append({ + "menu": label, + "value": key + }) + + config["data"] = json.dumps(prepared_data) + app_attribute["config"] = json.dumps(config) + + if not tool_attribute: + missing_attributes.append(CUST_ATTR_TOOLS) + else: + config = json.loads(tool_attribute["config"]) + new_data = 
tool_definitions_from_app_manager(app_manager) + prepared_data = [] + for item in new_data: + for key, label in item.items(): + prepared_data.append({ + "menu": label, + "value": key + }) + config["data"] = json.dumps(prepared_data) + tool_attribute["config"] = json.dumps(config) + + session.commit() + + if missing_attributes: + raise SaveWarningExc(( + "Couldn't find custom attribute/s ({}) to update." + " Try running Create/Update Avalon Attributes in ftrack." + ).format(", ".join(missing_attributes))) + + if url_change_msg: + raise SaveWarningExc(url_change_msg) def on_project_settings_save(self, *_args, **_kwargs): """Implementation of ISettingsChangeListener interface.""" @@ -139,7 +231,7 @@ class FtrackModule( return def on_project_anatomy_save( - self, old_value, new_value, changes, project_name + self, old_value, new_value, changes, project_name, new_value_metadata ): """Implementation of ISettingsChangeListener interface.""" if not project_name: @@ -150,32 +242,49 @@ class FtrackModule( return import ftrack_api - from openpype.modules.ftrack.lib import avalon_sync + from openpype.modules.ftrack.lib import get_openpype_attr + + try: + session = self.create_ftrack_session() + except Exception: + self.log.warning("Couldn't create ftrack session.", exc_info=True) + raise SaveWarningExc(( + "Saving of attributes to ftrack wasn't successful," + " try running Create/Update Avalon Attributes in ftrack." + )) - session = self.create_ftrack_session() project_entity = session.query( "Project where full_name is \"{}\"".format(project_name) ).first() if not project_entity: - self.log.warning(( - "Ftrack project with names \"{}\" was not found." - " Skipping settings attributes change callback." - )) - return + msg = ( + "Ftrack project with name \"{}\" was not found in Ftrack." + " Can't push attribute changes." 
+ ).format(project_name) + self.log.warning(msg) + raise SaveWarningExc(msg) project_id = project_entity["id"] - cust_attr, hier_attr = avalon_sync.get_pype_attr(session) + cust_attr, hier_attr = get_openpype_attr(session) cust_attr_by_key = {attr["key"]: attr for attr in cust_attr} hier_attrs_by_key = {attr["key"]: attr for attr in hier_attr} + + failed = {} + missing = {} for key, value in attributes_changes.items(): configuration = hier_attrs_by_key.get(key) if not configuration: configuration = cust_attr_by_key.get(key) if not configuration: + self.log.warning( + "Custom attribute \"{}\" was not found.".format(key) + ) + missing[key] = value continue + # TODO add add permissions check # TODO add value validations # - value type and list items entity_key = collections.OrderedDict() @@ -189,10 +298,45 @@ class FtrackModule( "value", ftrack_api.symbol.NOT_SET, value - ) ) - session.commit() + try: + session.commit() + self.log.debug( + "Changed project custom attribute \"{}\" to \"{}\"".format( + key, value + ) + ) + except Exception: + self.log.warning( + "Failed to set \"{}\" to \"{}\"".format(key, value), + exc_info=True + ) + session.rollback() + failed[key] = value + + if not failed and not missing: + return + + error_msg = ( + "Values were not updated on Ftrack which may cause issues." + " try running Create/Update Avalon Attributes in ftrack " + " and resave project settings." 
+ ) + if missing: + error_msg += "\nMissing Custom attributes on Ftrack: {}.".format( + ", ".join([ + '"{}"'.format(key) + for key in missing.keys() + ]) + ) + if failed: + joined_failed = ", ".join([ + '"{}": "{}"'.format(key, value) + for key, value in failed.items() + ]) + error_msg += "\nFailed to set: {}".format(joined_failed) + raise SaveWarningExc(error_msg) def create_ftrack_session(self, **session_kwargs): import ftrack_api diff --git a/openpype/modules/ftrack/lib/__init__.py b/openpype/modules/ftrack/lib/__init__.py index 82b6875590..ce6d5284b6 100644 --- a/openpype/modules/ftrack/lib/__init__.py +++ b/openpype/modules/ftrack/lib/__init__.py @@ -1,7 +1,21 @@ +from .constants import ( + CUST_ATTR_ID_KEY, + CUST_ATTR_AUTO_SYNC, + CUST_ATTR_GROUP, + CUST_ATTR_TOOLS, + CUST_ATTR_APPLICATIONS +) from . settings import ( get_ftrack_url_from_settings, get_ftrack_event_mongo_info ) +from .custom_attributes import ( + default_custom_attributes_definition, + app_definitions_from_app_manager, + tool_definitions_from_app_manager, + get_openpype_attr +) + from . import avalon_sync from . 
import credentials from .ftrack_base_handler import BaseHandler @@ -10,9 +24,20 @@ from .ftrack_action_handler import BaseAction, ServerAction, statics_icon __all__ = ( + "CUST_ATTR_ID_KEY", + "CUST_ATTR_AUTO_SYNC", + "CUST_ATTR_GROUP", + "CUST_ATTR_TOOLS", + "CUST_ATTR_APPLICATIONS", + "get_ftrack_url_from_settings", "get_ftrack_event_mongo_info", + "default_custom_attributes_definition", + "app_definitions_from_app_manager", + "tool_definitions_from_app_manager", + "get_openpype_attr", + "avalon_sync", "credentials", diff --git a/openpype/modules/ftrack/lib/avalon_sync.py b/openpype/modules/ftrack/lib/avalon_sync.py index 79e1366a0d..f58e858a5a 100644 --- a/openpype/modules/ftrack/lib/avalon_sync.py +++ b/openpype/modules/ftrack/lib/avalon_sync.py @@ -14,17 +14,21 @@ else: from avalon.api import AvalonMongoDB import avalon + from openpype.api import ( Logger, Anatomy, get_anatomy_settings ) +from openpype.lib import ApplicationManager + +from .constants import CUST_ATTR_ID_KEY +from .custom_attributes import get_openpype_attr from bson.objectid import ObjectId from bson.errors import InvalidId from pymongo import UpdateOne import ftrack_api -from openpype.lib import ApplicationManager log = Logger.get_logger(__name__) @@ -36,23 +40,6 @@ EntitySchemas = { "config": "openpype:config-2.0" } -# Group name of custom attributes -CUST_ATTR_GROUP = "openpype" - -# name of Custom attribute that stores mongo_id from avalon db -CUST_ATTR_ID_KEY = "avalon_mongo_id" -CUST_ATTR_AUTO_SYNC = "avalon_auto_sync" - - -def default_custom_attributes_definition(): - json_file_path = os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "custom_attributes.json" - ) - with open(json_file_path, "r") as json_stream: - data = json.load(json_stream) - return data - def check_regex(name, entity_type, in_schema=None, schema_patterns=None): schema_name = "asset-3.0" @@ -91,39 +78,6 @@ def join_query_keys(keys): return ",".join(["\"{}\"".format(key) for key in keys]) -def 
get_pype_attr(session, split_hierarchical=True, query_keys=None): - custom_attributes = [] - hier_custom_attributes = [] - if not query_keys: - query_keys = [ - "id", - "entity_type", - "object_type_id", - "is_hierarchical", - "default" - ] - # TODO remove deprecated "pype" group from query - cust_attrs_query = ( - "select {}" - " from CustomAttributeConfiguration" - # Kept `pype` for Backwards Compatiblity - " where group.name in (\"pype\", \"{}\")" - ).format(", ".join(query_keys), CUST_ATTR_GROUP) - all_avalon_attr = session.query(cust_attrs_query).all() - for cust_attr in all_avalon_attr: - if split_hierarchical and cust_attr["is_hierarchical"]: - hier_custom_attributes.append(cust_attr) - continue - - custom_attributes.append(cust_attr) - - if split_hierarchical: - # return tuple - return custom_attributes, hier_custom_attributes - - return custom_attributes - - def get_python_type_for_custom_attribute(cust_attr, cust_attr_type_name=None): """Python type that should value of custom attribute have. 
@@ -921,7 +875,7 @@ class SyncEntitiesFactory: def set_cutom_attributes(self): self.log.debug("* Preparing custom attributes") # Get custom attributes and values - custom_attrs, hier_attrs = get_pype_attr( + custom_attrs, hier_attrs = get_openpype_attr( self.session, query_keys=self.cust_attr_query_keys ) ent_types = self.session.query("select id, name from ObjectType").all() @@ -2508,7 +2462,7 @@ class SyncEntitiesFactory: if new_entity_id not in p_chilren: self.entities_dict[parent_id]["children"].append(new_entity_id) - cust_attr, _ = get_pype_attr(self.session) + cust_attr, _ = get_openpype_attr(self.session) for _attr in cust_attr: key = _attr["key"] if key not in av_entity["data"]: diff --git a/openpype/modules/ftrack/lib/constants.py b/openpype/modules/ftrack/lib/constants.py new file mode 100644 index 0000000000..73d5112e6d --- /dev/null +++ b/openpype/modules/ftrack/lib/constants.py @@ -0,0 +1,12 @@ +# Group name of custom attributes +CUST_ATTR_GROUP = "openpype" + +# name of Custom attribute that stores mongo_id from avalon db +CUST_ATTR_ID_KEY = "avalon_mongo_id" +# Auto sync of project +CUST_ATTR_AUTO_SYNC = "avalon_auto_sync" + +# Applications custom attribute name +CUST_ATTR_APPLICATIONS = "applications" +# Environment tools custom attribute +CUST_ATTR_TOOLS = "tools_env" diff --git a/openpype/modules/ftrack/lib/custom_attributes.py b/openpype/modules/ftrack/lib/custom_attributes.py new file mode 100644 index 0000000000..f6b82c90b1 --- /dev/null +++ b/openpype/modules/ftrack/lib/custom_attributes.py @@ -0,0 +1,83 @@ +import os +import json + +from .constants import CUST_ATTR_GROUP + + +def default_custom_attributes_definition(): + json_file_path = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "custom_attributes.json" + ) + with open(json_file_path, "r") as json_stream: + data = json.load(json_stream) + return data + + +def app_definitions_from_app_manager(app_manager): + _app_definitions = [] + for app_name, app in 
app_manager.applications.items(): + if app.enabled and app.is_host: + _app_definitions.append( + (app_name, app.full_label) + ) + + # Sort items by label + app_definitions = [] + for key, label in sorted(_app_definitions, key=lambda item: item[1]): + app_definitions.append({key: label}) + + if not app_definitions: + app_definitions.append({"empty": "< Empty >"}) + return app_definitions + + +def tool_definitions_from_app_manager(app_manager): + _tools_data = [] + for tool_name, tool in app_manager.tools.items(): + _tools_data.append( + (tool_name, tool.label) + ) + + # Sort items by label + tools_data = [] + for key, label in sorted(_tools_data, key=lambda item: item[1]): + tools_data.append({key: label}) + + # Make sure there is at least one item + if not tools_data: + tools_data.append({"empty": "< Empty >"}) + return tools_data + + +def get_openpype_attr(session, split_hierarchical=True, query_keys=None): + custom_attributes = [] + hier_custom_attributes = [] + if not query_keys: + query_keys = [ + "id", + "entity_type", + "object_type_id", + "is_hierarchical", + "default" + ] + # TODO remove deprecated "pype" group from query + cust_attrs_query = ( + "select {}" + " from CustomAttributeConfiguration" + # Kept `pype` for Backwards Compatiblity + " where group.name in (\"pype\", \"{}\")" + ).format(", ".join(query_keys), CUST_ATTR_GROUP) + all_avalon_attr = session.query(cust_attrs_query).all() + for cust_attr in all_avalon_attr: + if split_hierarchical and cust_attr["is_hierarchical"]: + hier_custom_attributes.append(cust_attr) + continue + + custom_attributes.append(cust_attr) + + if split_hierarchical: + # return tuple + return custom_attributes, hier_custom_attributes + + return custom_attributes diff --git a/openpype/hosts/maya/plugins/publish/collect_ftrack_family.py b/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py similarity index 64% rename from openpype/hosts/maya/plugins/publish/collect_ftrack_family.py rename to 
openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py index b2b66b1875..e6daed9a33 100644 --- a/openpype/hosts/maya/plugins/publish/collect_ftrack_family.py +++ b/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py @@ -2,14 +2,9 @@ import pyblish.api class CollectFtrackFamilies(pyblish.api.InstancePlugin): - """Collect model data - - Ensures always only a single frame is extracted (current frame). - - Note: - This is a workaround so that the `pype.model` family can use the - same pointcache extractor implementation as animation and pointcaches. - This always enforces the "current" frame to be published. + """Collect family for ftrack publishing + + Add ftrack family to those instance that should be published to ftrack """ @@ -23,6 +18,7 @@ class CollectFtrackFamilies(pyblish.api.InstancePlugin): "rig", "camera" ] + hosts = ["maya"] def process(self, instance): diff --git a/openpype/modules/launcher_action.py b/openpype/modules/launcher_action.py index da0468d495..5ed8585b6a 100644 --- a/openpype/modules/launcher_action.py +++ b/openpype/modules/launcher_action.py @@ -22,7 +22,6 @@ class LauncherAction(PypeModule, ITrayAction): # Register actions if self.tray_initialized: from openpype.tools.launcher import actions - # actions.register_default_actions() actions.register_config_actions() actions_paths = self.manager.collect_plugin_paths()["actions"] actions.register_actions_from_paths(actions_paths) diff --git a/openpype/modules/settings_action.py b/openpype/modules/settings_action.py index 371e190c12..5651868f68 100644 --- a/openpype/modules/settings_action.py +++ b/openpype/modules/settings_action.py @@ -16,18 +16,20 @@ class ISettingsChangeListener: } """ @abstractmethod - def on_system_settings_save(self, old_value, new_value, changes): + def on_system_settings_save( + self, old_value, new_value, changes, new_value_metadata + ): pass @abstractmethod def on_project_settings_save( - self, old_value, new_value, changes, project_name + self, 
old_value, new_value, changes, project_name, new_value_metadata ): pass @abstractmethod def on_project_anatomy_save( - self, old_value, new_value, changes, project_name + self, old_value, new_value, changes, project_name, new_value_metadata ): pass @@ -78,16 +80,20 @@ class SettingsAction(PypeModule, ITrayAction): # Store if was visible was_visible = self.settings_window.isVisible() + was_minimized = self.settings_window.isMinimized() # Show settings gui self.settings_window.show() + if was_minimized: + self.settings_window.showNormal() + # Pull window to the front. self.settings_window.raise_() self.settings_window.activateWindow() # Reset content if was not visible - if not was_visible: + if not was_visible and not was_minimized: self.settings_window.reset() diff --git a/openpype/modules/sync_server/providers/abstract_provider.py b/openpype/modules/sync_server/providers/abstract_provider.py index a60595ba93..2e9632134c 100644 --- a/openpype/modules/sync_server/providers/abstract_provider.py +++ b/openpype/modules/sync_server/providers/abstract_provider.py @@ -7,6 +7,8 @@ log = Logger().get_logger("SyncServer") @six.add_metaclass(abc.ABCMeta) class AbstractProvider: + CODE = '' + LABEL = '' def __init__(self, project_name, site_name, tree=None, presets=None): self.presets = None @@ -25,6 +27,17 @@ class AbstractProvider: (boolean) """ + @classmethod + @abc.abstractmethod + def get_configurable_items(cls): + """ + Returns filtered dict of editable properties + + + Returns: + (dict) + """ + @abc.abstractmethod def upload_file(self, source_path, path, server, collection, file, representation, site, diff --git a/openpype/modules/sync_server/providers/gdrive.py b/openpype/modules/sync_server/providers/gdrive.py index f1ea24f601..18d679b833 100644 --- a/openpype/modules/sync_server/providers/gdrive.py +++ b/openpype/modules/sync_server/providers/gdrive.py @@ -1,22 +1,33 @@ from __future__ import print_function import os.path -from googleapiclient.discovery import build 
-import google.oauth2.service_account as service_account -from googleapiclient import errors -from .abstract_provider import AbstractProvider -from googleapiclient.http import MediaFileUpload, MediaIoBaseDownload +import time +import sys +import six +import platform + from openpype.api import Logger from openpype.api import get_system_settings -from ..utils import time_function -import time +from .abstract_provider import AbstractProvider +from ..utils import time_function, ResumableError, EditableScopes +log = Logger().get_logger("SyncServer") + +try: + from googleapiclient.discovery import build + import google.oauth2.service_account as service_account + from googleapiclient import errors + from googleapiclient.http import MediaFileUpload, MediaIoBaseDownload +except (ImportError, SyntaxError): + if six.PY3: + six.reraise(*sys.exc_info()) + + # handle imports from Python 2 hosts - in those only basic methods are used + log.warning("Import failed, imported from Python 2, operations will fail.") SCOPES = ['https://www.googleapis.com/auth/drive.metadata.readonly', 'https://www.googleapis.com/auth/drive.file', 'https://www.googleapis.com/auth/drive.readonly'] # for write|delete -log = Logger().get_logger("SyncServer") - class GDriveHandler(AbstractProvider): """ @@ -42,15 +53,20 @@ class GDriveHandler(AbstractProvider): } } """ + CODE = 'gdrive' + LABEL = 'Google Drive' + FOLDER_STR = 'application/vnd.google-apps.folder' MY_DRIVE_STR = 'My Drive' # name of root folder of regular Google drive - CHUNK_SIZE = 2097152 # must be divisible by 256! + CHUNK_SIZE = 2097152 # must be divisible by 256! 
used for upload chunks def __init__(self, project_name, site_name, tree=None, presets=None): self.presets = None self.active = False self.project_name = project_name self.site_name = site_name + self.service = None + self.root = None self.presets = presets if not self.presets: @@ -58,12 +74,16 @@ class GDriveHandler(AbstractProvider): format(site_name)) return - if not os.path.exists(self.presets["credentials_url"]): - log.info("Sync Server: No credentials for Gdrive provider! ") + cred_path = self.presets.get("credentials_url", {}).\ + get(platform.system().lower()) or '' + if not os.path.exists(cred_path): + msg = "Sync Server: No credentials for gdrive provider " + \ + "for '{}' on path '{}'!".format(site_name, cred_path) + log.info(msg) return - self.service = self._get_gd_service() - self.root = self._prepare_root_info() + self.service = self._get_gd_service(cred_path) + self._tree = tree self.active = True @@ -73,7 +93,34 @@ class GDriveHandler(AbstractProvider): Returns: (boolean) """ - return self.active + return self.service is not None + + @classmethod + def get_configurable_items(cls): + """ + Returns filtered dict of editable properties. 
+ + + Returns: + (dict) + """ + # {platform} tells that value is multiplatform and only specific OS + # should be returned + editable = { + # credentials could be override on Project or User level + 'credentials_url': { + 'scope': [EditableScopes.PROJECT, + EditableScopes.LOCAL], + 'label': "Credentials url", + 'type': 'text', + 'namespace': '{project_settings}/global/sync_server/sites/{site}/credentials_url/{platform}' # noqa: E501 + }, + # roots could be override only on Project leve, User cannot + 'root': {'scope': [EditableScopes.PROJECT], + 'label': "Roots", + 'type': 'dict'} + } + return editable def get_roots_config(self, anatomy=None): """ @@ -530,7 +577,7 @@ class GDriveHandler(AbstractProvider): return return provider_presets - def _get_gd_service(self): + def _get_gd_service(self, credentials_path): """ Authorize client with 'credentials.json', uses service account. Service account needs to have target folder shared with. @@ -539,11 +586,18 @@ class GDriveHandler(AbstractProvider): Returns: None """ - creds = service_account.Credentials.from_service_account_file( - self.presets["credentials_url"], - scopes=SCOPES) - service = build('drive', 'v3', - credentials=creds, cache_discovery=False) + service = None + try: + creds = service_account.Credentials.from_service_account_file( + credentials_path, + scopes=SCOPES) + service = build('drive', 'v3', + credentials=creds, cache_discovery=False) + except Exception: + log.error("Connection failed, " + + "check '{}' credentials file".format(credentials_path), + exc_info=True) + return service def _prepare_root_info(self): @@ -555,39 +609,47 @@ class GDriveHandler(AbstractProvider): Returns: (dicts) of dicts where root folders are keys + throws ResumableError in case of errors.HttpError """ roots = {} config_roots = self.get_roots_config() - for path in config_roots.values(): - if self.MY_DRIVE_STR in path: - roots[self.MY_DRIVE_STR] = self.service.files()\ - .get(fileId='root').execute() - else: - shared_drives = 
[] - page_token = None + try: + for path in config_roots.values(): + if self.MY_DRIVE_STR in path: + roots[self.MY_DRIVE_STR] = self.service.files()\ + .get(fileId='root')\ + .execute() + else: + shared_drives = [] + page_token = None - while True: - response = self.service.drives().list( - pageSize=100, - pageToken=page_token).execute() - shared_drives.extend(response.get('drives', [])) - page_token = response.get('nextPageToken', None) - if page_token is None: - break + while True: + response = self.service.drives().list( + pageSize=100, + pageToken=page_token).execute() + shared_drives.extend(response.get('drives', [])) + page_token = response.get('nextPageToken', None) + if page_token is None: + break - folders = path.split('/') - if len(folders) < 2: - raise ValueError("Wrong root folder definition {}". - format(path)) + folders = path.split('/') + if len(folders) < 2: + raise ValueError("Wrong root folder definition {}". + format(path)) - for shared_drive in shared_drives: - if folders[1] in shared_drive["name"]: - roots[shared_drive["name"]] = { - "name": shared_drive["name"], - "id": shared_drive["id"]} - if self.MY_DRIVE_STR not in roots: # add My Drive always - roots[self.MY_DRIVE_STR] = self.service.files() \ - .get(fileId='root').execute() + for shared_drive in shared_drives: + if folders[1] in shared_drive["name"]: + roots[shared_drive["name"]] = { + "name": shared_drive["name"], + "id": shared_drive["id"]} + if self.MY_DRIVE_STR not in roots: # add My Drive always + roots[self.MY_DRIVE_STR] = self.service.files() \ + .get(fileId='root').execute() + except errors.HttpError: + log.warning("HttpError in sync loop, " + "trying next loop", + exc_info=True) + raise ResumableError return roots @@ -608,6 +670,9 @@ class GDriveHandler(AbstractProvider): (dictionary) path as a key, folder id as a value """ log.debug("build_tree len {}".format(len(folders))) + if not self.root: # build only when necessary, could be expensive + self.root = 
self._prepare_root_info() + root_ids = [] default_root_id = None tree = {} diff --git a/openpype/modules/sync_server/providers/lib.py b/openpype/modules/sync_server/providers/lib.py index 58947e115d..816ccca981 100644 --- a/openpype/modules/sync_server/providers/lib.py +++ b/openpype/modules/sync_server/providers/lib.py @@ -65,6 +65,17 @@ class ProviderFactory: info = self._get_creator_info(provider) return info[1] + def get_provider_configurable_items(self, provider): + """ + Returns dict of modifiable properties for 'provider'. + + Provider contains information which its properties and on what + level could be override + """ + provider_info = self._get_creator_info(provider) + + return provider_info[0].get_configurable_items() + def _get_creator_info(self, provider): """ Collect all necessary info for provider. Currently only creator @@ -91,5 +102,5 @@ factory = ProviderFactory() # there is implementing 'GDriveHandler' class # 7 denotes number of files that could be synced in single loop - learned by # trial and error -factory.register_provider('gdrive', GDriveHandler, 7) -factory.register_provider('local_drive', LocalDriveHandler, 10) +factory.register_provider(GDriveHandler.CODE, GDriveHandler, 7) +factory.register_provider(LocalDriveHandler.CODE, LocalDriveHandler, 50) diff --git a/openpype/modules/sync_server/providers/local_drive.py b/openpype/modules/sync_server/providers/local_drive.py index 1f4fca80eb..3b3e699d00 100644 --- a/openpype/modules/sync_server/providers/local_drive.py +++ b/openpype/modules/sync_server/providers/local_drive.py @@ -7,22 +7,43 @@ import time from openpype.api import Logger, Anatomy from .abstract_provider import AbstractProvider +from ..utils import EditableScopes + log = Logger().get_logger("SyncServer") class LocalDriveHandler(AbstractProvider): + CODE = 'local_drive' + LABEL = 'Local drive' + """ Handles required operations on mounted disks with OS """ def __init__(self, project_name, site_name, tree=None, presets=None): 
self.presets = None self.active = False self.project_name = project_name self.site_name = site_name + self._editable_properties = {} self.active = self.is_active() def is_active(self): return True + @classmethod + def get_configurable_items(cls): + """ + Returns filtered dict of editable properties + + Returns: + (dict) + """ + editable = { + 'root': {'scope': [EditableScopes.LOCAL], + 'label': "Roots", + 'type': 'dict'} + } + return editable + def upload_file(self, source_path, target_path, server, collection, file, representation, site, overwrite=False, direction="Upload"): diff --git a/openpype/modules/sync_server/sync_server.py b/openpype/modules/sync_server/sync_server.py index e97c0e8844..638a4a367f 100644 --- a/openpype/modules/sync_server/sync_server.py +++ b/openpype/modules/sync_server/sync_server.py @@ -8,7 +8,7 @@ from concurrent.futures._base import CancelledError from .providers import lib from openpype.lib import PypeLogger -from .utils import SyncStatus +from .utils import SyncStatus, ResumableError log = PypeLogger().get_logger("SyncServer") @@ -206,14 +206,14 @@ def _get_configured_sites_from_setting(module, project_name, project_setting): all_sites = module._get_default_site_configs() all_sites.update(project_setting.get("sites")) for site_name, config in all_sites.items(): - handler = initiated_handlers. 
\ - get((config["provider"], site_name)) + provider = module.get_provider_for_site(site=site_name) + handler = initiated_handlers.get((provider, site_name)) if not handler: - handler = lib.factory.get_provider(config["provider"], + handler = lib.factory.get_provider(provider, project_name, site_name, presets=config) - initiated_handlers[(config["provider"], site_name)] = \ + initiated_handlers[(provider, site_name)] = \ handler if handler.is_active(): @@ -232,6 +232,7 @@ class SyncServerThread(threading.Thread): self.loop = None self.is_running = False self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=3) + self.timer = None def run(self): self.is_running = True @@ -266,13 +267,16 @@ class SyncServerThread(threading.Thread): Returns: """ - try: - while self.is_running and not self.module.is_paused(): + while self.is_running and not self.module.is_paused(): + try: import time start_time = None self.module.set_sync_project_settings() # clean cache for collection, preset in self.module.sync_project_settings.\ items(): + if collection not in self.module.get_enabled_projects(): + continue + start_time = time.time() local_site, remote_site = self._working_sites(collection) if not all([local_site, remote_site]): @@ -294,13 +298,14 @@ class SyncServerThread(threading.Thread): processed_file_path = set() site_preset = preset.get('sites')[remote_site] - remote_provider = site_preset['provider'] + remote_provider = \ + self.module.get_provider_for_site(site=remote_site) handler = lib.factory.get_provider(remote_provider, collection, remote_site, presets=site_preset) limit = lib.factory.get_provider_batch_limit( - site_preset['provider']) + remote_provider) # first call to get_provider could be expensive, its # building folder tree structure in memory # call only if needed, eg. 
DO_UPLOAD or DO_DOWNLOAD @@ -384,17 +389,27 @@ class SyncServerThread(threading.Thread): duration = time.time() - start_time log.debug("One loop took {:.2f}s".format(duration)) - await asyncio.sleep(self.module.get_loop_delay(collection)) - except ConnectionResetError: - log.warning("ConnectionResetError in sync loop, trying next loop", - exc_info=True) - except CancelledError: - # just stopping server - pass - except Exception: - self.stop() - log.warning("Unhandled exception in sync loop, stopping server", - exc_info=True) + + delay = self.module.get_loop_delay(collection) + log.debug("Waiting for {} seconds to new loop".format(delay)) + self.timer = asyncio.create_task(self.run_timer(delay)) + await asyncio.gather(self.timer) + + except ConnectionResetError: + log.warning("ConnectionResetError in sync loop, " + "trying next loop", + exc_info=True) + except CancelledError: + # just stopping server + pass + except ResumableError: + log.warning("ResumableError in sync loop, " + "trying next loop", + exc_info=True) + except Exception: + self.stop() + log.warning("Unhandled except. 
in sync loop, stopping server", + exc_info=True) def stop(self): """Sets is_running flag to false, 'check_shutdown' shuts server down""" @@ -417,6 +432,17 @@ class SyncServerThread(threading.Thread): await asyncio.sleep(0.07) self.loop.stop() + async def run_timer(self, delay): + """Wait for 'delay' seconds to start next loop""" + await asyncio.sleep(delay) + + def reset_timer(self): + """Called when waiting for next loop should be skipped""" + log.debug("Resetting timer") + if self.timer: + self.timer.cancel() + self.timer = None + def _working_sites(self, collection): if self.module.is_project_paused(collection): log.debug("Both sites same, skipping") @@ -429,8 +455,9 @@ class SyncServerThread(threading.Thread): remote_site)) return None, None - if not all([site_is_working(self.module, collection, local_site), - site_is_working(self.module, collection, remote_site)]): + configured_sites = _get_configured_sites(self.module, collection) + if not all([local_site in configured_sites, + remote_site in configured_sites]): log.debug("Some of the sites {} - {} is not ".format(local_site, remote_site) + "working properly") diff --git a/openpype/modules/sync_server/sync_server_module.py b/openpype/modules/sync_server/sync_server_module.py index 59c3787789..ed403b836d 100644 --- a/openpype/modules/sync_server/sync_server_module.py +++ b/openpype/modules/sync_server/sync_server_module.py @@ -2,6 +2,8 @@ import os from bson.objectid import ObjectId from datetime import datetime import threading +import platform +import copy from avalon.api import AvalonMongoDB @@ -9,12 +11,18 @@ from .. 
import PypeModule, ITrayModule from openpype.api import ( Anatomy, get_project_settings, + get_system_settings, get_local_site_id) from openpype.lib import PypeLogger +from openpype.settings.lib import ( + get_default_project_settings, + get_default_anatomy_settings, + get_anatomy_settings) from .providers.local_drive import LocalDriveHandler +from .providers import lib -from .utils import time_function, SyncStatus +from .utils import time_function, SyncStatus, EditableScopes log = PypeLogger().get_logger("SyncServer") @@ -83,6 +91,7 @@ class SyncServerModule(PypeModule, ITrayModule): DEFAULT_SITE = 'studio' LOCAL_SITE = 'local' LOG_PROGRESS_SEC = 5 # how often log progress to DB + DEFAULT_PRIORITY = 50 # higher is better, allowed range 1 - 1000 name = "sync_server" label = "Sync Queue" @@ -339,18 +348,6 @@ class SyncServerModule(PypeModule, ITrayModule): return self._get_enabled_sites_from_settings(sync_settings) - def get_configurable_items_for_site(self, project_name, site_name): - """ - Returns list of items that should be configurable by User - - Returns: - (list of dict) - [{key:"root", label:"root", value:"valueFromSettings"}] - """ - # if project_name is None: ..for get_default_project_settings - # return handler.get_configurable_items() - pass - def get_active_site(self, project_name): """ Returns active (mine) site for 'project_name' from settings @@ -401,6 +398,225 @@ class SyncServerModule(PypeModule, ITrayModule): return remote_site + def get_local_settings_schema(self): + """Wrapper for Local settings - all projects incl. Default""" + return self.get_configurable_items(EditableScopes.LOCAL) + + def get_configurable_items(self, scope=None): + """ + Returns list of sites that could be configurable for all projects. 
+ + Could be filtered by 'scope' argument (list) + + Args: + scope (list of utils.EditableScope) + + Returns: + (dict of list of dict) + { + siteA : [ + { + key:"root", label:"root", + "value":"{'work': 'c:/projects'}", + "type": "dict", + "children":[ + { "key": "work", + "type": "text", + "value": "c:/projects"} + ] + }, + { + key:"credentials_url", label:"Credentials url", + "value":"'c:/projects/cred.json'", "type": "text", + "namespace": "{project_setting}/global/sync_server/ + sites" + } + ] + } + """ + editable = {} + applicable_projects = list(self.connection.projects()) + applicable_projects.append(None) + for project in applicable_projects: + project_name = None + if project: + project_name = project["name"] + + items = self.get_configurable_items_for_project(project_name, + scope) + editable.update(items) + + return editable + + def get_local_settings_schema_for_project(self, project_name): + """Wrapper for Local settings - for specific 'project_name'""" + return self.get_configurable_items_for_project(project_name, + EditableScopes.LOCAL) + + def get_configurable_items_for_project(self, project_name=None, + scope=None): + """ + Returns list of items that could be configurable for specific + 'project_name' + + Args: + project_name (str) - None > default project, + scope (list of utils.EditableScope) + (optional, None is all scopes, default is LOCAL) + + Returns: + (dict of list of dict) + { + siteA : [ + { + key:"root", label:"root", + "type": "dict", + "children":[ + { "key": "work", + "type": "text", + "value": "c:/projects"} + ] + }, + { + key:"credentials_url", label:"Credentials url", + "value":"'c:/projects/cred.json'", "type": "text", + "namespace": "{project_setting}/global/sync_server/ + sites" + } + ] + } + """ + allowed_sites = set() + sites = self.get_all_site_configs(project_name) + if project_name: + # Local Settings can select only from allowed sites for project + allowed_sites.update(set(self.get_active_sites(project_name))) + 
allowed_sites.update(set(self.get_remote_sites(project_name))) + + editable = {} + for site_name in sites.keys(): + if allowed_sites and site_name not in allowed_sites: + continue + + items = self.get_configurable_items_for_site(project_name, + site_name, + scope) + editable[site_name] = items + + return editable + + def get_local_settings_schema_for_site(self, project_name, site_name): + """Wrapper for Local settings - for particular 'site_name and proj.""" + return self.get_configurable_items_for_site(project_name, + site_name, + EditableScopes.LOCAL) + + def get_configurable_items_for_site(self, project_name=None, + site_name=None, + scope=None): + """ + Returns list of items that could be configurable. + + Args: + project_name (str) - None > default project + site_name (str) + scope (list of utils.EditableScope) + (optional, None is all scopes) + + Returns: + (list) + [ + { + key:"root", label:"root", type:"dict", + "children":[ + { "key": "work", + "type": "text", + "value": "c:/projects"} + ] + }, ... 
+ ] + """ + provider_name = self.get_provider_for_site(site=site_name) + items = lib.factory.get_provider_configurable_items(provider_name) + + if project_name: + sync_s = self.get_sync_project_setting(project_name, + exclude_locals=True, + cached=False) + else: + sync_s = get_default_project_settings(exclude_locals=True) + sync_s = sync_s["global"]["sync_server"] + sync_s["sites"].update( + self._get_default_site_configs(self.enabled)) + + editable = [] + if type(scope) is not list: + scope = [scope] + scope = set(scope) + for key, properties in items.items(): + if scope is None or scope.intersection(set(properties["scope"])): + val = sync_s.get("sites", {}).get(site_name, {}).get(key) + + item = { + "key": key, + "label": properties["label"], + "type": properties["type"] + } + + if properties.get("namespace"): + item["namespace"] = properties.get("namespace") + if "platform" in item["namespace"]: + try: + if val: + val = val[platform.system().lower()] + except KeyError: + st = "{}'s field value {} should be".format(key, val) # noqa: E501 + log.error(st + " multiplatform dict") + + item["namespace"] = item["namespace"].replace('{site}', + site_name) + children = [] + if properties["type"] == "dict": + if val: + for val_key, val_val in val.items(): + child = { + "type": "text", + "key": val_key, + "value": val_val + } + children.append(child) + + if properties["type"] == "dict": + item["children"] = children + else: + item["value"] = val + + + + editable.append(item) + + return editable + + def reset_timer(self): + """ + Called when waiting for next loop should be skipped. + + In case of user's involvement (reset site), start that right away. 
+ """ + self.sync_server_thread.reset_timer() + + def get_enabled_projects(self): + """Returns list of projects which have SyncServer enabled.""" + enabled_projects = [] + + if self.enabled: + for project in self.connection.projects(): + project_name = project["name"] + project_settings = self.get_sync_project_setting(project_name) + if project_settings and project_settings.get("enabled"): + enabled_projects.append(project_name) + + return enabled_projects """ End of Public API """ def get_local_file_path(self, collection, site_name, file_path): @@ -413,7 +629,7 @@ class SyncServerModule(PypeModule, ITrayModule): return local_file_path def _get_remote_sites_from_settings(self, sync_settings): - if not self.enabled or not sync_settings['enabled']: + if not self.enabled or not sync_settings.get('enabled'): return [] remote_sites = [self.DEFAULT_SITE, self.LOCAL_SITE] @@ -424,7 +640,7 @@ class SyncServerModule(PypeModule, ITrayModule): def _get_enabled_sites_from_settings(self, sync_settings): sites = [self.DEFAULT_SITE] - if self.enabled and sync_settings['enabled']: + if self.enabled and sync_settings.get('enabled'): sites.append(self.LOCAL_SITE) return sites @@ -445,10 +661,16 @@ class SyncServerModule(PypeModule, ITrayModule): if not self.enabled: return + enabled_projects = self.get_enabled_projects() + if not enabled_projects: + self.enabled = False + return + self.lock = threading.Lock() try: self.sync_server_thread = SyncServerThread(self) + from .tray.app import SyncServerWindow self.widget = SyncServerWindow(self) except ValueError: @@ -543,75 +765,145 @@ class SyncServerModule(PypeModule, ITrayModule): return self._sync_project_settings - def set_sync_project_settings(self): + def set_sync_project_settings(self, exclude_locals=False): """ Set sync_project_settings for all projects (caching) - + Args: + exclude_locals (bool): ignore overrides from Local Settings For performance """ - sync_project_settings = {} - - for collection in 
self.connection.database.collection_names(False): - sync_settings = self._parse_sync_settings_from_settings( - get_project_settings(collection)) - if sync_settings: - default_sites = self._get_default_site_configs() - sync_settings['sites'].update(default_sites) - sync_project_settings[collection] = sync_settings - - if not sync_project_settings: - log.info("No enabled and configured projects for sync.") + sync_project_settings = self._prepare_sync_project_settings( + exclude_locals) self._sync_project_settings = sync_project_settings - def get_sync_project_setting(self, project_name): + def _prepare_sync_project_settings(self, exclude_locals): + sync_project_settings = {} + system_sites = self.get_all_site_configs() + for collection in self.connection.database.collection_names(False): + sites = copy.deepcopy(system_sites) # get all configured sites + proj_settings = self._parse_sync_settings_from_settings( + get_project_settings(collection, + exclude_locals=exclude_locals)) + sites.update(self._get_default_site_configs( + proj_settings["enabled"], collection)) + sites.update(proj_settings['sites']) + proj_settings["sites"] = sites + + sync_project_settings[collection] = proj_settings + if not sync_project_settings: + log.info("No enabled and configured projects for sync.") + return sync_project_settings + + def get_sync_project_setting(self, project_name, exclude_locals=False, + cached=True): """ Handles pulling sync_server's settings for enabled 'project_name' Args: project_name (str): used in project settings + exclude_locals (bool): ignore overrides from Local Settings + cached (bool): use pre-cached values, or return fresh ones + cached values needed for single loop (with all overrides) + fresh values needed for Local settings (without overrides) Returns: (dict): settings dictionary for the enabled project, empty if no settings or sync is disabled """ # presets set already, do not call again and again # self.log.debug("project preset {}".format(self.presets)) 
- if self.sync_project_settings and \ - self.sync_project_settings.get(project_name): - return self.sync_project_settings.get(project_name) + if not cached: + return self._prepare_sync_project_settings(exclude_locals)\ + [project_name] - settings = get_project_settings(project_name) - return self._parse_sync_settings_from_settings(settings) + if not self.sync_project_settings or \ + not self.sync_project_settings.get(project_name): + self.set_sync_project_settings(exclude_locals) + + return self.sync_project_settings.get(project_name) def _parse_sync_settings_from_settings(self, settings): """ settings from api.get_project_settings, TOOD rename """ sync_settings = settings.get("global").get("sync_server") - if not sync_settings: - log.info("No project setting not syncing.") - return {} - if sync_settings.get("enabled"): - return sync_settings - return {} + return sync_settings - def _get_default_site_configs(self): + def get_all_site_configs(self, project_name=None): """ - Returns skeleton settings for 'studio' and user's local site + Returns (dict) with all sites configured system wide. 
+ + Args: + project_name (str)(optional): if present, check if not disabled + + Returns: + (dict): {'studio': {'provider':'local_drive'...}, + 'MY_LOCAL': {'provider':....}} """ - default_config = {'provider': 'local_drive'} - all_sites = {self.DEFAULT_SITE: default_config, - get_local_site_id(): default_config} + sys_sett = get_system_settings() + sync_sett = sys_sett["modules"].get("sync_server") + + project_enabled = True + if project_name: + project_enabled = project_name in self.get_enabled_projects() + sync_enabled = sync_sett["enabled"] and project_enabled + + system_sites = {} + if sync_enabled: + for site, detail in sync_sett.get("sites", {}).items(): + system_sites[site] = detail + + system_sites.update(self._get_default_site_configs(sync_enabled, + project_name)) + + return system_sites + + def _get_default_site_configs(self, sync_enabled=True, project_name=None): + """ + Returns settings for 'studio' and user's local site + + Returns base values from setting, not overriden by Local Settings, + eg. value used to push TO LS not to get actual value for syncing. + """ + if not project_name: + anatomy_sett = get_default_anatomy_settings(exclude_locals=True) + else: + anatomy_sett = get_anatomy_settings(project_name, + exclude_locals=True) + roots = {} + for root, config in anatomy_sett["roots"].items(): + roots[root] = config[platform.system().lower()] + studio_config = { + 'provider': 'local_drive', + "root": roots + } + all_sites = {self.DEFAULT_SITE: studio_config} + if sync_enabled: + all_sites['local'] = {'provider': 'local_drive'} return all_sites - def get_provider_for_site(self, project_name, site): + def get_provider_for_site(self, project_name=None, site=None): """ - Return provider name for site. + Return provider name for site (unique name across all projects. 
""" - site_preset = self.get_sync_project_setting(project_name)["sites"].\ - get(site) - if site_preset: - return site_preset["provider"] + sites = {self.DEFAULT_SITE: "local_drive", + self.LOCAL_SITE: "local_drive", + get_local_site_id(): "local_drive"} - return "NA" + if site in sites.keys(): + return sites[site] + + if project_name: # backward compatibility + proj_settings = self.get_sync_project_setting(project_name) + provider = proj_settings.get("sites", {}).get(site, {}).\ + get("provider") + if provider: + return provider + + sys_sett = get_system_settings() + sync_sett = sys_sett["modules"].get("sync_server") + for site, detail in sync_sett.get("sites", {}).items(): + sites[site] = detail.get("provider") + + return sites.get(site, 'N/A') @time_function def get_sync_representations(self, collection, active_site, remote_site): @@ -639,7 +931,7 @@ class SyncServerModule(PypeModule, ITrayModule): self.connection.Session["AVALON_PROJECT"] = collection # retry_cnt - number of attempts to sync specific file before giving up retries_arr = self._get_retries_arr(collection) - query = { + match = { "type": "representation", "$or": [ {"$and": [ @@ -677,10 +969,47 @@ class SyncServerModule(PypeModule, ITrayModule): ]} ] } + + aggr = [ + {"$match": match}, + {'$unwind': '$files'}, + {'$addFields': { + 'order_remote': { + '$filter': {'input': '$files.sites', 'as': 'p', + 'cond': {'$eq': ['$$p.name', remote_site]} + }}, + 'order_local': { + '$filter': {'input': '$files.sites', 'as': 'p', + 'cond': {'$eq': ['$$p.name', active_site]} + }}, + }}, + {'$addFields': { + 'priority': { + '$cond': [ + {'$size': '$order_local.priority'}, + {'$first': '$order_local.priority'}, + {'$cond': [ + {'$size': '$order_remote.priority'}, + {'$first': '$order_remote.priority'}, + self.DEFAULT_PRIORITY]} + ] + }, + }}, + {'$group': { + '_id': '$_id', + # pass through context - same for representation + 'context': {'$addToSet': '$context'}, + 'data': {'$addToSet': '$data'}, + # pass through 
files as a list + 'files': {'$addToSet': '$files'}, + 'priority': {'$max': "$priority"}, + }}, + {"$sort": {'priority': -1, '_id': 1}}, + ] log.debug("active_site:{} - remote_site:{}".format(active_site, remote_site)) - log.debug("query: {}".format(query)) - representations = self.connection.find(query) + log.debug("query: {}".format(aggr)) + representations = self.connection.aggregate(aggr) return representations @@ -693,6 +1022,15 @@ class SyncServerModule(PypeModule, ITrayModule): Always is comparing local record, eg. site with 'name' == self.presets[PROJECT_NAME]['config']["active_site"] + This leads to trigger actual upload or download, there is + a use case 'studio' <> 'remote' where user should publish + to 'studio', see progress in Tray GUI, but do not do + physical upload/download + (as multiple user would be doing that). + + Do physical U/D only when any of the sites is user's local, in that + case only user has the data and must U/D. + Args: file (dictionary): of file from representation in Mongo local_site (string): - local side of compare (usually 'studio') @@ -702,8 +1040,12 @@ class SyncServerModule(PypeModule, ITrayModule): (string) - one of SyncStatus """ sites = file.get("sites") or [] - # if isinstance(sites, list): # temporary, old format of 'sites' - # return SyncStatus.DO_NOTHING + + if get_local_site_id() not in (local_site, remote_site): + # don't do upload/download for studio sites + log.debug("No local site {} - {}".format(local_site, remote_site)) + return SyncStatus.DO_NOTHING + _, remote_rec = self._get_site_rec(sites, remote_site) or {} if remote_rec: # sync remote target created_dt = remote_rec.get("created_dt") @@ -726,7 +1068,7 @@ class SyncServerModule(PypeModule, ITrayModule): return SyncStatus.DO_NOTHING def update_db(self, collection, new_file_id, file, representation, - site, error=None, progress=None): + site, error=None, progress=None, priority=None): """ Update 'provider' portion of records in DB with success (file_id) or 
error (exception) @@ -740,12 +1082,16 @@ class SyncServerModule(PypeModule, ITrayModule): site (string): label ('gdrive', 'S3') error (string): exception message progress (float): 0-1 of progress of upload/download + priority (int): 0-100 set priority Returns: None """ representation_id = representation.get("_id") - file_id = file.get("_id") + file_id = None + if file: + file_id = file.get("_id") + query = { "_id": representation_id } @@ -757,6 +1103,8 @@ class SyncServerModule(PypeModule, ITrayModule): update["$unset"] = self._get_error_dict("", "", "") elif progress is not None: update["$set"] = self._get_progress_dict(progress) + elif priority is not None: + update["$set"] = self._get_priority_dict(priority, file_id) else: tries = self._get_tries_count(file, site) tries += 1 @@ -764,9 +1112,10 @@ class SyncServerModule(PypeModule, ITrayModule): update["$set"] = self._get_error_dict(error, tries) arr_filter = [ - {'s.name': site}, - {'f._id': ObjectId(file_id)} + {'s.name': site} ] + if file_id: + arr_filter.append({'f._id': ObjectId(file_id)}) self.connection.database[collection].update_one( query, @@ -775,7 +1124,7 @@ class SyncServerModule(PypeModule, ITrayModule): array_filters=arr_filter ) - if progress is not None: + if progress is not None or priority is not None: return status = 'failed' @@ -1045,7 +1394,7 @@ class SyncServerModule(PypeModule, ITrayModule): format(site_name)) return - provider_name = self.get_provider_for_site(collection, site_name) + provider_name = self.get_provider_for_site(site=site_name) if provider_name == 'local_drive': query = { @@ -1169,6 +1518,21 @@ class SyncServerModule(PypeModule, ITrayModule): val = {"files.$[f].sites.$[s].progress": progress} return val + def _get_priority_dict(self, priority, file_id): + """ + Provide priority metadata to be stored in Db. + Used during upload/download for GUI to show. 
+ Args: + priority: (int) - priority for file(s) + Returns: + (dictionary) + """ + if file_id: + str_key = "files.$[f].sites.$[s].priority" + else: + str_key = "files.$[].sites.$[s].priority" + return {str_key: int(priority)} + def _get_retries_arr(self, project_name): """ Returns array with allowed values in 'tries' field. If repre diff --git a/openpype/modules/sync_server/tray/app.py b/openpype/modules/sync_server/tray/app.py index 25fbf0e49a..b3b6f0a6c3 100644 --- a/openpype/modules/sync_server/tray/app.py +++ b/openpype/modules/sync_server/tray/app.py @@ -7,7 +7,7 @@ from openpype import resources from openpype.modules.sync_server.tray.widgets import ( SyncProjectListWidget, - SyncRepresentationWidget + SyncRepresentationSummaryWidget ) log = PypeLogger().get_logger("SyncServer") @@ -47,7 +47,7 @@ class SyncServerWindow(QtWidgets.QDialog): left_column_layout.addWidget(self.pause_btn) left_column.setLayout(left_column_layout) - repres = SyncRepresentationWidget( + repres = SyncRepresentationSummaryWidget( sync_server, project=self.projects.current_project, parent=self) @@ -78,15 +78,33 @@ class SyncServerWindow(QtWidgets.QDialog): layout.addWidget(footer) self.setLayout(body_layout) - self.setWindowTitle("Sync Server") + self.setWindowTitle("Sync Queue") self.projects.project_changed.connect( lambda: repres.table_view.model().set_project( self.projects.current_project)) self.pause_btn.clicked.connect(self._pause) + self.pause_btn.setAutoDefault(False) + self.pause_btn.setDefault(False) repres.message_generated.connect(self._update_message) + self.representationWidget = repres + + def showEvent(self, event): + self.representationWidget.model.set_project( + self.projects.current_project) + self._set_running(True) + super().showEvent(event) + + def closeEvent(self, event): + self._set_running(False) + super().closeEvent(event) + + def _set_running(self, running): + self.representationWidget.model.is_running = running + 
self.representationWidget.model.timer.setInterval(0) + def _pause(self): if self.sync_server.is_paused(): self.sync_server.unpause_server() diff --git a/openpype/modules/sync_server/tray/delegates.py b/openpype/modules/sync_server/tray/delegates.py new file mode 100644 index 0000000000..9316ec2c3e --- /dev/null +++ b/openpype/modules/sync_server/tray/delegates.py @@ -0,0 +1,116 @@ +import os +from Qt import QtCore, QtWidgets, QtGui + +from openpype.lib import PypeLogger +from openpype.modules.sync_server.tray import lib + +log = PypeLogger().get_logger("SyncServer") + + +class PriorityDelegate(QtWidgets.QStyledItemDelegate): + """Creates editable line edit to set priority on representation""" + def paint(self, painter, option, index): + super(PriorityDelegate, self).paint(painter, option, index) + + if option.widget.selectionModel().isSelected(index) or \ + option.state & QtWidgets.QStyle.State_MouseOver: + edit_icon = index.data(lib.EditIconRole) + if not edit_icon: + return + + state = QtGui.QIcon.On + mode = QtGui.QIcon.Selected + + icon_side = 16 + icon_rect = QtCore.QRect( + option.rect.left() + option.rect.width() - icon_side - 4, + option.rect.top() + ((option.rect.height() - icon_side) / 2), + icon_side, + icon_side + ) + + edit_icon.paint( + painter, icon_rect, + QtCore.Qt.AlignRight, mode, state + ) + + def createEditor(self, parent, option, index): + editor = PriorityLineEdit( + parent, + option.widget.selectionModel().selectedRows()) + editor.setFocus(True) + return editor + + def setModelData(self, editor, model, index): + for index in editor.selected_idxs: + try: + val = int(editor.text()) + except ValueError: + val = model.sync_server.DEFAULT_PRIORITY + model.set_priority_data(index, val) + + +class PriorityLineEdit(QtWidgets.QLineEdit): + """Special LineEdit to consume Enter and store selected indexes""" + def __init__(self, parent=None, selected_idxs=None): + self.selected_idxs = selected_idxs + super(PriorityLineEdit, self).__init__(parent) + + 
def keyPressEvent(self, event): + result = super(PriorityLineEdit, self).keyPressEvent(event) + if ( + event.key() in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter) + ): + return event.accept() + + return result + + +class ImageDelegate(QtWidgets.QStyledItemDelegate): + """ + Prints icon of site and progress of synchronization + """ + + def __init__(self, parent=None): + super(ImageDelegate, self).__init__(parent) + self.icons = {} + + def paint(self, painter, option, index): + super(ImageDelegate, self).paint(painter, option, index) + option = QtWidgets.QStyleOptionViewItem(option) + option.showDecorationSelected = True + + provider = index.data(lib.ProviderRole) + value = index.data(lib.ProgressRole) + date_value = index.data(lib.DateRole) + is_failed = index.data(lib.FailedRole) + + if not self.icons.get(provider): + resource_path = os.path.dirname(__file__) + resource_path = os.path.join(resource_path, "..", + "providers", "resources") + pix_url = "{}/{}.png".format(resource_path, provider) + pixmap = QtGui.QPixmap(pix_url) + self.icons[provider] = pixmap + else: + pixmap = self.icons[provider] + + padding = 10 + point = QtCore.QPoint(option.rect.x() + padding, + option.rect.y() + + (option.rect.height() - pixmap.height()) / 2) + painter.drawPixmap(point, pixmap) + + overlay_rect = option.rect.translated(0, 0) + overlay_rect.setHeight(overlay_rect.height() * (1.0 - float(value))) + painter.fillRect(overlay_rect, + QtGui.QBrush(QtGui.QColor(0, 0, 0, 100))) + text_rect = option.rect.translated(10, 0) + painter.drawText(text_rect, + QtCore.Qt.AlignCenter, + date_value) + + if is_failed: + overlay_rect = option.rect.translated(0, 0) + painter.fillRect(overlay_rect, + QtGui.QBrush(QtGui.QColor(255, 0, 0, 35))) diff --git a/openpype/modules/sync_server/tray/lib.py b/openpype/modules/sync_server/tray/lib.py index 0282d79ea1..25c600abd2 100644 --- a/openpype/modules/sync_server/tray/lib.py +++ b/openpype/modules/sync_server/tray/lib.py @@ -1,4 +1,7 @@ from Qt import 
QtCore +import attr +import abc +import six from openpype.lib import PypeLogger @@ -20,6 +23,112 @@ ProviderRole = QtCore.Qt.UserRole + 2 ProgressRole = QtCore.Qt.UserRole + 4 DateRole = QtCore.Qt.UserRole + 6 FailedRole = QtCore.Qt.UserRole + 8 +HeaderNameRole = QtCore.Qt.UserRole + 10 +FullItemRole = QtCore.Qt.UserRole + 12 +EditIconRole = QtCore.Qt.UserRole + 14 + + +@six.add_metaclass(abc.ABCMeta) +class AbstractColumnFilter: + + def __init__(self, column_name, dbcon=None): + self.column_name = column_name + self.dbcon = dbcon + self._search_variants = [] + + def search_variants(self): + """ + Returns all flavors of search available for this column, + """ + return self._search_variants + + @abc.abstractmethod + def values(self): + """ + Returns dict of available values for filter {'label':'value'} + """ + pass + + @abc.abstractmethod + def prepare_match_part(self, values): + """ + Prepares format valid for $match part from 'values + + Args: + values (dict): {'label': 'value'} + Returns: + (dict): {'COLUMN_NAME': {'$in': ['val1', 'val2']}} + """ + pass + + +class PredefinedSetFilter(AbstractColumnFilter): + + def __init__(self, column_name, values): + super().__init__(column_name) + self._search_variants = ['checkbox'] + self._values = values + if self._values and \ + list(self._values.keys())[0] == list(self._values.values())[0]: + self._search_variants.append('text') + + def values(self): + return {k: v for k, v in self._values.items()} + + def prepare_match_part(self, values): + return {'$in': list(values.keys())} + + +class RegexTextFilter(AbstractColumnFilter): + + def __init__(self, column_name): + super().__init__(column_name) + self._search_variants = ['text'] + + def values(self): + return {} + + def prepare_match_part(self, values): + """ values = {'text1 text2': 'text1 text2'} """ + if not values: + return {} + + regex_strs = set() + text = list(values.keys())[0] # only single key always expected + for word in text.split(): + 
regex_strs.add('.*{}.*'.format(word)) + + return {"$regex": "|".join(regex_strs), + "$options": 'i'} + + +class MultiSelectFilter(AbstractColumnFilter): + + def __init__(self, column_name, values=None, dbcon=None): + super().__init__(column_name) + self._values = values + self.dbcon = dbcon + self._search_variants = ['checkbox'] + + def values(self): + if self._values: + return {k: v for k, v in self._values.items()} + + recs = self.dbcon.find({'type': self.column_name}, {"name": 1, + "_id": -1}) + values = {} + for item in recs: + values[item["name"]] = item["name"] + return dict(sorted(values.items(), key=lambda it: it[1])) + + def prepare_match_part(self, values): + return {'$in': list(values.keys())} + + +@attr.s +class FilterDefinition: + type = attr.ib() + values = attr.ib(factory=list) def pretty_size(value, suffix='B'): @@ -49,4 +158,10 @@ def translate_provider_for_icon(sync_server, project, site): """ if site == sync_server.DEFAULT_SITE: return sync_server.DEFAULT_SITE - return sync_server.get_provider_for_site(project, site) + return sync_server.get_provider_for_site(site=site) + + +def get_item_by_id(model, object_id): + index = model.get_index(object_id) + item = model.data(index, FullItemRole) + return item diff --git a/openpype/modules/sync_server/tray/models.py b/openpype/modules/sync_server/tray/models.py index 3cc53c6ec4..efef039b8b 100644 --- a/openpype/modules/sync_server/tray/models.py +++ b/openpype/modules/sync_server/tray/models.py @@ -6,8 +6,10 @@ from Qt import QtCore from Qt.QtCore import Qt from avalon.tools.delegates import pretty_timestamp +from avalon.vendor import qtawesome from openpype.lib import PypeLogger +from openpype.api import get_local_site_id from openpype.modules.sync_server.tray import lib @@ -41,6 +43,9 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): PAGE_SIZE = 20 # default page size to query for REFRESH_SEC = 5000 # in seconds, requery DB for new status + refresh_started = QtCore.Signal() + 
refresh_finished = QtCore.Signal() + @property def dbcon(self): """ @@ -56,17 +61,52 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): """Returns project""" return self._project + @property + def column_filtering(self): + return self._column_filtering + + @property + def is_running(self): + return self._is_running + + @is_running.setter + def is_running(self, state): + self._is_running = state + def rowCount(self, _index): return len(self._data) - def columnCount(self, _index): + def columnCount(self, _index=None): return len(self._header) - def headerData(self, section, orientation, role): + def headerData(self, section, orientation, role=Qt.DisplayRole): + if section >= len(self.COLUMN_LABELS): + return + if role == Qt.DisplayRole: if orientation == Qt.Horizontal: return self.COLUMN_LABELS[section][1] + if role == lib.HeaderNameRole: + if orientation == Qt.Horizontal: + return self.COLUMN_LABELS[section][0] # return name + + @property + def can_edit(self): + """Returns true if some site is user local site, eg. 
could edit""" + return get_local_site_id() in (self.active_site, self.remote_site) + + def get_column(self, index): + """ + Returns info about column + + Args: + index (QModelIndex) + Returns: + (tuple): (COLUMN_NAME: COLUMN_LABEL) + """ + return self.COLUMN_LABELS[index] + def get_header_index(self, value): """ Returns index of 'value' in headers @@ -94,8 +134,7 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): actually queried (scrolled a couple of times to list more than single page of records) """ - if self.sync_server.is_paused() or \ - self.sync_server.is_project_paused(self.project): + if self.is_editing or not self.is_running: return self.refresh_started.emit() self.beginResetModel() @@ -103,10 +142,10 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): self._rec_loaded = 0 if not representations: - self.query = self.get_default_query(load_records) + self.query = self.get_query(load_records) representations = self.dbcon.aggregate(self.query) - self.add_page_records(self.local_site, self.remote_site, + self.add_page_records(self.active_site, self.remote_site, representations) self.endResetModel() self.refresh_finished.emit() @@ -138,13 +177,13 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): log.debug("fetchMore") items_to_fetch = min(self._total_records - self._rec_loaded, self.PAGE_SIZE) - self.query = self.get_default_query(self._rec_loaded) + self.query = self.get_query(self._rec_loaded) representations = self.dbcon.aggregate(self.query) self.beginInsertRows(index, self._rec_loaded, self._rec_loaded + items_to_fetch - 1) - self.add_page_records(self.local_site, self.remote_site, + self.add_page_records(self.active_site, self.remote_site, representations) self.endInsertRows() @@ -156,6 +195,8 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): Sort is happening on a DB side, model is reset, db queried again. + It remembers one last sort, adds it as secondary after new sort. 
+ Args: index (int): column index order (int): 0| @@ -170,8 +211,18 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): else: order = -1 - self.sort = {self.SORT_BY_COLUMN[index]: order, '_id': 1} - self.query = self.get_default_query() + backup_sort = dict(self.sort) + + self.sort = {self.SORT_BY_COLUMN[index]: order} # reset + # add last one + for key, val in backup_sort.items(): + if key != '_id' and key != self.SORT_BY_COLUMN[index]: + self.sort[key] = val + break + # add default one + self.sort['_id'] = 1 + + self.query = self.get_query() # import json # log.debug(json.dumps(self.query, indent=4).\ # replace('False', 'false').\ @@ -180,16 +231,86 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): representations = self.dbcon.aggregate(self.query) self.refresh(representations) - def set_filter(self, word_filter): + def set_word_filter(self, word_filter): """ Adds text value filtering Args: word_filter (str): string inputted by user """ - self.word_filter = word_filter + self._word_filter = word_filter self.refresh() + def get_filters(self): + """ + Returns all available filter editors per column_name keys. 
+ """ + filters = {} + for column_name, _ in self.COLUMN_LABELS: + filter_rec = self.COLUMN_FILTERS.get(column_name) + if filter_rec: + filter_rec.dbcon = self.dbcon + filters[column_name] = filter_rec + + return filters + + def get_column_filter(self, index): + """ + Returns filter object for column 'index + + Args: + index(int): index of column in header + + Returns: + (AbstractColumnFilter) + """ + column_name = self._header[index] + + filter_rec = self.COLUMN_FILTERS.get(column_name) + if filter_rec: + filter_rec.dbcon = self.dbcon # up-to-date db connection + + return filter_rec + + def set_column_filtering(self, checked_values): + """ + Sets dictionary used in '$match' part of MongoDB aggregate + + Args: + checked_values(dict): key:values ({'status':{1:"Foo",3:"Bar"}} + + Modifies: + self._column_filtering : {'status': {'$in': [1, 2, 3]}} + """ + filtering = {} + for column_name, dict_value in checked_values.items(): + column_f = self.COLUMN_FILTERS.get(column_name) + if not column_f: + continue + column_f.dbcon = self.dbcon + filtering[column_name] = column_f.prepare_match_part(dict_value) + + self._column_filtering = filtering + + def get_column_filter_values(self, index): + """ + Returns list of available values for filtering in the column + + Args: + index(int): index of column in header + + Returns: + (dict) of value: label shown in filtering menu + 'value' is used in MongoDB query, 'label' is human readable for + menu + for some columns ('subset') might be 'value' and 'label' same + """ + filter_rec = self.get_column_filter(index) + if not filter_rec: + return {} + + return filter_rec.values() + def set_project(self, project): """ Changes project, called after project selection is changed @@ -199,7 +320,7 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): """ self._project = project self.sync_server.set_sync_project_settings() - self.local_site = self.sync_server.get_active_site(self.project) + self.active_site = 
self.sync_server.get_active_site(self.project) self.remote_site = self.sync_server.get_remote_site(self.project) self.refresh() @@ -251,7 +372,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): ("files_count", "Files"), ("files_size", "Size"), ("priority", "Priority"), - ("state", "Status") + ("status", "Status") ] DEFAULT_SORT = { @@ -259,18 +380,27 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): "_id": 1 } SORT_BY_COLUMN = [ - "context.asset", # asset - "context.subset", # subset - "context.version", # version - "context.representation", # representation + "asset", # asset + "subset", # subset + "version", # version + "representation", # representation "updated_dt_local", # local created_dt "updated_dt_remote", # remote created_dt "files_count", # count of files "files_size", # file size of all files - "context.asset", # priority TODO - "status" # state + "priority", # priority + "status" # status ] + COLUMN_FILTERS = { + 'status': lib.PredefinedSetFilter('status', lib.STATUS), + 'subset': lib.RegexTextFilter('subset'), + 'asset': lib.RegexTextFilter('asset'), + 'representation': lib.MultiSelectFilter('representation') + } + + EDITABLE_COLUMNS = ["priority"] + refresh_started = QtCore.Signal() refresh_finished = QtCore.Signal() @@ -297,34 +427,38 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): files_count = attr.ib(default=None) files_size = attr.ib(default=None) priority = attr.ib(default=None) - state = attr.ib(default=None) + status = attr.ib(default=None) path = attr.ib(default=None) - def __init__(self, sync_server, header, project=None): - super(SyncRepresentationSummaryModel, self).__init__() + def __init__(self, sync_server, header, project=None, parent=None): + super(SyncRepresentationSummaryModel, self).__init__(parent=parent) self._header = header self._data = [] self._project = project self._rec_loaded = 0 self._total_records = 0 # how many documents query actually found - self.word_filter = None 
+ self._word_filter = None + self._column_filtering = {} + self._is_running = False + + self.edit_icon = qtawesome.icon("fa.edit", color="white") + self.is_editing = False + + self._word_filter = None - self._initialized = False if not self._project or self._project == lib.DUMMY_PROJECT: return self.sync_server = sync_server # TODO think about admin mode # this is for regular user, always only single local and single remote - self.local_site = self.sync_server.get_active_site(self.project) + self.active_site = self.sync_server.get_active_site(self.project) self.remote_site = self.sync_server.get_remote_site(self.project) - self.projection = self.get_default_projection() - self.sort = self.DEFAULT_SORT - self.query = self.get_default_query() - self.default_query = list(self.get_default_query()) + self.query = self.get_query() + self.default_query = list(self.get_query()) representations = self.dbcon.aggregate(self.query) self.refresh(representations) @@ -336,6 +470,9 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): def data(self, index, role): item = self._data[index.row()] + if role == lib.FullItemRole: + return item + header_value = self._header[index.column()] if role == lib.ProviderRole: if header_value == 'local_site': @@ -359,16 +496,23 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): if role == lib.FailedRole: if header_value == 'local_site': - return item.state == lib.STATUS[2] and item.local_progress < 1 + return item.status == lib.STATUS[2] and \ + item.local_progress < 1 if header_value == 'remote_site': - return item.state == lib.STATUS[2] and item.remote_progress < 1 + return item.status == lib.STATUS[2] and \ + item.remote_progress < 1 - if role == Qt.DisplayRole: + if role in (Qt.DisplayRole, Qt.EditRole): # because of ImageDelegate if header_value in ['remote_site', 'local_site']: return "" return attr.asdict(item)[self._header[index.column()]] + + if role == lib.EditIconRole: + if self.can_edit and header_value in 
self.EDITABLE_COLUMNS: + return self.edit_icon + if role == Qt.UserRole: return item._id @@ -397,7 +541,6 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): remote_site) for repre in result.get("paginatedResults"): - context = repre.get("context").pop() files = repre.get("files", []) if isinstance(files, dict): # aggregate returns dictionary files = [files] @@ -420,17 +563,17 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): avg_progress_local = lib.convert_progress( repre.get('avg_progress_local', '0')) - if context.get("version"): - version = "v{:0>3d}".format(context.get("version")) + if repre.get("version"): + version = "v{:0>3d}".format(repre.get("version")) else: version = "master" item = self.SyncRepresentation( repre.get("_id"), - context.get("asset"), - context.get("subset"), + repre.get("asset"), + repre.get("subset"), version, - context.get("representation"), + repre.get("representation"), local_updated, remote_updated, local_site, @@ -441,7 +584,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): avg_progress_remote, repre.get("files_count", 1), lib.pretty_size(repre.get("files_size", 0)), - 1, + repre.get("priority"), lib.STATUS[repre.get("status", -1)], files[0].get('path') ) @@ -449,7 +592,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): self._data.append(item) self._rec_loaded += 1 - def get_default_query(self, limit=0): + def get_query(self, limit=0): """ Returns basic aggregate query for main table. 
@@ -461,7 +604,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): 'sync_dt' - same for remote side 'local_site' - progress of repr on local side, 1 = finished 'remote_site' - progress on remote side, calculates from files - 'state' - + 'status' - 0 - in progress 1 - failed 2 - queued @@ -481,7 +624,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): if limit == 0: limit = SyncRepresentationSummaryModel.PAGE_SIZE - return [ + aggr = [ {"$match": self.get_match_part()}, {'$unwind': '$files'}, # merge potentially unwinded records back to single per repre @@ -492,7 +635,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): }}, 'order_local': { '$filter': {'input': '$files.sites', 'as': 'p', - 'cond': {'$eq': ['$$p.name', self.local_site]} + 'cond': {'$eq': ['$$p.name', self.active_site]} }} }}, {'$addFields': { @@ -560,6 +703,16 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): '$cond': [{'$size': "$order_local.paused"}, 1, 0]}, + 'priority': { + '$cond': [ + {'$size': '$order_local.priority'}, + {'$first': '$order_local.priority'}, + {'$cond': [ + {'$size': '$order_remote.priority'}, + {'$first': '$order_remote.priority'}, + self.sync_server.DEFAULT_PRIORITY]} + ] + }, }}, {'$group': { '_id': '$_id', @@ -582,18 +735,29 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): 'failed_local_tries': {'$sum': '$failed_local_tries'}, 'paused_remote': {'$sum': '$paused_remote'}, 'paused_local': {'$sum': '$paused_local'}, - 'updated_dt_local': {'$max': "$updated_dt_local"} + 'updated_dt_local': {'$max': "$updated_dt_local"}, + 'priority': {'$max': "$priority"}, }}, - {"$project": self.projection}, - {"$sort": self.sort}, - { + {"$project": self.projection} + ] + + if self.column_filtering: + aggr.append( + {"$match": self.column_filtering} + ) + + aggr.extend( + [{"$sort": self.sort}, + { '$facet': { 'paginatedResults': [{'$skip': self._rec_loaded}, {'$limit': limit}], 'totalCount': [{'$count': 
'count'}] } - } - ] + }] + ) + + return aggr def get_match_part(self): """ @@ -611,25 +775,26 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): """ base_match = { "type": "representation", - 'files.sites.name': {'$all': [self.local_site, + 'files.sites.name': {'$all': [self.active_site, self.remote_site]} } - if not self.word_filter: + if not self._word_filter: return base_match else: - regex_str = '.*{}.*'.format(self.word_filter) + regex_str = '.*{}.*'.format(self._word_filter) base_match['$or'] = [ {'context.subset': {'$regex': regex_str, '$options': 'i'}}, {'context.asset': {'$regex': regex_str, '$options': 'i'}}, {'context.representation': {'$regex': regex_str, '$options': 'i'}}] - if ObjectId.is_valid(self.word_filter): - base_match['$or'] = [{'_id': ObjectId(self.word_filter)}] + if ObjectId.is_valid(self._word_filter): + base_match['$or'] = [{'_id': ObjectId(self._word_filter)}] return base_match - def get_default_projection(self): + @property + def projection(self): """ Projection part for aggregate query. @@ -639,10 +804,10 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): (dict) """ return { - "context.subset": 1, - "context.asset": 1, - "context.version": 1, - "context.representation": 1, + "subset": {"$first": "$context.subset"}, + "asset": {"$first": "$context.asset"}, + "version": {"$first": "$context.version"}, + "representation": {"$first": "$context.representation"}, "data.path": 1, "files": 1, 'files_count': 1, @@ -653,6 +818,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): 'updated_dt_local': 1, 'paused_remote': 1, 'paused_local': 1, + 'priority': 1, 'status': { '$switch': { 'branches': [ @@ -699,6 +865,35 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): } } + def set_priority_data(self, index, value): + """ + Sets 'priority' flag and value on local site for selected reprs. + + Args: + index (QItemIndex): selected index from View + value (int): priority value + + Updates DB. 
+ Potentially should allow set priority to any site when user + management is implemented. + """ + if not self.can_edit: + return + + repre_id = self.data(index, Qt.UserRole) + + representation = list(self.dbcon.find({"type": "representation", + "_id": repre_id})) + if representation: + self.sync_server.update_db(self.project, None, None, + representation.pop(), + get_local_site_id(), + priority=value) + self.is_editing = False + + # all other approaches messed up selection to 0th index + self.timer.setInterval(0) + class SyncRepresentationDetailModel(_SyncRepresentationModel): """ @@ -721,7 +916,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): ("remote_site", "Remote site"), ("files_size", "Size"), ("priority", "Priority"), - ("state", "Status") + ("status", "Status") ] PAGE_SIZE = 30 @@ -733,12 +928,16 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): "updated_dt_local", # local created_dt "updated_dt_remote", # remote created_dt "size", # remote progress - "context.asset", # priority TODO - "status" # state + "priority", # priority + "status" # status ] - refresh_started = QtCore.Signal() - refresh_finished = QtCore.Signal() + COLUMN_FILTERS = { + 'status': lib.PredefinedSetFilter('status', lib.STATUS), + 'file': lib.RegexTextFilter('file'), + } + + EDITABLE_COLUMNS = ["priority"] @attr.s class SyncRepresentationDetail: @@ -759,7 +958,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): remote_progress = attr.ib(default=None) size = attr.ib(default=None) priority = attr.ib(default=None) - state = attr.ib(default=None) + status = attr.ib(default=None) tries = attr.ib(default=None) error = attr.ib(default=None) path = attr.ib(default=None) @@ -772,22 +971,23 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): self._project = project self._rec_loaded = 0 self._total_records = 0 # how many documents query actually found - self.word_filter = None + self._word_filter = None self._id = _id - 
self._initialized = False + self._column_filtering = {} + self._is_running = False + + self.is_editing = False + self.edit_icon = qtawesome.icon("fa.edit", color="white") self.sync_server = sync_server # TODO think about admin mode # this is for regular user, always only single local and single remote - self.local_site = self.sync_server.get_active_site(self.project) + self.active_site = self.sync_server.get_active_site(self.project) self.remote_site = self.sync_server.get_remote_site(self.project) self.sort = self.DEFAULT_SORT - # in case we would like to hide/show some columns - self.projection = self.get_default_projection() - - self.query = self.get_default_query() + self.query = self.get_query() representations = self.dbcon.aggregate(self.query) self.refresh(representations) @@ -798,6 +998,9 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): def data(self, index, role): item = self._data[index.row()] + if role == lib.FullItemRole: + return item + header_value = self._header[index.column()] if role == lib.ProviderRole: if header_value == 'local_site': @@ -821,15 +1024,23 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): if role == lib.FailedRole: if header_value == 'local_site': - return item.state == lib.STATUS[2] and item.local_progress < 1 + return item.status == lib.STATUS[2] and \ + item.local_progress < 1 if header_value == 'remote_site': - return item.state == lib.STATUS[2] and item.remote_progress < 1 + return item.status == lib.STATUS[2] and \ + item.remote_progress < 1 - if role == Qt.DisplayRole: + if role in (Qt.DisplayRole, Qt.EditRole): # because of ImageDelegate if header_value in ['remote_site', 'local_site']: return "" + return attr.asdict(item)[self._header[index.column()]] + + if role == lib.EditIconRole: + if self.can_edit and header_value in self.EDITABLE_COLUMNS: + return self.edit_icon + if role == Qt.UserRole: return item._id @@ -899,7 +1110,7 @@ class 
SyncRepresentationDetailModel(_SyncRepresentationModel): local_progress, remote_progress, lib.pretty_size(file.get('size', 0)), - 1, + repre.get("priority"), lib.STATUS[repre.get("status", -1)], repre.get("tries"), '\n'.join(errors), @@ -909,7 +1120,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): self._data.append(item) self._rec_loaded += 1 - def get_default_query(self, limit=0): + def get_query(self, limit=0): """ Gets query that gets used when no extra sorting, filtering or projecting is needed. @@ -923,7 +1134,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): if limit == 0: limit = SyncRepresentationSummaryModel.PAGE_SIZE - return [ + aggr = [ {"$match": self.get_match_part()}, {"$unwind": "$files"}, {'$addFields': { @@ -933,7 +1144,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): }}, 'order_local': { '$filter': {'input': '$files.sites', 'as': 'p', - 'cond': {'$eq': ['$$p.name', self.local_site]} + 'cond': {'$eq': ['$$p.name', self.active_site]} }} }}, {'$addFields': { @@ -1017,9 +1228,28 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): "$order_remote.tries", [] ]} - ]}} + ]}}, + 'priority': { + '$cond': [ + {'$size': '$order_local.priority'}, + {'$first': '$order_local.priority'}, + {'$cond': [ + {'$size': '$order_remote.priority'}, + {'$first': '$order_remote.priority'}, + self.sync_server.DEFAULT_PRIORITY]} + ] + }, }}, - {"$project": self.projection}, + {"$project": self.projection} + ] + + if self.column_filtering: + aggr.append( + {"$match": self.column_filtering} + ) + print(self.column_filtering) + + aggr.extend([ {"$sort": self.sort}, { '$facet': { @@ -1028,7 +1258,9 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): 'totalCount': [{'$count': 'count'}] } } - ] + ]) + + return aggr def get_match_part(self): """ @@ -1038,20 +1270,21 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): Returns: (dict) """ - if not self.word_filter: + if not 
self._word_filter: return { "type": "representation", "_id": self._id } else: - regex_str = '.*{}.*'.format(self.word_filter) + regex_str = '.*{}.*'.format(self._word_filter) return { "type": "representation", "_id": self._id, '$or': [{'files.path': {'$regex': regex_str, '$options': 'i'}}] } - def get_default_projection(self): + @property + def projection(self): """ Projection part for aggregate query. @@ -1071,6 +1304,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): 'failed_remote_error': 1, 'failed_local_error': 1, 'tries': 1, + 'priority': 1, 'status': { '$switch': { 'branches': [ @@ -1122,3 +1356,37 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): }, 'data.path': 1 } + + def set_priority_data(self, index, value): + """ + Sets 'priority' flag and value on local site for selected reprs. + + Args: + index (QItemIndex): selected index from View + value (int): priority value + + Updates DB + """ + if not self.can_edit: + return + + file_id = self.data(index, Qt.UserRole) + + updated_file = None + # conversion from cursor to list + representations = list(self.dbcon.find({"type": "representation", + "_id": self._id})) + + representation = representations.pop() + for repre_file in representation["files"]: + if repre_file["_id"] == file_id: + updated_file = repre_file + break + + if representation and updated_file: + self.sync_server.update_db(self.project, None, updated_file, + representation, get_local_site_id(), + priority=value) + self.is_editing = False + # all other approaches messed up selection to 0th index + self.timer.setInterval(0) diff --git a/openpype/modules/sync_server/tray/widgets.py b/openpype/modules/sync_server/tray/widgets.py index 5071ffa2b0..eae912206e 100644 --- a/openpype/modules/sync_server/tray/widgets.py +++ b/openpype/modules/sync_server/tray/widgets.py @@ -1,6 +1,7 @@ import os import subprocess import sys +from functools import partial from Qt import QtWidgets, QtCore, QtGui from Qt.QtCore import Qt @@ 
-14,6 +15,7 @@ from openpype.api import get_local_site_id from openpype.lib import PypeLogger from avalon.tools.delegates import pretty_timestamp +from avalon.vendor import qtawesome from openpype.modules.sync_server.tray.models import ( SyncRepresentationSummaryModel, @@ -21,6 +23,7 @@ from openpype.modules.sync_server.tray.models import ( ) from openpype.modules.sync_server.tray import lib +from openpype.modules.sync_server.tray import delegates log = PypeLogger().get_logger("SyncServer") @@ -40,6 +43,8 @@ class SyncProjectListWidget(ProjectListWidget): self.local_site = None self.icons = {} + self.layout().setContentsMargins(0, 0, 0, 0) + def validate_context_change(self): return True @@ -90,17 +95,19 @@ class SyncProjectListWidget(ProjectListWidget): self.project_name = point_index.data(QtCore.Qt.DisplayRole) - menu = QtWidgets.QMenu() - menu.setStyleSheet(style.load_stylesheet()) + menu = QtWidgets.QMenu(self) actions_mapping = {} - if self.sync_server.is_project_paused(self.project_name): - action = QtWidgets.QAction("Unpause") - actions_mapping[action] = self._unpause - else: - action = QtWidgets.QAction("Pause") - actions_mapping[action] = self._pause - menu.addAction(action) + can_edit = self.model.can_edit + + if can_edit: + if self.sync_server.is_project_paused(self.project_name): + action = QtWidgets.QAction("Unpause") + actions_mapping[action] = self._unpause + else: + action = QtWidgets.QAction("Pause") + actions_mapping[action] = self._pause + menu.addAction(action) if self.local_site == get_local_site_id(): action = QtWidgets.QAction("Clear local project") @@ -132,7 +139,7 @@ class SyncProjectListWidget(ProjectListWidget): self.refresh() -class SyncRepresentationWidget(QtWidgets.QWidget): +class _SyncRepresentationWidget(QtWidgets.QWidget): """ Summary dialog with list of representations that matches current settings 'local_site' and 'remote_site'. 
@@ -140,87 +147,12 @@ class SyncRepresentationWidget(QtWidgets.QWidget): active_changed = QtCore.Signal() # active index changed message_generated = QtCore.Signal(str) - default_widths = ( - ("asset", 220), - ("subset", 190), - ("version", 55), - ("representation", 95), - ("local_site", 170), - ("remote_site", 170), - ("files_count", 50), - ("files_size", 60), - ("priority", 50), - ("state", 110) - ) + def _selection_changed(self, _new_selected, _all_selected): + idxs = self.selection_model.selectedRows() + self._selected_ids = set() - def __init__(self, sync_server, project=None, parent=None): - super(SyncRepresentationWidget, self).__init__(parent) - - self.sync_server = sync_server - - self._selected_id = None # keep last selected _id - self.representation_id = None - self.site_name = None # to pause/unpause representation - - self.filter = QtWidgets.QLineEdit() - self.filter.setPlaceholderText("Filter representations..") - - self._scrollbar_pos = None - - top_bar_layout = QtWidgets.QHBoxLayout() - top_bar_layout.addWidget(self.filter) - - self.table_view = QtWidgets.QTableView() - headers = [item[0] for item in self.default_widths] - - model = SyncRepresentationSummaryModel(sync_server, headers, project) - self.table_view.setModel(model) - self.table_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) - self.table_view.setSelectionMode( - QtWidgets.QAbstractItemView.SingleSelection) - self.table_view.setSelectionBehavior( - QtWidgets.QAbstractItemView.SelectRows) - self.table_view.horizontalHeader().setSortIndicator( - -1, Qt.AscendingOrder) - self.table_view.setSortingEnabled(True) - self.table_view.horizontalHeader().setSortIndicatorShown(True) - self.table_view.setAlternatingRowColors(True) - self.table_view.verticalHeader().hide() - - column = self.table_view.model().get_header_index("local_site") - delegate = ImageDelegate(self) - self.table_view.setItemDelegateForColumn(column, delegate) - - column = 
self.table_view.model().get_header_index("remote_site") - delegate = ImageDelegate(self) - self.table_view.setItemDelegateForColumn(column, delegate) - - for column_name, width in self.default_widths: - idx = model.get_header_index(column_name) - self.table_view.setColumnWidth(idx, width) - - layout = QtWidgets.QVBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - layout.addLayout(top_bar_layout) - layout.addWidget(self.table_view) - - self.table_view.doubleClicked.connect(self._double_clicked) - self.filter.textChanged.connect(lambda: model.set_filter( - self.filter.text())) - self.table_view.customContextMenuRequested.connect( - self._on_context_menu) - - model.refresh_started.connect(self._save_scrollbar) - model.refresh_finished.connect(self._set_scrollbar) - self.table_view.model().modelReset.connect(self._set_selection) - - self.selection_model = self.table_view.selectionModel() - self.selection_model.selectionChanged.connect(self._selection_changed) - - def _selection_changed(self, _new_selection): - index = self.selection_model.currentIndex() - self._selected_id = \ - self.table_view.model().data(index, Qt.UserRole) + for index in idxs: + self._selected_ids.add(self.model.data(index, Qt.UserRole)) def _set_selection(self): """ @@ -228,151 +160,185 @@ class SyncRepresentationWidget(QtWidgets.QWidget): Keep selection during model refresh. 
""" - if self._selected_id: - index = self.table_view.model().get_index(self._selected_id) + existing_ids = set() + for selected_id in self._selected_ids: + index = self.model.get_index(selected_id) if index and index.isValid(): mode = QtCore.QItemSelectionModel.Select | \ QtCore.QItemSelectionModel.Rows - self.selection_model.setCurrentIndex(index, mode) - else: - self._selected_id = None + self.selection_model.select(index, mode) + existing_ids.add(selected_id) + + self._selected_ids = existing_ids def _double_clicked(self, index): """ Opens representation dialog with all files after doubleclick """ - _id = self.table_view.model().data(index, Qt.UserRole) - detail_window = SyncServerDetailWindow( - self.sync_server, _id, self.table_view.model().project) - detail_window.exec() + # priority editing + if self.model.can_edit: + column_name = self.model.get_column(index.column()) + if column_name[0] in self.model.EDITABLE_COLUMNS: + self.model.is_editing = True + self.table_view.openPersistentEditor(index) + return + _id = self.model.data(index, Qt.UserRole) + detail_window = SyncServerDetailWindow( + self.sync_server, _id, self.model.project, parent=self) + detail_window.exec() + def _on_context_menu(self, point): """ Shows menu with loader actions on Right-click. + + Supports multiple selects - adds all available actions, each + action handles if it appropriate for item itself, if not it skips. """ + is_multi = len(self._selected_ids) > 1 point_index = self.table_view.indexAt(point) - if not point_index.isValid(): + if not point_index.isValid() and not is_multi: return - self.item = self.table_view.model()._data[point_index.row()] - self.representation_id = self.item._id - log.debug("menu representation _id:: {}". 
- format(self.representation_id)) + if is_multi: + index = self.model.get_index(list(self._selected_ids)[0]) + item = self.model.data(index, lib.FullItemRole) + else: + item = self.model.data(point_index, lib.FullItemRole) + + can_edit = self.model.can_edit + action_kwarg_map, actions_mapping, menu = self._prepare_menu(item, + is_multi, + can_edit) + + result = menu.exec_(QtGui.QCursor.pos()) + if result: + to_run = actions_mapping[result] + to_run_kwargs = action_kwarg_map.get(result, {}) + if to_run: + to_run(**to_run_kwargs) + + self.model.refresh() + + def _prepare_menu(self, item, is_multi, can_edit): + menu = QtWidgets.QMenu(self) - menu = QtWidgets.QMenu() - menu.setStyleSheet(style.load_stylesheet()) actions_mapping = {} - actions_kwargs_mapping = {} + action_kwarg_map = {} - local_site = self.item.local_site - local_progress = self.item.local_progress - remote_site = self.item.remote_site - remote_progress = self.item.remote_progress + active_site = self.model.active_site + remote_site = self.model.remote_site - for site, progress in {local_site: local_progress, + local_progress = item.local_progress + remote_progress = item.remote_progress + + project = self.model.project + + for site, progress in {active_site: local_progress, remote_site: remote_progress}.items(): - project = self.table_view.model().project - provider = self.sync_server.get_provider_for_site(project, - site) + provider = self.sync_server.get_provider_for_site(site=site) if provider == 'local_drive': if 'studio' in site: txt = " studio version" else: txt = " local version" action = QtWidgets.QAction("Open in explorer" + txt) - if progress == 1.0: + if progress == 1.0 or is_multi: actions_mapping[action] = self._open_in_explorer - actions_kwargs_mapping[action] = {'site': site} + action_kwarg_map[action] = \ + self._get_action_kwargs(site) menu.addAction(action) - # progress smaller then 1.0 --> in progress or queued - if local_progress < 1.0: - self.site_name = local_site - else: - 
self.site_name = remote_site - - if self.item.state in [lib.STATUS[0], lib.STATUS[1]]: - action = QtWidgets.QAction("Pause") - actions_mapping[action] = self._pause - menu.addAction(action) - - if self.item.state == lib.STATUS[3]: - action = QtWidgets.QAction("Unpause") - actions_mapping[action] = self._unpause - menu.addAction(action) - - # if self.item.state == lib.STATUS[1]: - # action = QtWidgets.QAction("Open error detail") - # actions_mapping[action] = self._show_detail - # menu.addAction(action) - - if remote_progress == 1.0: + if can_edit and (remote_progress == 1.0 or is_multi): action = QtWidgets.QAction("Re-sync Active site") - actions_mapping[action] = self._reset_local_site + action_kwarg_map[action] = self._get_action_kwargs(active_site) + actions_mapping[action] = self._reset_site menu.addAction(action) - if local_progress == 1.0: + if can_edit and (local_progress == 1.0 or is_multi): action = QtWidgets.QAction("Re-sync Remote site") - actions_mapping[action] = self._reset_remote_site + action_kwarg_map[action] = self._get_action_kwargs(remote_site) + actions_mapping[action] = self._reset_site menu.addAction(action) - if local_site != self.sync_server.DEFAULT_SITE: + if can_edit and active_site == get_local_site_id(): action = QtWidgets.QAction("Completely remove from local") + action_kwarg_map[action] = self._get_action_kwargs(active_site) actions_mapping[action] = self._remove_site menu.addAction(action) - else: - action = QtWidgets.QAction("Mark for sync to local") - actions_mapping[action] = self._add_site + + if can_edit: + action = QtWidgets.QAction("Change priority") + action_kwarg_map[action] = self._get_action_kwargs(active_site) + actions_mapping[action] = self._change_priority menu.addAction(action) + # # temp for testing only !!! 
+ # action = QtWidgets.QAction("Download") + # action_kwarg_map[action] = self._get_action_kwargs(active_site) + # actions_mapping[action] = self._add_site + # menu.addAction(action) + if not actions_mapping: action = QtWidgets.QAction("< No action >") actions_mapping[action] = None menu.addAction(action) - result = menu.exec_(QtGui.QCursor.pos()) - if result: - to_run = actions_mapping[result] - to_run_kwargs = actions_kwargs_mapping.get(result, {}) - if to_run: - to_run(**to_run_kwargs) + return action_kwarg_map, actions_mapping, menu - self.table_view.model().refresh() + def _pause(self, selected_ids=None): + log.debug("Pause {}".format(selected_ids)) + for representation_id in selected_ids: + item = lib.get_item_by_id(self.model, representation_id) + if item.status not in [lib.STATUS[0], lib.STATUS[1]]: + continue + for site_name in [self.model.active_site, self.model.remote_site]: + check_progress = self._get_progress(item, site_name) + if check_progress < 1: + self.sync_server.pause_representation(self.model.project, + representation_id, + site_name) - def _pause(self): - self.sync_server.pause_representation(self.table_view.model().project, - self.representation_id, - self.site_name) - self.site_name = None - self.message_generated.emit("Paused {}".format(self.representation_id)) + self.message_generated.emit("Paused {}".format(representation_id)) - def _unpause(self): - self.sync_server.unpause_representation( - self.table_view.model().project, - self.representation_id, - self.site_name) - self.site_name = None - self.message_generated.emit("Unpaused {}".format( - self.representation_id)) + def _unpause(self, selected_ids=None): + log.debug("UnPause {}".format(selected_ids)) + for representation_id in selected_ids: + item = lib.get_item_by_id(self.model, representation_id) + if item.status not in lib.STATUS[3]: + continue + for site_name in [self.model.active_site, self.model.remote_site]: + check_progress = self._get_progress(item, site_name) + if 
check_progress < 1: + self.sync_server.unpause_representation( + self.model.project, + representation_id, + site_name) + + self.message_generated.emit("Unpause {}".format(representation_id)) # temporary here for testing, will be removed TODO - def _add_site(self): - log.info(self.representation_id) - project_name = self.table_view.model().project - local_site_name = get_local_site_id() - try: - self.sync_server.add_site( - project_name, - self.representation_id, - local_site_name - ) - self.message_generated.emit( - "Site {} added for {}".format(local_site_name, - self.representation_id)) - except ValueError as exp: - self.message_generated.emit("Error {}".format(str(exp))) + def _add_site(self, selected_ids=None, site_name=None): + log.debug("Add site {}:{}".format(selected_ids, site_name)) + for representation_id in selected_ids: + item = lib.get_item_by_id(self.model, representation_id) + if item.local_site == site_name or item.remote_site == site_name: + # site already exists skip + continue - def _remove_site(self): + try: + self.sync_server.add_site( + self.model.project, + representation_id, + site_name) + self.message_generated.emit( + "Site {} added for {}".format(site_name, + representation_id)) + except ValueError as exp: + self.message_generated.emit("Error {}".format(str(exp))) + self.sync_server.reset_timer() + + def _remove_site(self, selected_ids=None, site_name=None): """ Removes site record AND files. @@ -382,65 +348,100 @@ class SyncRepresentationWidget(QtWidgets.QWidget): This could only happen when artist work on local machine, not connected to studio mounted drives. 
""" - log.info("Removing {}".format(self.representation_id)) - try: - local_site = get_local_site_id() - self.sync_server.remove_site( - self.table_view.model().project, - self.representation_id, - local_site, - True) - self.message_generated.emit("Site {} removed".format(local_site)) - except ValueError as exp: - self.message_generated.emit("Error {}".format(str(exp))) - self.table_view.model().refresh( - load_records=self.table_view.model()._rec_loaded) + log.debug("Remove site {}:{}".format(selected_ids, site_name)) + for representation_id in selected_ids: + log.info("Removing {}".format(representation_id)) + try: + self.sync_server.remove_site( + self.model.project, + representation_id, + site_name, + True) + self.message_generated.emit( + "Site {} removed".format(site_name)) + except ValueError as exp: + self.message_generated.emit("Error {}".format(str(exp))) - def _reset_local_site(self): + self.model.refresh( + load_records=self.model._rec_loaded) + self.sync_server.reset_timer() + + def _reset_site(self, selected_ids=None, site_name=None): """ Removes errors or success metadata for particular file >> forces redo of upload/download """ - self.sync_server.reset_provider_for_file( - self.table_view.model().project, - self.representation_id, - 'local') - self.table_view.model().refresh( - load_records=self.table_view.model()._rec_loaded) + log.debug("Reset site {}:{}".format(selected_ids, site_name)) + for representation_id in selected_ids: + item = lib.get_item_by_id(self.model, representation_id) + check_progress = self._get_progress(item, site_name, True) - def _reset_remote_site(self): - """ - Removes errors or success metadata for particular file >> forces - redo of upload/download - """ - self.sync_server.reset_provider_for_file( - self.table_view.model().project, - self.representation_id, - 'remote') - self.table_view.model().refresh( - load_records=self.table_view.model()._rec_loaded) + # do not reset if opposite side is not fully there + if 
check_progress != 1: + log.debug("Not fully available {} on other side, skipping". + format(check_progress)) + continue - def _open_in_explorer(self, site): - if not self.item: - return + self.sync_server.reset_provider_for_file( + self.model.project, + representation_id, + site_name=site_name, + force=True) - fpath = self.item.path - project = self.table_view.model().project - fpath = self.sync_server.get_local_file_path(project, - site, - fpath) + self.model.refresh( + load_records=self.model._rec_loaded) + self.sync_server.reset_timer() - fpath = os.path.normpath(os.path.dirname(fpath)) - if os.path.isdir(fpath): - if 'win' in sys.platform: # windows - subprocess.Popen('explorer "%s"' % fpath) - elif sys.platform == 'darwin': # macOS - subprocess.Popen(['open', fpath]) - else: # linux - try: - subprocess.Popen(['xdg-open', fpath]) - except OSError: - raise OSError('unsupported xdg-open call??') + def _open_in_explorer(self, selected_ids=None, site_name=None): + log.debug("Open in Explorer {}:{}".format(selected_ids, site_name)) + for selected_id in selected_ids: + item = lib.get_item_by_id(self.model, selected_id) + if not item: + return + + fpath = item.path + project = self.model.project + fpath = self.sync_server.get_local_file_path(project, + site_name, + fpath) + + fpath = os.path.normpath(os.path.dirname(fpath)) + if os.path.isdir(fpath): + if 'win' in sys.platform: # windows + subprocess.Popen('explorer "%s"' % fpath) + elif sys.platform == 'darwin': # macOS + subprocess.Popen(['open', fpath]) + else: # linux + try: + subprocess.Popen(['xdg-open', fpath]) + except OSError: + raise OSError('unsupported xdg-open call??') + + def _change_priority(self, **kwargs): + """Open editor to change priority on first selected row""" + if self._selected_ids: + # get_index returns dummy index with column equals to 0 + index = self.model.get_index(list(self._selected_ids)[0]) + column_no = self.model.get_header_index("priority") # real column + real_index = 
self.model.index(index.row(), column_no) + self.model.is_editing = True + self.table_view.openPersistentEditor(real_index) + + def _get_progress(self, item, site_name, opposite=False): + """Returns progress value according to site (side)""" + progress = {'local': item.local_progress, + 'remote': item.remote_progress} + side = 'remote' + if site_name == self.model.active_site: + side = 'local' + if opposite: + side = 'remote' if side == 'local' else 'local' + + return progress[side] + + def _get_action_kwargs(self, site_name): + """Default format of kwargs for action""" + return {"selected_ids": self._selected_ids, "site_name": site_name} def _save_scrollbar(self): self._scrollbar_pos = self.table_view.verticalScrollBar().value() @@ -450,312 +451,125 @@ class SyncRepresentationWidget(QtWidgets.QWidget): self.table_view.verticalScrollBar().setValue(self._scrollbar_pos) -class SyncRepresentationDetailWidget(QtWidgets.QWidget): - """ - Widget to display list of synchronizable files for single repre. 
- - Args: - _id (str): representation _id - project (str): name of project with repre - parent (QDialog): SyncServerDetailWindow - """ - active_changed = QtCore.Signal() # active index changed +class SyncRepresentationSummaryWidget(_SyncRepresentationWidget): default_widths = ( - ("file", 290), - ("local_site", 185), - ("remote_site", 185), - ("size", 60), - ("priority", 25), - ("state", 110) + ("asset", 190), + ("subset", 170), + ("version", 60), + ("representation", 145), + ("local_site", 160), + ("remote_site", 160), + ("files_count", 50), + ("files_size", 60), + ("priority", 70), + ("status", 110) ) - def __init__(self, sync_server, _id=None, project=None, parent=None): - super(SyncRepresentationDetailWidget, self).__init__(parent) - - log.debug("Representation_id:{}".format(_id)) - self.representation_id = _id - self.item = None # set to item that mouse was clicked over - self.project = project + def __init__(self, sync_server, project=None, parent=None): + super(SyncRepresentationSummaryWidget, self).__init__(parent) self.sync_server = sync_server - self._selected_id = None + self._selected_ids = set() # keep last selected _id - self.filter = QtWidgets.QLineEdit() - self.filter.setPlaceholderText("Filter representation..") + txt_filter = QtWidgets.QLineEdit() + txt_filter.setPlaceholderText("Quick filter representations..") + txt_filter.setClearButtonEnabled(True) + txt_filter.addAction( + qtawesome.icon("fa.filter", color="gray"), + QtWidgets.QLineEdit.LeadingPosition) + self.txt_filter = txt_filter self._scrollbar_pos = None top_bar_layout = QtWidgets.QHBoxLayout() - top_bar_layout.addWidget(self.filter) + top_bar_layout.addWidget(self.txt_filter) - self.table_view = QtWidgets.QTableView() + table_view = QtWidgets.QTableView() headers = [item[0] for item in self.default_widths] - model = SyncRepresentationDetailModel(sync_server, headers, _id, - project) - self.table_view.setModel(model) - self.table_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) - 
self.table_view.setSelectionMode( - QtWidgets.QAbstractItemView.SingleSelection) - self.table_view.setSelectionBehavior( - QtWidgets.QTableView.SelectRows) - self.table_view.horizontalHeader().setSortIndicator(-1, - Qt.AscendingOrder) - self.table_view.setSortingEnabled(True) - self.table_view.horizontalHeader().setSortIndicatorShown(True) - self.table_view.setAlternatingRowColors(True) - self.table_view.verticalHeader().hide() + model = SyncRepresentationSummaryModel(sync_server, headers, project, + parent=self) + table_view.setModel(model) + table_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + table_view.setSelectionMode( + QtWidgets.QAbstractItemView.ExtendedSelection) + table_view.setSelectionBehavior( + QtWidgets.QAbstractItemView.SelectRows) + table_view.horizontalHeader().setSortIndicator( + -1, Qt.AscendingOrder) + table_view.setAlternatingRowColors(True) + table_view.verticalHeader().hide() + table_view.viewport().setAttribute(QtCore.Qt.WA_Hover, True) - column = self.table_view.model().get_header_index("local_site") - delegate = ImageDelegate(self) - self.table_view.setItemDelegateForColumn(column, delegate) + column = table_view.model().get_header_index("local_site") + delegate = delegates.ImageDelegate(self) + table_view.setItemDelegateForColumn(column, delegate) - column = self.table_view.model().get_header_index("remote_site") - delegate = ImageDelegate(self) - self.table_view.setItemDelegateForColumn(column, delegate) + column = table_view.model().get_header_index("remote_site") + delegate = delegates.ImageDelegate(self) + table_view.setItemDelegateForColumn(column, delegate) - for column_name, width in self.default_widths: - idx = model.get_header_index(column_name) - self.table_view.setColumnWidth(idx, width) + column = table_view.model().get_header_index("priority") + priority_delegate = delegates.PriorityDelegate(self) + table_view.setItemDelegateForColumn(column, priority_delegate) layout = QtWidgets.QVBoxLayout(self) 
layout.setContentsMargins(0, 0, 0, 0) layout.addLayout(top_bar_layout) - layout.addWidget(self.table_view) + layout.addWidget(table_view) - self.filter.textChanged.connect(lambda: model.set_filter( - self.filter.text())) - self.table_view.customContextMenuRequested.connect( - self._on_context_menu) + self.table_view = table_view + self.model = model + + horizontal_header = HorizontalHeader(self) + + table_view.setHorizontalHeader(horizontal_header) + table_view.setSortingEnabled(True) + + for column_name, width in self.default_widths: + idx = model.get_header_index(column_name) + table_view.setColumnWidth(idx, width) + + table_view.doubleClicked.connect(self._double_clicked) + self.txt_filter.textChanged.connect(lambda: model.set_word_filter( + self.txt_filter.text())) + table_view.customContextMenuRequested.connect(self._on_context_menu) model.refresh_started.connect(self._save_scrollbar) model.refresh_finished.connect(self._set_scrollbar) - self.table_view.model().modelReset.connect(self._set_selection) + model.modelReset.connect(self._set_selection) self.selection_model = self.table_view.selectionModel() self.selection_model.selectionChanged.connect(self._selection_changed) - def _selection_changed(self): - index = self.selection_model.currentIndex() - self._selected_id = self.table_view.model().data(index, Qt.UserRole) + def _prepare_menu(self, item, is_multi, can_edit): + action_kwarg_map, actions_mapping, menu = \ + super()._prepare_menu(item, is_multi, can_edit) - def _set_selection(self): - """ - Sets selection to 'self._selected_id' if exists. - - Keep selection during model refresh. 
- """ - if self._selected_id: - index = self.table_view.model().get_index(self._selected_id) - if index and index.isValid(): - mode = QtCore.QItemSelectionModel.Select | \ - QtCore.QItemSelectionModel.Rows - self.selection_model.setCurrentIndex(index, mode) - else: - self._selected_id = None - - def _show_detail(self): - """ - Shows windows with error message for failed sync of a file. - """ - dt = max(self.item.created_dt, self.item.sync_dt) - detail_window = SyncRepresentationErrorWindow(self.item._id, - self.project, - dt, - self.item.tries, - self.item.error) - detail_window.exec() - - def _on_context_menu(self, point): - """ - Shows menu with loader actions on Right-click. - """ - point_index = self.table_view.indexAt(point) - if not point_index.isValid(): - return - - self.item = self.table_view.model()._data[point_index.row()] - - menu = QtWidgets.QMenu() - menu.setStyleSheet(style.load_stylesheet()) - actions_mapping = {} - actions_kwargs_mapping = {} - - local_site = self.item.local_site - local_progress = self.item.local_progress - remote_site = self.item.remote_site - remote_progress = self.item.remote_progress - - for site, progress in {local_site: local_progress, - remote_site: remote_progress}.items(): - project = self.table_view.model().project - provider = self.sync_server.get_provider_for_site(project, - site) - if provider == 'local_drive': - if 'studio' in site: - txt = " studio version" - else: - txt = " local version" - action = QtWidgets.QAction("Open in explorer" + txt) - if progress == 1: - actions_mapping[action] = self._open_in_explorer - actions_kwargs_mapping[action] = {'site': site} - menu.addAction(action) - - if self.item.state == lib.STATUS[2]: - action = QtWidgets.QAction("Open error detail") - actions_mapping[action] = self._show_detail + if can_edit and ( + item.status in [lib.STATUS[0], lib.STATUS[1]] or is_multi): + action = QtWidgets.QAction("Pause in queue") + actions_mapping[action] = self._pause + # pause handles which 
site_name it will pause itself + action_kwarg_map[action] = {"selected_ids": self._selected_ids} menu.addAction(action) - if float(remote_progress) == 1.0: - action = QtWidgets.QAction("Re-sync active site") - actions_mapping[action] = self._reset_local_site + if can_edit and (item.status == lib.STATUS[3] or is_multi): + action = QtWidgets.QAction("Unpause in queue") + actions_mapping[action] = self._unpause + action_kwarg_map[action] = {"selected_ids": self._selected_ids} menu.addAction(action) - if float(local_progress) == 1.0: - action = QtWidgets.QAction("Re-sync remote site") - actions_mapping[action] = self._reset_remote_site - menu.addAction(action) - - if not actions_mapping: - action = QtWidgets.QAction("< No action >") - actions_mapping[action] = None - menu.addAction(action) - - result = menu.exec_(QtGui.QCursor.pos()) - if result: - to_run = actions_mapping[result] - to_run_kwargs = actions_kwargs_mapping.get(result, {}) - if to_run: - to_run(**to_run_kwargs) - - def _reset_local_site(self): - """ - Removes errors or success metadata for particular file >> forces - redo of upload/download - """ - self.sync_server.reset_provider_for_file( - self.table_view.model().project, - self.representation_id, - 'local', - self.item._id) - self.table_view.model().refresh( - load_records=self.table_view.model()._rec_loaded) - - def _reset_remote_site(self): - """ - Removes errors or success metadata for particular file >> forces - redo of upload/download - """ - self.sync_server.reset_provider_for_file( - self.table_view.model().project, - self.representation_id, - 'remote', - self.item._id) - self.table_view.model().refresh( - load_records=self.table_view.model()._rec_loaded) - - def _open_in_explorer(self, site): - if not self.item: - return - - fpath = self.item.path - project = self.project - fpath = self.sync_server.get_local_file_path(project, site, fpath) - - fpath = os.path.normpath(os.path.dirname(fpath)) - if os.path.isdir(fpath): - if 'win' in 
sys.platform: # windows - subprocess.Popen('explorer "%s"' % fpath) - elif sys.platform == 'darwin': # macOS - subprocess.Popen(['open', fpath]) - else: # linux - try: - subprocess.Popen(['xdg-open', fpath]) - except OSError: - raise OSError('unsupported xdg-open call??') - - def _save_scrollbar(self): - self._scrollbar_pos = self.table_view.verticalScrollBar().value() - - def _set_scrollbar(self): - if self._scrollbar_pos: - self.table_view.verticalScrollBar().setValue(self._scrollbar_pos) - - -class SyncRepresentationErrorWidget(QtWidgets.QWidget): - """ - Dialog to show when sync error happened, prints error message - """ - - def __init__(self, _id, dt, tries, msg, parent=None): - super(SyncRepresentationErrorWidget, self).__init__(parent) - - layout = QtWidgets.QHBoxLayout(self) - - txts = [] - txts.append("{}: {}".format("Last update date", pretty_timestamp(dt))) - txts.append("{}: {}".format("Retries", str(tries))) - txts.append("{}: {}".format("Error message", msg)) - - text_area = QtWidgets.QPlainTextEdit("\n\n".join(txts)) - text_area.setReadOnly(True) - layout.addWidget(text_area) - - -class ImageDelegate(QtWidgets.QStyledItemDelegate): - """ - Prints icon of site and progress of synchronization - """ - - def __init__(self, parent=None): - super(ImageDelegate, self).__init__(parent) - self.icons = {} - - def paint(self, painter, option, index): - super(ImageDelegate, self).paint(painter, option, index) - option = QtWidgets.QStyleOptionViewItem(option) - option.showDecorationSelected = True - - provider = index.data(lib.ProviderRole) - value = index.data(lib.ProgressRole) - date_value = index.data(lib.DateRole) - is_failed = index.data(lib.FailedRole) - - if not self.icons.get(provider): - resource_path = os.path.dirname(__file__) - resource_path = os.path.join(resource_path, "..", - "providers", "resources") - pix_url = "{}/{}.png".format(resource_path, provider) - pixmap = QtGui.QPixmap(pix_url) - self.icons[provider] = pixmap - else: - pixmap = 
self.icons[provider] - - padding = 10 - point = QtCore.QPoint(option.rect.x() + padding, - option.rect.y() + - (option.rect.height() - pixmap.height()) / 2) - painter.drawPixmap(point, pixmap) - - overlay_rect = option.rect.translated(0, 0) - overlay_rect.setHeight(overlay_rect.height() * (1.0 - float(value))) - painter.fillRect(overlay_rect, - QtGui.QBrush(QtGui.QColor(0, 0, 0, 100))) - text_rect = option.rect.translated(10, 0) - painter.drawText(text_rect, - QtCore.Qt.AlignCenter, - date_value) - - if is_failed: - overlay_rect = option.rect.translated(0, 0) - painter.fillRect(overlay_rect, - QtGui.QBrush(QtGui.QColor(255, 0, 0, 35))) + return action_kwarg_map, actions_mapping, menu class SyncServerDetailWindow(QtWidgets.QDialog): + """Wrapper window for SyncRepresentationDetailWidget + + Creates standalone window with list of files for selected repre_id. + """ def __init__(self, sync_server, _id, project, parent=None): log.debug( "!!! SyncServerDetailWindow _id:: {}".format(_id)) @@ -792,8 +606,171 @@ class SyncServerDetailWindow(QtWidgets.QDialog): self.setWindowTitle("Sync Representation Detail") +class SyncRepresentationDetailWidget(_SyncRepresentationWidget): + """ + Widget to display list of synchronizable files for single repre. 
+ + Args: + _id (str): representation _id + project (str): name of project with repre + parent (QDialog): SyncServerDetailWindow + """ + active_changed = QtCore.Signal() # active index changed + + default_widths = ( + ("file", 290), + ("local_site", 185), + ("remote_site", 185), + ("size", 60), + ("priority", 60), + ("status", 110) + ) + + def __init__(self, sync_server, _id=None, project=None, parent=None): + super(SyncRepresentationDetailWidget, self).__init__(parent) + + log.debug("Representation_id:{}".format(_id)) + self.project = project + + self.sync_server = sync_server + + self.representation_id = _id + self._selected_ids = set() + + self.txt_filter = QtWidgets.QLineEdit() + self.txt_filter.setPlaceholderText("Quick filter representation..") + self.txt_filter.setClearButtonEnabled(True) + self.txt_filter.addAction(qtawesome.icon("fa.filter", color="gray"), + QtWidgets.QLineEdit.LeadingPosition) + + self._scrollbar_pos = None + + top_bar_layout = QtWidgets.QHBoxLayout() + top_bar_layout.addWidget(self.txt_filter) + + table_view = QtWidgets.QTableView() + headers = [item[0] for item in self.default_widths] + + model = SyncRepresentationDetailModel(sync_server, headers, _id, + project) + model.is_running = True + + table_view.setModel(model) + table_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + table_view.setSelectionMode( + QtWidgets.QAbstractItemView.ExtendedSelection) + table_view.setSelectionBehavior( + QtWidgets.QTableView.SelectRows) + table_view.horizontalHeader().setSortIndicator(-1, Qt.AscendingOrder) + table_view.horizontalHeader().setSortIndicatorShown(True) + table_view.setAlternatingRowColors(True) + table_view.verticalHeader().hide() + + column = model.get_header_index("local_site") + delegate = delegates.ImageDelegate(self) + table_view.setItemDelegateForColumn(column, delegate) + + column = model.get_header_index("remote_site") + delegate = delegates.ImageDelegate(self) + table_view.setItemDelegateForColumn(column, delegate) + + if 
model.can_edit: + column = table_view.model().get_header_index("priority") + priority_delegate = delegates.PriorityDelegate(self) + table_view.setItemDelegateForColumn(column, priority_delegate) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addLayout(top_bar_layout) + layout.addWidget(table_view) + + self.model = model + + self.selection_model = table_view.selectionModel() + self.selection_model.selectionChanged.connect(self._selection_changed) + + horizontal_header = HorizontalHeader(self) + + table_view.setHorizontalHeader(horizontal_header) + table_view.setSortingEnabled(True) + + for column_name, width in self.default_widths: + idx = model.get_header_index(column_name) + table_view.setColumnWidth(idx, width) + + self.table_view = table_view + + self.txt_filter.textChanged.connect(lambda: model.set_word_filter( + self.txt_filter.text())) + table_view.doubleClicked.connect(self._double_clicked) + table_view.customContextMenuRequested.connect(self._on_context_menu) + + model.refresh_started.connect(self._save_scrollbar) + model.refresh_finished.connect(self._set_scrollbar) + model.modelReset.connect(self._set_selection) + + def _double_clicked(self, index): + """ + Opens representation dialog with all files after doubleclick + """ + # priority editing + if self.model.can_edit: + column_name = self.model.get_column(index.column()) + if column_name[0] in self.model.EDITABLE_COLUMNS: + self.model.is_editing = True + self.table_view.openPersistentEditor(index) + return + + def _show_detail(self, selected_ids=None): + """ + Shows windows with error message for failed sync of a file. 
+ """ + detail_window = SyncRepresentationErrorWindow(self.model, selected_ids) + + detail_window.exec() + + def _prepare_menu(self, item, is_multi, can_edit): + """Adds view (and model) dependent actions to default ones""" + action_kwarg_map, actions_mapping, menu = \ + super()._prepare_menu(item, is_multi, can_edit) + + if item.status == lib.STATUS[2] or is_multi: + action = QtWidgets.QAction("Open error detail") + actions_mapping[action] = self._show_detail + action_kwarg_map[action] = {"selected_ids": self._selected_ids} + + menu.addAction(action) + + return action_kwarg_map, actions_mapping, menu + + def _reset_site(self, selected_ids=None, site_name=None): + """ + Removes errors or success metadata for particular file >> forces + redo of upload/download + """ + for file_id in selected_ids: + item = lib.get_item_by_id(self.model, file_id) + check_progress = self._get_progress(item, site_name, True) + + # do not reset if opposite side is not fully there + if check_progress != 1: + log.debug("Not fully available {} on other side, skipping". 
+ format(check_progress)) + continue + + self.sync_server.reset_provider_for_file( + self.model.project, + self.representation_id, + site_name=site_name, + file_id=file_id, + force=True) + self.model.refresh( + load_records=self.model._rec_loaded) + + class SyncRepresentationErrorWindow(QtWidgets.QDialog): - def __init__(self, _id, project, dt, tries, msg, parent=None): + """Wrapper window to show errors during sync on file(s)""" + def __init__(self, model, selected_ids, parent=None): super(SyncRepresentationErrorWindow, self).__init__(parent) self.setWindowFlags(QtCore.Qt.Window) self.setFocusPolicy(QtCore.Qt.StrongFocus) @@ -804,7 +781,8 @@ class SyncRepresentationErrorWindow(QtWidgets.QDialog): body = QtWidgets.QWidget() - container = SyncRepresentationErrorWidget(_id, dt, tries, msg, + container = SyncRepresentationErrorWidget(model, + selected_ids, parent=self) body_layout = QtWidgets.QHBoxLayout(body) body_layout.addWidget(container) @@ -818,3 +796,299 @@ class SyncRepresentationErrorWindow(QtWidgets.QDialog): self.setLayout(body_layout) self.setWindowTitle("Sync Representation Error Detail") + + +class SyncRepresentationErrorWidget(QtWidgets.QWidget): + """ + Dialog to show when sync error happened, prints formatted error message + """ + def __init__(self, model, selected_ids, parent=None): + super(SyncRepresentationErrorWidget, self).__init__(parent) + + layout = QtWidgets.QVBoxLayout(self) + + no_errors = True + for file_id in selected_ids: + item = lib.get_item_by_id(model, file_id) + if not item.created_dt or not item.sync_dt or not item.error: + continue + + no_errors = False + dt = max(item.created_dt, item.sync_dt) + + txts = [] + txts.append("{}: {}
".format("Last update date", + pretty_timestamp(dt))) + txts.append("{}: {}
".format("Retries", + str(item.tries))) + txts.append("{}: {}
".format("Error message", + item.error)) + + text_area = QtWidgets.QTextEdit("\n\n".join(txts)) + text_area.setReadOnly(True) + layout.addWidget(text_area) + + if no_errors: + text_area = QtWidgets.QTextEdit() + text_area.setText("

No errors located

") + text_area.setReadOnly(True) + layout.addWidget(text_area) + + +class HorizontalHeader(QtWidgets.QHeaderView): + """Reiplemented QHeaderView to contain clickable changeable button""" + def __init__(self, parent=None): + super(HorizontalHeader, self).__init__(QtCore.Qt.Horizontal, parent) + self._parent = parent + self.checked_values = {} + + self.setModel(self._parent.model) + + self.setSectionsClickable(True) + + self.menu_items_dict = {} + self.menu = None + self.header_cells = [] + self.filter_buttons = {} + + self.filter_icon = qtawesome.icon("fa.filter", color="gray") + self.filter_set_icon = qtawesome.icon("fa.filter", color="white") + + self.init_layout() + + self._resetting = False + + @property + def model(self): + """Keep model synchronized with parent widget""" + return self._parent.model + + def init_layout(self): + """Initial preparation of header's content""" + for column_idx in range(self.model.columnCount()): + column_name, column_label = self.model.get_column(column_idx) + filter_rec = self.model.get_filters().get(column_name) + if not filter_rec: + continue + + icon = self.filter_icon + button = QtWidgets.QPushButton(icon, "", self) + + button.setFixedSize(24, 24) + button.setStyleSheet( + "QPushButton::menu-indicator{width:0px;}" + "QPushButton{border: none;background: transparent;}") + button.clicked.connect(partial(self._get_menu, + column_name, column_idx)) + button.setFlat(True) + self.filter_buttons[column_name] = button + + def showEvent(self, event): + """Paint header""" + super(HorizontalHeader, self).showEvent(event) + + for i in range(len(self.header_cells)): + cell_content = self.header_cells[i] + cell_content.setGeometry(self.sectionViewportPosition(i), 0, + self.sectionSize(i) - 1, self.height()) + + cell_content.show() + + def _set_filter_icon(self, column_name): + """Set different states of button depending on its engagement""" + button = self.filter_buttons.get(column_name) + if button: + if 
self.checked_values.get(column_name): + button.setIcon(self.filter_set_icon) + else: + button.setIcon(self.filter_icon) + + def _reset_filter(self, column_name): + """ + Remove whole column from filter >> not in $match at all (faster) + """ + self._resetting = True # mark changes to consume them + if self.checked_values.get(column_name) is not None: + self.checked_values.pop(column_name) + self._set_filter_icon(column_name) + self._filter_and_refresh_model_and_menu(column_name, True, True) + self._resetting = False + + def _apply_filter(self, column_name, values, state): + """ + Sets 'values' to specific 'state' (checked/unchecked), + sends to model. + """ + if self._resetting: # event triggered by _resetting, skip it + return + + self._update_checked_values(column_name, values, state) + self._set_filter_icon(column_name) + self._filter_and_refresh_model_and_menu(column_name, True, False) + + def _apply_text_filter(self, column_name, items, line_edit): + """ + Resets all checkboxes, prefers inserted text. + """ + le_text = line_edit.text() + self._update_checked_values(column_name, items, 0) # reset other + if self.checked_values.get(column_name) is not None or \ + le_text == '': + self.checked_values.pop(column_name) # reset during typing + + if le_text: + self._update_checked_values(column_name, {le_text: le_text}, 2) + self._set_filter_icon(column_name) + self._filter_and_refresh_model_and_menu(column_name, True, True) + + def _filter_and_refresh_model_and_menu(self, column_name, + model=True, menu=True): + """ + Refresh model and its content and possibly menu for big changes. 
+ """ + if model: + self.model.set_column_filtering(self.checked_values) + self.model.refresh() + if menu: + self._menu_refresh(column_name) + + def _get_menu(self, column_name, index): + """Prepares content of menu for 'column_name'""" + menu = QtWidgets.QMenu(self) + filter_rec = self.model.get_filters()[column_name] + self.menu_items_dict[column_name] = filter_rec.values() + + # text filtering only if labels same as values, not if codes are used + if 'text' in filter_rec.search_variants(): + line_edit = QtWidgets.QLineEdit(menu) + line_edit.setClearButtonEnabled(True) + line_edit.addAction(self.filter_icon, + QtWidgets.QLineEdit.LeadingPosition) + + line_edit.setFixedHeight(line_edit.height()) + txt = "" + if self.checked_values.get(column_name): + txt = list(self.checked_values.get(column_name).keys())[0] + line_edit.setText(txt) + + action_le = QtWidgets.QWidgetAction(menu) + action_le.setDefaultWidget(line_edit) + line_edit.textChanged.connect( + partial(self._apply_text_filter, column_name, + filter_rec.values(), line_edit)) + menu.addAction(action_le) + menu.addSeparator() + + if 'checkbox' in filter_rec.search_variants(): + action_all = QtWidgets.QAction("All", self) + action_all.triggered.connect(partial(self._reset_filter, + column_name)) + menu.addAction(action_all) + + action_none = QtWidgets.QAction("Unselect all", self) + state_unchecked = 0 + action_none.triggered.connect(partial(self._apply_filter, + column_name, + filter_rec.values(), + state_unchecked)) + menu.addAction(action_none) + menu.addSeparator() + + # nothing explicitly >> ALL implicitly >> first time + if self.checked_values.get(column_name) is None: + checked_keys = self.menu_items_dict[column_name].keys() + else: + checked_keys = self.checked_values[column_name] + + for value, label in self.menu_items_dict[column_name].items(): + checkbox = QtWidgets.QCheckBox(str(label), menu) + + # temp + checkbox.setStyleSheet("QCheckBox{spacing: 5px;" + "padding:5px 5px 5px 5px;}") + if value in 
checked_keys: + checkbox.setChecked(True) + + action = QtWidgets.QWidgetAction(menu) + action.setDefaultWidget(checkbox) + + checkbox.stateChanged.connect(partial(self._apply_filter, + column_name, {value: label})) + menu.addAction(action) + + self.menu = menu + + self._show_menu(index, menu) + + def _show_menu(self, index, menu): + """Shows 'menu' under header column of 'index'""" + global_pos_point = self.mapToGlobal( + QtCore.QPoint(self.sectionViewportPosition(index), 0)) + menu.setMinimumWidth(self.sectionSize(index)) + menu.setMinimumHeight(self.height()) + menu.exec_(QtCore.QPoint(global_pos_point.x(), + global_pos_point.y() + self.height())) + + def _menu_refresh(self, column_name): + """ + Reset boxes after big change - word filtering or reset + """ + for action in self.menu.actions(): + if not isinstance(action, QtWidgets.QWidgetAction): + continue + + widget = action.defaultWidget() + if not isinstance(widget, QtWidgets.QCheckBox): + continue + + if not self.checked_values.get(column_name) or \ + widget.text() in self.checked_values[column_name].values(): + widget.setChecked(True) + else: + widget.setChecked(False) + + def _update_checked_values(self, column_name, values, state): + """ + Modify dictionary of set values in columns for filtering. 
+ + Modifies 'self.checked_values' + """ + copy_menu_items = dict(self.menu_items_dict[column_name]) + checked = self.checked_values.get(column_name, copy_menu_items) + set_items = dict(values.items()) # prevent dict change during loop + for value, label in set_items.items(): + if state == 2 and label: # checked + checked[value] = label + elif state == 0 and checked.get(value): + checked.pop(value) + + self.checked_values[column_name] = checked + + def paintEvent(self, event): + self._fix_size() + super(HorizontalHeader, self).paintEvent(event) + + def _fix_size(self): + for column_idx in range(self.model.columnCount()): + vis_index = self.visualIndex(column_idx) + index = self.logicalIndex(vis_index) + section_width = self.sectionSize(index) + + column_name = self.model.headerData(column_idx, + QtCore.Qt.Horizontal, + lib.HeaderNameRole) + button = self.filter_buttons.get(column_name) + if not button: + continue + + pos_x = self.sectionViewportPosition( + index) + section_width - self.height() + + pos_y = 0 + if button.height() < self.height(): + pos_y = int((self.height() - button.height()) / 2) + button.setGeometry( + pos_x, + pos_y, + self.height(), + self.height()) diff --git a/openpype/modules/sync_server/utils.py b/openpype/modules/sync_server/utils.py index 36f3444399..d4fc29ff8a 100644 --- a/openpype/modules/sync_server/utils.py +++ b/openpype/modules/sync_server/utils.py @@ -3,6 +3,11 @@ from openpype.api import Logger log = Logger().get_logger("SyncServer") +class ResumableError(Exception): + """Error which could be temporary, skip current loop, try next time""" + pass + + class SyncStatus: DO_NOTHING = 0 DO_UPLOAD = 1 @@ -28,3 +33,9 @@ def time_function(method): return result return timed + + +class EditableScopes: + SYSTEM = 0 + PROJECT = 1 + LOCAL = 2 diff --git a/openpype/modules/user/__init__.py b/openpype/modules/user/__init__.py deleted file mode 100644 index a97ac0eef6..0000000000 --- a/openpype/modules/user/__init__.py +++ /dev/null @@ -1,10 
+0,0 @@ -from .user_module import ( - UserModule, - IUserModule -) - - -__all__ = ( - "UserModule", - "IUserModule" -) diff --git a/openpype/modules/user/rest_api.py b/openpype/modules/user/rest_api.py deleted file mode 100644 index 566425a19b..0000000000 --- a/openpype/modules/user/rest_api.py +++ /dev/null @@ -1,35 +0,0 @@ -import json -from aiohttp.web_response import Response - - -class UserModuleRestApi: - def __init__(self, user_module, server_manager): - self.module = user_module - self.server_manager = server_manager - - self.prefix = "/user" - - self.register() - - def register(self): - self.server_manager.add_route( - "GET", - self.prefix + "/username", - self.get_username - ) - self.server_manager.add_route( - "GET", - self.prefix + "/show_widget", - self.show_user_widget - ) - - async def get_username(self, request): - return Response( - status=200, - body=json.dumps(self.module.cred, indent=4), - content_type="application/json" - ) - - async def show_user_widget(self, request): - self.module.action_show_widget.trigger() - return Response(status=200) diff --git a/openpype/modules/user/user_module.py b/openpype/modules/user/user_module.py deleted file mode 100644 index 7d257f1781..0000000000 --- a/openpype/modules/user/user_module.py +++ /dev/null @@ -1,169 +0,0 @@ -import os -import json -import getpass - -from abc import ABCMeta, abstractmethod - -import six -import appdirs - -from .. 
import ( - PypeModule, - ITrayModule, - IWebServerRoutes -) - - -@six.add_metaclass(ABCMeta) -class IUserModule: - """Interface for other modules to use user change callbacks.""" - - @abstractmethod - def on_pype_user_change(self, username): - """What should happen on Pype user change.""" - pass - - -class UserModule(PypeModule, ITrayModule, IWebServerRoutes): - cred_folder_path = os.path.normpath( - appdirs.user_data_dir('pype-app', 'pype') - ) - cred_filename = 'user_info.json' - env_name = "OPENPYPE_USERNAME" - - name = "user" - - def initialize(self, modules_settings): - user_settings = modules_settings[self.name] - self.enabled = user_settings["enabled"] - - self.callbacks_on_user_change = [] - self.cred = {} - self.cred_path = os.path.normpath(os.path.join( - self.cred_folder_path, self.cred_filename - )) - - # Tray attributes - self.widget_login = None - self.action_show_widget = None - - self.rest_api_obj = None - - def tray_init(self): - from .widget_user import UserWidget - self.widget_login = UserWidget(self) - - self.load_credentials() - - def register_callback_on_user_change(self, callback): - self.callbacks_on_user_change.append(callback) - - def tray_start(self): - """Store credentials to env and preset them to widget""" - username = "" - if self.cred: - username = self.cred.get("username") or "" - - os.environ[self.env_name] = username - self.widget_login.set_user(username) - - def tray_exit(self): - """Nothing special for User.""" - return - - def get_user(self): - return self.cred.get("username") or getpass.getuser() - - def webserver_initialization(self, server_manager): - """Implementation of IWebServerRoutes interface.""" - from .rest_api import UserModuleRestApi - - self.rest_api_obj = UserModuleRestApi(self, server_manager) - - def connect_with_modules(self, enabled_modules): - for module in enabled_modules: - if isinstance(module, IUserModule): - self.callbacks_on_user_change.append( - module.on_pype_user_change - ) - - # Definition of Tray 
menu - def tray_menu(self, parent_menu): - from Qt import QtWidgets - """Add menu or action to Tray(or parent)'s menu""" - action = QtWidgets.QAction("Username", parent_menu) - action.triggered.connect(self.show_widget) - parent_menu.addAction(action) - parent_menu.addSeparator() - - self.action_show_widget = action - - def load_credentials(self): - """Get credentials from JSON file """ - credentials = {} - try: - file = open(self.cred_path, "r") - credentials = json.load(file) - file.close() - - self.cred = credentials - username = credentials.get("username") - if username: - self.log.debug("Loaded Username \"{}\"".format(username)) - else: - self.log.debug("Pype Username is not set") - - return credentials - - except FileNotFoundError: - return self.save_credentials(getpass.getuser()) - - except json.decoder.JSONDecodeError: - self.log.warning(( - "File where users credentials should be stored" - " has invalid json format. Loading system username." - )) - return self.save_credentials(getpass.getuser()) - - def change_credentials(self, username): - self.save_credentials(username) - for callback in self.callbacks_on_user_change: - try: - callback(username) - except Exception: - self.log.warning( - "Failed to execute callback \"{}\".".format( - str(callback) - ), - exc_info=True - ) - - def save_credentials(self, username): - """Save credentials to JSON file, env and widget""" - if username is None: - username = "" - - username = str(username).strip() - - self.cred = {"username": username} - os.environ[self.env_name] = username - if self.widget_login: - self.widget_login.set_user(username) - try: - file = open(self.cred_path, "w") - file.write(json.dumps(self.cred)) - file.close() - self.log.debug("Username \"{}\" stored".format(username)) - except Exception: - self.log.error( - "Could not store username to file \"{}\"".format( - self.cred_path - ), - exc_info=True - ) - - return self.cred - - def show_widget(self): - """Show dialog to enter credentials""" - 
self.widget_login.show() diff --git a/openpype/modules/user/widget_user.py b/openpype/modules/user/widget_user.py deleted file mode 100644 index f8ecadf56b..0000000000 --- a/openpype/modules/user/widget_user.py +++ /dev/null @@ -1,88 +0,0 @@ -from Qt import QtCore, QtGui, QtWidgets -from avalon import style -from openpype import resources - - -class UserWidget(QtWidgets.QWidget): - - MIN_WIDTH = 300 - - def __init__(self, module): - - super(UserWidget, self).__init__() - - self.module = module - - # Style - icon = QtGui.QIcon(resources.pype_icon_filepath()) - self.setWindowIcon(icon) - self.setWindowTitle("Username Settings") - self.setMinimumWidth(self.MIN_WIDTH) - self.setStyleSheet(style.load_stylesheet()) - - self.setWindowFlags( - QtCore.Qt.WindowCloseButtonHint | - QtCore.Qt.WindowMinimizeButtonHint - ) - - self.setLayout(self._main()) - - def show(self, *args, **kwargs): - super().show(*args, **kwargs) - # Move widget to center of active screen on show - screen = QtWidgets.QApplication.desktop().screen() - screen_center = lambda self: ( - screen.rect().center() - self.rect().center() - ) - self.move(screen_center(self)) - - def _main(self): - main_layout = QtWidgets.QVBoxLayout() - - form_layout = QtWidgets.QFormLayout() - form_layout.setContentsMargins(10, 15, 10, 5) - - label_username = QtWidgets.QLabel("Username:") - label_username.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor)) - label_username.setTextFormat(QtCore.Qt.RichText) - - input_username = QtWidgets.QLineEdit() - input_username.setPlaceholderText( - QtCore.QCoreApplication.translate("main", "e.g. 
John Smith") - ) - - form_layout.addRow(label_username, input_username) - - btn_save = QtWidgets.QPushButton("Save") - btn_save.clicked.connect(self.click_save) - - btn_cancel = QtWidgets.QPushButton("Cancel") - btn_cancel.clicked.connect(self.close) - - btn_group = QtWidgets.QHBoxLayout() - btn_group.addStretch(1) - btn_group.addWidget(btn_save) - btn_group.addWidget(btn_cancel) - - main_layout.addLayout(form_layout) - main_layout.addLayout(btn_group) - - self.input_username = input_username - - return main_layout - - def set_user(self, username): - self.input_username.setText(username) - - def click_save(self): - # all what should happen - validations and saving into appsdir - username = self.input_username.text() - self.module.change_credentials(username) - self._close_widget() - - def closeEvent(self, event): - event.ignore() - self._close_widget() - - def _close_widget(self): - self.hide() diff --git a/openpype/plugins/load/delete_old_versions.py b/openpype/plugins/load/delete_old_versions.py index 8e3999e9c4..263c534b64 100644 --- a/openpype/plugins/load/delete_old_versions.py +++ b/openpype/plugins/load/delete_old_versions.py @@ -14,10 +14,15 @@ import avalon.pipeline from openpype.api import Anatomy -class DeleteOldVersions(api.Loader): +class DeleteOldVersions(api.SubsetLoader): """Deletes specific number of old version""" + + is_multiple_contexts_compatible = True + sequence_splitter = "__sequence_splitter__" + representations = ["*"] families = ["*"] + tool_names = ["library_loader"] label = "Delete Old Versions" order = 35 @@ -259,9 +264,11 @@ class DeleteOldVersions(api.Loader): ) if not version_ids: - msg = "Skipping processing. Nothing to delete." + msg = "Skipping processing. 
Nothing to delete on {}/{}".format( + asset["name"], subset["name"] + ) self.log.info(msg) - self.message(msg) + print(msg) return repres = list(self.dbcon.find({ @@ -397,25 +404,30 @@ class DeleteOldVersions(api.Loader): self.log.error(msg) self.message(msg) - msg = "Total size of files: " + self.sizeof_fmt(size) - self.log.info(msg) - self.message(msg) + return size - def load(self, context, name=None, namespace=None, options=None): + def load(self, contexts, name=None, namespace=None, options=None): try: - versions_to_keep = 2 - remove_publish_folder = False - if options: - versions_to_keep = options.get( - "versions_to_keep", versions_to_keep - ) - remove_publish_folder = options.get( - "remove_publish_folder", remove_publish_folder - ) + size = 0 + for count, context in enumerate(contexts): + versions_to_keep = 2 + remove_publish_folder = False + if options: + versions_to_keep = options.get( + "versions_to_keep", versions_to_keep + ) + remove_publish_folder = options.get( + "remove_publish_folder", remove_publish_folder + ) - data = self.get_data(context, versions_to_keep) + data = self.get_data(context, versions_to_keep) - self.main(data, remove_publish_folder) + size += self.main(data, remove_publish_folder) + print("Progressing {}/{}".format(count + 1, len(contexts))) + + msg = "Total size of files: " + self.sizeof_fmt(size) + self.log.info(msg) + self.message(msg) except Exception: self.log.error("Failed to delete versions.", exc_info=True) @@ -425,6 +437,7 @@ class CalculateOldVersions(DeleteOldVersions): """Calculate file size of old versions""" label = "Calculate Old Versions" order = 30 + tool_names = ["library_loader"] options = [ qargparse.Integer( @@ -438,6 +451,9 @@ class CalculateOldVersions(DeleteOldVersions): def main(self, data, remove_publish_folder): size = 0 + if not data: + return size + if remove_publish_folder: size = self.delete_whole_dir_paths( data["dir_paths"].values(), delete=False @@ -447,6 +463,4 @@ class 
CalculateOldVersions(DeleteOldVersions): data["dir_paths"], data["file_paths_by_dir"], delete=False ) - msg = "Total size of files: " + self.sizeof_fmt(size) - self.log.info(msg) - self.message(msg) + return size diff --git a/openpype/plugins/publish/collect_current_pype_user.py b/openpype/plugins/publish/collect_current_pype_user.py index de4e950d56..1a52a59012 100644 --- a/openpype/plugins/publish/collect_current_pype_user.py +++ b/openpype/plugins/publish/collect_current_pype_user.py @@ -1,6 +1,7 @@ import os import getpass import pyblish.api +from openpype.lib import get_openpype_username class CollectCurrentUserPype(pyblish.api.ContextPlugin): @@ -11,9 +12,6 @@ class CollectCurrentUserPype(pyblish.api.ContextPlugin): label = "Collect Pype User" def process(self, context): - user = os.getenv("OPENPYPE_USERNAME", "").strip() - if not user: - user = context.data.get("user", getpass.getuser()) - + user = get_openpype_username() context.data["user"] = user - self.log.debug("Colected user \"{}\"".format(user)) + self.log.debug("Collected user \"{}\"".format(user)) diff --git a/openpype/plugins/publish/collect_hierarchy.py b/openpype/plugins/publish/collect_hierarchy.py index 390ce443b6..1aa10fcb9b 100644 --- a/openpype/plugins/publish/collect_hierarchy.py +++ b/openpype/plugins/publish/collect_hierarchy.py @@ -15,7 +15,7 @@ class CollectHierarchy(pyblish.api.ContextPlugin): label = "Collect Hierarchy" order = pyblish.api.CollectorOrder - 0.57 families = ["shot"] - hosts = ["resolve"] + hosts = ["resolve", "hiero"] def process(self, context): temp_context = {} diff --git a/openpype/plugins/publish/collect_otio_frame_ranges.py b/openpype/plugins/publish/collect_otio_frame_ranges.py index 53cc249033..e1b8b95a46 100644 --- a/openpype/plugins/publish/collect_otio_frame_ranges.py +++ b/openpype/plugins/publish/collect_otio_frame_ranges.py @@ -20,7 +20,7 @@ class CollectOcioFrameRanges(pyblish.api.InstancePlugin): label = "Collect OTIO Frame Ranges" order = 
pyblish.api.CollectorOrder - 0.58 families = ["shot", "clip"] - hosts = ["resolve"] + hosts = ["resolve", "hiero"] def process(self, instance): # get basic variables diff --git a/openpype/plugins/publish/collect_otio_review.py b/openpype/plugins/publish/collect_otio_review.py index 0c7eeaea44..4243926ba3 100644 --- a/openpype/plugins/publish/collect_otio_review.py +++ b/openpype/plugins/publish/collect_otio_review.py @@ -4,7 +4,7 @@ Requires: context -> otioTimeline Optional: - otioClip.metadata -> masterLayer + instance -> reviewTrack Provides: instance -> otioReviewClips @@ -22,16 +22,16 @@ class CollectOcioReview(pyblish.api.InstancePlugin): label = "Collect OTIO Review" order = pyblish.api.CollectorOrder - 0.57 families = ["clip"] - hosts = ["resolve"] + hosts = ["resolve", "hiero"] def process(self, instance): # get basic variables - otio_review_clips = list() + otio_review_clips = [] otio_timeline = instance.context.data["otioTimeline"] otio_clip = instance.data["otioClip"] # optionally get `reviewTrack` - review_track_name = otio_clip.metadata.get("reviewTrack") + review_track_name = instance.data.get("reviewTrack") # generate range in parent otio_tl_range = otio_clip.range_in_parent() @@ -88,6 +88,7 @@ class CollectOcioReview(pyblish.api.InstancePlugin): otio_review_clips.append(otio_gap) if otio_review_clips: + instance.data["label"] += " (review)" instance.data["families"] += ["review", "ftrack"] instance.data["otioReviewClips"] = otio_review_clips self.log.info( diff --git a/openpype/plugins/publish/collect_otio_subset_resources.py b/openpype/plugins/publish/collect_otio_subset_resources.py index a0c6b9339b..cebfc90630 100644 --- a/openpype/plugins/publish/collect_otio_subset_resources.py +++ b/openpype/plugins/publish/collect_otio_subset_resources.py @@ -19,9 +19,13 @@ class CollectOcioSubsetResources(pyblish.api.InstancePlugin): label = "Collect OTIO Subset Resources" order = pyblish.api.CollectorOrder - 0.57 families = ["clip"] - hosts = ["resolve"] + 
hosts = ["resolve", "hiero"] def process(self, instance): + + if "audio" in instance.data["family"]: + return + if not instance.data.get("representations"): instance.data["representations"] = list() version_data = dict() @@ -48,8 +52,8 @@ class CollectOcioSubsetResources(pyblish.api.InstancePlugin): trimmed_media_range) a_frame_start, a_frame_end = openpype.lib.otio_range_to_frame_range( otio_avalable_range) - a_frame_start_h, a_frame_end_h = openpype.lib.otio_range_to_frame_range( - trimmed_media_range_h) + a_frame_start_h, a_frame_end_h = openpype.lib.\ + otio_range_to_frame_range(trimmed_media_range_h) # fix frame_start and frame_end frame to be in range of media if a_frame_start_h < a_frame_start: @@ -80,6 +84,7 @@ class CollectOcioSubsetResources(pyblish.api.InstancePlugin): media_ref = otio_clip.media_reference metadata = media_ref.metadata + is_sequence = None # check in two way if it is sequence if hasattr(otio.schema, "ImageSequenceReference"): # for OpenTimelineIO 0.13 and newer @@ -116,7 +121,7 @@ class CollectOcioSubsetResources(pyblish.api.InstancePlugin): # `ImageSequenceReference` path = media_ref.target_url collection_data = openpype.lib.make_sequence_collection( - path, trimmed_media_range, metadata) + path, trimmed_media_range_h, metadata) self.staging_dir, collection = collection_data self.log.debug(collection) @@ -126,7 +131,7 @@ class CollectOcioSubsetResources(pyblish.api.InstancePlugin): dirname, filename = os.path.split(media_ref.target_url) self.staging_dir = dirname - self.log.debug(path) + self.log.debug(filename) repre = self._create_representation( frame_start, frame_end, file=filename) diff --git a/openpype/plugins/publish/collect_scene_version.py b/openpype/plugins/publish/collect_scene_version.py index f58bd0dd9d..669e6752f3 100644 --- a/openpype/plugins/publish/collect_scene_version.py +++ b/openpype/plugins/publish/collect_scene_version.py @@ -19,7 +19,7 @@ class CollectSceneVersion(pyblish.api.ContextPlugin): if "unreal" in 
pyblish.api.registered_hosts(): return - assert context.data.get('currentFile'), "Cannot get curren file" + assert context.data.get('currentFile'), "Cannot get current file" filename = os.path.basename(context.data.get('currentFile')) if '' in filename: diff --git a/openpype/plugins/publish/extract_burnin.py b/openpype/plugins/publish/extract_burnin.py index e266c39714..76f6ffc608 100644 --- a/openpype/plugins/publish/extract_burnin.py +++ b/openpype/plugins/publish/extract_burnin.py @@ -1,19 +1,25 @@ import os import re +import subprocess import json import copy import tempfile +import platform +import shutil + +import clique +import six +import pyblish import openpype import openpype.api -import pyblish from openpype.lib import ( get_pype_execute_args, should_decompress, get_decompress_dir, - decompress + decompress, + CREATE_NO_WINDOW ) -import shutil class ExtractBurnin(openpype.api.Extractor): @@ -48,18 +54,17 @@ class ExtractBurnin(openpype.api.Extractor): ] # Default options for burnins for cases that are not set in presets. default_options = { - "opacity": 1, - "x_offset": 5, - "y_offset": 5, + "font_size": 42, + "font_color": [255, 255, 255, 255], + "bg_color": [0, 0, 0, 127], "bg_padding": 5, - "bg_opacity": 0.5, - "font_size": 42 + "x_offset": 5, + "y_offset": 5 } # Preset attributes profiles = None options = None - fields = None def process(self, instance): # ffmpeg doesn't support multipart exrs @@ -103,7 +108,7 @@ class ExtractBurnin(openpype.api.Extractor): return # Pre-filter burnin definitions by instance families - burnin_defs = self.filter_burnins_by_families(profile, instance) + burnin_defs = self.filter_burnins_defs(profile, instance) if not burnin_defs: self.log.info(( "Skipped instance. 
Burnin definitions are not set for profile" @@ -111,19 +116,7 @@ class ExtractBurnin(openpype.api.Extractor): ).format(host_name, family, task_name, profile)) return - # Prepare burnin options - profile_options = copy.deepcopy(self.default_options) - for key, value in (self.options or {}).items(): - if value is not None: - profile_options[key] = value - - # Prepare global burnin values from presets - profile_burnins = {} - for key, value in (self.fields or {}).items(): - key_low = key.lower() - if key_low in self.positions: - if value is not None: - profile_burnins[key_low] = value + burnin_options = self._get_burnin_options() # Prepare basic data for processing _burnin_data, _temp_data = self.prepare_basic_data(instance) @@ -134,11 +127,6 @@ class ExtractBurnin(openpype.api.Extractor): # [pype executable, *pype script, "run"] executable_args = get_pype_execute_args("run", scriptpath) - # Environments for script process - env = os.environ.copy() - # pop PYTHONPATH - env.pop("PYTHONPATH", None) - for idx, repre in enumerate(tuple(instance.data["representations"])): self.log.debug("repre ({}): `{}`".format(idx + 1, repre["name"])) if not self.repres_is_valid(repre): @@ -184,26 +172,11 @@ class ExtractBurnin(openpype.api.Extractor): elif "ftrackreview" in new_repre["tags"]: new_repre["tags"].remove("ftrackreview") - burnin_options = copy.deepcopy(profile_options) - burnin_values = copy.deepcopy(profile_burnins) - - # Options overrides - for key, value in (burnin_def.get("options") or {}).items(): - # Set or override value if is valid - if value is not None: - burnin_options[key] = value - - # Burnin values overrides - for key, value in burnin_def.items(): - key_low = key.lower() - if key_low in self.positions: - if value is not None: - # Set or override value if is valid - burnin_values[key_low] = value - - elif key_low in burnin_values: - # Pop key if value is set to None (null in json) - burnin_values.pop(key_low) + burnin_values = {} + for key in self.positions: + 
value = burnin_def.get(key) + if value: + burnin_values[key] = value # Remove "delete" tag from new representation if "delete" in new_repre["tags"]: @@ -214,7 +187,8 @@ class ExtractBurnin(openpype.api.Extractor): # able have multiple outputs in case of more burnin presets # Join previous "outputName" with filename suffix new_name = "_".join( - [new_repre["outputName"], filename_suffix]) + [new_repre["outputName"], filename_suffix] + ) new_repre["name"] = new_name new_repre["outputName"] = new_name @@ -246,8 +220,10 @@ class ExtractBurnin(openpype.api.Extractor): "input": temp_data["full_input_path"], "output": temp_data["full_output_path"], "burnin_data": burnin_data, - "options": burnin_options, - "values": burnin_values + "options": copy.deepcopy(burnin_options), + "values": burnin_values, + "full_input_path": temp_data["full_input_paths"][0], + "first_frame": temp_data["first_frame"] } self.log.debug( @@ -273,10 +249,14 @@ class ExtractBurnin(openpype.api.Extractor): self.log.debug("Executing: {}".format(" ".join(args))) # Run burnin script - openpype.api.run_subprocess( - args, shell=True, logger=self.log, env=env - ) + process_kwargs = { + "logger": self.log, + "env": {} + } + if platform.system().lower() == "windows": + process_kwargs["creationflags"] = CREATE_NO_WINDOW + openpype.api.run_subprocess(args, **process_kwargs) # Remove the temporary json os.remove(temporary_json_filepath) @@ -301,6 +281,57 @@ class ExtractBurnin(openpype.api.Extractor): if do_decompress and os.path.exists(decompressed_dir): shutil.rmtree(decompressed_dir) + def _get_burnin_options(self): + # Prepare burnin options + burnin_options = copy.deepcopy(self.default_options) + if self.options: + for key, value in self.options.items(): + if value is not None: + burnin_options[key] = copy.deepcopy(value) + + # Convert colors defined as list of numbers RGBA (0-255) + # BG Color + bg_color = burnin_options.get("bg_color") + if bg_color and isinstance(bg_color, list): + bg_red, bg_green, 
bg_blue, bg_alpha = bg_color + bg_color_hex = "#{0:0>2X}{1:0>2X}{2:0>2X}".format( + bg_red, bg_green, bg_blue + ) + bg_color_alpha = float(bg_alpha) / 255 + burnin_options["bg_opacity"] = bg_color_alpha + burnin_options["bg_color"] = bg_color_hex + + # FG Color + font_color = burnin_options.get("font_color") + if font_color and isinstance(font_color, list): + fg_red, fg_green, fg_blue, fg_alpha = font_color + fg_color_hex = "#{0:0>2X}{1:0>2X}{2:0>2X}".format( + fg_red, fg_green, fg_blue + ) + fg_color_alpha = float(fg_alpha) / 255 + burnin_options["opacity"] = fg_color_alpha + burnin_options["font_color"] = fg_color_hex + + # Define font filepath + # - font filepath may be defined in settings + font_filepath = burnin_options.get("font_filepath") + if font_filepath and isinstance(font_filepath, dict): + sys_name = platform.system().lower() + font_filepath = font_filepath.get(sys_name) + + if font_filepath and isinstance(font_filepath, six.string_types): + font_filepath = font_filepath.format(**os.environ) + if not os.path.exists(font_filepath): + font_filepath = None + + # Use OpenPype default font + if not font_filepath: + font_filepath = openpype.api.resources.get_liberation_font_path() + + burnin_options["font"] = font_filepath + + return burnin_options + def prepare_basic_data(self, instance): """Pick data from instance for processing and for burnin strings. @@ -419,23 +450,15 @@ class ExtractBurnin(openpype.api.Extractor): list: Containg all burnin definitions matching entered tags. 
""" filtered_burnins = {} - repre_tags_low = [tag.lower() for tag in tags] + repre_tags_low = set(tag.lower() for tag in tags) for filename_suffix, burnin_def in burnin_defs.items(): valid = True - output_filters = burnin_def.get("filter") - if output_filters: + tag_filters = burnin_def["filter"]["tags"] + if tag_filters: # Check tag filters - tag_filters = output_filters.get("tags") - if tag_filters: - tag_filters_low = [tag.lower() for tag in tag_filters] - valid = False - for tag in repre_tags_low: - if tag in tag_filters_low: - valid = True - break + tag_filters_low = set(tag.lower() for tag in tag_filters) - if not valid: - continue + valid = bool(repre_tags_low & tag_filters_low) if valid: filtered_burnins[filename_suffix] = burnin_def @@ -461,32 +484,47 @@ class ExtractBurnin(openpype.api.Extractor): None: This is processing method. """ # TODO we should find better way to know if input is sequence - is_sequence = ( - "sequence" in new_repre["tags"] - and isinstance(new_repre["files"], (tuple, list)) - ) + input_filenames = new_repre["files"] + is_sequence = False + if isinstance(input_filenames, (tuple, list)): + if len(input_filenames) > 1: + is_sequence = True + + # Sequence must have defined first frame + # - not used if input is not a sequence + first_frame = None if is_sequence: - input_filename = new_repre["sequence_file"] - else: - input_filename = new_repre["files"] + collections, _ = clique.assemble(input_filenames) + if not collections: + is_sequence = False + else: + input_filename = new_repre["sequence_file"] + collection = collections[0] + indexes = list(collection.indexes) + padding = len(str(max(indexes))) + head = collection.format("{head}") + tail = collection.format("{tail}") + output_filename = "{}%{:0>2}d{}{}".format( + head, padding, filename_suffix, tail + ) + repre_files = [] + for idx in indexes: + repre_files.append(output_filename % idx) - filepart_start, ext = os.path.splitext(input_filename) - dir_path, basename = 
os.path.split(filepart_start) + first_frame = min(indexes) - if is_sequence: - # NOTE modified to keep name when multiple dots are in name - basename_parts = basename.split(".") - frame_part = basename_parts.pop(-1) + if not is_sequence: + input_filename = input_filenames + if isinstance(input_filename, (tuple, list)): + input_filename = input_filename[0] - basename_start = ".".join(basename_parts) + filename_suffix - new_basename = ".".join((basename_start, frame_part)) - output_filename = new_basename + ext - - else: + filepart_start, ext = os.path.splitext(input_filename) + dir_path, basename = os.path.split(filepart_start) output_filename = basename + filename_suffix + ext + if dir_path: + output_filename = os.path.join(dir_path, output_filename) - if dir_path: - output_filename = os.path.join(dir_path, output_filename) + repre_files = output_filename stagingdir = new_repre["stagingDir"] full_input_path = os.path.join( @@ -498,6 +536,9 @@ class ExtractBurnin(openpype.api.Extractor): temp_data["full_input_path"] = full_input_path temp_data["full_output_path"] = full_output_path + temp_data["first_frame"] = first_frame + + new_repre["files"] = repre_files self.log.debug("full_input_path: {}".format(full_input_path)) self.log.debug("full_output_path: {}".format(full_output_path)) @@ -505,17 +546,16 @@ class ExtractBurnin(openpype.api.Extractor): # Prepare full paths to input files and filenames for reprensetation full_input_paths = [] if is_sequence: - repre_files = [] - for frame_index in range(1, temp_data["duration"] + 1): - repre_files.append(output_filename % frame_index) - full_input_paths.append(full_input_path % frame_index) + for filename in input_filenames: + filepath = os.path.join( + os.path.normpath(stagingdir), filename + ).replace("\\", "/") + full_input_paths.append(filepath) else: full_input_paths.append(full_input_path) - repre_files = output_filename temp_data["full_input_paths"] = full_input_paths - new_repre["files"] = repre_files def 
prepare_repre_data(self, instance, repre, burnin_data, temp_data): """Prepare data for representation. @@ -694,17 +734,16 @@ class ExtractBurnin(openpype.api.Extractor): final_profile.pop("__value__") return final_profile - def filter_burnins_by_families(self, profile, instance): - """Filter outputs that are not supported for instance families. + def filter_burnins_defs(self, profile, instance): + """Filter outputs by their values from settings. - Output definitions without families filter are marked as valid. + Output definitions with at least one value are marked as valid. Args: profile (dict): Profile from presets matching current context. - families (list): All families of current instance. Returns: - list: Containg all output definitions matching entered families. + list: Containg all valid output definitions. """ filtered_burnin_defs = {} @@ -712,21 +751,52 @@ class ExtractBurnin(openpype.api.Extractor): if not burnin_defs: return filtered_burnin_defs - # Prepare families families = self.families_from_instance(instance) - families = [family.lower() for family in families] + low_families = [family.lower() for family in families] - for filename_suffix, burnin_def in burnin_defs.items(): - burnin_filter = burnin_def.get("filter") - # When filters not set then skip filtering process - if burnin_filter: - families_filters = burnin_filter.get("families") - if not self.families_filter_validation( - families, families_filters - ): - continue + for filename_suffix, orig_burnin_def in burnin_defs.items(): + burnin_def = copy.deepcopy(orig_burnin_def) + def_filter = burnin_def.get("filter", None) or {} + for key in ("families", "tags"): + if key not in def_filter: + def_filter[key] = [] + + families_filters = def_filter["families"] + if not self.families_filter_validation( + low_families, families_filters + ): + self.log.debug(( + "Skipped burnin definition \"{}\". 
Family" + " fiters ({}) does not match current instance families: {}" + ).format( + filename_suffix, str(families_filters), str(families) + )) + continue + + # Burnin values + burnin_values = {} + for key, value in tuple(burnin_def.items()): + key_low = key.lower() + if key_low in self.positions and value: + burnin_values[key_low] = value + + # Skip processing if burnin values are not set + if not burnin_values: + self.log.warning(( + "Burnin values for Burnin definition \"{}\"" + " are not filled. Definition will be skipped." + " Origin value: {}" + ).format(filename_suffix, str(orig_burnin_def))) + continue + + burnin_values["filter"] = def_filter + + filtered_burnin_defs[filename_suffix] = burnin_values + + self.log.debug(( + "Burnin definition \"{}\" passed first filtering." + ).format(filename_suffix)) - filtered_burnin_defs[filename_suffix] = burnin_def return filtered_burnin_defs def families_filter_validation(self, families, output_families_filter): diff --git a/openpype/plugins/publish/extract_hierarchy_avalon.py b/openpype/plugins/publish/extract_hierarchy_avalon.py index dd1f09bafa..e263edd931 100644 --- a/openpype/plugins/publish/extract_hierarchy_avalon.py +++ b/openpype/plugins/publish/extract_hierarchy_avalon.py @@ -2,7 +2,6 @@ import pyblish.api from avalon import io from copy import deepcopy - class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): """Create entities in Avalon based on collected data.""" @@ -100,13 +99,20 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): if entity: # Do not override data, only update cur_entity_data = entity.get("data") or {} + entity_tasks = cur_entity_data["tasks"] or {} + + # create tasks as dict by default + if not entity_tasks: + cur_entity_data["tasks"] = entity_tasks + new_tasks = data.pop("tasks", {}) if "tasks" not in cur_entity_data and not new_tasks: continue for task_name in new_tasks: - if task_name in cur_entity_data["tasks"].keys(): + if task_name in entity_tasks.keys(): continue - 
cur_entity_data["tasks"][task_name] = new_tasks[task_name] + cur_entity_data["tasks"][task_name] = new_tasks[ + task_name] cur_entity_data.update(data) data = cur_entity_data else: diff --git a/openpype/plugins/publish/extract_otio_audio_tracks.py b/openpype/plugins/publish/extract_otio_audio_tracks.py new file mode 100644 index 0000000000..43e40097f7 --- /dev/null +++ b/openpype/plugins/publish/extract_otio_audio_tracks.py @@ -0,0 +1,295 @@ +import os +import pyblish +import openpype.api +from openpype.lib import ( + get_ffmpeg_tool_path +) +import tempfile +import opentimelineio as otio + + +class ExtractOtioAudioTracks(pyblish.api.ContextPlugin): + """Extract Audio tracks from OTIO timeline. + + Process will merge all found audio tracks into one long .wav file at frist + stage. Then it will trim it into individual short audio files relative to + asset length and add it to each marked instance data representation. This + is influenced by instance data audio attribute """ + + order = pyblish.api.ExtractorOrder - 0.44 + label = "Extract OTIO Audio Tracks" + hosts = ["hiero", "resolve"] + + # FFmpeg tools paths + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") + + def process(self, context): + """Convert otio audio track's content to audio representations + + Args: + context (pyblish.Context): context of publisher + """ + # split the long audio file to peces devided by isntances + audio_instances = self.get_audio_instances(context) + self.log.debug("Audio instances: {}".format(len(audio_instances))) + + if len(audio_instances) < 1: + self.log.info("No audio instances available") + return + + # get sequence + otio_timeline = context.data["otioTimeline"] + + # temp file + audio_temp_fpath = self.create_temp_file("audio") + + # get all audio inputs from otio timeline + audio_inputs = self.get_audio_track_items(otio_timeline) + + # create empty audio with longest duration + empty = self.create_empty(audio_inputs) + + # add empty to list of audio inputs + 
audio_inputs.insert(0, empty) + + # create cmd + cmd = self.ffmpeg_path + " " + cmd += self.create_cmd(audio_inputs) + cmd += audio_temp_fpath + + # run subprocess + self.log.debug("Executing: {}".format(cmd)) + openpype.api.run_subprocess( + cmd, shell=True, logger=self.log + ) + + # remove empty + os.remove(empty["mediaPath"]) + + # cut instance framerange and add to representations + self.add_audio_to_instances(audio_temp_fpath, audio_instances) + + # remove full mixed audio file + os.remove(audio_temp_fpath) + + def add_audio_to_instances(self, audio_file, instances): + created_files = [] + for inst in instances: + name = inst.data["asset"] + + recycling_file = [f for f in created_files if name in f] + + # frameranges + timeline_in_h = inst.data["clipInH"] + timeline_out_h = inst.data["clipOutH"] + fps = inst.data["fps"] + + # create duration + duration = (timeline_out_h - timeline_in_h) + 1 + + # ffmpeg generate new file only if doesnt exists already + if not recycling_file: + # convert to seconds + start_sec = float(timeline_in_h / fps) + duration_sec = float(duration / fps) + + # temp audio file + audio_fpath = self.create_temp_file(name) + + cmd = " ".join([ + self.ffmpeg_path, + "-ss {}".format(start_sec), + "-t {}".format(duration_sec), + "-i {}".format(audio_file), + audio_fpath + ]) + + # run subprocess + self.log.debug("Executing: {}".format(cmd)) + openpype.api.run_subprocess( + cmd, shell=True, logger=self.log + ) + else: + audio_fpath = recycling_file.pop() + + if "audio" in (inst.data["families"] + [inst.data["family"]]): + # create empty representation attr + if "representations" not in inst.data: + inst.data["representations"] = [] + # add to representations + inst.data["representations"].append({ + "files": os.path.basename(audio_fpath), + "name": "wav", + "ext": "wav", + "stagingDir": os.path.dirname(audio_fpath), + "frameStart": 0, + "frameEnd": duration + }) + + elif "reviewAudio" in inst.data.keys(): + audio_attr = inst.data.get("audio") or 
[] + audio_attr.append({ + "filename": audio_fpath, + "offset": 0 + }) + inst.data["audio"] = audio_attr + + # add generated audio file to created files for recycling + if audio_fpath not in created_files: + created_files.append(audio_fpath) + + def get_audio_instances(self, context): + """Return only instances which are having audio in families + + Args: + context (pyblish.context): context of publisher + + Returns: + list: list of selected instances + """ + return [ + _i for _i in context + # filter only those with audio family + # and also with reviewAudio data key + if bool("audio" in ( + _i.data.get("families", []) + [_i.data["family"]]) + ) or _i.data.get("reviewAudio") + ] + + def get_audio_track_items(self, otio_timeline): + """Get all audio clips form OTIO audio tracks + + Args: + otio_timeline (otio.schema.timeline): timeline object + + Returns: + list: list of audio clip dictionaries + """ + output = [] + # go trough all audio tracks + for otio_track in otio_timeline.tracks: + if "Audio" not in otio_track.kind: + continue + self.log.debug("_" * 50) + playhead = 0 + for otio_clip in otio_track: + self.log.debug(otio_clip) + if isinstance(otio_clip, otio.schema.Gap): + playhead += otio_clip.source_range.duration.value + elif isinstance(otio_clip, otio.schema.Clip): + start = otio_clip.source_range.start_time.value + duration = otio_clip.source_range.duration.value + fps = otio_clip.source_range.start_time.rate + media_path = otio_clip.media_reference.target_url + input = { + "mediaPath": media_path, + "delayFrame": playhead, + "startFrame": start, + "durationFrame": duration, + "delayMilSec": int(float(playhead / fps) * 1000), + "startSec": float(start / fps), + "durationSec": float(duration / fps), + "fps": fps + } + if input not in output: + output.append(input) + self.log.debug("__ input: {}".format(input)) + playhead += otio_clip.source_range.duration.value + + return output + + def create_empty(self, inputs): + """Create an empty audio file used as 
duration placeholder + + Args: + inputs (list): list of audio clip dictionaries + + Returns: + dict: audio clip dictionary + """ + # temp file + empty_fpath = self.create_temp_file("empty") + + # get all end frames + end_secs = [(_i["delayFrame"] + _i["durationFrame"]) / _i["fps"] + for _i in inputs] + # get the max of end frames + max_duration_sec = max(end_secs) + + # create empty cmd + cmd = " ".join([ + self.ffmpeg_path, + "-f lavfi", + "-i anullsrc=channel_layout=stereo:sample_rate=48000", + "-t {}".format(max_duration_sec), + empty_fpath + ]) + + # generate empty with ffmpeg + # run subprocess + self.log.debug("Executing: {}".format(cmd)) + + openpype.api.run_subprocess( + cmd, shell=True, logger=self.log + ) + + # return dict with output + return { + "mediaPath": empty_fpath, + "delayMilSec": 0, + "startSec": 0.00, + "durationSec": max_duration_sec + } + + def create_cmd(self, inputs): + """Creating multiple input cmd string + + Args: + inputs (list): list of input dicts. Order mater. 
+ + Returns: + str: the command body + + """ + # create cmd segments + _inputs = "" + _filters = "-filter_complex \"" + _channels = "" + for index, input in enumerate(inputs): + input_format = input.copy() + input_format.update({"i": index}) + _inputs += ( + "-ss {startSec} " + "-t {durationSec} " + "-i \"{mediaPath}\" " + ).format(**input_format) + + _filters += "[{i}]adelay={delayMilSec}:all=1[r{i}]; ".format( + **input_format) + _channels += "[r{}]".format(index) + + # merge all cmd segments together + cmd = _inputs + _filters + _channels + cmd += str( + "amix=inputs={inputs}:duration=first:" + "dropout_transition=1000,volume={inputs}[a]\" " + ).format(inputs=len(inputs)) + cmd += "-map \"[a]\" " + + return cmd + + def create_temp_file(self, name): + """Create temp wav file + + Args: + name (str): name to be used in file name + + Returns: + str: temp fpath + """ + return os.path.normpath( + tempfile.mktemp( + prefix="pyblish_tmp_{}_".format(name), + suffix=".wav" + ) + ) diff --git a/openpype/plugins/publish/extract_otio_file.py b/openpype/plugins/publish/extract_otio_file.py index 146f3b88ec..3bd217d5d4 100644 --- a/openpype/plugins/publish/extract_otio_file.py +++ b/openpype/plugins/publish/extract_otio_file.py @@ -12,7 +12,7 @@ class ExtractOTIOFile(openpype.api.Extractor): label = "Extract OTIO file" order = pyblish.api.ExtractorOrder - 0.45 families = ["workfile"] - hosts = ["resolve"] + hosts = ["resolve", "hiero"] def process(self, instance): # create representation data diff --git a/openpype/plugins/publish/extract_otio_review.py b/openpype/plugins/publish/extract_otio_review.py index 91a680ddb0..07fe6f2731 100644 --- a/openpype/plugins/publish/extract_otio_review.py +++ b/openpype/plugins/publish/extract_otio_review.py @@ -40,8 +40,8 @@ class ExtractOTIOReview(openpype.api.Extractor): order = api.ExtractorOrder - 0.45 label = "Extract OTIO review" - hosts = ["resolve"] families = ["review"] + hosts = ["resolve", "hiero"] # plugin default attributes 
temp_file_head = "tempFile." @@ -188,7 +188,7 @@ class ExtractOTIOReview(openpype.api.Extractor): # creating and registering representation representation = self._create_representation(start, duration) instance.data["representations"].append(representation) - self.log.info(f"Adding representation: {representation}") + self.log.info("Adding representation: {}".format(representation)) def _create_representation(self, start, duration): """ @@ -388,7 +388,7 @@ class ExtractOTIOReview(openpype.api.Extractor): (int(end_offset + duration) + 1)): seq_number = padding.format(start_frame + index) self.log.debug( - f"index: `{index}` | seq_number: `{seq_number}`") + "index: `{}` | seq_number: `{}`".format(index, seq_number)) new_frames.append(int(seq_number)) new_frames += self.used_frames self.used_frames = new_frames diff --git a/openpype/plugins/publish/extract_review.py b/openpype/plugins/publish/extract_review.py index a71b1db66b..048d16fabb 100644 --- a/openpype/plugins/publish/extract_review.py +++ b/openpype/plugins/publish/extract_review.py @@ -55,7 +55,7 @@ class ExtractReview(pyblish.api.InstancePlugin): profiles = None def process(self, instance): - self.log.debug(instance.data["representations"]) + self.log.debug(str(instance.data["representations"])) # Skip review when requested. 
if not instance.data.get("review", True): return @@ -333,10 +333,24 @@ class ExtractReview(pyblish.api.InstancePlugin): # Get FFmpeg arguments from profile presets out_def_ffmpeg_args = output_def.get("ffmpeg_args") or {} - ffmpeg_input_args = out_def_ffmpeg_args.get("input") or [] - ffmpeg_output_args = out_def_ffmpeg_args.get("output") or [] - ffmpeg_video_filters = out_def_ffmpeg_args.get("video_filters") or [] - ffmpeg_audio_filters = out_def_ffmpeg_args.get("audio_filters") or [] + _ffmpeg_input_args = out_def_ffmpeg_args.get("input") or [] + _ffmpeg_output_args = out_def_ffmpeg_args.get("output") or [] + _ffmpeg_video_filters = out_def_ffmpeg_args.get("video_filters") or [] + _ffmpeg_audio_filters = out_def_ffmpeg_args.get("audio_filters") or [] + + # Cleanup empty strings + ffmpeg_input_args = [ + value for value in _ffmpeg_input_args if value.strip() + ] + ffmpeg_output_args = [ + value for value in _ffmpeg_output_args if value.strip() + ] + ffmpeg_video_filters = [ + value for value in _ffmpeg_video_filters if value.strip() + ] + ffmpeg_audio_filters = [ + value for value in _ffmpeg_audio_filters if value.strip() + ] if isinstance(new_repre['files'], list): input_files_urls = [os.path.join(new_repre["stagingDir"], f) for f @@ -704,6 +718,105 @@ class ExtractReview(pyblish.api.InstancePlugin): return audio_in_args, audio_filters, audio_out_args + def get_letterbox_filters( + self, + letter_box_def, + input_res_ratio, + output_res_ratio, + pixel_aspect, + scale_factor_by_width, + scale_factor_by_height + ): + output = [] + + ratio = letter_box_def["ratio"] + state = letter_box_def["state"] + fill_color = letter_box_def["fill_color"] + f_red, f_green, f_blue, f_alpha = fill_color + fill_color_hex = "{0:0>2X}{1:0>2X}{2:0>2X}".format( + f_red, f_green, f_blue + ) + fill_color_alpha = float(f_alpha) / 255 + + line_thickness = letter_box_def["line_thickness"] + line_color = letter_box_def["line_color"] + l_red, l_green, l_blue, l_alpha = line_color + 
line_color_hex = "{0:0>2X}{1:0>2X}{2:0>2X}".format( + l_red, l_green, l_blue + ) + line_color_alpha = float(l_alpha) / 255 + + if input_res_ratio == output_res_ratio: + ratio /= pixel_aspect + elif input_res_ratio < output_res_ratio: + ratio /= scale_factor_by_width + else: + ratio /= scale_factor_by_height + + if state == "letterbox": + if fill_color_alpha > 0: + top_box = ( + "drawbox=0:0:iw:round((ih-(iw*(1/{})))/2):t=fill:c={}@{}" + ).format(ratio, fill_color_hex, fill_color_alpha) + + bottom_box = ( + "drawbox=0:ih-round((ih-(iw*(1/{0})))/2)" + ":iw:round((ih-(iw*(1/{0})))/2):t=fill:c={1}@{2}" + ).format(ratio, fill_color_hex, fill_color_alpha) + + output.extend([top_box, bottom_box]) + + if line_color_alpha > 0 and line_thickness > 0: + top_line = ( + "drawbox=0:round((ih-(iw*(1/{0})))/2)-{1}:iw:{1}:" + "t=fill:c={2}@{3}" + ).format( + ratio, line_thickness, line_color_hex, line_color_alpha + ) + bottom_line = ( + "drawbox=0:ih-round((ih-(iw*(1/{})))/2)" + ":iw:{}:t=fill:c={}@{}" + ).format( + ratio, line_thickness, line_color_hex, line_color_alpha + ) + output.extend([top_line, bottom_line]) + + elif state == "pillar": + if fill_color_alpha > 0: + left_box = ( + "drawbox=0:0:round((iw-(ih*{}))/2):ih:t=fill:c={}@{}" + ).format(ratio, fill_color_hex, fill_color_alpha) + + right_box = ( + "drawbox=iw-round((iw-(ih*{0}))/2))" + ":0:round((iw-(ih*{0}))/2):ih:t=fill:c={1}@{2}" + ).format(ratio, fill_color_hex, fill_color_alpha) + + output.extend([left_box, right_box]) + + if line_color_alpha > 0 and line_thickness > 0: + left_line = ( + "drawbox=round((iw-(ih*{}))/2):0:{}:ih:t=fill:c={}@{}" + ).format( + ratio, line_thickness, line_color_hex, line_color_alpha + ) + + right_line = ( + "drawbox=iw-round((iw-(ih*{}))/2))" + ":0:{}:ih:t=fill:c={}@{}" + ).format( + ratio, line_thickness, line_color_hex, line_color_alpha + ) + + output.extend([left_line, right_line]) + + else: + raise ValueError( + "Letterbox state \"{}\" is not recognized".format(state) + ) + + return 
output + def rescaling_filters(self, temp_data, output_def, new_repre): """Prepare vieo filters based on tags in new representation. @@ -715,7 +828,8 @@ class ExtractReview(pyblish.api.InstancePlugin): """ filters = [] - letter_box = output_def.get("letter_box") + letter_box_def = output_def["letter_box"] + letter_box_enabled = letter_box_def["enabled"] # Get instance data pixel_aspect = temp_data["pixel_aspect"] @@ -795,7 +909,7 @@ class ExtractReview(pyblish.api.InstancePlugin): if ( output_width == input_width and output_height == input_height - and not letter_box + and not letter_box_enabled and pixel_aspect == 1 ): self.log.debug( @@ -834,30 +948,24 @@ class ExtractReview(pyblish.api.InstancePlugin): ) # letter_box - if letter_box: - if input_res_ratio == output_res_ratio: - letter_box /= pixel_aspect - elif input_res_ratio < output_res_ratio: - letter_box /= scale_factor_by_width - else: - letter_box /= scale_factor_by_height - - scale_filter = "scale={}x{}:flags=lanczos".format( - output_width, output_height + if letter_box_enabled: + filters.extend([ + "scale={}x{}:flags=lanczos".format( + output_width, output_height + ), + "setsar=1" + ]) + filters.extend( + self.get_letterbox_filters( + letter_box_def, + input_res_ratio, + output_res_ratio, + pixel_aspect, + scale_factor_by_width, + scale_factor_by_height + ) ) - top_box = ( - "drawbox=0:0:iw:round((ih-(iw*(1/{})))/2):t=fill:c=black" - ).format(letter_box) - - bottom_box = ( - "drawbox=0:ih-round((ih-(iw*(1/{0})))/2)" - ":iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black" - ).format(letter_box) - - # Add letter box filters - filters.extend([scale_filter, "setsar=1", top_box, bottom_box]) - # scaling none square pixels and 1920 width if ( input_height != output_height diff --git a/openpype/plugins/publish/integrate_master_version.py b/openpype/plugins/publish/integrate_hero_version.py similarity index 100% rename from openpype/plugins/publish/integrate_master_version.py rename to 
openpype/plugins/publish/integrate_hero_version.py diff --git a/openpype/plugins/publish/integrate_new.py b/openpype/plugins/publish/integrate_new.py index ab9b85983b..9769f0d165 100644 --- a/openpype/plugins/publish/integrate_new.py +++ b/openpype/plugins/publish/integrate_new.py @@ -12,10 +12,13 @@ import shutil from pymongo import DeleteOne, InsertOne import pyblish.api from avalon import io +from avalon.api import format_template_with_optional_keys from avalon.vendor import filelink import openpype.api from datetime import datetime # from pype.modules import ModulesManager +from openpype.lib.profiles_filtering import filter_profiles +from openpype.lib import prepare_template_data # this is needed until speedcopy for linux is fixed if sys.platform == "win32": @@ -294,7 +297,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): else: orig_transfers = list(instance.data['transfers']) - template_name = self.template_name_from_instance(instance) + task_name = io.Session.get("AVALON_TASK") + family = self.main_family_from_instance(instance) + + key_values = {"families": family, "tasks": task_name} + profile = filter_profiles(self.template_name_profiles, key_values, + logger=self.log) + if profile: + template_name = profile["template_name"] published_representations = {} for idx, repre in enumerate(instance.data["representations"]): @@ -697,14 +707,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): subset = io.find_one({"_id": _id}) - # add group if available - if instance.data.get("subsetGroup"): - io.update_many({ - 'type': 'subset', - '_id': io.ObjectId(subset["_id"]) - }, {'$set': {'data.subsetGroup': - instance.data.get('subsetGroup')}} - ) + self._set_subset_group(instance, subset["_id"]) # Update families on subset. families = [instance.data["family"]] @@ -716,6 +719,65 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): return subset + def _set_subset_group(self, instance, subset_id): + """ + Mark subset as belonging to group in DB. 
+ + Uses Settings > Global > Publish plugins > IntegrateAssetNew + + Args: + instance (dict): processed instance + subset_id (str): DB's subset _id + + """ + # add group if available + integrate_new_sett = (instance.context.data["project_settings"] + ["global"] + ["publish"] + ["IntegrateAssetNew"]) + + profiles = integrate_new_sett["subset_grouping_profiles"] + + filtering_criteria = { + "families": instance.data["family"], + "hosts": instance.data["anatomyData"]["app"], + "tasks": instance.data["anatomyData"]["task"] or + io.Session["AVALON_TASK"] + } + matching_profile = filter_profiles(profiles, filtering_criteria) + + filled_template = None + if matching_profile: + template = matching_profile["template"] + fill_pairs = ( + ("family", filtering_criteria["families"]), + ("task", filtering_criteria["tasks"]), + ("host", filtering_criteria["hosts"]), + ("subset", instance.data["subset"]), + ("renderlayer", instance.data.get("renderlayer")) + ) + fill_pairs = prepare_template_data(fill_pairs) + + try: + filled_template = \ + format_template_with_optional_keys(fill_pairs, template) + except KeyError: + keys = [] + if fill_pairs: + keys = fill_pairs.keys() + + msg = "Subset grouping failed. " \ + "Only {} are expected in Settings".format(','.join(keys)) + self.log.warning(msg) + + if instance.data.get("subsetGroup") or filled_template: + subset_group = instance.data.get('subsetGroup') or filled_template + + io.update_many({ + 'type': 'subset', + '_id': io.ObjectId(subset_id) + }, {'$set': {'data.subsetGroup': subset_group}}) + def create_version(self, subset, version_number, data=None): """ Copy given source to destination @@ -798,68 +860,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): family = instance.data["families"][0] return family - def template_name_from_instance(self, instance): - template_name = self.default_template_name - if not self.template_name_profiles: - self.log.debug(( - "Template name profiles are not set." 
- " Using default \"{}\"" - ).format(template_name)) - return template_name - - # Task name from session? - task_name = io.Session.get("AVALON_TASK") - family = self.main_family_from_instance(instance) - - matching_profiles = {} - highest_value = -1 - self.log.debug( - "Template name profiles:\n{}".format(self.template_name_profiles) - ) - for name, filters in self.template_name_profiles.items(): - value = 0 - families = filters.get("families") - if families: - if family not in families: - continue - value += 1 - - tasks = filters.get("tasks") - if tasks: - if task_name not in tasks: - continue - value += 1 - - if value > highest_value: - matching_profiles = {} - highest_value = value - - if value == highest_value: - matching_profiles[name] = filters - - if len(matching_profiles) == 1: - template_name = tuple(matching_profiles.keys())[0] - self.log.debug( - "Using template name \"{}\".".format(template_name) - ) - - elif len(matching_profiles) > 1: - template_name = tuple(matching_profiles.keys())[0] - self.log.warning(( - "More than one template profiles matched" - " Family \"{}\" and Task: \"{}\"." - " Using first template name in row \"{}\"." - ).format(family, task_name, template_name)) - - else: - self.log.debug(( - "None of template profiles matched" - " Family \"{}\" and Task: \"{}\"." - " Using default template name \"{}\"" - ).format(family, task_name, template_name)) - - return template_name - def get_rootless_path(self, anatomy, path): """ Returns, if possible, path without absolute portion from host (eg. 'c:\' or '/opt/..') diff --git a/openpype/pype_commands.py b/openpype/pype_commands.py index a2d97429d3..981cca82dc 100644 --- a/openpype/pype_commands.py +++ b/openpype/pype_commands.py @@ -119,26 +119,3 @@ class PypeCommands: def validate_jsons(self): pass - @staticmethod - def generate_zip(out_path: str): - """Generate zip file from current sources. - - Args: - out_path (str): Path to generated zip file. 
- - """ - from igniter import bootstrap_repos - - # create zip file - bs = bootstrap_repos.BootstrapRepos() - if out_path: - out_path = Path(out_path) - bs.data_dir = out_path.parent - - print(f">>> Creating zip in {bs.data_dir} ...") - repo_file = bs.create_version_from_live_code() - if not repo_file: - print("!!! Error while creating zip file.") - exit(1) - - print(f">>> Created {repo_file}") diff --git a/openpype/scripts/otio_burnin.py b/openpype/scripts/otio_burnin.py index 5e2a22f1b5..ca77171981 100644 --- a/openpype/scripts/otio_burnin.py +++ b/openpype/scripts/otio_burnin.py @@ -5,7 +5,6 @@ import subprocess import platform import json import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins -from openpype.api import resources import openpype.lib @@ -14,7 +13,7 @@ ffprobe_path = openpype.lib.get_ffmpeg_tool_path("ffprobe") FFMPEG = ( - '"{}" -i "%(input)s" %(filters)s %(args)s%(output)s' + '"{}"%(input_args)s -i "%(input)s" %(filters)s %(args)s%(output)s' ).format(ffmpeg_path) FFPROBE = ( @@ -70,6 +69,87 @@ def get_fps(str_value): return str(fps) +def _prores_codec_args(ffprobe_data): + output = [] + + tags = ffprobe_data.get("tags") or {} + encoder = tags.get("encoder") or "" + if encoder.endswith("prores_ks"): + codec_name = "prores_ks" + + elif encoder.endswith("prores_aw"): + codec_name = "prores_aw" + + else: + codec_name = "prores" + + output.extend(["-codec:v", codec_name]) + + pix_fmt = ffprobe_data.get("pix_fmt") + if pix_fmt: + output.extend(["-pix_fmt", pix_fmt]) + + # Rest of arguments is prores_kw specific + if codec_name == "prores_ks": + codec_tag_to_profile_map = { + "apco": "proxy", + "apcs": "lt", + "apcn": "standard", + "apch": "hq", + "ap4h": "4444", + "ap4x": "4444xq" + } + codec_tag_str = ffprobe_data.get("codec_tag_string") + if codec_tag_str: + profile = codec_tag_to_profile_map.get(codec_tag_str) + if profile: + output.extend(["-profile:v", profile]) + + return output + + +def _h264_codec_args(ffprobe_data): + output = 
[] + + output.extend(["-codec:v", "h264"]) + + pix_fmt = ffprobe_data.get("pix_fmt") + if pix_fmt: + output.extend(["-pix_fmt", pix_fmt]) + + output.extend(["-intra"]) + output.extend(["-g", "1"]) + + return output + + +def get_codec_args(ffprobe_data): + codec_name = ffprobe_data.get("codec_name") + # Codec "prores" + if codec_name == "prores": + return _prores_codec_args(ffprobe_data) + + # Codec "h264" + if codec_name == "h264": + return _h264_codec_args(ffprobe_data) + + output = [] + if codec_name: + output.extend(["-codec:v", codec_name]) + + bit_rate = ffprobe_data.get("bit_rate") + if bit_rate: + output.extend(["-b:v", bit_rate]) + + pix_fmt = ffprobe_data.get("pix_fmt") + if pix_fmt: + output.extend(["-pix_fmt", pix_fmt]) + + output.extend(["-g", "1"]) + + return output + + class ModifiedBurnins(ffmpeg_burnins.Burnins): ''' This is modification of OTIO FFmpeg Burnin adapter. @@ -121,10 +201,15 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): 'font_size': 42 } - def __init__(self, source, streams=None, options_init=None): + def __init__( + self, source, streams=None, options_init=None, first_frame=None + ): if not streams: streams = _streams(source) + self.first_frame = first_frame + self.input_args = [] + super().__init__(source, streams) if options_init: @@ -236,31 +321,26 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): timecode_text = options.get("timecode") or "" text_for_size += timecode_text + font_path = options.get("font") + if not font_path or not os.path.exists(font_path): + font_path = ffmpeg_burnins.FONT + + options["font"] = font_path + data.update(options) - - os_system = platform.system().lower() - data_font = data.get("font") - if not data_font: - data_font = ( - resources.get_liberation_font_path().replace("\\", "/") - ) - elif isinstance(data_font, dict): - data_font = data_font[os_system] - - if data_font: - data["font"] = data_font - options["font"] = data_font - if ffmpeg_burnins._is_windows(): - data["font"] = ( - data_font - 
.replace(os.sep, r'\\' + os.sep) - .replace(':', r'\:') - ) - data.update( ffmpeg_burnins._drawtext(align, resolution, text_for_size, options) ) + arg_font_path = font_path + if platform.system().lower() == "windows": + arg_font_path = ( + arg_font_path + .replace(os.sep, r'\\' + os.sep) + .replace(':', r'\:') + ) + data["font"] = arg_font_path + self.filters['drawtext'].append(draw % data) if options.get('bg_color') is not None: @@ -289,7 +369,21 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): if self.filter_string: filters = '-vf "{}"'.format(self.filter_string) + if self.first_frame is not None: + start_number_arg = "-start_number {}".format(self.first_frame) + self.input_args.append(start_number_arg) + if "start_number" not in args: + if not args: + args = start_number_arg + else: + args = " ".join((start_number_arg, args)) + + input_args = "" + if self.input_args: + input_args = " {}".format(" ".join(self.input_args)) + return (FFMPEG % { + 'input_args': input_args, 'input': self.source, 'output': output, 'args': '%s ' % args if args else '', @@ -370,7 +464,8 @@ def example(input_path, output_path): def burnins_from_data( input_path, output_path, data, - codec_data=None, options=None, burnin_values=None, overwrite=True + codec_data=None, options=None, burnin_values=None, overwrite=True, + full_input_path=None, first_frame=None ): """This method adds burnins to video/image file based on presets setting. 
@@ -427,8 +522,11 @@ def burnins_from_data( "shot": "sh0010" } """ + streams = None + if full_input_path: + streams = _streams(full_input_path) - burnin = ModifiedBurnins(input_path, options_init=options) + burnin = ModifiedBurnins(input_path, streams, options, first_frame) frame_start = data.get("frame_start") frame_end = data.get("frame_end") @@ -541,38 +639,13 @@ def burnins_from_data( if codec_data: # Use codec definition from method arguments ffmpeg_args = codec_data + ffmpeg_args.append("-g 1") else: ffprobe_data = burnin._streams[0] - codec_name = ffprobe_data.get("codec_name") - if codec_name: - if codec_name == "prores": - tags = ffprobe_data.get("tags") or {} - encoder = tags.get("encoder") or "" - if encoder.endswith("prores_ks"): - codec_name = "prores_ks" - - elif encoder.endswith("prores_aw"): - codec_name = "prores_aw" - ffmpeg_args.append("-codec:v {}".format(codec_name)) - - profile_name = ffprobe_data.get("profile") - if profile_name: - # lower profile name and repalce spaces with underscore - profile_name = profile_name.replace(" ", "_").lower() - ffmpeg_args.append("-profile:v {}".format(profile_name)) - - bit_rate = ffprobe_data.get("bit_rate") - if bit_rate: - ffmpeg_args.append("-b:v {}".format(bit_rate)) - - pix_fmt = ffprobe_data.get("pix_fmt") - if pix_fmt: - ffmpeg_args.append("-pix_fmt {}".format(pix_fmt)) + ffmpeg_args.extend(get_codec_args(ffprobe_data)) # Use group one (same as `-intra` argument, which is deprecated) - ffmpeg_args.append("-g 1") - ffmpeg_args_str = " ".join(ffmpeg_args) burnin.render( output_path, args=ffmpeg_args_str, overwrite=overwrite, **data @@ -591,6 +664,8 @@ if __name__ == "__main__": in_data["burnin_data"], codec_data=in_data.get("codec"), options=in_data.get("options"), - burnin_values=in_data.get("values") + burnin_values=in_data.get("values"), + full_input_path=in_data.get("full_input_path"), + first_frame=in_data.get("first_frame") ) print("* Burnin script has finished") diff --git 
a/openpype/settings/__init__.py b/openpype/settings/__init__.py index b4187829fc..b5810deef4 100644 --- a/openpype/settings/__init__.py +++ b/openpype/settings/__init__.py @@ -1,9 +1,13 @@ +from .exceptions import ( + SaveWarningExc +) from .lib import ( get_system_settings, get_project_settings, get_current_project_settings, get_anatomy_settings, - get_environments + get_environments, + get_local_settings ) from .entities import ( SystemSettings, @@ -12,11 +16,14 @@ from .entities import ( __all__ = ( + "SaveWarningExc", + "get_system_settings", "get_project_settings", "get_current_project_settings", "get_anatomy_settings", "get_environments", + "get_local_settings", "SystemSettings", "ProjectSettings" diff --git a/openpype/settings/defaults/project_anatomy/attributes.json b/openpype/settings/defaults/project_anatomy/attributes.json index 3ad6761331..387e12bcea 100644 --- a/openpype/settings/defaults/project_anatomy/attributes.json +++ b/openpype/settings/defaults/project_anatomy/attributes.json @@ -14,13 +14,13 @@ "nuke/12-2", "nukex/12-2", "hiero/12-2", - "resolve/16", + "resolve/stable", "houdini/18-5", "blender/2-91", "harmony/20", "photoshop/2021", "aftereffects/2021", - "unreal/4-24" + "unreal/4-26" ], "tools_env": [] } \ No newline at end of file diff --git a/openpype/settings/defaults/project_anatomy/imageio.json b/openpype/settings/defaults/project_anatomy/imageio.json index 4e98463ee4..ff16c22663 100644 --- a/openpype/settings/defaults/project_anatomy/imageio.json +++ b/openpype/settings/defaults/project_anatomy/imageio.json @@ -25,6 +25,9 @@ } }, "nuke": { + "viewer": { + "viewerProcess": "sRGB" + }, "workfile": { "colorManagement": "Nuke", "OCIO_config": "nuke-default", @@ -102,7 +105,7 @@ }, { "name": "tile_color", - "value": "0xff0000ff" + "value": "0xadab1dff" }, { "name": "channels", diff --git a/openpype/settings/defaults/project_anatomy/templates.json b/openpype/settings/defaults/project_anatomy/templates.json index 2b16f59d01..63477b9d82 100644 
--- a/openpype/settings/defaults/project_anatomy/templates.json +++ b/openpype/settings/defaults/project_anatomy/templates.json @@ -11,7 +11,7 @@ "path": "{@folder}/{@file}" }, "render": { - "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/render/{subset}/{@version}", + "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{subset}/{@version}", "file": "{project[code]}_{asset}_{subset}_{@version}<_{output}><.{@frame}>.{ext}", "path": "{@folder}/{@file}" }, diff --git a/openpype/settings/defaults/project_settings/aftereffects.json b/openpype/settings/defaults/project_settings/aftereffects.json new file mode 100644 index 0000000000..f54dbb9612 --- /dev/null +++ b/openpype/settings/defaults/project_settings/aftereffects.json @@ -0,0 +1,18 @@ +{ + "publish": { + "ValidateSceneSettings": { + "enabled": true, + "optional": true, + "active": true, + "skip_resolution_check": [".*"], + "skip_timelines_check": [".*"] + }, + "AfterEffectsSubmitDeadline": { + "use_published": true, + "priority": 50, + "primary_pool": "", + "secondary_pool": "", + "chunk_size": 1000000 + } + } +} \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/deadline.json b/openpype/settings/defaults/project_settings/deadline.json index 9ff551491c..03f3e19a64 100644 --- a/openpype/settings/defaults/project_settings/deadline.json +++ b/openpype/settings/defaults/project_settings/deadline.json @@ -21,6 +21,7 @@ "secondary_pool": "", "group": "", "department": "", + "use_gpu": true, "limit_groups": {} }, "HarmonySubmitDeadline": { diff --git a/openpype/settings/defaults/project_settings/ftrack.json b/openpype/settings/defaults/project_settings/ftrack.json index 8970aa8ac8..b964ce07c3 100644 --- a/openpype/settings/defaults/project_settings/ftrack.json +++ b/openpype/settings/defaults/project_settings/ftrack.json @@ -1,7 +1,6 @@ { "events": { "sync_to_avalon": { - "enabled": true, "statuses_name_change": [ "ready", "not ready" diff 
--git a/openpype/settings/defaults/project_settings/global.json b/openpype/settings/defaults/project_settings/global.json index ca1b258e72..1f54bed03c 100644 --- a/openpype/settings/defaults/project_settings/global.json +++ b/openpype/settings/defaults/project_settings/global.json @@ -1,13 +1,24 @@ { "publish": { "IntegrateHeroVersion": { - "enabled": true + "enabled": true, + "optional": true, + "families": [ + "model", + "rig", + "look", + "pointcache", + "animation", + "setdress", + "layout", + "mayaAscii" + ] }, "ExtractJpegEXR": { "enabled": true, "ffmpeg_args": { "input": [ - "-gamma 2.2" + "-apply_trc gamma22" ], "output": [] } @@ -26,11 +37,11 @@ "ftrackreview" ], "ffmpeg_args": { - "video_filters": [], - "audio_filters": [], - "input": [ - "-gamma 2.2" + "video_filters": [ + "eq=gamma=2.2" ], + "audio_filters": [], + "input": [], "output": [ "-pix_fmt yuv420p", "-crf 18", @@ -45,7 +56,25 @@ ] }, "width": 0, - "height": 0 + "height": 0, + "letter_box": { + "enabled": false, + "ratio": 0.0, + "state": "letterbox", + "fill_color": [ + 0, + 0, + 0, + 255 + ], + "line_thickness": 0, + "line_color": [ + 255, + 0, + 0, + 255 + ] + } } } } @@ -55,11 +84,26 @@ "enabled": true, "options": { "font_size": 42, - "opacity": 1.0, - "bg_opacity": 0.5, + "font_color": [ + 255, + 255, + 255, + 255 + ], + "bg_color": [ + 0, + 0, + 0, + 127 + ], "x_offset": 5, "y_offset": 5, - "bg_padding": 5 + "bg_padding": 5, + "font_filepath": { + "windows": "", + "darwin": "", + "linux": "" + } }, "profiles": [ { @@ -72,26 +116,41 @@ "TOP_RIGHT": "{anatomy[version]}", "BOTTOM_LEFT": "{username}", "BOTTOM_CENTERED": "{asset}", - "BOTTOM_RIGHT": "{frame_start}-{current_frame}-{frame_end}" + "BOTTOM_RIGHT": "{frame_start}-{current_frame}-{frame_end}", + "filter": { + "families": [], + "tags": [] + } } } } ] }, "IntegrateAssetNew": { - "template_name_profiles": { - "publish": { + "template_name_profiles": [ + { "families": [], - "tasks": [] + "tasks": [], + "template_name": "publish" }, - 
"render": { + { "families": [ "review", "render", "prerender" - ] + ], + "tasks": [], + "template_name": "render" } - } + ], + "subset_grouping_profiles": [ + { + "families": [], + "hosts": [], + "tasks": [], + "template": "" + } + ] }, "ProcessSubmittedJobOnFarm": { "enabled": true, @@ -208,13 +267,6 @@ "remote_site": "studio" }, "sites": { - "gdrive": { - "provider": "gdrive", - "credentials_url": "", - "root": { - "work": "" - } - } } }, "project_plugins": { diff --git a/openpype/settings/defaults/project_settings/harmony.json b/openpype/settings/defaults/project_settings/harmony.json index f5f084dd44..0c7a35c058 100644 --- a/openpype/settings/defaults/project_settings/harmony.json +++ b/openpype/settings/defaults/project_settings/harmony.json @@ -1,14 +1,18 @@ { - "general": { - "skip_resolution_check": [], - "skip_timelines_check": [] - }, "publish": { "CollectPalettes": { "allowed_tasks": [ - "." + ".*" ] }, + "ValidateSceneSettings": { + "enabled": true, + "optional": true, + "active": true, + "frame_check_filter": [], + "skip_resolution_check": [], + "skip_timelines_check": [] + }, "HarmonySubmitDeadline": { "use_published": false, "priority": 50, diff --git a/openpype/settings/defaults/project_settings/hiero.json b/openpype/settings/defaults/project_settings/hiero.json index b69bc66457..44b27fc16f 100644 --- a/openpype/settings/defaults/project_settings/hiero.json +++ b/openpype/settings/defaults/project_settings/hiero.json @@ -17,6 +17,28 @@ "handleEnd": 10 } }, + "load": { + "LoadClip": { + "enabled": true, + "families": [ + "render2d", + "source", + "plate", + "render", + "review" + ], + "representations": [ + "exr", + "dpx", + "jpg", + "jpeg", + "png", + "h264", + "mov" + ], + "clip_name_template": "{asset}_{subset}_{representation}" + } + }, "publish": { "CollectInstanceVersion": { "enabled": false diff --git a/openpype/settings/defaults/project_settings/maya.json b/openpype/settings/defaults/project_settings/maya.json index dfece74f80..779b8bb3f3 
100644 --- a/openpype/settings/defaults/project_settings/maya.json +++ b/openpype/settings/defaults/project_settings/maya.json @@ -135,6 +135,12 @@ "enabled": false, "attributes": {} }, + "ValidateRenderSettings": { + "arnold_render_attributes": [], + "vray_render_attributes": [], + "redshift_render_attributes": [], + "renderman_render_attributes": [] + }, "ValidateModelName": { "enabled": false, "material_file": { @@ -382,10 +388,6 @@ "optional": true, "active": true, "bake_attributes": [] - }, - "MayaSubmitDeadline": { - "enabled": true, - "tile_assembler_plugin": "DraftTileAssembler" } }, "load": { diff --git a/openpype/settings/defaults/project_settings/nuke.json b/openpype/settings/defaults/project_settings/nuke.json index 852e041805..bb5232cea7 100644 --- a/openpype/settings/defaults/project_settings/nuke.json +++ b/openpype/settings/defaults/project_settings/nuke.json @@ -86,12 +86,6 @@ }, "ExtractSlateFrame": { "viewer_lut_raw": false - }, - "NukeSubmitDeadline": { - "deadline_priority": 50, - "deadline_pool": "", - "deadline_pool_secondary": "", - "deadline_chunk_size": 1 } }, "load": { diff --git a/openpype/settings/defaults/system_settings/applications.json b/openpype/settings/defaults/system_settings/applications.json index 2355f39aa1..63d6da4633 100644 --- a/openpype/settings/defaults/system_settings/applications.json +++ b/openpype/settings/defaults/system_settings/applications.json @@ -210,11 +210,11 @@ "environment": {} }, "__dynamic_keys_labels__": { + "13-0": "13.0 (Testing only)", "12-2": "12.2", "12-0": "12.0", "11-3": "11.3", - "11-2": "11.2", - "13-0": "13.0 (Testing only)" + "11-2": "11.2" } } }, @@ -354,11 +354,11 @@ "environment": {} }, "__dynamic_keys_labels__": { + "13-0": "13.0 (Testing only)", "12-2": "12.2", "12-0": "12.0", "11-3": "11.3", - "11-2": "11.2", - "13-0": "13.0 (Testing only)" + "11-2": "11.2" } } }, @@ -496,11 +496,11 @@ "environment": {} }, "__dynamic_keys_labels__": { + "13-0": "13.0 (Testing only)", "12-2": "12.2", 
"12-0": "12.0", "11-3": "11.3", - "11-2": "11.2", - "13-0": "13.0 (Testing only)" + "11-2": "11.2" } } }, @@ -640,11 +640,11 @@ "environment": {} }, "__dynamic_keys_labels__": { + "13-0": "13.0 (Testing only)", "12-2": "12.2", "12-0": "12.0", "11-3": "11.3", - "11-2": "11.2", - "13-0": "13.0 (Testing only)" + "11-2": "11.2" } } }, @@ -758,9 +758,9 @@ "RESOLVE_DEV": "True" }, "variants": { - "16": { + "stable": { "enabled": true, - "variant_label": "16", + "variant_label": "stable", "use_python_2": false, "executables": { "windows": [ @@ -1165,6 +1165,7 @@ }, "variants": { "4-26": { + "use_python_2": false, "executables": { "windows": [], "darwin": [], diff --git a/openpype/settings/defaults/system_settings/general.json b/openpype/settings/defaults/system_settings/general.json index 2568e8b6a8..d03fedf3c9 100644 --- a/openpype/settings/defaults/system_settings/general.json +++ b/openpype/settings/defaults/system_settings/general.json @@ -1,12 +1,10 @@ { "studio_name": "Studio name", "studio_code": "stu", + "admin_password": "", "environment": { - "OPENPYPE_OCIO_CONFIG": "{STUDIO_SOFT}/OpenColorIO-Configs", "__environment_keys__": { - "global": [ - "OPENPYPE_OCIO_CONFIG" - ] + "global": [] } }, "openpype_path": { diff --git a/openpype/settings/defaults/system_settings/modules.json b/openpype/settings/defaults/system_settings/modules.json index b3065058a1..5c4aa6c485 100644 --- a/openpype/settings/defaults/system_settings/modules.json +++ b/openpype/settings/defaults/system_settings/modules.json @@ -135,7 +135,8 @@ "workspace_name": "" }, "sync_server": { - "enabled": false + "enabled": false, + "sites": {} }, "deadline": { "enabled": true, @@ -161,9 +162,6 @@ "log_viewer": { "enabled": true }, - "user": { - "enabled": true - }, "standalonepublish_tool": { "enabled": true } diff --git a/openpype/settings/defaults/system_settings/tools.json b/openpype/settings/defaults/system_settings/tools.json index b0adccc65e..181236abe8 100644 --- 
a/openpype/settings/defaults/system_settings/tools.json +++ b/openpype/settings/defaults/system_settings/tools.json @@ -32,7 +32,7 @@ }, "__dynamic_keys_labels__": { "3-2": "3.2", - "3-1": "3.2" + "3-1": "3.1" } } }, diff --git a/openpype/settings/entities/__init__.py b/openpype/settings/entities/__init__.py index f76a915225..2c71b622ee 100644 --- a/openpype/settings/entities/__init__.py +++ b/openpype/settings/entities/__init__.py @@ -101,7 +101,8 @@ from .enum_entity import ( BaseEnumEntity, EnumEntity, AppsEnumEntity, - ToolsEnumEntity + ToolsEnumEntity, + ProvidersEnum ) from .list_entity import ListEntity @@ -149,6 +150,7 @@ __all__ = ( "EnumEntity", "AppsEnumEntity", "ToolsEnumEntity", + "ProvidersEnum", "ListEntity", diff --git a/openpype/settings/entities/base_entity.py b/openpype/settings/entities/base_entity.py index b5c42e1da0..3e73fa8aa6 100644 --- a/openpype/settings/entities/base_entity.py +++ b/openpype/settings/entities/base_entity.py @@ -457,27 +457,18 @@ class BaseItemEntity(BaseEntity): pass @property - def can_discard_changes(self): - """Result defines if `discard_changes` will be processed. - - Also can be used as validation before the method is called. - """ + def _can_discard_changes(self): + """Defines if `discard_changes` will be processed.""" return self.has_unsaved_changes @property - def can_add_to_studio_default(self): - """Result defines if `add_to_studio_default` will be processed. - - Also can be used as validation before the method is called. 
- """ + def _can_add_to_studio_default(self): + """Defines if `add_to_studio_default` will be processed.""" if self._override_state is not OverrideState.STUDIO: return False - if self.is_dynamic_item or self.is_in_dynamic_item: - return False - # Skip if entity is under group - if self.group_item: + if self.group_item is not None: return False # Skip if is group and any children is already marked with studio @@ -487,36 +478,24 @@ class BaseItemEntity(BaseEntity): return True @property - def can_remove_from_studio_default(self): - """Result defines if `remove_from_studio_default` can be triggered. - - This can be also used as validation before the method is called. - """ + def _can_remove_from_studio_default(self): + """Defines if `remove_from_studio_default` can be processed.""" if self._override_state is not OverrideState.STUDIO: return False - if self.is_dynamic_item or self.is_in_dynamic_item: - return False - if not self.has_studio_override: return False return True @property - def can_add_to_project_override(self): - """Result defines if `add_to_project_override` can be triggered. - - Also can be used as validation before the method is called. - """ - if self.is_dynamic_item or self.is_in_dynamic_item: - return False - + def _can_add_to_project_override(self): + """Defines if `add_to_project_override` can be processed.""" # Show only when project overrides are set if self._override_state is not OverrideState.PROJECT: return False # Do not show on items under group item - if self.group_item: + if self.group_item is not None: return False # Skip if already is marked to save project overrides @@ -525,14 +504,8 @@ class BaseItemEntity(BaseEntity): return True @property - def can_remove_from_project_override(self): - """Result defines if `remove_from_project_override` can be triggered. - - This can be also used as validation before the method is called. 
- """ - if self.is_dynamic_item or self.is_in_dynamic_item: - return False - + def _can_remove_from_project_override(self): + """Defines if `remove_from_project_override` can be processed.""" if self._override_state is not OverrideState.PROJECT: return False @@ -544,6 +517,54 @@ class BaseItemEntity(BaseEntity): return False return True + @property + def can_trigger_discard_changes(self): + """Defines if can trigger `discard_changes`. + + Also can be used as validation before the method is called. + """ + return self._can_discard_changes + + @property + def can_trigger_add_to_studio_default(self): + """Defines if can trigger `add_to_studio_default`. + + Also can be used as validation before the method is called. + """ + if self.is_dynamic_item or self.is_in_dynamic_item: + return False + return self._can_add_to_studio_default + + @property + def can_trigger_remove_from_studio_default(self): + """Defines if can trigger `remove_from_studio_default`. + + Also can be used as validation before the method is called. + """ + if self.is_dynamic_item or self.is_in_dynamic_item: + return False + return self._can_remove_from_studio_default + + @property + def can_trigger_add_to_project_override(self): + """Defines if can trigger `add_to_project_override`. + + Also can be used as validation before the method is called. + """ + if self.is_dynamic_item or self.is_in_dynamic_item: + return False + return self._can_add_to_project_override + + @property + def can_trigger_remove_from_project_override(self): + """Defines if can trigger `remove_from_project_override`. + + Also can be used as validation before the method is called. + """ + if self.is_dynamic_item or self.is_in_dynamic_item: + return False + return self._can_remove_from_project_override + def discard_changes(self, on_change_trigger=None): """Discard changes on entity and it's children. 
@@ -568,7 +589,7 @@ class BaseItemEntity(BaseEntity): """ initialized = False if on_change_trigger is None: - if not self.can_discard_changes: + if not self.can_trigger_discard_changes: return initialized = True @@ -588,7 +609,7 @@ class BaseItemEntity(BaseEntity): def add_to_studio_default(self, on_change_trigger=None): initialized = False if on_change_trigger is None: - if not self.can_add_to_studio_default: + if not self.can_trigger_add_to_studio_default: return initialized = True @@ -625,7 +646,7 @@ class BaseItemEntity(BaseEntity): """ initialized = False if on_change_trigger is None: - if not self.can_remove_from_studio_default: + if not self.can_trigger_remove_from_studio_default: return initialized = True @@ -649,7 +670,7 @@ class BaseItemEntity(BaseEntity): def add_to_project_override(self, on_change_trigger=None): initialized = False if on_change_trigger is None: - if not self.can_add_to_project_override: + if not self.can_trigger_add_to_project_override: return initialized = True @@ -689,7 +710,7 @@ class BaseItemEntity(BaseEntity): initialized = False if on_change_trigger is None: - if not self.can_remove_from_project_override: + if not self.can_trigger_remove_from_project_override: return initialized = True on_change_trigger = [] @@ -775,7 +796,8 @@ class ItemEntity(BaseItemEntity): # Group item reference if self.parent.is_group: self.group_item = self.parent - elif self.parent.group_item: + + elif self.parent.group_item is not None: self.group_item = self.parent.group_item self.key = self.schema_data.get("key") diff --git a/openpype/settings/entities/dict_immutable_keys_entity.py b/openpype/settings/entities/dict_immutable_keys_entity.py index d5563f80d6..052bbda4d0 100644 --- a/openpype/settings/entities/dict_immutable_keys_entity.py +++ b/openpype/settings/entities/dict_immutable_keys_entity.py @@ -353,6 +353,20 @@ class DictImmutableKeysEntity(ItemEntity): for key in METADATA_KEYS: if key in value: metadata[key] = value.pop(key) + + old_metadata = 
metadata.get(M_OVERRIDEN_KEY) + if old_metadata: + old_metadata_set = set(old_metadata) + new_metadata = [] + for key in self.non_gui_children.keys(): + if key in old_metadata: + new_metadata.append(key) + old_metadata_set.remove(key) + + for key in old_metadata_set: + new_metadata.append(key) + metadata[M_OVERRIDEN_KEY] = new_metadata + return value, metadata def update_default_value(self, value): @@ -458,6 +472,9 @@ class DictImmutableKeysEntity(ItemEntity): for child_obj in self.non_gui_children.values(): child_obj.add_to_studio_default(on_change_trigger) self._ignore_child_changes = False + + self._update_current_metadata() + self.parent.on_child_change(self) def _remove_from_studio_default(self, on_change_trigger): @@ -471,6 +488,9 @@ class DictImmutableKeysEntity(ItemEntity): for child_obj in self.non_gui_children.values(): child_obj.add_to_project_override(_on_change_trigger) self._ignore_child_changes = False + + self._update_current_metadata() + self.parent.on_child_change(self) def _remove_from_project_override(self, on_change_trigger): diff --git a/openpype/settings/entities/dict_mutable_keys_entity.py b/openpype/settings/entities/dict_mutable_keys_entity.py index 7ba44ed0df..907bf98784 100644 --- a/openpype/settings/entities/dict_mutable_keys_entity.py +++ b/openpype/settings/entities/dict_mutable_keys_entity.py @@ -1,6 +1,5 @@ import re import copy - from .lib import ( NOT_SET, OverrideState @@ -94,11 +93,18 @@ class DictMutableKeysEntity(EndpointEntity): for key in prev_keys: self.pop(key) + def _convert_to_valid_type(self, value): + try: + return dict(value) + except Exception: + pass + return super(DictMutableKeysEntity, self)._convert_to_valid_type(value) + def set_key_value(self, key, value): # TODO Check for value type if is Settings entity? 
child_obj = self.children_by_key.get(key) if not child_obj: - if not KEY_REGEX.match(key): + if not self.store_as_list and not KEY_REGEX.match(key): raise InvalidKeySymbols(self.path, key) child_obj = self.add_key(key) @@ -112,7 +118,7 @@ class DictMutableKeysEntity(EndpointEntity): if new_key == old_key: return - if not KEY_REGEX.match(new_key): + if not self.store_as_list and not KEY_REGEX.match(new_key): raise InvalidKeySymbols(self.path, new_key) self.children_by_key[new_key] = self.children_by_key.pop(old_key) @@ -125,11 +131,15 @@ class DictMutableKeysEntity(EndpointEntity): self._has_project_override = True self.on_change() - def _add_key(self, key): + def _add_key(self, key, _ingore_key_validation=False): if key in self.children_by_key: self.pop(key) - if not KEY_REGEX.match(key): + if ( + not _ingore_key_validation + and not self.store_as_list + and not KEY_REGEX.match(key) + ): raise InvalidKeySymbols(self.path, key) if self.value_is_env_group: @@ -194,6 +204,7 @@ class DictMutableKeysEntity(EndpointEntity): self.children_by_key = {} self.children_label_by_id = {} + self.store_as_list = self.schema_data.get("store_as_list") or False self.value_is_env_group = ( self.schema_data.get("value_is_env_group") or False ) @@ -222,7 +233,7 @@ class DictMutableKeysEntity(EndpointEntity): if self.value_is_env_group: self.item_schema["env_group_key"] = "" - if not self.group_item: + if self.group_item is None: self.is_group = True def schema_validations(self): @@ -237,6 +248,10 @@ class DictMutableKeysEntity(EndpointEntity): if used_temp_label: self.label = None + if self.value_is_env_group and self.store_as_list: + reason = "Item can't store environments metadata to list output." + raise EntitySchemaError(self, reason) + if not self.schema_data.get("object_type"): reason = ( "Modifiable dictionary must have specified `object_type`." 
@@ -251,8 +266,18 @@ class DictMutableKeysEntity(EndpointEntity): ) raise EntitySchemaError(self, reason) - for child_obj in self.children_by_key.values(): - child_obj.schema_validations() + # Validate object type schema + child_validated = False + for child_entity in self.children_by_key.values(): + child_entity.schema_validations() + child_validated = True + break + + if not child_validated: + key = "__tmp__" + tmp_child = self._add_key(key) + tmp_child.schema_validations() + self.children_by_key.pop(key) def get_child_path(self, child_obj): result_key = None @@ -277,21 +302,24 @@ class DictMutableKeysEntity(EndpointEntity): self.on_change() - def _metadata_for_current_state(self): + def _get_metadata_for_state(self, state): if ( - self._override_state is OverrideState.PROJECT + state is OverrideState.PROJECT and self._project_override_value is not NOT_SET ): return self._project_override_metadata if ( - self._override_state >= OverrideState.STUDIO + state >= OverrideState.STUDIO and self._studio_override_value is not NOT_SET ): return self._studio_override_metadata return self._default_metadata + def _metadata_for_current_state(self): + return self._get_metadata_for_state(self._override_state) + def set_override_state(self, state): # Trigger override state change of root if is not same if self.root_item.override_state is not state: @@ -319,6 +347,7 @@ class DictMutableKeysEntity(EndpointEntity): using_project_overrides = False using_studio_overrides = False + using_default_values = False if ( state is OverrideState.PROJECT and self.had_project_override @@ -336,14 +365,28 @@ class DictMutableKeysEntity(EndpointEntity): metadata = self._studio_override_metadata else: + using_default_values = True value = self._default_value metadata = self._default_metadata if value is NOT_SET: + using_default_values = False value = self.value_on_not_set + using_values_from_state = False + if state is OverrideState.PROJECT: + using_values_from_state = using_project_overrides + 
elif state is OverrideState.STUDIO: + using_values_from_state = using_studio_overrides + elif state is OverrideState.DEFAULTS: + using_values_from_state = using_default_values + new_value = copy.deepcopy(value) + if using_values_from_state: + initial_value = copy.deepcopy(value) + initial_value.update(metadata) + # Simulate `clear` method without triggering value change for key in tuple(self.children_by_key.keys()): self.children_by_key.pop(key) @@ -356,30 +399,62 @@ class DictMutableKeysEntity(EndpointEntity): children_label_by_id = {} metadata_labels = metadata.get(M_DYNAMIC_KEY_LABEL) or {} for _key, _value in new_value.items(): - if not KEY_REGEX.match(_key): + label = metadata_labels.get(_key) + if self.store_as_list or KEY_REGEX.match(_key): + child_entity = self._add_key(_key) + else: # Replace invalid characters with underscore # - this is safety to not break already existing settings - _key = re.sub( - r"[^{}]+".format(KEY_ALLOWED_SYMBOLS), - "_", - _key - ) + new_key = self._convert_to_regex_valid_key(_key) + if not using_values_from_state: + child_entity = self._add_key(new_key) + else: + child_entity = self._add_key( + _key, _ingore_key_validation=True + ) + self.change_key(_key, new_key) + _key = new_key + + if not label: + label = metadata_labels.get(new_key) - child_entity = self._add_key(_key) child_entity.update_default_value(_value) if using_project_overrides: child_entity.update_project_value(_value) elif using_studio_overrides: child_entity.update_studio_value(_value) - label = metadata_labels.get(_key) if label: children_label_by_id[child_entity.id] = label child_entity.set_override_state(state) self.children_label_by_id = children_label_by_id - self.initial_value = self.settings_value() + _settings_value = self.settings_value() + if using_values_from_state: + if _settings_value is NOT_SET: + initial_value = NOT_SET + + elif self.store_as_list: + new_initial_value = [] + for key, value in _settings_value: + if key in initial_value: + 
new_initial_value.append(key, initial_value.pop(key)) + + for key, value in initial_value.items(): + new_initial_value.append(key, value) + initial_value = new_initial_value + else: + initial_value = _settings_value + + self.initial_value = initial_value + + def _convert_to_regex_valid_key(self, key): + return re.sub( + r"[^{}]+".format(KEY_ALLOWED_SYMBOLS), + "_", + key + ) def children_key_by_id(self): return { @@ -389,6 +464,12 @@ class DictMutableKeysEntity(EndpointEntity): @property def value(self): + if self.store_as_list: + output = [] + for key, child_entity in self.children_by_key.items(): + output.append(key, child_entity.value) + return output + output = {} for key, child_entity in self.children_by_key.items(): output[key] = child_entity.value @@ -468,6 +549,13 @@ class DictMutableKeysEntity(EndpointEntity): return False def _settings_value(self): + if self.store_as_list: + output = [] + for key, child_entity in self.children_by_key.items(): + child_value = child_entity.settings_value() + output.append([key, child_value]) + return output + output = {} for key, child_entity in self.children_by_key.items(): child_value = child_entity.settings_value() @@ -519,6 +607,9 @@ class DictMutableKeysEntity(EndpointEntity): self.had_project_override = value is not NOT_SET def _discard_changes(self, on_change_trigger): + if not self._can_discard_changes: + return + self.set_override_state(self._override_state) on_change_trigger.append(self.on_change) @@ -527,6 +618,9 @@ class DictMutableKeysEntity(EndpointEntity): self.on_change() def _remove_from_studio_default(self, on_change_trigger): + if not self._can_remove_from_studio_default: + return + value = self._default_value if value is NOT_SET: value = self.value_on_not_set @@ -536,13 +630,24 @@ class DictMutableKeysEntity(EndpointEntity): # Simulate `clear` method without triggering value change for key in tuple(self.children_by_key.keys()): - child_obj = self.children_by_key.pop(key) + self.children_by_key.pop(key) + 
+ metadata = self._get_metadata_for_state(OverrideState.DEFAULTS) + metadata_labels = metadata.get(M_DYNAMIC_KEY_LABEL) or {} + children_label_by_id = {} # Create new children for _key, _value in new_value.items(): - child_obj = self._add_key(_key) - child_obj.update_default_value(_value) - child_obj.set_override_state(self._override_state) + new_key = self._convert_to_regex_valid_key(_key) + child_entity = self._add_key(new_key) + child_entity.update_default_value(_value) + label = metadata_labels.get(_key) + if label: + children_label_by_id[child_entity.id] = label + + child_entity.set_override_state(self._override_state) + + self.children_label_by_id = children_label_by_id self._ignore_child_changes = False @@ -555,10 +660,7 @@ class DictMutableKeysEntity(EndpointEntity): self.on_change() def _remove_from_project_override(self, on_change_trigger): - if self._override_state is not OverrideState.PROJECT: - return - - if not self.has_project_override: + if not self._can_remove_from_project_override: return if self._has_studio_override: @@ -574,15 +676,27 @@ class DictMutableKeysEntity(EndpointEntity): # Simulate `clear` method without triggering value change for key in tuple(self.children_by_key.keys()): - child_obj = self.children_by_key.pop(key) + self.children_by_key.pop(key) + + metadata = self._get_metadata_for_state(OverrideState.STUDIO) + metadata_labels = metadata.get(M_DYNAMIC_KEY_LABEL) or {} + children_label_by_id = {} # Create new children for _key, _value in new_value.items(): - child_obj = self._add_key(_key) - child_obj.update_default_value(_value) + new_key = self._convert_to_regex_valid_key(_key) + child_entity = self._add_key(new_key) + child_entity.update_default_value(_value) if self._has_studio_override: - child_obj.update_studio_value(_value) - child_obj.set_override_state(self._override_state) + child_entity.update_studio_value(_value) + + label = metadata_labels.get(_key) + if label: + children_label_by_id[child_entity.id] = label + + 
child_entity.set_override_state(self._override_state) + + self.children_label_by_id = children_label_by_id self._ignore_child_changes = False diff --git a/openpype/settings/entities/enum_entity.py b/openpype/settings/entities/enum_entity.py index 693305cb1e..c6021b68de 100644 --- a/openpype/settings/entities/enum_entity.py +++ b/openpype/settings/entities/enum_entity.py @@ -217,3 +217,41 @@ class ToolsEnumEntity(BaseEnumEntity): if key in self.valid_keys: new_value.append(key) self._current_value = new_value + + +class ProvidersEnum(BaseEnumEntity): + schema_types = ["providers-enum"] + + def _item_initalization(self): + self.multiselection = False + self.value_on_not_set = "" + self.enum_items = [] + self.valid_keys = set() + self.valid_value_types = (str, ) + self.placeholder = None + + def _get_enum_values(self): + from openpype.modules.sync_server.providers import lib as lib_providers + + providers = lib_providers.factory.providers + + valid_keys = set() + valid_keys.add('') + enum_items = [{'': 'Choose Provider'}] + for provider_code, provider_info in providers.items(): + provider, _ = provider_info + enum_items.append({provider_code: provider.LABEL}) + valid_keys.add(provider_code) + + return enum_items, valid_keys + + def set_override_state(self, *args, **kwargs): + super(ProvidersEnum, self).set_override_state(*args, **kwargs) + + self.enum_items, self.valid_keys = self._get_enum_values() + + value_on_not_set = list(self.valid_keys)[0] + if self._current_value is NOT_SET: + self._current_value = value_on_not_set + + self.value_on_not_set = value_on_not_set diff --git a/openpype/settings/entities/input_entities.py b/openpype/settings/entities/input_entities.py index e897576d43..409e6a66b4 100644 --- a/openpype/settings/entities/input_entities.py +++ b/openpype/settings/entities/input_entities.py @@ -32,7 +32,7 @@ class EndpointEntity(ItemEntity): super(EndpointEntity, self).__init__(*args, **kwargs) if ( - not (self.group_item or self.is_group) + not 
(self.group_item is not None or self.is_group) and not (self.is_dynamic_item or self.is_in_dynamic_item) ): self.is_group = True @@ -251,6 +251,9 @@ class InputEntity(EndpointEntity): self._current_value = copy.deepcopy(value) def _discard_changes(self, on_change_trigger=None): + if not self._can_discard_changes: + return + self._value_is_modified = False if self._override_state >= OverrideState.PROJECT: self._has_project_override = self.had_project_override @@ -286,6 +289,9 @@ class InputEntity(EndpointEntity): self.on_change() def _remove_from_studio_default(self, on_change_trigger): + if not self._can_remove_from_studio_default: + return + value = self._default_value if value is NOT_SET: value = self.value_on_not_set @@ -301,10 +307,7 @@ class InputEntity(EndpointEntity): self.on_change() def _remove_from_project_override(self, on_change_trigger): - if self._override_state is not OverrideState.PROJECT: - return - - if not self._has_project_override: + if not self._can_remove_from_project_override: return self._has_project_override = False @@ -407,6 +410,9 @@ class PathInput(InputEntity): self.valid_value_types = (STRING_TYPE, ) self.value_on_not_set = "" + # GUI attributes + self.placeholder_text = self.schema_data.get("placeholder") + class RawJsonEntity(InputEntity): schema_types = ["raw-json"] diff --git a/openpype/settings/entities/item_entities.py b/openpype/settings/entities/item_entities.py index 56e7d1c7b2..48336080b6 100644 --- a/openpype/settings/entities/item_entities.py +++ b/openpype/settings/entities/item_entities.py @@ -49,18 +49,21 @@ class PathEntity(ItemEntity): return self.child_obj.items() def _item_initalization(self): - if not self.group_item and not self.is_group: + if self.group_item is None and not self.is_group: self.is_group = True self.multiplatform = self.schema_data.get("multiplatform", False) self.multipath = self.schema_data.get("multipath", False) + placeholder_text = self.schema_data.get("placeholder") + # Create child object if 
not self.multiplatform and not self.multipath: valid_value_types = (STRING_TYPE, ) item_schema = { "type": "path-input", - "key": self.key + "key": self.key, + "placeholder": placeholder_text } elif not self.multiplatform: @@ -68,7 +71,10 @@ class PathEntity(ItemEntity): item_schema = { "type": "list", "key": self.key, - "object_type": "path-input" + "object_type": { + "type": "path-input", + "placeholder": placeholder_text + } } else: @@ -87,9 +93,13 @@ class PathEntity(ItemEntity): } if self.multipath: child_item["type"] = "list" - child_item["object_type"] = "path-input" + child_item["object_type"] = { + "type": "path-input", + "placeholder": placeholder_text + } else: child_item["type"] = "path-input" + child_item["placeholder"] = placeholder_text item_schema["children"].append(child_item) @@ -199,7 +209,7 @@ class ListStrictEntity(ItemEntity): # GUI attribute self.is_horizontal = self.schema_data.get("horizontal", True) - if not self.group_item and not self.is_group: + if self.group_item is None and not self.is_group: self.is_group = True def schema_validations(self): @@ -453,4 +463,5 @@ class ListStrictEntity(ItemEntity): def reset_callbacks(self): super(ListStrictEntity, self).reset_callbacks() - self.child_obj.reset_callbacks() + for child_obj in self.children: + child_obj.reset_callbacks() diff --git a/openpype/settings/entities/list_entity.py b/openpype/settings/entities/list_entity.py index c6155b78f8..4b3f7a2659 100644 --- a/openpype/settings/entities/list_entity.py +++ b/openpype/settings/entities/list_entity.py @@ -59,43 +59,38 @@ class ListEntity(EndpointEntity): ) def append(self, item): - child_obj = self._add_new_item() - child_obj.set_override_state(self._override_state) + child_obj = self.add_new_item(trigger_change=False) child_obj.set(item) - self.on_change() + self.on_child_change(child_obj) def extend(self, items): for item in items: self.append(item) def clear(self): - self.children.clear() - self.on_change() + if not self.children: + 
return + + first_item = self.children.pop(0) + while self.children: + self.children.pop(0) + self.on_child_change(first_item) def pop(self, idx): item = self.children.pop(idx) - self.on_change() + self.on_child_change(item) return item def remove(self, item): - for idx, child_obj in enumerate(self.children): - found = False - if isinstance(item, BaseEntity): - if child_obj is item: - found = True - elif child_obj.value == item: - found = True - - if found: - self.pop(idx) - return - raise ValueError("ListEntity.remove(x): x not in ListEntity") + try: + self.pop(self.index(item)) + except ValueError: + raise ValueError("ListEntity.remove(x): x not in ListEntity") def insert(self, idx, item): - child_obj = self._add_new_item(idx) - child_obj.set_override_state(self._override_state) + child_obj = self.add_new_item(idx, trigger_change=False) child_obj.set(item) - self.on_change() + self.on_child_change(child_obj) def _add_new_item(self, idx=None): child_obj = self.create_schema_object(self.item_schema, self, True) @@ -105,10 +100,12 @@ class ListEntity(EndpointEntity): self.children.insert(idx, child_obj) return child_obj - def add_new_item(self, idx=None): + def add_new_item(self, idx=None, trigger_change=True): child_obj = self._add_new_item(idx) child_obj.set_override_state(self._override_state) - self.on_change() + + if trigger_change: + self.on_child_change(child_obj) return child_obj def swap_items(self, item_1, item_2): @@ -144,7 +141,7 @@ class ListEntity(EndpointEntity): item_schema = {"type": item_schema} self.item_schema = item_schema - if not self.group_item: + if self.group_item is None: self.is_group = True # Value that was set on set_override_state @@ -167,8 +164,18 @@ class ListEntity(EndpointEntity): ) raise EntitySchemaError(self, reason) - for child_obj in self.children: - child_obj.schema_validations() + # Validate object type schema + child_validated = False + for child_entity in self.children: + child_entity.schema_validations() + child_validated 
= True + break + + if not child_validated: + idx = 0 + tmp_child = self._add_new_item(idx) + tmp_child.schema_validations() + self.children.pop(idx) def get_child_path(self, child_obj): result_idx = None @@ -343,7 +350,7 @@ class ListEntity(EndpointEntity): return output def _discard_changes(self, on_change_trigger): - if self._override_state is OverrideState.NOT_DEFINED: + if not self._can_discard_changes: return not_set = object() @@ -405,7 +412,7 @@ class ListEntity(EndpointEntity): self.on_change() def _remove_from_studio_default(self, on_change_trigger): - if self._override_state is not OverrideState.STUDIO: + if not self._can_remove_from_studio_default: return value = self._default_value @@ -433,10 +440,7 @@ class ListEntity(EndpointEntity): self.on_change() def _remove_from_project_override(self, on_change_trigger): - if self._override_state is not OverrideState.PROJECT: - return - - if not self.has_project_override: + if not self._can_remove_from_project_override: return if self._has_studio_override: diff --git a/openpype/settings/entities/root_entities.py b/openpype/settings/entities/root_entities.py index eed3d47f46..b89473d9fb 100644 --- a/openpype/settings/entities/root_entities.py +++ b/openpype/settings/entities/root_entities.py @@ -23,6 +23,7 @@ from openpype.settings.constants import ( PROJECT_ANATOMY_KEY, KEY_REGEX ) +from openpype.settings.exceptions import SaveWarningExc from openpype.settings.lib import ( DEFAULTS_DIR, @@ -724,8 +725,19 @@ class ProjectSettings(RootEntity): project_settings = settings_value.get(PROJECT_SETTINGS_KEY) or {} project_anatomy = settings_value.get(PROJECT_ANATOMY_KEY) or {} - save_project_settings(self.project_name, project_settings) - save_project_anatomy(self.project_name, project_anatomy) + warnings = [] + try: + save_project_settings(self.project_name, project_settings) + except SaveWarningExc as exc: + warnings.extend(exc.warnings) + + try: + save_project_anatomy(self.project_name, project_anatomy) + except 
SaveWarningExc as exc: + warnings.extend(exc.warnings) + + if warnings: + raise SaveWarningExc(warnings) def _validate_defaults_to_save(self, value): """Valiations of default values before save.""" diff --git a/openpype/settings/entities/schemas/projects_schema/schema_main.json b/openpype/settings/entities/schemas/projects_schema/schema_main.json index 6bc158aa60..b4666b302a 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_main.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_main.json @@ -78,6 +78,10 @@ "type": "schema", "name": "schema_project_hiero" }, + { + "type": "schema", + "name": "schema_project_aftereffects" + }, { "type": "schema", "name": "schema_project_harmony" diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_aftereffects.json b/openpype/settings/entities/schemas/projects_schema/schema_project_aftereffects.json new file mode 100644 index 0000000000..63bf9274a3 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_aftereffects.json @@ -0,0 +1,90 @@ +{ + "type": "dict", + "collapsible": true, + "key": "aftereffects", + "label": "AfterEffects", + "is_file": true, + "children": [ + { + "type": "dict", + "collapsible": true, + "key": "publish", + "label": "Publish plugins", + "children": [ + { + "type": "dict", + "collapsible": true, + "key": "ValidateSceneSettings", + "label": "Validate Scene Settings", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, + { + "type": "boolean", + "key": "active", + "label": "Active" + }, + { + "type": "label", + "label": "Validate if FPS and Resolution match shot data" + }, + { + "type": "list", + "key": "skip_resolution_check", + "object_type": "text", + "label": "Skip Resolution Check for Tasks" + }, + { + "type": "list", + "key": "skip_timelines_check", + "object_type": "text", + 
"label": "Skip Timeline Check for Tasks" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "AfterEffectsSubmitDeadline", + "label": "AfterEffects Submit to Deadline", + "children": [ + { + "type": "boolean", + "key": "use_published", + "label": "Use Published scene" + }, + { + "type": "number", + "key": "priority", + "label": "Priority" + }, + { + "type": "text", + "key": "primary_pool", + "label": "Primary Pool" + }, + { + "type": "text", + "key": "secondary_pool", + "label": "Secondary Pool" + }, + { + "type": "number", + "key": "chunk_size", + "label": "Frames Per Task" + } + ] + } + ] + } + ] +} diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json index f46221ba63..1346fb3dad 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json @@ -128,6 +128,11 @@ "key": "department", "label": "Department" }, + { + "type": "boolean", + "key": "use_gpu", + "label": "Use GPU" + }, { "type": "dict-modifiable", "key": "limit_groups", diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json index a801175031..b1bb207578 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json @@ -14,13 +14,7 @@ "type": "dict", "key": "sync_to_avalon", "label": "Sync to avalon", - "checkbox_key": "enabled", "children": [ - { - "type": "boolean", - "key": "enabled", - "label": "Enabled" - }, { "type": "label", "label": "Allow name and hierarchy change only if following statuses are on all children tasks" diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_harmony.json 
b/openpype/settings/entities/schemas/projects_schema/schema_project_harmony.json index c4cdccff42..8b5d638cd8 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_harmony.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_harmony.json @@ -5,26 +5,6 @@ "label": "Harmony", "is_file": true, "children": [ - { - "type": "dict", - "collapsible": true, - "key": "general", - "label": "General", - "children": [ - { - "type": "list", - "key": "skip_resolution_check", - "object_type": "text", - "label": "Skip Resolution Check for Tasks" - }, - { - "type": "list", - "key": "skip_timelines_check", - "object_type": "text", - "label": "Skip Timeliene Check for Tasks" - } - ] - }, { "type": "dict", "collapsible": true, @@ -45,6 +25,52 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "key": "ValidateSceneSettings", + "label": "Validate Scene Settings", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, + { + "type": "boolean", + "key": "active", + "label": "Active" + }, + { + "type": "label", + "label": "Validate if FrameStart, FrameEnd and Resolution match shot data" + }, + { + "type": "list", + "key": "frame_check_filter", + "label": "Skip Frame check for Assets with", + "object_type": "text" + }, + { + "type": "list", + "key": "skip_resolution_check", + "object_type": "text", + "label": "Skip Resolution Check for Tasks" + }, + { + "type": "list", + "key": "skip_timelines_check", + "object_type": "text", + "label": "Skip Timeline Check for Tasks" + } + ] + }, { "type": "dict", "collapsible": true, @@ -59,7 +85,7 @@ { "type": "number", "key": "priority", - "label": "priority" + "label": "Priority" }, { "type": "text", @@ -74,7 +100,7 @@ { "type": "number", "key": "chunk_size", - "label": "Chunk Size" + "label": "Frames Per Task" } ] } diff --git 
a/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json b/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json index d2191a45a0..f717eff7dd 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json @@ -120,6 +120,45 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "key": "load", + "label": "Loader plugins", + "children": [ + { + "type": "dict", + "collapsible": true, + "key": "LoadClip", + "label": "Load Clip", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "list", + "key": "families", + "label": "Families", + "object_type": "text" + }, + { + "type": "list", + "key": "representations", + "label": "Representations", + "object_type": "text" + }, + { + "type": "text", + "key": "clip_name_template", + "label": "Clip name template" + } + ] + } + ] + }, { "type": "dict", "collapsible": true, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_syncserver.json b/openpype/settings/entities/schemas/projects_schema/schema_project_syncserver.json index ea1b8fc9da..9428ce2db0 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_syncserver.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_syncserver.json @@ -50,14 +50,10 @@ "type": "dict", "children": [ { - "type": "text", - "key": "provider", - "label": "Provider" - }, - { - "type": "text", + "type": "path", "key": "credentials_url", - "label": "Credentials url" + "label": "Credentials url", + "multiplatform": true }, { "type": "dict-modifiable", diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json index b48f90bd91..3c589f9492 100644 --- 
a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json @@ -118,6 +118,19 @@ "type": "dict", "label": "Nuke", "children": [ + { + "key": "viewer", + "type": "dict", + "label": "Viewer", + "collapsible": false, + "children": [ + { + "type": "text", + "key": "viewerProcess", + "label": "Viewer Process" + } + ] + }, { "key": "workfile", "type": "dict", @@ -234,8 +247,7 @@ "label": "Used in plugins", "object_type": { "type": "text", - "key": "pluginClass", - "label": "Plugin Class" + "key": "pluginClass" } }, { @@ -282,8 +294,7 @@ "label": "Used in plugins", "object_type": { "type": "text", - "key": "pluginClass", - "label": "Plugin Class" + "key": "pluginClass" } }, { diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json index 3c079a130d..0efe3b8fea 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json @@ -16,6 +16,17 @@ "type": "boolean", "key": "enabled", "label": "Enabled" + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" } ] }, @@ -101,30 +112,8 @@ "type": "text" }, { - "key": "tags", - "label": "Tags", - "type": "enum", - "multiselection": true, - "enum_items": [ - { - "burnin": "Add burnins" - }, - { - "ftrackreview": "Add to Ftrack" - }, - { - "delete": "Delete output" - }, - { - "slate-frame": "Add slate frame" - }, - { - "no-handles": "Skip handle frames" - }, - { - "sequence": "Output as image sequence" - } - ] + "type": "schema", + "name": "schema_representation_tags" }, { "key": "ffmpeg_args", @@ -203,6 +192,69 @@ "default": 0, "minimum": 0, "maximum": 
100000 + }, + { + "key": "letter_box", + "label": "Letter box", + "type": "dict", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled", + "default": false + }, + { + "key": "ratio", + "label": "Ratio", + "type": "number", + "decimal": 4, + "default": 0, + "minimum": 0, + "maximum": 10000 + }, + { + "key": "state", + "label": "Type", + "type": "enum", + "enum_items": [ + { + "letterbox": "Letterbox" + }, + { + "pillar": "Pillar" + } + ] + }, + { + "type": "schema_template", + "name": "template_rgba_color", + "template_data": [ + { + "label": "Fill Color", + "name": "fill_color" + } + ] + }, + { + "key": "line_thickness", + "label": "Line Thickness", + "type": "number", + "minimum": 0, + "maximum": 1000 + }, + { + "type": "schema_template", + "name": "template_rgba_color", + "template_data": [ + { + "label": "Line Color", + "name": "line_color" + } + ] + } + ] } ] } @@ -238,20 +290,24 @@ "minimum": 0 }, { - "type": "number", - "key": "opacity", - "label": "Font opacity", - "decimal": 2, - "maximum": 1, - "minimum": 0 + "type": "schema_template", + "name": "template_rgba_color", + "template_data": [ + { + "label": "Font Color", + "name": "font_color" + } + ] }, { - "type": "number", - "key": "bg_opacity", - "label": "Background opacity", - "decimal": 2, - "maximum": 1, - "minimum": 0 + "type": "schema_template", + "name": "template_rgba_color", + "template_data": [ + { + "label": "Background Color", + "name": "bg_color" + } + ] }, { "type": "number", @@ -267,6 +323,13 @@ "type": "number", "key": "bg_padding", "label": "Padding aroung text" + }, + { + "type": "path", + "key": "font_filepath", + "label": "Font file path", + "multipath": false, + "multiplatform": true } ] }, @@ -333,6 +396,24 @@ "key": "BOTTOM_RIGHT", "label": "BottomRight", "type": "text" + }, + { + "key": "filter", + "label": "Additional filtering", + "type": "dict", + "highlight_content": true, + "children": [ + { + "key": "families", + 
"label": "Families", + "type": "list", + "object_type": "text" + }, + { + "type": "schema", + "name": "schema_representation_tags" + } + ] } ] } @@ -350,9 +431,80 @@ "is_group": true, "children": [ { - "type": "raw-json", + "type": "list", "key": "template_name_profiles", - "label": "template_name_profiles" + "label": "Template name profiles", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "label", + "label": "" + }, + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + }, + { + "key": "tasks", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "template_name", + "label": "Template name" + } + ] + } + }, + { + "type": "list", + "key": "subset_grouping_profiles", + "label": "Subset grouping profiles", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "label", + "label": "Set all published instances as a part of specific group named according to 'Template'.
Implemented all variants of placeholders [{task},{family},{host},{subset},{renderlayer}]" + }, + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + }, + { + "key": "hosts", + "label": "Hosts", + "type": "list", + "object_type": "text" + }, + { + "key": "tasks", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "template", + "label": "Template" + } + ] + } } ] }, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json index 9d2e39edde..4cabf5bb74 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json @@ -72,6 +72,56 @@ } ] }, + + { + "type": "dict", + "collapsible": true, + "key": "ValidateRenderSettings", + "label": "ValidateRenderSettings", + "children": [ + { + "type": "dict-modifiable", + "store_as_list": true, + "key": "arnold_render_attributes", + "label": "Arnold Render Attributes", + "use_label_wrap": true, + "object_type": { + "type": "text" + } + }, + { + "type": "dict-modifiable", + "store_as_list": true, + "key": "vray_render_attributes", + "label": "Vray Render Attributes", + "use_label_wrap": true, + "object_type": { + "type": "text" + } + }, + { + "type": "dict-modifiable", + "store_as_list": true, + "key": "redshift_render_attributes", + "label": "Redshift Render Attributes", + "use_label_wrap": true, + "object_type": { + "type": "text" + } + }, + { + "type": "dict-modifiable", + "store_as_list": true, + "key": "renderman_render_attributes", + "label": "Renderman Render Attributes", + "use_label_wrap": true, + "object_type": { + "type": "text" + } + } + ] + }, + { "type": "collapsible-wrap", "label": "Model", @@ -283,34 +333,6 @@ "is_list": true } ] - }, - { - "type": "dict", 
- "collapsible": true, - "key": "MayaSubmitDeadline", - "label": "Submit maya job to deadline", - "checkbox_key": "enabled", - "children": [ - { - "type": "boolean", - "key": "enabled", - "label": "Enabled" - }, - { - "type": "enum", - "key": "tile_assembler_plugin", - "label": "Tile Assembler Plugin", - "multiselection": false, - "enum_items": [ - { - "DraftTileAssembler": "Draft Tile Assembler" - }, - { - "oiio": "Open Image IO" - } - ] - } - ] } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json index 0e3770ac78..087e6c13a9 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json @@ -146,35 +146,6 @@ "label": "Viewer LUT raw" } ] - }, - { - "type": "dict", - "collapsible": true, - "key": "NukeSubmitDeadline", - "label": "NukeSubmitDeadline", - "is_group": true, - "children": [ - { - "type": "number", - "key": "deadline_priority", - "label": "deadline_priority" - }, - { - "type": "text", - "key": "deadline_pool", - "label": "deadline_pool" - }, - { - "type": "text", - "key": "deadline_pool_secondary", - "label": "deadline_pool_secondary" - }, - { - "type": "number", - "key": "deadline_chunk_size", - "label": "deadline_chunk_size" - } - ] } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_publish_gui_filter.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_publish_gui_filter.json index 1539bd0738..f27ca9586f 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_publish_gui_filter.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_publish_gui_filter.json @@ -4,7 +4,6 @@ "key": "filters", "label": "Publish GUI Filters", "object_type": { - "type": "raw-json", - "label": "Plugins" + "type": "raw-json" 
} } diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json new file mode 100644 index 0000000000..b65de747e5 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json @@ -0,0 +1,26 @@ +{ + "key": "tags", + "label": "Tags", + "type": "enum", + "multiselection": true, + "enum_items": [ + { + "burnin": "Add burnins" + }, + { + "ftrackreview": "Add to Ftrack" + }, + { + "delete": "Delete output" + }, + { + "slate-frame": "Add slate frame" + }, + { + "no-handles": "Skip handle frames" + }, + { + "sequence": "Output as image sequence" + } + ] +} diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/template_rgba_color.json b/openpype/settings/entities/schemas/projects_schema/schemas/template_rgba_color.json new file mode 100644 index 0000000000..ffe530175a --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/template_rgba_color.json @@ -0,0 +1,33 @@ +[ + { + "type": "list-strict", + "key": "{name}", + "label": "{label}", + "object_types": [ + { + "label": "R", + "type": "number", + "minimum": 0, + "maximum": 255 + }, + { + "label": "G", + "type": "number", + "minimum": 0, + "maximum": 255 + }, + { + "label": "B", + "type": "number", + "minimum": 0, + "maximum": 255 + }, + { + "label": "A", + "type": "number", + "minimum": 0, + "maximum": 255 + } + ] + } +] diff --git a/openpype/settings/entities/schemas/system_schema/host_settings/schema_blender.json b/openpype/settings/entities/schemas/system_schema/host_settings/schema_blender.json index e6e7381e9f..0a6c8ca035 100644 --- a/openpype/settings/entities/schemas/system_schema/host_settings/schema_blender.json +++ b/openpype/settings/entities/schemas/system_schema/host_settings/schema_blender.json @@ -23,12 +23,10 @@ "type": "dict-modifiable", "key": "variants", "collapsible_key": true, - 
"dynamic_label": false, "use_label_wrap": false, "object_type": { "type": "dict", "collapsible": true, - "checkbox_key": "enabled", "children": [ { "type": "schema_template", diff --git a/openpype/settings/entities/schemas/system_schema/host_settings/schema_djv.json b/openpype/settings/entities/schemas/system_schema/host_settings/schema_djv.json index a95cedf7c3..d09f038892 100644 --- a/openpype/settings/entities/schemas/system_schema/host_settings/schema_djv.json +++ b/openpype/settings/entities/schemas/system_schema/host_settings/schema_djv.json @@ -23,12 +23,10 @@ "type": "dict-modifiable", "key": "variants", "collapsible_key": true, - "dynamic_label": false, "use_label_wrap": false, "object_type": { "type": "dict", "collapsible": true, - "checkbox_key": "enabled", "children": [ { "type": "schema_template", diff --git a/openpype/settings/entities/schemas/system_schema/host_settings/schema_houdini.json b/openpype/settings/entities/schemas/system_schema/host_settings/schema_houdini.json index 22a5b2e737..5390b9b78f 100644 --- a/openpype/settings/entities/schemas/system_schema/host_settings/schema_houdini.json +++ b/openpype/settings/entities/schemas/system_schema/host_settings/schema_houdini.json @@ -23,12 +23,10 @@ "type": "dict-modifiable", "key": "variants", "collapsible_key": true, - "dynamic_label": false, "use_label_wrap": false, "object_type": { "type": "dict", "collapsible": true, - "checkbox_key": "enabled", "children": [ { "type": "schema_template", diff --git a/openpype/settings/entities/schemas/system_schema/host_settings/schema_maya.json b/openpype/settings/entities/schemas/system_schema/host_settings/schema_maya.json index 7c33671fa7..97854a3945 100644 --- a/openpype/settings/entities/schemas/system_schema/host_settings/schema_maya.json +++ b/openpype/settings/entities/schemas/system_schema/host_settings/schema_maya.json @@ -23,12 +23,10 @@ "type": "dict-modifiable", "key": "variants", "collapsible_key": true, - "dynamic_label": false, 
"use_label_wrap": false, "object_type": { "type": "dict", "collapsible": true, - "checkbox_key": "enabled", "children": [ { "type": "schema_template", diff --git a/openpype/settings/entities/schemas/system_schema/host_settings/schema_resolve.json b/openpype/settings/entities/schemas/system_schema/host_settings/schema_resolve.json index ab2b86bf87..8524c92e86 100644 --- a/openpype/settings/entities/schemas/system_schema/host_settings/schema_resolve.json +++ b/openpype/settings/entities/schemas/system_schema/host_settings/schema_resolve.json @@ -28,8 +28,8 @@ "name": "template_host_variant", "template_data": [ { - "app_variant_label": "16", - "app_variant": "16" + "app_variant_label": "stable", + "app_variant": "stable" } ] } diff --git a/openpype/settings/entities/schemas/system_schema/host_settings/schema_shell.json b/openpype/settings/entities/schemas/system_schema/host_settings/schema_shell.json index e344f98594..986f83a9fc 100644 --- a/openpype/settings/entities/schemas/system_schema/host_settings/schema_shell.json +++ b/openpype/settings/entities/schemas/system_schema/host_settings/schema_shell.json @@ -19,12 +19,10 @@ "type": "dict-modifiable", "key": "variants", "collapsible_key": true, - "dynamic_label": false, "use_label_wrap": false, "object_type": { "type": "dict", "collapsible": true, - "checkbox_key": "enabled", "children": [ { "type": "schema_template", diff --git a/openpype/settings/entities/schemas/system_schema/host_settings/schema_tvpaint.json b/openpype/settings/entities/schemas/system_schema/host_settings/schema_tvpaint.json index eac09be113..c39e6f7a30 100644 --- a/openpype/settings/entities/schemas/system_schema/host_settings/schema_tvpaint.json +++ b/openpype/settings/entities/schemas/system_schema/host_settings/schema_tvpaint.json @@ -23,12 +23,10 @@ "type": "dict-modifiable", "key": "variants", "collapsible_key": true, - "dynamic_label": false, "use_label_wrap": false, "object_type": { "type": "dict", "collapsible": true, - "checkbox_key": 
"enabled", "children": [ { "type": "schema_template", diff --git a/openpype/settings/entities/schemas/system_schema/host_settings/schema_unreal.json b/openpype/settings/entities/schemas/system_schema/host_settings/schema_unreal.json index c5096197d6..df5ec0e6fa 100644 --- a/openpype/settings/entities/schemas/system_schema/host_settings/schema_unreal.json +++ b/openpype/settings/entities/schemas/system_schema/host_settings/schema_unreal.json @@ -23,12 +23,10 @@ "type": "dict-modifiable", "key": "variants", "collapsible_key": true, - "dynamic_label": false, "use_label_wrap": false, "object_type": { "type": "dict", "collapsible": true, - "checkbox_key": "enabled", "children": [ { "type": "schema_template", diff --git a/openpype/settings/entities/schemas/system_schema/host_settings/template_host_variant_items.json b/openpype/settings/entities/schemas/system_schema/host_settings/template_host_variant_items.json index 472840d8fc..ab4d2374a3 100644 --- a/openpype/settings/entities/schemas/system_schema/host_settings/template_host_variant_items.json +++ b/openpype/settings/entities/schemas/system_schema/host_settings/template_host_variant_items.json @@ -10,7 +10,8 @@ "key": "executables", "label": "Executables", "multiplatform": true, - "multipath": true + "multipath": true, + "placeholder": "Executable path" }, { "type":"separator" diff --git a/openpype/settings/entities/schemas/system_schema/host_settings/template_nuke.json b/openpype/settings/entities/schemas/system_schema/host_settings/template_nuke.json index 3f25c7d72f..22398ba227 100644 --- a/openpype/settings/entities/schemas/system_schema/host_settings/template_nuke.json +++ b/openpype/settings/entities/schemas/system_schema/host_settings/template_nuke.json @@ -24,12 +24,10 @@ "type": "dict-modifiable", "key": "variants", "collapsible_key": true, - "dynamic_label": false, "use_label_wrap": false, "object_type": { "type": "dict", "collapsible": true, - "checkbox_key": "enabled", "children": [ { "type": 
"schema_template", diff --git a/openpype/settings/entities/schemas/system_schema/schema_general.json b/openpype/settings/entities/schemas/system_schema/schema_general.json index fd650b4a1e..568ccad5b9 100644 --- a/openpype/settings/entities/schemas/system_schema/schema_general.json +++ b/openpype/settings/entities/schemas/system_schema/schema_general.json @@ -18,6 +18,18 @@ { "type": "splitter" }, + { + "type": "label", + "label": "This is NOT a securely stored password!. It only acts as a simple barrier to stop users from accessing studio wide settings." + }, + { + "type": "text", + "key": "admin_password", + "label": "Admin password" + }, + { + "type": "splitter" + }, { "key": "environment", "label": "Environment", diff --git a/openpype/settings/entities/schemas/system_schema/schema_modules.json b/openpype/settings/entities/schemas/system_schema/schema_modules.json index a30cafd0c2..d1b498bb86 100644 --- a/openpype/settings/entities/schemas/system_schema/schema_modules.json +++ b/openpype/settings/entities/schemas/system_schema/schema_modules.json @@ -85,11 +85,32 @@ "label": "Site Sync", "collapsible": true, "checkbox_key": "enabled", - "children": [{ - "type": "boolean", - "key": "enabled", - "label": "Enabled" - }] + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "dict-modifiable", + "collapsible": true, + "key": "sites", + "label": "Sites", + "collapsible_key": false, + "is_file": true, + "object_type": + { + "type": "dict", + "children": [ + { + "type": "providers-enum", + "key": "provider", + "label": "Provider" + } + ] + } + } + ] },{ "type": "dict", "key": "deadline", @@ -154,20 +175,6 @@ } ] }, - { - "type": "dict", - "key": "user", - "label": "User setting", - "collapsible": true, - "checkbox_key": "enabled", - "children": [ - { - "type": "boolean", - "key": "enabled", - "label": "Enabled" - } - ] - }, { "type": "dict", "key": "standalonepublish_tool", diff --git a/openpype/settings/exceptions.py 
b/openpype/settings/exceptions.py new file mode 100644 index 0000000000..a06138eeaf --- /dev/null +++ b/openpype/settings/exceptions.py @@ -0,0 +1,11 @@ +class SaveSettingsValidation(Exception): + pass + + +class SaveWarningExc(SaveSettingsValidation): + def __init__(self, warnings): + if isinstance(warnings, str): + warnings = [warnings] + self.warnings = warnings + msg = " | ".join(warnings) + super(SaveWarningExc, self).__init__(msg) diff --git a/openpype/settings/handlers.py b/openpype/settings/handlers.py index b3e1b1b1e1..65ec7291d3 100644 --- a/openpype/settings/handlers.py +++ b/openpype/settings/handlers.py @@ -12,7 +12,8 @@ from .constants import ( SYSTEM_SETTINGS_KEY, PROJECT_SETTINGS_KEY, PROJECT_ANATOMY_KEY, - LOCAL_SETTING_KEY + LOCAL_SETTING_KEY, + M_OVERRIDEN_KEY ) from .lib import load_json_file @@ -167,6 +168,7 @@ class CacheValues: class MongoSettingsHandler(SettingsHandler): """Settings handler that use mongo for storing and loading of settings.""" + global_general_keys = ("openpype_path", "admin_password") def __init__(self): # Get mongo connection @@ -225,13 +227,106 @@ class MongoSettingsHandler(SettingsHandler): self._prepare_project_settings_keys() return self._attribute_keys - def _prepare_global_settings(self, data): + def _extract_global_settings(self, data): + """Extract global settings data from system settings overrides. + + This is now limited to "general" key in system settings which must be + set as group in schemas. + + Returns: + dict: Global settings extracted from system settings data. 
+ """ output = {} - # Add "openpype_path" key to global settings if is set - if "general" in data and "openpype_path" in data["general"]: - output["openpype_path"] = data["general"]["openpype_path"] + if "general" not in data: + return output + + general_data = data["general"] + + # Add predefined keys to global settings if are set + for key in self.global_general_keys: + if key not in general_data: + continue + # Pop key from values + output[key] = general_data.pop(key) + # Pop key from overriden metadata + if ( + M_OVERRIDEN_KEY in general_data + and key in general_data[M_OVERRIDEN_KEY] + ): + general_data[M_OVERRIDEN_KEY].remove(key) return output + def _apply_global_settings( + self, system_settings_document, globals_document + ): + """Apply global settings data to system settings. + + Applification is skipped if document with global settings is not + available or does not have set data in. + + System settings document is "faked" like it exists if global document + has set values. + + Args: + system_settings_document (dict): System settings document from + MongoDB. + globals_document (dict): Global settings document from MongoDB. + + Returns: + Merged document which has applied global settings data. 
+ """ + # Skip if globals document is not available + if ( + not globals_document + or "data" not in globals_document + or not globals_document["data"] + ): + return system_settings_document + + globals_data = globals_document["data"] + # Check if data contain any key from predefined keys + any_key_found = False + if globals_data: + for key in self.global_general_keys: + if key in globals_data: + any_key_found = True + break + + # Skip if any key from predefined key was not found in globals + if not any_key_found: + return system_settings_document + + # "Fake" system settings document if document does not exist + # - global settings document may exist but system settings not yet + if not system_settings_document: + system_settings_document = {} + + if "data" in system_settings_document: + system_settings_data = system_settings_document["data"] + else: + system_settings_data = {} + system_settings_document["data"] = system_settings_data + + if "general" in system_settings_data: + system_general = system_settings_data["general"] + else: + system_general = {} + system_settings_data["general"] = system_general + + overriden_keys = system_general.get(M_OVERRIDEN_KEY) or [] + for key in self.global_general_keys: + if key not in globals_data: + continue + + system_general[key] = globals_data[key] + if key not in overriden_keys: + overriden_keys.append(key) + + if overriden_keys: + system_general[M_OVERRIDEN_KEY] = overriden_keys + + return system_settings_document + def save_studio_settings(self, data): """Save studio overrides of system settings. @@ -243,23 +338,29 @@ class MongoSettingsHandler(SettingsHandler): Args: data(dict): Data of studio overrides with override metadata. 
""" - # Store system settings + # Update cache self.system_settings_cache.update_data(data) + + # Get copy of just updated cache + system_settings_data = self.system_settings_cache.data_copy() + + # Extract global settings from system settings + global_settings = self._extract_global_settings( + system_settings_data + ) + + # Store system settings self.collection.replace_one( { "type": SYSTEM_SETTINGS_KEY }, { "type": SYSTEM_SETTINGS_KEY, - "data": self.system_settings_cache.data + "data": system_settings_data }, upsert=True ) - # Get global settings from system settings - global_settings = self._prepare_global_settings( - self.system_settings_cache.data - ) # Store global settings self.collection.replace_one( { @@ -418,11 +519,27 @@ class MongoSettingsHandler(SettingsHandler): def get_studio_system_settings_overrides(self): """Studio overrides of system settings.""" if self.system_settings_cache.is_outdated: - document = self.collection.find_one({ - "type": SYSTEM_SETTINGS_KEY + system_settings_document = None + globals_document = None + docs = self.collection.find({ + # Use `$or` as system settings may have more filters in future + "$or": [ + {"type": GLOBAL_SETTINGS_KEY}, + {"type": SYSTEM_SETTINGS_KEY}, + ] }) + for doc in docs: + doc_type = doc["type"] + if doc_type == GLOBAL_SETTINGS_KEY: + globals_document = doc + elif doc_type == SYSTEM_SETTINGS_KEY: + system_settings_document = doc - self.system_settings_cache.update_from_document(document) + merged_document = self._apply_global_settings( + system_settings_document, globals_document + ) + + self.system_settings_cache.update_from_document(merged_document) return self.system_settings_cache.data_copy() def _get_project_settings_overrides(self, project_name): diff --git a/openpype/settings/lib.py b/openpype/settings/lib.py index 3bf2141808..f61166fa69 100644 --- a/openpype/settings/lib.py +++ b/openpype/settings/lib.py @@ -4,6 +4,9 @@ import functools import logging import platform import copy +from 
.exceptions import ( + SaveWarningExc +) from .constants import ( M_OVERRIDEN_KEY, M_ENVIRONMENT_KEY, @@ -101,8 +104,14 @@ def save_studio_settings(data): For saving of data cares registered Settings handler. + Warning messages are not logged as module raising them should log it within + it's logger. + Args: data(dict): Overrides data with metadata defying studio overrides. + + Raises: + SaveWarningExc: If any module raises the exception. """ # Notify Pype modules from openpype.modules import ModulesManager, ISettingsChangeListener @@ -110,15 +119,25 @@ def save_studio_settings(data): old_data = get_system_settings() default_values = get_default_settings()[SYSTEM_SETTINGS_KEY] new_data = apply_overrides(default_values, copy.deepcopy(data)) + new_data_with_metadata = copy.deepcopy(new_data) clear_metadata_from_settings(new_data) changes = calculate_changes(old_data, new_data) modules_manager = ModulesManager(_system_settings=new_data) + + warnings = [] for module in modules_manager.get_enabled_modules(): if isinstance(module, ISettingsChangeListener): - module.on_system_settings_save(old_data, new_data, changes) + try: + module.on_system_settings_save( + old_data, new_data, changes, new_data_with_metadata + ) + except SaveWarningExc as exc: + warnings.extend(exc.warnings) - return _SETTINGS_HANDLER.save_studio_settings(data) + _SETTINGS_HANDLER.save_studio_settings(data) + if warnings: + raise SaveWarningExc(warnings) @require_handler @@ -130,10 +149,16 @@ def save_project_settings(project_name, overrides): For saving of data cares registered Settings handler. + Warning messages are not logged as module raising them should log it within + it's logger. + Args: project_name (str): Project name for which overrides are passed. Default project's value is None. overrides(dict): Overrides data with metadata defying studio overrides. + + Raises: + SaveWarningExc: If any module raises the exception. 
""" # Notify Pype modules from openpype.modules import ModulesManager, ISettingsChangeListener @@ -151,17 +176,29 @@ def save_project_settings(project_name, overrides): old_data = get_default_project_settings(exclude_locals=True) new_data = apply_overrides(default_values, copy.deepcopy(overrides)) + new_data_with_metadata = copy.deepcopy(new_data) clear_metadata_from_settings(new_data) changes = calculate_changes(old_data, new_data) modules_manager = ModulesManager() + warnings = [] for module in modules_manager.get_enabled_modules(): if isinstance(module, ISettingsChangeListener): - module.on_project_settings_save( - old_data, new_data, project_name, changes - ) + try: + module.on_project_settings_save( + old_data, + new_data, + project_name, + changes, + new_data_with_metadata + ) + except SaveWarningExc as exc: + warnings.extend(exc.warnings) - return _SETTINGS_HANDLER.save_project_settings(project_name, overrides) + _SETTINGS_HANDLER.save_project_settings(project_name, overrides) + + if warnings: + raise SaveWarningExc(warnings) @require_handler @@ -173,10 +210,16 @@ def save_project_anatomy(project_name, anatomy_data): For saving of data cares registered Settings handler. + Warning messages are not logged as module raising them should log it within + it's logger. + Args: project_name (str): Project name for which overrides are passed. Default project's value is None. overrides(dict): Overrides data with metadata defying studio overrides. + + Raises: + SaveWarningExc: If any module raises the exception. 
""" # Notify Pype modules from openpype.modules import ModulesManager, ISettingsChangeListener @@ -194,17 +237,29 @@ def save_project_anatomy(project_name, anatomy_data): old_data = get_default_anatomy_settings(exclude_locals=True) new_data = apply_overrides(default_values, copy.deepcopy(anatomy_data)) + new_data_with_metadata = copy.deepcopy(new_data) clear_metadata_from_settings(new_data) changes = calculate_changes(old_data, new_data) modules_manager = ModulesManager() + warnings = [] for module in modules_manager.get_enabled_modules(): if isinstance(module, ISettingsChangeListener): - module.on_project_anatomy_save( - old_data, new_data, changes, project_name - ) + try: + module.on_project_anatomy_save( + old_data, + new_data, + changes, + project_name, + new_data_with_metadata + ) + except SaveWarningExc as exc: + warnings.extend(exc.warnings) - return _SETTINGS_HANDLER.save_project_anatomy(project_name, anatomy_data) + _SETTINGS_HANDLER.save_project_anatomy(project_name, anatomy_data) + + if warnings: + raise SaveWarningExc(warnings) @require_handler diff --git a/openpype/tools/launcher/actions.py b/openpype/tools/launcher/actions.py index 6261fe91ca..72c7aece72 100644 --- a/openpype/tools/launcher/actions.py +++ b/openpype/tools/launcher/actions.py @@ -1,7 +1,7 @@ import os -import importlib -from avalon import api, lib, style +from avalon import api, style +from openpype import PLUGINS_DIR from openpype.api import Logger, resources from openpype.lib import ( ApplictionExecutableNotFound, @@ -10,81 +10,6 @@ from openpype.lib import ( from Qt import QtWidgets, QtGui -class ProjectManagerAction(api.Action): - name = "projectmanager" - label = "Project Manager" - icon = "gear" - order = 999 # at the end - - def is_compatible(self, session): - return "AVALON_PROJECT" in session - - def process(self, session, **kwargs): - return lib.launch( - executable="python", - args=[ - "-u", "-m", "avalon.tools.projectmanager", - session['AVALON_PROJECT'] - ] - ) - - -class 
LoaderAction(api.Action): - name = "loader" - label = "Loader" - icon = "cloud-download" - order = 998 - - def is_compatible(self, session): - return "AVALON_PROJECT" in session - - def process(self, session, **kwargs): - return lib.launch( - executable="python", - args=[ - "-u", "-m", "avalon.tools.loader", session['AVALON_PROJECT'] - ] - ) - - -class LoaderLibrary(api.Action): - name = "loader_os" - label = "Library Loader" - icon = "book" - order = 997 # at the end - - def is_compatible(self, session): - return True - - def process(self, session, **kwargs): - return lib.launch( - executable="python", - args=["-u", "-m", "avalon.tools.libraryloader"] - ) - - -def register_default_actions(): - """Register default actions for Launcher""" - api.register_plugin(api.Action, ProjectManagerAction) - api.register_plugin(api.Action, LoaderAction) - api.register_plugin(api.Action, LoaderLibrary) - - -def register_config_actions(): - """Register actions from the configuration for Launcher""" - - module_name = os.environ["AVALON_CONFIG"] - config = importlib.import_module(module_name) - if not hasattr(config, "register_launcher_actions"): - print( - "Current configuration `%s` has no 'register_launcher_actions'" - % config.__name__ - ) - return - - config.register_launcher_actions() - - def register_actions_from_paths(paths): if not paths: return @@ -106,6 +31,13 @@ def register_actions_from_paths(paths): api.register_plugin_path(api.Action, path) +def register_config_actions(): + """Register actions from the configuration for Launcher""" + + actions_dir = os.path.join(PLUGINS_DIR, "actions") + register_actions_from_paths([actions_dir]) + + def register_environment_actions(): """Register actions from AVALON_ACTIONS for Launcher.""" diff --git a/openpype/tools/mayalookassigner/app.py b/openpype/tools/mayalookassigner/app.py index 09782ea6ac..81aa841eb7 100644 --- a/openpype/tools/mayalookassigner/app.py +++ b/openpype/tools/mayalookassigner/app.py @@ -15,6 +15,8 @@ import 
maya.api.OpenMaya as om from . import widgets from . import commands +from . vray_proxies import vrayproxy_assign_look + module = sys.modules[__name__] module.window = None @@ -211,9 +213,17 @@ class App(QtWidgets.QWidget): subset_name, asset)) + self.echo("Getting vray proxy nodes ...") + vray_proxies = set(cmds.ls(type="VRayProxy")) + nodes = set(item["nodes"]).difference(vray_proxies) + # Assign look - assign_look_by_version(nodes=item["nodes"], - version_id=version["_id"]) + if nodes: + assign_look_by_version([nodes], version_id=version["_id"]) + + if vray_proxies: + for vp in vray_proxies: + vrayproxy_assign_look(vp, subset_name) end = time.time() diff --git a/openpype/tools/mayalookassigner/commands.py b/openpype/tools/mayalookassigner/commands.py index 98eb3d37b7..2add5d3499 100644 --- a/openpype/tools/mayalookassigner/commands.py +++ b/openpype/tools/mayalookassigner/commands.py @@ -8,6 +8,9 @@ from openpype.hosts.maya.api import lib from avalon import io, api + +import vray_proxies + log = logging.getLogger(__name__) @@ -65,9 +68,7 @@ def get_selected_nodes(): selection = cmds.ls(selection=True, long=True) hierarchy = list_descendents(selection) - nodes = list(set(selection + hierarchy)) - - return nodes + return list(set(selection + hierarchy)) def get_all_asset_nodes(): @@ -132,6 +133,21 @@ def create_items_from_nodes(nodes): asset_view_items = [] id_hashes = create_asset_id_hash(nodes) + + # get ids from alembic + vray_proxy_nodes = cmds.ls(nodes, type="VRayProxy") + for vp in vray_proxy_nodes: + path = cmds.getAttr("{}.fileName".format(vp)) + ids = vray_proxies.get_alembic_ids_cache(path) + parent_id = {} + for k, _ in ids.items(): + pid = k.split(":")[0] + if not parent_id.get(pid): + parent_id.update({pid: [vp]}) + + print("Adding ids from alembic {}".format(path)) + id_hashes.update(parent_id) + if not id_hashes: return asset_view_items @@ -172,7 +188,7 @@ def remove_unused_looks(): host = api.registered_host() - unused = list() + unused = [] for 
container in host.ls(): if container['loader'] == "LookLoader": members = cmds.sets(container['objectName'], query=True) diff --git a/openpype/tools/mayalookassigner/vray_proxies.py b/openpype/tools/mayalookassigner/vray_proxies.py new file mode 100644 index 0000000000..d2f345e628 --- /dev/null +++ b/openpype/tools/mayalookassigner/vray_proxies.py @@ -0,0 +1,312 @@ +# -*- coding: utf-8 -*- +"""Tools for loading looks to vray proxies.""" +import os +from collections import defaultdict +import logging +import json + +import six + +import alembic.Abc +from maya import cmds + +import avalon.io as io +import avalon.maya +import avalon.api as api + +import openpype.hosts.maya.api.lib as lib + + +log = logging.getLogger(__name__) + + +def get_alembic_paths_by_property(filename, attr, verbose=False): + # type: (str, str, bool) -> dict + """Return attribute value per objects in the Alembic file. + + Reads an Alembic archive hierarchy and retrieves the + value from the `attr` properties on the objects. + + Args: + filename (str): Full path to Alembic archive to read. + attr (str): Id attribute. + verbose (bool): Whether to verbosely log missing attributes. + + Returns: + dict: Mapping of node full path with its id + + """ + # Normalize alembic path + filename = os.path.normpath(filename) + filename = filename.replace("\\", "/") + filename = str(filename) # path must be string + + archive = alembic.Abc.IArchive(filename) + root = archive.getTop() + + iterator = list(root.children) + obj_ids = {} + + for obj in iterator: + name = obj.getFullName() + + # include children for coming iterations + iterator.extend(obj.children) + + props = obj.getProperties() + if props.getNumProperties() == 0: + # Skip those without properties, e.g. 
'/materials' in a gpuCache + continue + + # THe custom attribute is under the properties' first container under + # the ".arbGeomParams" + prop = props.getProperty(0) # get base property + + _property = None + try: + geo_params = prop.getProperty('.arbGeomParams') + _property = geo_params.getProperty(attr) + except KeyError: + if verbose: + log.debug("Missing attr on: {0}".format(name)) + continue + + if not _property.isConstant(): + log.warning("Id not constant on: {0}".format(name)) + + # Get first value sample + value = _property.getValue()[0] + + obj_ids[name] = value + + return obj_ids + + +def get_alembic_ids_cache(path): + # type: (str) -> dict + """Build a id to node mapping in Alembic file. + + Nodes without IDs are ignored. + + Returns: + dict: Mapping of id to nodes in the Alembic. + + """ + node_ids = get_alembic_paths_by_property(path, attr="cbId") + id_nodes = defaultdict(list) + for node, _id in six.iteritems(node_ids): + id_nodes[_id].append(node) + + return dict(six.iteritems(id_nodes)) + + +def assign_vrayproxy_shaders(vrayproxy, assignments): + # type: (str, dict) -> None + """Assign shaders to content of Vray Proxy. + + This will create shader overrides on Vray Proxy to assign shaders to its + content. + + Todo: + Allow to optimize and assign a single shader to multiple shapes at + once or maybe even set it to the highest available path? + + Args: + vrayproxy (str): Name of Vray Proxy + assignments (dict): Mapping of shader assignments. 
+ + Returns: + None + + """ + # Clear all current shader assignments + plug = vrayproxy + ".shaders" + num = cmds.getAttr(plug, size=True) + for i in reversed(range(num)): + cmds.removeMultiInstance("{}[{}]".format(plug, i), b=True) + + # Create new assignment overrides + index = 0 + for material, paths in assignments.items(): + for path in paths: + plug = "{}.shaders[{}]".format(vrayproxy, index) + cmds.setAttr(plug + ".shadersNames", path, type="string") + cmds.connectAttr(material + ".outColor", + plug + ".shadersConnections", force=True) + index += 1 + + +def get_look_relationships(version_id): + # type: (str) -> dict + """Get relations for the look. + + Args: + version_id (str): Parent version Id. + + Returns: + dict: Dictionary of relations. + + """ + json_representation = io.find_one({"type": "representation", + "parent": version_id, + "name": "json"}) + + # Load relationships + shader_relation = api.get_representation_path(json_representation) + with open(shader_relation, "r") as f: + relationships = json.load(f) + + return relationships + + +def load_look(version_id): + # type: (str) -> list + """Load look from version. + + Get look from version and invoke Loader for it. + + Args: + version_id (str): Version ID + + Returns: + list of shader nodes. + + """ + # Get representations of shader file and relationships + look_representation = io.find_one({"type": "representation", + "parent": version_id, + "name": "ma"}) + + # See if representation is already loaded, if so reuse it. 
+ host = api.registered_host() + representation_id = str(look_representation['_id']) + for container in host.ls(): + if (container['loader'] == "LookLoader" and + container['representation'] == representation_id): + log.info("Reusing loaded look ...") + container_node = container['objectName'] + break + else: + log.info("Using look for the first time ...") + + # Load file + loaders = api.loaders_from_representation(api.discover(api.Loader), + representation_id) + loader = next( + (i for i in loaders if i.__name__ == "LookLoader"), None) + if loader is None: + raise RuntimeError("Could not find LookLoader, this is a bug") + + # Reference the look file + with avalon.maya.maintained_selection(): + container_node = api.load(loader, look_representation) + + # Get container members + shader_nodes = cmds.sets(container_node, query=True) + return shader_nodes + + +def get_latest_version(asset_id, subset): + # type: (str, str) -> dict + """Get latest version of subset. + + Args: + asset_id (str): Asset ID + subset (str): Subset name. + + Returns: + Latest version + + Throws: + RuntimeError: When subset or version doesn't exist. + + """ + subset = io.find_one({"name": subset, + "parent": io.ObjectId(asset_id), + "type": "subset"}) + if not subset: + raise RuntimeError("Subset does not exist: %s" % subset) + + version = io.find_one({"type": "version", + "parent": subset["_id"]}, + sort=[("name", -1)]) + if not version: + raise RuntimeError("Version does not exist.") + + return version + + +def vrayproxy_assign_look(vrayproxy, subset="lookDefault"): + # type: (str, str) -> None + """Assign look to vray proxy. + + Args: + vrayproxy (str): Name of vrayproxy to apply look to. + subset (str): Name of look subset. 
+ + Returns: + None + + """ + path = cmds.getAttr(vrayproxy + ".fileName") + + nodes_by_id = get_alembic_ids_cache(path) + if not nodes_by_id: + log.warning("Alembic file has no cbId attributes: %s" % path) + return + + # Group by asset id so we run over the look per asset + node_ids_by_asset_id = defaultdict(set) + for node_id in nodes_by_id: + asset_id = node_id.split(":", 1)[0] + node_ids_by_asset_id[asset_id].add(node_id) + + for asset_id, node_ids in node_ids_by_asset_id.items(): + + # Get latest look version + try: + version = get_latest_version(asset_id, subset=subset) + except RuntimeError as exc: + print(exc) + continue + + relationships = get_look_relationships(version["_id"]) + shadernodes = load_look(version["_id"]) + + # Get only the node ids and paths related to this asset + # And get the shader edits the look supplies + asset_nodes_by_id = { + node_id: nodes_by_id[node_id] for node_id in node_ids + } + edits = list( + lib.iter_shader_edits( + relationships, shadernodes, asset_nodes_by_id)) + + # Create assignments + assignments = {} + for edit in edits: + if edit["action"] == "assign": + nodes = edit["nodes"] + shader = edit["shader"] + if not cmds.ls(shader, type="shadingEngine"): + print("Skipping non-shader: %s" % shader) + continue + + inputs = cmds.listConnections( + shader + ".surfaceShader", source=True) + if not inputs: + print("Shading engine missing material: %s" % shader) + + # Strip off component assignments + for i, node in enumerate(nodes): + if "." in node: + log.warning( + ("Converting face assignment to full object " + "assignment. 
This conversion can be lossy: " + "{}").format(node)) + nodes[i] = node.split(".")[0] + + material = inputs[0] + assignments[material] = nodes + + assign_vrayproxy_shaders(vrayproxy, assignments) diff --git a/openpype/tools/mayalookassigner/widgets.py b/openpype/tools/mayalookassigner/widgets.py index bfa8492e69..2dab266af9 100644 --- a/openpype/tools/mayalookassigner/widgets.py +++ b/openpype/tools/mayalookassigner/widgets.py @@ -122,7 +122,7 @@ class AssetOutliner(QtWidgets.QWidget): # Collect the asset item entries per asset # and collect the namespaces we'd like to apply - assets = dict() + assets = {} asset_namespaces = defaultdict(set) for item in items: asset_id = str(item["asset"]["_id"]) diff --git a/openpype/tools/settings/__init__.py b/openpype/tools/settings/__init__.py index 3f47d1c2c3..8f60276cc4 100644 --- a/openpype/tools/settings/__init__.py +++ b/openpype/tools/settings/__init__.py @@ -1,5 +1,11 @@ import sys from Qt import QtWidgets, QtGui +from .lib import ( + is_password_required, + BTN_FIXED_SIZE, + CHILD_OFFSET +) +from .widgets import PasswordDialog from .local_settings import LocalSettingsWindow from .settings import ( style, @@ -24,13 +30,18 @@ def main(user_role=None): widget = MainWidget(user_role) widget.show() - widget.reset() sys.exit(app.exec_()) __all__ = ( + "is_password_required", + "BTN_FIXED_SIZE", + "CHILD_OFFSET", + "style", + + "PasswordDialog", "MainWidget", "ProjectListWidget", "LocalSettingsWindow", diff --git a/openpype/tools/settings/lib.py b/openpype/tools/settings/lib.py new file mode 100644 index 0000000000..4b48746a18 --- /dev/null +++ b/openpype/tools/settings/lib.py @@ -0,0 +1,20 @@ +CHILD_OFFSET = 15 +BTN_FIXED_SIZE = 20 + + +def is_password_required(): + from openpype.settings import ( + get_system_settings, + get_local_settings + ) + + system_settings = get_system_settings() + password = system_settings["general"].get("admin_password") + if not password: + return False + + local_settings = get_local_settings() + 
is_admin = local_settings.get("general", {}).get("is_admin", False) + if is_admin: + return False + return True diff --git a/openpype/tools/settings/local_settings/apps_widget.py b/openpype/tools/settings/local_settings/apps_widget.py index 2e12c010d1..5f4e5dd1c5 100644 --- a/openpype/tools/settings/local_settings/apps_widget.py +++ b/openpype/tools/settings/local_settings/apps_widget.py @@ -4,7 +4,7 @@ from .widgets import ( Separator, ExpandingWidget ) -from .constants import CHILD_OFFSET +from openpype.tools.settings import CHILD_OFFSET class AppVariantWidget(QtWidgets.QWidget): diff --git a/openpype/tools/settings/local_settings/constants.py b/openpype/tools/settings/local_settings/constants.py index 83c45afba8..7a8774467f 100644 --- a/openpype/tools/settings/local_settings/constants.py +++ b/openpype/tools/settings/local_settings/constants.py @@ -14,8 +14,6 @@ LOCAL_APPS_KEY = "applications" # Roots key constant LOCAL_ROOTS_KEY = "roots" -# Child offset in expandable widget -CHILD_OFFSET = 15 __all__ = ( "LABEL_REMOVE_DEFAULT", diff --git a/openpype/tools/settings/local_settings/general_widget.py b/openpype/tools/settings/local_settings/general_widget.py index e820d8ab8b..d01c16ff82 100644 --- a/openpype/tools/settings/local_settings/general_widget.py +++ b/openpype/tools/settings/local_settings/general_widget.py @@ -1,20 +1,86 @@ -from Qt import QtWidgets +import getpass + +from Qt import QtWidgets, QtCore +from openpype.tools.settings import ( + is_password_required, + PasswordDialog +) class LocalGeneralWidgets(QtWidgets.QWidget): def __init__(self, parent): super(LocalGeneralWidgets, self).__init__(parent) + self._loading_local_settings = False + + username_input = QtWidgets.QLineEdit(self) + username_input.setPlaceholderText(getpass.getuser()) + + is_admin_input = QtWidgets.QCheckBox(self) + + layout = QtWidgets.QFormLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + + layout.addRow("OpenPype Username", username_input) + layout.addRow("Admin 
permissions", is_admin_input) + + is_admin_input.stateChanged.connect(self._on_admin_check_change) + + self.username_input = username_input + self.is_admin_input = is_admin_input def update_local_settings(self, value): - return + self._loading_local_settings = True - # RETURNING EARLY TO HIDE WIDGET WITHOUT CONTENT + username = "" + is_admin = False + if value: + username = value.get("username", username) + is_admin = value.get("is_admin", is_admin) + + self.username_input.setText(username) + + if self.is_admin_input.isChecked() != is_admin: + # Use state as `stateChanged` is connected to callbacks + if is_admin: + state = QtCore.Qt.Checked + else: + state = QtCore.Qt.Unchecked + self.is_admin_input.setCheckState(state) + + self._loading_local_settings = False + + def _on_admin_check_change(self): + if self._loading_local_settings: + return + + if not self.is_admin_input.isChecked(): + return + + if not is_password_required(): + return + + dialog = PasswordDialog(self, False) + dialog.setModal(True) + dialog.exec_() + result = dialog.result() + if self.is_admin_input.isChecked() != result: + # Use state as `stateChanged` is connected to callbacks + if result: + state = QtCore.Qt.Checked + else: + state = QtCore.Qt.Unchecked + self.is_admin_input.setCheckState(state) def settings_value(self): # Add changed # If these have changed then output = {} - # TEMPORARILY EMPTY AS THERE IS NOTHING TO PUT HERE + username = self.username_input.text() + if username: + output["username"] = username + is_admin = self.is_admin_input.isChecked() + if is_admin: + output["is_admin"] = is_admin return output diff --git a/openpype/tools/settings/local_settings/widgets.py b/openpype/tools/settings/local_settings/widgets.py index a262188906..b164f1b407 100644 --- a/openpype/tools/settings/local_settings/widgets.py +++ b/openpype/tools/settings/local_settings/widgets.py @@ -1,5 +1,5 @@ from Qt import QtWidgets, QtCore -from openpype.tools.settings.settings.widgets.widgets import ( +from 
openpype.tools.settings.settings.widgets import ( ExpandingWidget, SpacerWidget ) diff --git a/openpype/tools/settings/local_settings/window.py b/openpype/tools/settings/local_settings/window.py index a12a2289b5..69562d0b1f 100644 --- a/openpype/tools/settings/local_settings/window.py +++ b/openpype/tools/settings/local_settings/window.py @@ -7,6 +7,7 @@ from openpype.settings.lib import ( get_local_settings, save_local_settings ) +from openpype.tools.settings import CHILD_OFFSET from openpype.api import ( SystemSettings, ProjectSettings @@ -23,7 +24,6 @@ from .apps_widget import LocalApplicationsWidgets from .projects_widget import ProjectSettingsWidget from .constants import ( - CHILD_OFFSET, LOCAL_GENERAL_KEY, LOCAL_PROJECTS_KEY, LOCAL_APPS_KEY @@ -80,7 +80,6 @@ class LocalSettingsWidget(QtWidgets.QWidget): general_widget = LocalGeneralWidgets(general_content) general_layout.addWidget(general_widget) - general_expand_widget.hide() self.main_layout.addWidget(general_expand_widget) @@ -127,9 +126,9 @@ class LocalSettingsWidget(QtWidgets.QWidget): self.system_settings.reset() self.project_settings.reset() - # self.general_widget.update_local_settings( - # value.get(LOCAL_GENERAL_KEY) - # ) + self.general_widget.update_local_settings( + value.get(LOCAL_GENERAL_KEY) + ) self.app_widget.update_local_settings( value.get(LOCAL_APPS_KEY) ) @@ -139,9 +138,9 @@ class LocalSettingsWidget(QtWidgets.QWidget): def settings_value(self): output = {} - # general_value = self.general_widget.settings_value() - # if general_value: - # output[LOCAL_GENERAL_KEY] = general_value + general_value = self.general_widget.settings_value() + if general_value: + output[LOCAL_GENERAL_KEY] = general_value app_value = self.app_widget.settings_value() if app_value: @@ -157,6 +156,8 @@ class LocalSettingsWindow(QtWidgets.QWidget): def __init__(self, parent=None): super(LocalSettingsWindow, self).__init__(parent) + self._reset_on_show = True + self.resize(1000, 600) self.setWindowTitle("OpenPype 
Local settings") @@ -194,9 +195,14 @@ class LocalSettingsWindow(QtWidgets.QWidget): self.reset_btn = reset_btn self.save_btn = save_btn - self.reset() + def showEvent(self, event): + super(LocalSettingsWindow, self).showEvent(event) + if self._reset_on_show: + self.reset() def reset(self): + if self._reset_on_show: + self._reset_on_show = False value = get_local_settings() self.settings_widget.update_local_settings(value) diff --git a/openpype/tools/settings/resources/__init__.py b/openpype/tools/settings/resources/__init__.py new file mode 100644 index 0000000000..83ce1a286f --- /dev/null +++ b/openpype/tools/settings/resources/__init__.py @@ -0,0 +1,8 @@ +import os + + +RESOURCES_DIR = os.path.dirname(os.path.abspath(__file__)) + + +def get_resource(*args): + return os.path.normpath(os.path.join(RESOURCES_DIR, *args)) diff --git a/openpype/tools/settings/resources/images/eye.png b/openpype/tools/settings/resources/images/eye.png new file mode 100644 index 0000000000..5a683e2974 Binary files /dev/null and b/openpype/tools/settings/resources/images/eye.png differ diff --git a/openpype/tools/settings/settings/__init__.py b/openpype/tools/settings/settings/__init__.py index 3c12a73639..6b4cf94357 100644 --- a/openpype/tools/settings/settings/__init__.py +++ b/openpype/tools/settings/settings/__init__.py @@ -1,8 +1,6 @@ from . 
import style -from .widgets import ( - MainWidget, - ProjectListWidget -) +from .window import MainWidget +from .widgets import ProjectListWidget __all__ = ( diff --git a/openpype/tools/settings/settings/widgets/base.py b/openpype/tools/settings/settings/base.py similarity index 96% rename from openpype/tools/settings/settings/widgets/base.py rename to openpype/tools/settings/settings/base.py index 4010b8ab20..03f920b7dc 100644 --- a/openpype/tools/settings/settings/widgets/base.py +++ b/openpype/tools/settings/settings/base.py @@ -1,5 +1,5 @@ from Qt import QtWidgets, QtGui, QtCore -from .lib import CHILD_OFFSET +from openpype.tools.settings import CHILD_OFFSET from .widgets import ExpandingWidget @@ -71,7 +71,7 @@ class BaseWidget(QtWidgets.QWidget): def _discard_changes_action(self, menu, actions_mapping): # TODO use better condition as unsaved changes may be caused due to # changes in schema. - if not self.entity.can_discard_changes: + if not self.entity.can_trigger_discard_changes: return def discard_changes(): @@ -86,7 +86,7 @@ class BaseWidget(QtWidgets.QWidget): def _add_to_studio_default(self, menu, actions_mapping): """Set values as studio overrides.""" # Skip if not in studio overrides - if not self.entity.can_add_to_studio_default: + if not self.entity.can_trigger_add_to_studio_default: return action = QtWidgets.QAction("Add to studio default") @@ -94,7 +94,7 @@ class BaseWidget(QtWidgets.QWidget): menu.addAction(action) def _remove_from_studio_default_action(self, menu, actions_mapping): - if not self.entity.can_remove_from_studio_default: + if not self.entity.can_trigger_remove_from_studio_default: return def remove_from_studio_default(): @@ -106,7 +106,7 @@ class BaseWidget(QtWidgets.QWidget): menu.addAction(action) def _add_to_project_override_action(self, menu, actions_mapping): - if not self.entity.can_add_to_project_override: + if not self.entity.can_trigger_add_to_project_override: return action = QtWidgets.QAction("Add to project project 
override") @@ -114,7 +114,7 @@ class BaseWidget(QtWidgets.QWidget): menu.addAction(action) def _remove_from_project_override_action(self, menu, actions_mapping): - if not self.entity.can_remove_from_project_override: + if not self.entity.can_trigger_remove_from_project_override: return def remove_from_project_override(): diff --git a/openpype/tools/settings/settings/widgets/categories.py b/openpype/tools/settings/settings/categories.py similarity index 96% rename from openpype/tools/settings/settings/widgets/categories.py rename to openpype/tools/settings/settings/categories.py index 9d286485a3..4762aa4b6b 100644 --- a/openpype/tools/settings/settings/widgets/categories.py +++ b/openpype/tools/settings/settings/categories.py @@ -27,11 +27,9 @@ from openpype.settings.entities import ( SchemaError ) -from openpype.settings.lib import get_system_settings +from openpype.settings import SaveWarningExc from .widgets import ProjectListWidget -from . import lib - from .base import GUIWidget from .list_item_widget import ListWidget from .list_strict_widget import ListStrictWidget @@ -272,6 +270,22 @@ class SettingsCategoryWidget(QtWidgets.QWidget): # not required. self.reset() + except SaveWarningExc as exc: + warnings = [ + "Settings were saved but few issues happened." + ] + for item in exc.warnings: + warnings.append(item.replace("\n", "
")) + + msg = "

".join(warnings) + + dialog = QtWidgets.QMessageBox(self) + dialog.setText(msg) + dialog.setIcon(QtWidgets.QMessageBox.Warning) + dialog.exec_() + + self.reset() + except Exception as exc: formatted_traceback = traceback.format_exception(*sys.exc_info()) dialog = QtWidgets.QMessageBox(self) @@ -302,9 +316,14 @@ class SettingsCategoryWidget(QtWidgets.QWidget): "`create_root_entity` method not implemented" ) + def _on_reset_start(self): + return + def reset(self): self.set_state(CategoryState.Working) + self._on_reset_start() + self.input_fields = [] while self.content_layout.count() != 0: @@ -469,7 +488,6 @@ class ProjectWidget(SettingsCategoryWidget): def ui_tweaks(self): project_list_widget = ProjectListWidget(self) - project_list_widget.refresh() self.main_layout.insertWidget(0, project_list_widget, 0) @@ -485,6 +503,9 @@ class ProjectWidget(SettingsCategoryWidget): if self is saved_tab_widget: return + def _on_reset_start(self): + self.project_list_widget.refresh() + def _on_reset_crash(self): self.project_list_widget.setEnabled(False) super(ProjectWidget, self)._on_reset_crash() diff --git a/openpype/tools/settings/settings/widgets/dict_mutable_widget.py b/openpype/tools/settings/settings/dict_mutable_widget.py similarity index 94% rename from openpype/tools/settings/settings/widgets/dict_mutable_widget.py rename to openpype/tools/settings/settings/dict_mutable_widget.py index 9bea89c0d6..df6525e86a 100644 --- a/openpype/tools/settings/settings/widgets/dict_mutable_widget.py +++ b/openpype/tools/settings/settings/dict_mutable_widget.py @@ -8,7 +8,7 @@ from .widgets import ( IconButton, SpacerWidget ) -from .lib import ( +from openpype.tools.settings import ( BTN_FIXED_SIZE, CHILD_OFFSET ) @@ -32,13 +32,15 @@ def create_remove_btn(parent): class ModifiableDictEmptyItem(QtWidgets.QWidget): - def __init__(self, entity_widget, parent): + def __init__(self, entity_widget, store_as_list, parent): super(ModifiableDictEmptyItem, self).__init__(parent) 
self.entity_widget = entity_widget self.collapsible_key = entity_widget.entity.collapsible_key + self.ignore_input_changes = entity_widget.ignore_input_changes + self.store_as_list = store_as_list self.is_duplicated = False - self.key_is_valid = False + self.key_is_valid = store_as_list if self.collapsible_key: self.create_collapsible_ui() @@ -100,7 +102,12 @@ class ModifiableDictEmptyItem(QtWidgets.QWidget): def _on_key_change(self): key = self.key_input.text() - self.key_is_valid = KEY_REGEX.match(key) + if not self.store_as_list: + self.key_is_valid = KEY_REGEX.match(key) + + if self.ignore_input_changes: + return + self.is_duplicated = self.entity_widget.is_key_duplicated(key) key_input_state = "" # Collapsible key and empty key are not invalid @@ -156,9 +163,11 @@ class ModifiableDictEmptyItem(QtWidgets.QWidget): class ModifiableDictItem(QtWidgets.QWidget): - def __init__(self, collapsible_key, entity, entity_widget): + def __init__(self, collapsible_key, store_as_list, entity, entity_widget): super(ModifiableDictItem, self).__init__(entity_widget.content_widget) + self.store_as_list = store_as_list + self.collapsible_key = collapsible_key self.entity = entity self.entity_widget = entity_widget @@ -166,7 +175,7 @@ class ModifiableDictItem(QtWidgets.QWidget): self.ignore_input_changes = entity_widget.ignore_input_changes self.is_key_duplicated = False - self.key_is_valid = False + self.key_is_valid = store_as_list self.is_required = False self.origin_key = None @@ -355,6 +364,7 @@ class ModifiableDictItem(QtWidgets.QWidget): def set_label(self, label): if self.key_label_input and label is not None: self.key_label_input.setText(label) + self.update_key_label() def set_as_required(self, key): self.key_input.setText(key) @@ -386,13 +396,21 @@ class ModifiableDictItem(QtWidgets.QWidget): self.set_edit_mode(False) def _on_key_label_change(self): + if self.ignore_input_changes: + return + label = self.key_label_value() self.entity_widget.change_label(label, self) 
self.update_key_label() def _on_key_change(self): key = self.key_value() - self.key_is_valid = KEY_REGEX.match(key) + if not self.store_as_list: + self.key_is_valid = KEY_REGEX.match(key) + + if self.ignore_input_changes: + return + is_key_duplicated = self.entity_widget.validate_key_duplication( self.temp_key, key, self ) @@ -422,7 +440,7 @@ class ModifiableDictItem(QtWidgets.QWidget): self.wrapper_widget.label_widget.setText(label) def on_add_clicked(self): - widget = self.entity_widget.add_new_key(None, None, self) + widget = self.entity_widget.add_new_key(None, None) widget.key_input.setFocus(True) def on_edit_pressed(self): @@ -594,7 +612,7 @@ class DictMutableKeysWidget(BaseWidget): self.add_required_keys() self.empty_row = ModifiableDictEmptyItem( - self, self.content_widget + self, self.entity.store_as_list, self.content_widget ) self.content_layout.addWidget(self.empty_row) @@ -621,7 +639,7 @@ class DictMutableKeysWidget(BaseWidget): # TODO implement pass - def add_new_key(self, key, label=None, after_widget=None): + def add_new_key(self, key, label=None): uuid_key = None entity_key = key if not key: @@ -641,7 +659,7 @@ class DictMutableKeysWidget(BaseWidget): # Backup solution (for testing) if input_field is None: - input_field = self.add_widget_for_child(child_entity, after_widget) + input_field = self.add_widget_for_child(child_entity) if key: # Happens when created from collapsible key items where key @@ -719,29 +737,17 @@ class DictMutableKeysWidget(BaseWidget): return self.entity.set_child_label(entity, label) - def add_widget_for_child( - self, child_entity, after_widget=None, first=False - ): - if first: - new_widget_index = 0 - else: - new_widget_index = len(self.input_fields) - - if self.input_fields and not first: - if not after_widget: - after_widget = self.input_fields[-1] - - for idx in range(self.content_layout.count()): - item = self.content_layout.itemAt(idx) - if item.widget() is after_widget: - new_widget_index = idx + 1 - break - + def 
add_widget_for_child(self, child_entity): input_field = ModifiableDictItem( - self.entity.collapsible_key, child_entity, self + self.entity.collapsible_key, self.entity.store_as_list, + child_entity, self ) self.input_fields.append(input_field) + + new_widget_index = self.content_layout.count() - 1 + self.content_layout.insertWidget(new_widget_index, input_field) + return input_field def remove_row(self, widget): @@ -810,21 +816,15 @@ class DictMutableKeysWidget(BaseWidget): for key, child_entity in self.entity.items(): found = False - previous_input = None for input_field in self.input_fields: - if input_field.entity is not child_entity: - previous_input = input_field - else: + if input_field.entity is child_entity: found = True break if not found: changed = True - args = [previous_input] - if previous_input is None: - args.append(True) - _input_field = self.add_widget_for_child(child_entity, *args) + _input_field = self.add_widget_for_child(child_entity) _input_field.origin_key = key _input_field.set_key(key) if self.entity.collapsible_key: @@ -855,9 +855,8 @@ class DictMutableKeysWidget(BaseWidget): if keys_order: last_required = keys_order[-1] for key in self.entity.keys(): - if key in keys_order: - continue - keys_order.append(key) + if key not in keys_order: + keys_order.append(key) for key in keys_order: child_entity = self.entity[key] diff --git a/openpype/tools/settings/settings/widgets/item_widgets.py b/openpype/tools/settings/settings/item_widgets.py similarity index 99% rename from openpype/tools/settings/settings/widgets/item_widgets.py rename to openpype/tools/settings/settings/item_widgets.py index 6045b05227..11ccb60ae4 100644 --- a/openpype/tools/settings/settings/widgets/item_widgets.py +++ b/openpype/tools/settings/settings/item_widgets.py @@ -19,7 +19,7 @@ from .base import ( BaseWidget, InputWidget ) -from .lib import CHILD_OFFSET +from openpype.tools.settings import CHILD_OFFSET class DictImmutableKeysWidget(BaseWidget): @@ -631,7 +631,9 @@ 
class PathWidget(BaseWidget): class PathInputWidget(InputWidget): def _add_inputs_to_layout(self): self.input_field = QtWidgets.QLineEdit(self.content_widget) - self.input_field.setPlaceholderText("Executable path") + placeholder = self.entity.placeholder_text + if placeholder: + self.input_field.setPlaceholderText(placeholder) self.setFocusProxy(self.input_field) self.content_layout.addWidget(self.input_field) diff --git a/openpype/tools/settings/settings/widgets/list_item_widget.py b/openpype/tools/settings/settings/list_item_widget.py similarity index 99% rename from openpype/tools/settings/settings/widgets/list_item_widget.py rename to openpype/tools/settings/settings/list_item_widget.py index 699669abab..e1990d0bf6 100644 --- a/openpype/tools/settings/settings/widgets/list_item_widget.py +++ b/openpype/tools/settings/settings/list_item_widget.py @@ -2,7 +2,7 @@ from Qt import QtWidgets, QtCore from .base import InputWidget from .widgets import ExpandingWidget -from .lib import ( +from openpype.tools.settings import ( BTN_FIXED_SIZE, CHILD_OFFSET ) diff --git a/openpype/tools/settings/settings/widgets/list_strict_widget.py b/openpype/tools/settings/settings/list_strict_widget.py similarity index 100% rename from openpype/tools/settings/settings/widgets/list_strict_widget.py rename to openpype/tools/settings/settings/list_strict_widget.py diff --git a/openpype/tools/settings/settings/widgets/multiselection_combobox.py b/openpype/tools/settings/settings/multiselection_combobox.py similarity index 100% rename from openpype/tools/settings/settings/widgets/multiselection_combobox.py rename to openpype/tools/settings/settings/multiselection_combobox.py diff --git a/openpype/tools/settings/settings/widgets/tests.py b/openpype/tools/settings/settings/tests.py similarity index 100% rename from openpype/tools/settings/settings/widgets/tests.py rename to openpype/tools/settings/settings/tests.py diff --git a/openpype/tools/settings/settings/widgets/widgets.py 
b/openpype/tools/settings/settings/widgets.py similarity index 98% rename from openpype/tools/settings/settings/widgets/widgets.py rename to openpype/tools/settings/settings/widgets.py index aa79cc4b62..249b4e305d 100644 --- a/openpype/tools/settings/settings/widgets/widgets.py +++ b/openpype/tools/settings/settings/widgets.py @@ -661,8 +661,14 @@ class ProjectListWidget(QtWidgets.QWidget): self.current_project = None if self.dbcon: - for project_name in self.dbcon.database.collection_names(): - items.append(project_name) + database = self.dbcon.database + for project_name in database.collection_names(): + project_doc = database[project_name].find_one( + {"type": "project"}, + {"name": 1} + ) + if project_doc: + items.append(project_doc["name"]) for item in items: model.appendRow(QtGui.QStandardItem(item)) diff --git a/openpype/tools/settings/settings/widgets/__init__.py b/openpype/tools/settings/settings/widgets/__init__.py deleted file mode 100644 index c9fec16f6e..0000000000 --- a/openpype/tools/settings/settings/widgets/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .window import MainWidget -from .widgets import ProjectListWidget - - -__all__ = [ - "MainWidget", - "ProjectListWidget" -] diff --git a/openpype/tools/settings/settings/widgets/lib.py b/openpype/tools/settings/settings/widgets/lib.py deleted file mode 100644 index aeca943617..0000000000 --- a/openpype/tools/settings/settings/widgets/lib.py +++ /dev/null @@ -1,601 +0,0 @@ -import os -import re -import json -import copy -from openpype.settings.constants import ( - M_OVERRIDEN_KEY, - M_ENVIRONMENT_KEY, - M_DYNAMIC_KEY_LABEL -) -from queue import Queue - - -# Singleton database of available inputs -class TypeToKlass: - types = {} - - -NOT_SET = type("NOT_SET", (), {"__bool__": lambda obj: False})() -METADATA_KEY = type("METADATA_KEY", (), {})() -OVERRIDE_VERSION = 1 -CHILD_OFFSET = 15 -BTN_FIXED_SIZE = 20 - -key_pattern = re.compile(r"(\{.*?[^{0]*\})") - - -def convert_gui_data_with_metadata(data, 
ignored_keys=None): - if not data or not isinstance(data, dict): - return data - - if ignored_keys is None: - ignored_keys = tuple() - - output = {} - if METADATA_KEY in data: - metadata = data.pop(METADATA_KEY) - for key, value in metadata.items(): - if key in ignored_keys or key == "groups": - continue - - if key == "environments": - output[M_ENVIRONMENT_KEY] = value - elif key == "dynamic_key_label": - output[M_DYNAMIC_KEY_LABEL] = value - else: - raise KeyError("Unknown metadata key \"{}\"".format(key)) - - for key, value in data.items(): - output[key] = convert_gui_data_with_metadata(value, ignored_keys) - return output - - -def convert_data_to_gui_data(data, first=True): - if not data or not isinstance(data, dict): - return data - - output = {} - if M_ENVIRONMENT_KEY in data: - data.pop(M_ENVIRONMENT_KEY) - - if M_DYNAMIC_KEY_LABEL in data: - if METADATA_KEY not in data: - data[METADATA_KEY] = {} - data[METADATA_KEY]["dynamic_key_label"] = data.pop(M_DYNAMIC_KEY_LABEL) - - for key, value in data.items(): - output[key] = convert_data_to_gui_data(value, False) - - return output - - -def convert_gui_data_to_overrides(data, first=True): - if not data or not isinstance(data, dict): - return data - - output = {} - if first: - output["__override_version__"] = OVERRIDE_VERSION - data = convert_gui_data_with_metadata(data) - - if METADATA_KEY in data: - metadata = data.pop(METADATA_KEY) - for key, value in metadata.items(): - if key == "groups": - output[M_OVERRIDEN_KEY] = value - else: - raise KeyError("Unknown metadata key \"{}\"".format(key)) - - for key, value in data.items(): - output[key] = convert_gui_data_to_overrides(value, False) - return output - - -def convert_overrides_to_gui_data(data, first=True): - if not data or not isinstance(data, dict): - return data - - if first: - data = convert_data_to_gui_data(data) - - output = {} - if M_OVERRIDEN_KEY in data: - groups = data.pop(M_OVERRIDEN_KEY) - if METADATA_KEY not in output: - output[METADATA_KEY] = {} - 
output[METADATA_KEY]["groups"] = groups - - for key, value in data.items(): - output[key] = convert_overrides_to_gui_data(value, False) - - return output - - -def _fill_schema_template_data( - template, template_data, required_keys=None, missing_keys=None -): - first = False - if required_keys is None: - first = True - required_keys = set() - missing_keys = set() - - _template = [] - default_values = {} - for item in template: - if isinstance(item, dict) and "__default_values__" in item: - default_values = item["__default_values__"] - else: - _template.append(item) - template = _template - - for key, value in default_values.items(): - if key not in template_data: - template_data[key] = value - - if not template: - output = template - - elif isinstance(template, list): - output = [] - for item in template: - output.append(_fill_schema_template_data( - item, template_data, required_keys, missing_keys - )) - - elif isinstance(template, dict): - output = {} - for key, value in template.items(): - output[key] = _fill_schema_template_data( - value, template_data, required_keys, missing_keys - ) - - elif isinstance(template, str): - # TODO find much better way how to handle filling template data - for replacement_string in key_pattern.findall(template): - key = str(replacement_string[1:-1]) - required_keys.add(key) - if key not in template_data: - missing_keys.add(key) - continue - - value = template_data[key] - if replacement_string == template: - # Replace the value with value from templates data - # - with this is possible to set value with different type - template = value - else: - # Only replace the key in string - template = template.replace(replacement_string, value) - output = template - - else: - output = template - - if first and missing_keys: - raise SchemaTemplateMissingKeys(missing_keys, required_keys) - - return output - - -def _fill_schema_template(child_data, schema_collection, schema_templates): - template_name = child_data["name"] - template = 
schema_templates.get(template_name) - if template is None: - if template_name in schema_collection: - raise KeyError(( - "Schema \"{}\" is used as `schema_template`" - ).format(template_name)) - raise KeyError("Schema template \"{}\" was not found".format( - template_name - )) - - # Default value must be dictionary (NOT list) - # - empty list would not add any item if `template_data` are not filled - template_data = child_data.get("template_data") or {} - if isinstance(template_data, dict): - template_data = [template_data] - - output = [] - for single_template_data in template_data: - try: - filled_child = _fill_schema_template_data( - template, single_template_data - ) - - except SchemaTemplateMissingKeys as exc: - raise SchemaTemplateMissingKeys( - exc.missing_keys, exc.required_keys, template_name - ) - - for item in filled_child: - filled_item = _fill_inner_schemas( - item, schema_collection, schema_templates - ) - if filled_item["type"] == "schema_template": - output.extend(_fill_schema_template( - filled_item, schema_collection, schema_templates - )) - else: - output.append(filled_item) - return output - - -def _fill_inner_schemas(schema_data, schema_collection, schema_templates): - if schema_data["type"] == "schema": - raise ValueError("First item in schema data can't be schema.") - - children_key = "children" - object_type_key = "object_type" - for item_key in (children_key, object_type_key): - children = schema_data.get(item_key) - if not children: - continue - - if object_type_key == item_key: - if not isinstance(children, dict): - continue - children = [children] - - new_children = [] - for child in children: - child_type = child["type"] - if child_type == "schema": - schema_name = child["name"] - if schema_name not in schema_collection: - if schema_name in schema_templates: - raise KeyError(( - "Schema template \"{}\" is used as `schema`" - ).format(schema_name)) - raise KeyError( - "Schema \"{}\" was not found".format(schema_name) - ) - - filled_child 
= _fill_inner_schemas( - schema_collection[schema_name], - schema_collection, - schema_templates - ) - - elif child_type == "schema_template": - for filled_child in _fill_schema_template( - child, schema_collection, schema_templates - ): - new_children.append(filled_child) - continue - - else: - filled_child = _fill_inner_schemas( - child, schema_collection, schema_templates - ) - - new_children.append(filled_child) - - if item_key == object_type_key: - if len(new_children) != 1: - raise KeyError(( - "Failed to fill object type with type: {} | name {}" - ).format( - child_type, str(child.get("name")) - )) - new_children = new_children[0] - - schema_data[item_key] = new_children - return schema_data - - -class SchemaTemplateMissingKeys(Exception): - def __init__(self, missing_keys, required_keys, template_name=None): - self.missing_keys = missing_keys - self.required_keys = required_keys - if template_name: - msg = f"Schema template \"{template_name}\" require more keys.\n" - else: - msg = "" - msg += "Required keys: {}\nMissing keys: {}".format( - self.join_keys(required_keys), - self.join_keys(missing_keys) - ) - super(SchemaTemplateMissingKeys, self).__init__(msg) - - def join_keys(self, keys): - return ", ".join([ - f"\"{key}\"" for key in keys - ]) - - -class SchemaMissingFileInfo(Exception): - def __init__(self, invalid): - full_path_keys = [] - for item in invalid: - full_path_keys.append("\"{}\"".format("/".join(item))) - - msg = ( - "Schema has missing definition of output file (\"is_file\" key)" - " for keys. [{}]" - ).format(", ".join(full_path_keys)) - super(SchemaMissingFileInfo, self).__init__(msg) - - -class SchemeGroupHierarchyBug(Exception): - def __init__(self, invalid): - full_path_keys = [] - for item in invalid: - full_path_keys.append("\"{}\"".format("/".join(item))) - - msg = ( - "Items with attribute \"is_group\" can't have another item with" - " \"is_group\" attribute as child. 
Error happened for keys: [{}]" - ).format(", ".join(full_path_keys)) - super(SchemeGroupHierarchyBug, self).__init__(msg) - - -class SchemaDuplicatedKeys(Exception): - def __init__(self, invalid): - items = [] - for key_path, keys in invalid.items(): - joined_keys = ", ".join([ - "\"{}\"".format(key) for key in keys - ]) - items.append("\"{}\" ({})".format(key_path, joined_keys)) - - msg = ( - "Schema items contain duplicated keys in one hierarchy level. {}" - ).format(" || ".join(items)) - super(SchemaDuplicatedKeys, self).__init__(msg) - - -class SchemaDuplicatedEnvGroupKeys(Exception): - def __init__(self, invalid): - items = [] - for key_path, keys in invalid.items(): - joined_keys = ", ".join([ - "\"{}\"".format(key) for key in keys - ]) - items.append("\"{}\" ({})".format(key_path, joined_keys)) - - msg = ( - "Schema items contain duplicated environment group keys. {}" - ).format(" || ".join(items)) - super(SchemaDuplicatedEnvGroupKeys, self).__init__(msg) - - -def file_keys_from_schema(schema_data): - output = [] - item_type = schema_data["type"] - klass = TypeToKlass.types[item_type] - if not klass.is_input_type: - return output - - keys = [] - key = schema_data.get("key") - if key: - keys.append(key) - - for child in schema_data["children"]: - if child.get("is_file"): - _keys = copy.deepcopy(keys) - _keys.append(child["key"]) - output.append(_keys) - continue - - for result in file_keys_from_schema(child): - _keys = copy.deepcopy(keys) - _keys.extend(result) - output.append(_keys) - return output - - -def validate_all_has_ending_file(schema_data, is_top=True): - item_type = schema_data["type"] - klass = TypeToKlass.types[item_type] - if not klass.is_input_type: - return None - - if schema_data.get("is_file"): - return None - - children = schema_data.get("children") - if not children: - return [[schema_data["key"]]] - - invalid = [] - keyless = "key" not in schema_data - for child in children: - result = validate_all_has_ending_file(child, False) - if 
result is None: - continue - - if keyless: - invalid.extend(result) - else: - for item in result: - new_invalid = [schema_data["key"]] - new_invalid.extend(item) - invalid.append(new_invalid) - - if not invalid: - return None - - if not is_top: - return invalid - - raise SchemaMissingFileInfo(invalid) - - -def validate_is_group_is_unique_in_hierarchy( - schema_data, any_parent_is_group=False, keys=None -): - is_top = keys is None - if keys is None: - keys = [] - - keyless = "key" not in schema_data - - if not keyless: - keys.append(schema_data["key"]) - - invalid = [] - is_group = schema_data.get("is_group") - if is_group and any_parent_is_group: - invalid.append(copy.deepcopy(keys)) - - if is_group: - any_parent_is_group = is_group - - children = schema_data.get("children") - if not children: - return invalid - - for child in children: - result = validate_is_group_is_unique_in_hierarchy( - child, any_parent_is_group, copy.deepcopy(keys) - ) - if not result: - continue - - invalid.extend(result) - - if invalid and is_group and keys not in invalid: - invalid.append(copy.deepcopy(keys)) - - if not is_top: - return invalid - - if invalid: - raise SchemeGroupHierarchyBug(invalid) - - -def validate_keys_are_unique(schema_data, keys=None): - children = schema_data.get("children") - if not children: - return - - is_top = keys is None - if keys is None: - keys = [schema_data["key"]] - else: - keys.append(schema_data["key"]) - - child_queue = Queue() - for child in children: - child_queue.put(child) - - child_inputs = [] - while not child_queue.empty(): - child = child_queue.get() - if "key" not in child: - _children = child.get("children") or [] - for _child in _children: - child_queue.put(_child) - else: - child_inputs.append(child) - - duplicated_keys = set() - child_keys = set() - for child in child_inputs: - key = child["key"] - if key in child_keys: - duplicated_keys.add(key) - else: - child_keys.add(key) - - invalid = {} - if duplicated_keys: - joined_keys = 
"/".join(keys) - invalid[joined_keys] = duplicated_keys - - for child in child_inputs: - result = validate_keys_are_unique(child, copy.deepcopy(keys)) - if result: - invalid.update(result) - - if not is_top: - return invalid - - if invalid: - raise SchemaDuplicatedKeys(invalid) - - -def validate_environment_groups_uniquenes( - schema_data, env_groups=None, keys=None -): - is_first = False - if env_groups is None: - is_first = True - env_groups = {} - keys = [] - - my_keys = copy.deepcopy(keys) - key = schema_data.get("key") - if key: - my_keys.append(key) - - env_group_key = schema_data.get("env_group_key") - if env_group_key: - if env_group_key not in env_groups: - env_groups[env_group_key] = [] - env_groups[env_group_key].append("/".join(my_keys)) - - children = schema_data.get("children") - if not children: - return - - for child in children: - validate_environment_groups_uniquenes( - child, env_groups, copy.deepcopy(my_keys) - ) - - if is_first: - invalid = {} - for env_group_key, key_paths in env_groups.items(): - if len(key_paths) > 1: - invalid[env_group_key] = key_paths - - if invalid: - raise SchemaDuplicatedEnvGroupKeys(invalid) - - -def validate_schema(schema_data): - validate_all_has_ending_file(schema_data) - validate_is_group_is_unique_in_hierarchy(schema_data) - validate_keys_are_unique(schema_data) - validate_environment_groups_uniquenes(schema_data) - - -def gui_schema(subfolder, main_schema_name): - subfolder, main_schema_name - dirpath = os.path.join( - os.path.dirname(os.path.dirname(__file__)), - "gui_schemas", - subfolder - ) - - loaded_schemas = {} - loaded_schema_templates = {} - for root, _, filenames in os.walk(dirpath): - for filename in filenames: - basename, ext = os.path.splitext(filename) - if ext != ".json": - continue - - filepath = os.path.join(root, filename) - with open(filepath, "r") as json_stream: - try: - schema_data = json.load(json_stream) - except Exception as exc: - raise Exception(( - f"Unable to parse JSON file 
{filepath}\n{exc}" - )) from exc - if isinstance(schema_data, list): - loaded_schema_templates[basename] = schema_data - else: - loaded_schemas[basename] = schema_data - - main_schema = _fill_inner_schemas( - loaded_schemas[main_schema_name], - loaded_schemas, - loaded_schema_templates - ) - validate_schema(main_schema) - return main_schema diff --git a/openpype/tools/settings/settings/widgets/window.py b/openpype/tools/settings/settings/window.py similarity index 60% rename from openpype/tools/settings/settings/widgets/window.py rename to openpype/tools/settings/settings/window.py index 96275facff..9b368588c3 100644 --- a/openpype/tools/settings/settings/widgets/window.py +++ b/openpype/tools/settings/settings/window.py @@ -1,11 +1,16 @@ -from Qt import QtWidgets, QtGui +from Qt import QtWidgets, QtGui, QtCore from .categories import ( CategoryState, SystemWidget, ProjectWidget ) from .widgets import ShadowWidget -from .. import style +from . import style + +from openpype.tools.settings import ( + is_password_required, + PasswordDialog +) class MainWidget(QtWidgets.QWidget): @@ -14,6 +19,12 @@ class MainWidget(QtWidgets.QWidget): def __init__(self, user_role, parent=None): super(MainWidget, self).__init__(parent) + + self._user_passed = False + self._reset_on_show = True + + self._password_dialog = None + self.setObjectName("MainWidget") self.setWindowTitle("OpenPype Settings") @@ -44,6 +55,7 @@ class MainWidget(QtWidgets.QWidget): self.setLayout(layout) self._shadow_widget = ShadowWidget("Working...", self) + self._shadow_widget.setVisible(False) for tab_widget in tab_widgets: tab_widget.saved.connect(self._on_tab_save) @@ -75,6 +87,48 @@ class MainWidget(QtWidgets.QWidget): if app: app.processEvents() + def showEvent(self, event): + super(MainWidget, self).showEvent(event) + if self._reset_on_show: + self.reset() + + def _show_password_dialog(self): + if self._password_dialog: + self._password_dialog.open() + + def _on_password_dialog_close(self, 
password_passed): + # Store result for future settings reset + self._user_passed = password_passed + # Remove reference to password dialog + self._password_dialog = None + if password_passed: + self.reset() + else: + self.close() + def reset(self): + if self._password_dialog: + return + + if not self._user_passed: + self._user_passed = not is_password_required() + + self._on_state_change() + + if not self._user_passed: + # Avoid doubled dialog + dialog = PasswordDialog(self) + dialog.setModal(True) + dialog.finished.connect(self._on_password_dialog_close) + + self._password_dialog = dialog + + QtCore.QTimer.singleShot(100, self._show_password_dialog) + + return + + if self._reset_on_show: + self._reset_on_show = False + for tab_widget in self.tab_widgets: tab_widget.reset() diff --git a/openpype/tools/settings/settings/widgets/wrapper_widgets.py b/openpype/tools/settings/settings/wrapper_widgets.py similarity index 98% rename from openpype/tools/settings/settings/widgets/wrapper_widgets.py rename to openpype/tools/settings/settings/wrapper_widgets.py index 9d5fdeb213..915a2cf875 100644 --- a/openpype/tools/settings/settings/widgets/wrapper_widgets.py +++ b/openpype/tools/settings/settings/wrapper_widgets.py @@ -5,7 +5,7 @@ from .widgets import ( ExpandingWidget, GridLabelWidget ) -from .lib import CHILD_OFFSET +from openpype.tools.settings import CHILD_OFFSET class WrapperWidget(QtWidgets.QWidget): diff --git a/openpype/tools/settings/widgets.py b/openpype/tools/settings/widgets.py new file mode 100644 index 0000000000..e2662f350f --- /dev/null +++ b/openpype/tools/settings/widgets.py @@ -0,0 +1,164 @@ +from Qt import QtWidgets, QtCore, QtGui + +from .resources import get_resource + +from openpype.api import get_system_settings +from openpype.settings.lib import ( + get_local_settings, + save_local_settings +) + + +class PressHoverButton(QtWidgets.QPushButton): + _mouse_pressed = False + _mouse_hovered = False + change_state = QtCore.Signal(bool) + + def 
mousePressEvent(self, event): + self._mouse_pressed = True + self._mouse_hovered = True + self.change_state.emit(self._mouse_hovered) + super(PressHoverButton, self).mousePressEvent(event) + + def mouseReleaseEvent(self, event): + self._mouse_pressed = False + self._mouse_hovered = False + self.change_state.emit(self._mouse_hovered) + super(PressHoverButton, self).mouseReleaseEvent(event) + + def mouseMoveEvent(self, event): + mouse_pos = self.mapFromGlobal(QtGui.QCursor.pos()) + under_mouse = self.rect().contains(mouse_pos) + if under_mouse != self._mouse_hovered: + self._mouse_hovered = under_mouse + self.change_state.emit(self._mouse_hovered) + + super(PressHoverButton, self).mouseMoveEvent(event) + + +class PasswordDialog(QtWidgets.QDialog): + """Stupidly simple dialog to compare password from general settings.""" + finished = QtCore.Signal(bool) + + def __init__(self, parent=None, allow_remember=True): + super(PasswordDialog, self).__init__(parent) + + self.setWindowTitle("Settings Password") + self.resize(300, 120) + + system_settings = get_system_settings() + + self._expected_result = ( + system_settings["general"].get("admin_password") + ) + self._final_result = None + self._allow_remember = allow_remember + + # Password input + password_widget = QtWidgets.QWidget(self) + + password_label = QtWidgets.QLabel("Password:", password_widget) + + password_input = QtWidgets.QLineEdit(password_widget) + password_input.setEchoMode(QtWidgets.QLineEdit.Password) + + show_password_icon_path = get_resource("images", "eye.png") + show_password_icon = QtGui.QIcon(show_password_icon_path) + show_password_btn = PressHoverButton(password_widget) + show_password_btn.setIcon(show_password_icon) + show_password_btn.setStyleSheet(( + "border: none;padding:0.1em;" + )) + show_password_btn.setFocusPolicy(QtCore.Qt.ClickFocus) + + password_layout = QtWidgets.QHBoxLayout(password_widget) + password_layout.setContentsMargins(0, 0, 0, 0) + password_layout.addWidget(password_label) + 
password_layout.addWidget(password_input) + password_layout.addWidget(show_password_btn) + + message_label = QtWidgets.QLabel("", self) + + # Buttons + buttons_widget = QtWidgets.QWidget(self) + + remember_checkbox = QtWidgets.QCheckBox("Remember", buttons_widget) + remember_checkbox.setVisible(allow_remember) + remember_checkbox.setStyleSheet(( + "spacing: 0.5em;" + )) + + ok_btn = QtWidgets.QPushButton("Ok", buttons_widget) + cancel_btn = QtWidgets.QPushButton("Cancel", buttons_widget) + + buttons_layout = QtWidgets.QHBoxLayout(buttons_widget) + buttons_layout.setContentsMargins(0, 0, 0, 0) + buttons_layout.addWidget(remember_checkbox) + buttons_layout.addStretch(1) + buttons_layout.addWidget(ok_btn) + buttons_layout.addWidget(cancel_btn) + + # Main layout + layout = QtWidgets.QVBoxLayout(self) + layout.addSpacing(10) + layout.addWidget(password_widget, 0) + layout.addWidget(message_label, 0) + layout.addStretch(1) + layout.addWidget(buttons_widget, 0) + + ok_btn.clicked.connect(self._on_ok_click) + cancel_btn.clicked.connect(self._on_cancel_click) + show_password_btn.change_state.connect(self._on_show_password) + + self.password_input = password_input + self.remember_checkbox = remember_checkbox + self.message_label = message_label + + def remember_password(self): + if not self._allow_remember: + return False + return self.remember_checkbox.isChecked() + + def result(self): + if self._final_result is None: + return False + return self._final_result == self._expected_result + + def keyPressEvent(self, event): + if event.key() in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter): + self._on_ok_click() + return event.accept() + super(PasswordDialog, self).keyPressEvent(event) + + def closeEvent(self, event): + super(PasswordDialog, self).closeEvent(event) + self.finished.emit(self.result()) + + def _on_ok_click(self): + input_value = self.password_input.text() + if input_value != self._expected_result: + self.message_label.setText("Invalid password. 
Try it again...") + self.password_input.setFocus() + return + + if self.remember_password(): + local_settings = get_local_settings() + if "general" not in local_settings: + local_settings["general"] = {} + + local_settings["general"]["is_admin"] = True + + save_local_settings(local_settings) + + self._final_result = input_value + self.close() + + def _on_show_password(self, show_password): + if show_password: + echo_mode = QtWidgets.QLineEdit.Normal + else: + echo_mode = QtWidgets.QLineEdit.Password + self.password_input.setEchoMode(echo_mode) + + def _on_cancel_click(self): + self.close() diff --git a/openpype/tools/standalonepublish/widgets/widget_family.py b/openpype/tools/standalonepublish/widgets/widget_family.py index 50335e3109..86663c8ee0 100644 --- a/openpype/tools/standalonepublish/widgets/widget_family.py +++ b/openpype/tools/standalonepublish/widgets/widget_family.py @@ -255,9 +255,9 @@ class FamilyWidget(QtWidgets.QWidget): defaults = list(plugin.defaults) # Replace - compare_regex = re.compile( - subset_name.replace(user_input_text, "(.+)") - ) + compare_regex = re.compile(re.sub( + user_input_text, "(.+)", subset_name, flags=re.IGNORECASE + )) subset_hints = set() if user_input_text: for _name in existing_subset_names: diff --git a/openpype/version.py b/openpype/version.py index dedf799055..27186ad2bb 100644 --- a/openpype/version.py +++ b/openpype/version.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- """Package declaring Pype version.""" -__version__ = "3.0.0-beta2" +__version__ = "3.0.0-rc4" diff --git a/poetry.lock b/poetry.lock index 41a1f636ec..09e2d133e8 100644 --- a/poetry.lock +++ b/poetry.lock @@ -80,11 +80,11 @@ python-dateutil = ">=2.7.0" [[package]] name = "astroid" -version = "2.5.3" +version = "2.5.6" description = "An abstract syntax tree for Python with inference support." 
category = "dev" optional = false -python-versions = ">=3.6" +python-versions = "~=3.6" [package.dependencies] lazy-object-proxy = ">=1.4.0" @@ -109,21 +109,21 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "attrs" -version = "20.3.0" +version = "21.2.0" description = "Classes Without Boilerplate" category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [package.extras] -dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface", "furo", "sphinx", "pre-commit"] -docs = ["furo", "sphinx", "zope.interface"] -tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface"] -tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six"] +dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit"] +docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"] +tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface"] +tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins"] [[package]] name = "autopep8" -version = "1.5.6" +version = "1.5.7" description = "A tool that automatically formats Python code to conform to the PEP 8 style guide" category = "dev" optional = false @@ -135,7 +135,7 @@ toml = "*" [[package]] name = "babel" -version = "2.9.0" +version = "2.9.1" description = "Internationalization utilities" category = "dev" optional = false @@ -159,7 +159,7 @@ wcwidth = ">=0.1.4" [[package]] name = "cachetools" -version = "4.2.1" +version = "4.2.2" description = "Extensible memoizing collections and decorators" 
category = "main" optional = false @@ -335,7 +335,7 @@ python-versions = "*" [[package]] name = "flake8" -version = "3.9.1" +version = "3.9.2" description = "the modular source code checker: pep8 pyflakes and co" category = "dev" optional = false @@ -413,7 +413,7 @@ uritemplate = ">=3.0.0,<4dev" [[package]] name = "google-auth" -version = "1.29.0" +version = "1.30.0" description = "Google Authentication Library" category = "main" optional = false @@ -486,7 +486,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "importlib-metadata" -version = "4.0.0" +version = "4.0.1" description = "Read metadata from Python packages" category = "main" optional = false @@ -736,7 +736,7 @@ python-versions = "*" [[package]] name = "protobuf" -version = "3.15.8" +version = "3.17.0" description = "Protocol Buffers" category = "main" optional = false @@ -826,7 +826,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "pygments" -version = "2.8.1" +version = "2.9.0" description = "Pygments is a syntax highlighting package written in Python." 
category = "dev" optional = false @@ -834,25 +834,22 @@ python-versions = ">=3.5" [[package]] name = "pylint" -version = "2.7.4" +version = "2.8.2" description = "python code static checker" category = "dev" optional = false python-versions = "~=3.6" [package.dependencies] -astroid = ">=2.5.2,<2.7" +astroid = ">=2.5.6,<2.7" colorama = {version = "*", markers = "sys_platform == \"win32\""} isort = ">=4.2.5,<6" mccabe = ">=0.6,<0.7" toml = ">=0.7.1" -[package.extras] -docs = ["sphinx (==3.5.1)", "python-docs-theme (==2020.12)"] - [[package]] name = "pymongo" -version = "3.11.3" +version = "3.11.4" description = "Python driver for MongoDB " category = "main" optional = false @@ -884,7 +881,7 @@ six = "*" [[package]] name = "pyobjc-core" -version = "7.1" +version = "7.2" description = "Python<->ObjC Interoperability Module" category = "main" optional = false @@ -892,26 +889,26 @@ python-versions = ">=3.6" [[package]] name = "pyobjc-framework-cocoa" -version = "7.1" +version = "7.2" description = "Wrappers for the Cocoa frameworks on macOS" category = "main" optional = false python-versions = ">=3.6" [package.dependencies] -pyobjc-core = ">=7.1" +pyobjc-core = ">=7.2" [[package]] name = "pyobjc-framework-quartz" -version = "7.1" +version = "7.2" description = "Wrappers for the Quartz frameworks on macOS" category = "main" optional = false python-versions = ">=3.6" [package.dependencies] -pyobjc-core = ">=7.1" -pyobjc-framework-Cocoa = ">=7.1" +pyobjc-core = ">=7.2" +pyobjc-framework-Cocoa = ">=7.2" [[package]] name = "pyparsing" @@ -943,7 +940,7 @@ python-versions = "*" [[package]] name = "pyqt5-sip" -version = "12.8.1" +version = "12.9.0" description = "The sip module support for PyQt5" category = "main" optional = false @@ -959,7 +956,7 @@ python-versions = ">=3.5" [[package]] name = "pytest" -version = "6.2.3" +version = "6.2.4" description = "pytest: simple powerful testing with Python" category = "dev" optional = false @@ -1124,9 +1121,17 @@ python-versions = 
">=3.6" cryptography = ">=2.0" jeepney = ">=0.6" +[[package]] +name = "semver" +version = "2.13.0" +description = "Python helper for Semantic Versioning (http://semver.org/)" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + [[package]] name = "six" -version = "1.15.0" +version = "1.16.0" description = "Python 2 and 3 compatibility utilities" category = "main" optional = false @@ -1150,19 +1155,20 @@ python-versions = "*" [[package]] name = "sphinx" -version = "3.5.4" +version = "4.0.1" description = "Python documentation generator" category = "dev" optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" [package.dependencies] alabaster = ">=0.7,<0.8" babel = ">=1.3" colorama = {version = ">=0.3.5", markers = "sys_platform == \"win32\""} -docutils = ">=0.12,<0.17" +docutils = ">=0.14,<0.18" imagesize = "*" -Jinja2 = ">=2.3" +Jinja2 = ">=2.3,<3.0" +MarkupSafe = "<2.0" packaging = "*" Pygments = ">=2.0" requests = ">=2.5.0" @@ -1318,7 +1324,7 @@ python-versions = "*" [[package]] name = "typing-extensions" -version = "3.7.4.3" +version = "3.10.0.0" description = "Backported and Experimental Type Hints for Python 3.5+" category = "main" optional = false @@ -1355,7 +1361,7 @@ python-versions = "*" [[package]] name = "websocket-client" -version = "0.58.0" +version = "0.59.0" description = "WebSocket client for Python with low level API options" category = "main" optional = false @@ -1417,7 +1423,7 @@ testing = ["pytest (>=4.6)", "pytest-checkdocs (>=1.2.3)", "pytest-flake8", "pyt [metadata] lock-version = "1.1" python-versions = "3.7.*" -content-hash = "80fde42aade7fc90bb68d85f0d9b3feb27fc3744d72eb5af6a11b6c9d9836aca" +content-hash = "9e067714903bf7e438bc11556b58b6b96be6b079e9a245690c84de8493fa516e" [metadata.files] acre = [] @@ -1481,8 +1487,8 @@ arrow = [ {file = "arrow-0.17.0.tar.gz", hash = "sha256:ff08d10cda1d36c68657d6ad20d74fbea493d980f8b2d45344e00d6ed2bf6ed4"}, ] astroid = [ - {file = 
"astroid-2.5.3-py3-none-any.whl", hash = "sha256:bea3f32799fbb8581f58431c12591bc20ce11cbc90ad82e2ea5717d94f2080d5"}, - {file = "astroid-2.5.3.tar.gz", hash = "sha256:ad63b8552c70939568966811a088ef0bc880f99a24a00834abd0e3681b514f91"}, + {file = "astroid-2.5.6-py3-none-any.whl", hash = "sha256:4db03ab5fc3340cf619dbc25e42c2cc3755154ce6009469766d7143d1fc2ee4e"}, + {file = "astroid-2.5.6.tar.gz", hash = "sha256:8a398dfce302c13f14bab13e2b14fe385d32b73f4e4853b9bdfb64598baa1975"}, ] async-timeout = [ {file = "async-timeout-3.0.1.tar.gz", hash = "sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f"}, @@ -1493,24 +1499,24 @@ atomicwrites = [ {file = "atomicwrites-1.4.0.tar.gz", hash = "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"}, ] attrs = [ - {file = "attrs-20.3.0-py2.py3-none-any.whl", hash = "sha256:31b2eced602aa8423c2aea9c76a724617ed67cf9513173fd3a4f03e3a929c7e6"}, - {file = "attrs-20.3.0.tar.gz", hash = "sha256:832aa3cde19744e49938b91fea06d69ecb9e649c93ba974535d08ad92164f700"}, + {file = "attrs-21.2.0-py2.py3-none-any.whl", hash = "sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1"}, + {file = "attrs-21.2.0.tar.gz", hash = "sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb"}, ] autopep8 = [ - {file = "autopep8-1.5.6-py2.py3-none-any.whl", hash = "sha256:f01b06a6808bc31698db907761e5890eb2295e287af53f6693b39ce55454034a"}, - {file = "autopep8-1.5.6.tar.gz", hash = "sha256:5454e6e9a3d02aae38f866eec0d9a7de4ab9f93c10a273fb0340f3d6d09f7514"}, + {file = "autopep8-1.5.7-py2.py3-none-any.whl", hash = "sha256:aa213493c30dcdac99537249ee65b24af0b2c29f2e83cd8b3f68760441ed0db9"}, + {file = "autopep8-1.5.7.tar.gz", hash = "sha256:276ced7e9e3cb22e5d7c14748384a5cf5d9002257c0ed50c0e075b68011bb6d0"}, ] babel = [ - {file = "Babel-2.9.0-py2.py3-none-any.whl", hash = "sha256:9d35c22fcc79893c3ecc85ac4a56cde1ecf3f19c540bba0922308a6c06ca6fa5"}, - {file = "Babel-2.9.0.tar.gz", hash = 
"sha256:da031ab54472314f210b0adcff1588ee5d1d1d0ba4dbd07b94dba82bde791e05"}, + {file = "Babel-2.9.1-py2.py3-none-any.whl", hash = "sha256:ab49e12b91d937cd11f0b67cb259a57ab4ad2b59ac7a3b41d6c06c0ac5b0def9"}, + {file = "Babel-2.9.1.tar.gz", hash = "sha256:bc0c176f9f6a994582230df350aa6e05ba2ebe4b3ac317eab29d9be5d2768da0"}, ] blessed = [ {file = "blessed-1.18.0-py2.py3-none-any.whl", hash = "sha256:5b5e2f0563d5a668c282f3f5946f7b1abb70c85829461900e607e74d7725106e"}, {file = "blessed-1.18.0.tar.gz", hash = "sha256:1312879f971330a1b7f2c6341f2ae7e2cbac244bfc9d0ecfbbecd4b0293bc755"}, ] cachetools = [ - {file = "cachetools-4.2.1-py3-none-any.whl", hash = "sha256:1d9d5f567be80f7c07d765e21b814326d78c61eb0c3a637dffc0e5d1796cb2e2"}, - {file = "cachetools-4.2.1.tar.gz", hash = "sha256:f469e29e7aa4cff64d8de4aad95ce76de8ea1125a16c68e0d93f65c3c3dc92e9"}, + {file = "cachetools-4.2.2-py3-none-any.whl", hash = "sha256:2cc0b89715337ab6dbba85b5b50effe2b0c74e035d83ee8ed637cf52f12ae001"}, + {file = "cachetools-4.2.2.tar.gz", hash = "sha256:61b5ed1e22a0924aed1d23b478f37e8d52549ff8a961de2909c69bf950020cff"}, ] certifi = [ {file = "certifi-2020.12.5-py2.py3-none-any.whl", hash = "sha256:719a74fb9e33b9bd44cc7f3a8d94bc35e4049deebe19ba7d8e108280cfd59830"}, @@ -1689,8 +1695,8 @@ evdev = [ {file = "evdev-1.4.0.tar.gz", hash = "sha256:8782740eb1a86b187334c07feb5127d3faa0b236e113206dfe3ae8f77fb1aaf1"}, ] flake8 = [ - {file = "flake8-3.9.1-py2.py3-none-any.whl", hash = "sha256:3b9f848952dddccf635be78098ca75010f073bfe14d2c6bda867154bea728d2a"}, - {file = "flake8-3.9.1.tar.gz", hash = "sha256:1aa8990be1e689d96c745c5682b687ea49f2e05a443aff1f8251092b0014e378"}, + {file = "flake8-3.9.2-py2.py3-none-any.whl", hash = "sha256:bf8fd333346d844f616e8d47905ef3a3384edae6b4e9beb0c5101e25e3110907"}, + {file = "flake8-3.9.2.tar.gz", hash = "sha256:07528381786f2a6237b061f6e96610a4167b226cb926e2aa2b6b1d78057c576b"}, ] ftrack-python-api = [ {file = "ftrack-python-api-2.0.0.tar.gz", hash = 
"sha256:dd6f02c31daf5a10078196dc9eac4671e4297c762fbbf4df98de668ac12281d9"}, @@ -1708,8 +1714,8 @@ google-api-python-client = [ {file = "google_api_python_client-1.12.8-py2.py3-none-any.whl", hash = "sha256:3c4c4ca46b5c21196bec7ee93453443e477d82cbfa79234d1ce0645f81170eaf"}, ] google-auth = [ - {file = "google-auth-1.29.0.tar.gz", hash = "sha256:010f011c4e27d3d5eb01106fba6aac39d164842dfcd8709955c4638f5b11ccf8"}, - {file = "google_auth-1.29.0-py2.py3-none-any.whl", hash = "sha256:f30a672a64d91cc2e3137765d088c5deec26416246f7a9e956eaf69a8d7ed49c"}, + {file = "google-auth-1.30.0.tar.gz", hash = "sha256:9ad25fba07f46a628ad4d0ca09f38dcb262830df2ac95b217f9b0129c9e42206"}, + {file = "google_auth-1.30.0-py2.py3-none-any.whl", hash = "sha256:588bdb03a41ecb4978472b847881e5518b5d9ec6153d3d679aa127a55e13b39f"}, ] google-auth-httplib2 = [ {file = "google-auth-httplib2-0.1.0.tar.gz", hash = "sha256:a07c39fd632becacd3f07718dfd6021bf396978f03ad3ce4321d060015cc30ac"}, @@ -1732,8 +1738,8 @@ imagesize = [ {file = "imagesize-1.2.0.tar.gz", hash = "sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1"}, ] importlib-metadata = [ - {file = "importlib_metadata-4.0.0-py3-none-any.whl", hash = "sha256:19192b88d959336bfa6bdaaaef99aeafec179eca19c47c804e555703ee5f07ef"}, - {file = "importlib_metadata-4.0.0.tar.gz", hash = "sha256:2e881981c9748d7282b374b68e759c87745c25427b67ecf0cc67fb6637a1bff9"}, + {file = "importlib_metadata-4.0.1-py3-none-any.whl", hash = "sha256:d7eb1dea6d6a6086f8be21784cc9e3bcfa55872b52309bc5fad53a8ea444465d"}, + {file = "importlib_metadata-4.0.1.tar.gz", hash = "sha256:8c501196e49fb9df5df43833bdb1e4328f64847763ec8a50703148b73784d581"}, ] iniconfig = [ {file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"}, @@ -1948,26 +1954,29 @@ prefixed = [ {file = "prefixed-0.3.2.tar.gz", hash = "sha256:ca48277ba5fa8346dd4b760847da930c7b84416387c39e93affef086add2c029"}, ] protobuf = [ - {file = 
"protobuf-3.15.8-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:fad4f971ec38d8df7f4b632c819bf9bbf4f57cfd7312cf526c69ce17ef32436a"}, - {file = "protobuf-3.15.8-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:f17b352d7ce33c81773cf81d536ca70849de6f73c96413f17309f4b43ae7040b"}, - {file = "protobuf-3.15.8-cp35-cp35m-macosx_10_9_intel.whl", hash = "sha256:4a054b0b5900b7ea7014099e783fb8c4618e4209fffcd6050857517b3f156e18"}, - {file = "protobuf-3.15.8-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:efa4c4d4fc9ba734e5e85eaced70e1b63fb3c8d08482d839eb838566346f1737"}, - {file = "protobuf-3.15.8-cp35-cp35m-win32.whl", hash = "sha256:07eec4e2ccbc74e95bb9b3afe7da67957947ee95bdac2b2e91b038b832dd71f0"}, - {file = "protobuf-3.15.8-cp35-cp35m-win_amd64.whl", hash = "sha256:f9cadaaa4065d5dd4d15245c3b68b967b3652a3108e77f292b58b8c35114b56c"}, - {file = "protobuf-3.15.8-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:2dc0e8a9e4962207bdc46a365b63a3f1aca6f9681a5082a326c5837ef8f4b745"}, - {file = "protobuf-3.15.8-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:f80afc0a0ba13339bbab25ca0409e9e2836b12bb012364c06e97c2df250c3343"}, - {file = "protobuf-3.15.8-cp36-cp36m-win32.whl", hash = "sha256:c5566f956a26cda3abdfacc0ca2e21db6c9f3d18f47d8d4751f2209d6c1a5297"}, - {file = "protobuf-3.15.8-cp36-cp36m-win_amd64.whl", hash = "sha256:dab75b56a12b1ceb3e40808b5bd9dfdaef3a1330251956e6744e5b6ed8f8830b"}, - {file = "protobuf-3.15.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3053f13207e7f13dc7be5e9071b59b02020172f09f648e85dc77e3fcb50d1044"}, - {file = "protobuf-3.15.8-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:1f0b5d156c3df08cc54bc2c8b8b875648ea4cd7ebb2a9a130669f7547ec3488c"}, - {file = "protobuf-3.15.8-cp37-cp37m-win32.whl", hash = "sha256:90270fe5732c1f1ff664a3bd7123a16456d69b4e66a09a139a00443a32f210b8"}, - {file = "protobuf-3.15.8-cp37-cp37m-win_amd64.whl", hash = "sha256:f42c2f5fb67da5905bfc03733a311f72fa309252bcd77c32d1462a1ad519521e"}, - {file = 
"protobuf-3.15.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f6077db37bfa16494dca58a4a02bfdacd87662247ad6bc1f7f8d13ff3f0013e1"}, - {file = "protobuf-3.15.8-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:510e66491f1a5ac5953c908aa8300ec47f793130097e4557482803b187a8ee05"}, - {file = "protobuf-3.15.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5ff9fa0e67fcab442af9bc8d4ec3f82cb2ff3be0af62dba047ed4187f0088b7d"}, - {file = "protobuf-3.15.8-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:1c0e9e56202b9dccbc094353285a252e2b7940b74fdf75f1b4e1b137833fabd7"}, - {file = "protobuf-3.15.8-py2.py3-none-any.whl", hash = "sha256:a0a08c6b2e6d6c74a6eb5bf6184968eefb1569279e78714e239d33126e753403"}, - {file = "protobuf-3.15.8.tar.gz", hash = "sha256:0277f62b1e42210cafe79a71628c1d553348da81cbd553402a7f7549c50b11d0"}, + {file = "protobuf-3.17.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:15351df904347da2081a2eebc42b192c29724eb57dbe56dae440be843f1e4779"}, + {file = "protobuf-3.17.0-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:5356981c1919782b8c2e3ea5c5d85ad5937b8178a025ac9edc2f2ca5b4a717ae"}, + {file = "protobuf-3.17.0-cp35-cp35m-macosx_10_9_intel.whl", hash = "sha256:eac0a2a7ea99e17175f6e7b53cdc9004ed786c072fbdf933def0e454e14fd323"}, + {file = "protobuf-3.17.0-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:4c8d0997fdc0a4cf9de7950d598ce6974b22e8618bbcf1d15e9842010cf8420a"}, + {file = "protobuf-3.17.0-cp35-cp35m-win32.whl", hash = "sha256:9ae321459d4890c3939c536382f75e232c9e91ce506310353c8a15ad5c379e0d"}, + {file = "protobuf-3.17.0-cp35-cp35m-win_amd64.whl", hash = "sha256:295944ef0772498d7bf75f6aa5d4dfcfd02f5ce70f735b406e52e43ac3914d38"}, + {file = "protobuf-3.17.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:850f429bd2399525d339d05bc809f090f16d3d88737bed637d355a5ee8d3b81a"}, + {file = "protobuf-3.17.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = 
"sha256:809a96d5a1a74538728710f9104f43ae77f5e48bde274ee321b10a324ba52e4f"}, + {file = "protobuf-3.17.0-cp36-cp36m-win32.whl", hash = "sha256:8a3ac375539055164f31a330770f137875307e6f04c21e2647f2e7139c501295"}, + {file = "protobuf-3.17.0-cp36-cp36m-win_amd64.whl", hash = "sha256:3d338910b10b88b18581cf6877b3938b2e262e8fdc2c1057f5a291787de63183"}, + {file = "protobuf-3.17.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1488f786bd1912f97796cf5def8cacf433735616896cf7ed9dc786cee693dfc8"}, + {file = "protobuf-3.17.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:bcaff977db178f0bfde10bab0d23a5f5adf5964adba70c315e45922a1c55eb90"}, + {file = "protobuf-3.17.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:939ce06846ddfec99c0bff510510b3ee45778e7a3aec6544d1f36526e5fecb67"}, + {file = "protobuf-3.17.0-cp37-cp37m-win32.whl", hash = "sha256:3237acce5b666c7b0f45785cc2d0809796d4df3593bd68338aebf25408139188"}, + {file = "protobuf-3.17.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2f77afe33bb86c7d34221a86193256d69aa10818620fe4a7513d98211d67d672"}, + {file = "protobuf-3.17.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:acc9f2091ace3de429eee424ab7ba0bc52a6aa9ffc9909e5c4de259a3f71db46"}, + {file = "protobuf-3.17.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:a29631f4f8bcf79b12a59e83d238d888de5034871461d788c74c68218ad75049"}, + {file = "protobuf-3.17.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:05c304396e309661c45e3a97bd2d8da1fc2bab743ed2ca880bcb757271c40c0e"}, + {file = "protobuf-3.17.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:baea44967071e6a51e705e4e88aebf35f530a14004cc69f60a185e5d7e13de7e"}, + {file = "protobuf-3.17.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:3b5c461af5a3cebd796c73370db929b7e24cbaba655eefdc044226bc8a843d6b"}, + {file = "protobuf-3.17.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:44399393c3a8cc04a4cfbdc721dd7f2114497efda582e946a91b8c4290ae5ff5"}, + 
{file = "protobuf-3.17.0-py2.py3-none-any.whl", hash = "sha256:e32ef0c9f4b548c80d94dfff8b4130ca2ff3d50caaf2455889e3f5b8a01e8038"}, + {file = "protobuf-3.17.0.tar.gz", hash = "sha256:05dfe9319939a8473c21b469f34f6486646e54fb8542637cf7ed8e2fbfe21538"}, ] py = [ {file = "py-1.10.0-py2.py3-none-any.whl", hash = "sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a"}, @@ -2028,78 +2037,78 @@ pyflakes = [ {file = "pyflakes-2.3.1.tar.gz", hash = "sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db"}, ] pygments = [ - {file = "Pygments-2.8.1-py3-none-any.whl", hash = "sha256:534ef71d539ae97d4c3a4cf7d6f110f214b0e687e92f9cb9d2a3b0d3101289c8"}, - {file = "Pygments-2.8.1.tar.gz", hash = "sha256:2656e1a6edcdabf4275f9a3640db59fd5de107d88e8663c5d4e9a0fa62f77f94"}, + {file = "Pygments-2.9.0-py3-none-any.whl", hash = "sha256:d66e804411278594d764fc69ec36ec13d9ae9147193a1740cd34d272ca383b8e"}, + {file = "Pygments-2.9.0.tar.gz", hash = "sha256:a18f47b506a429f6f4b9df81bb02beab9ca21d0a5fee38ed15aef65f0545519f"}, ] pylint = [ - {file = "pylint-2.7.4-py3-none-any.whl", hash = "sha256:209d712ec870a0182df034ae19f347e725c1e615b2269519ab58a35b3fcbbe7a"}, - {file = "pylint-2.7.4.tar.gz", hash = "sha256:bd38914c7731cdc518634a8d3c5585951302b6e2b6de60fbb3f7a0220e21eeee"}, + {file = "pylint-2.8.2-py3-none-any.whl", hash = "sha256:f7e2072654a6b6afdf5e2fb38147d3e2d2d43c89f648637baab63e026481279b"}, + {file = "pylint-2.8.2.tar.gz", hash = "sha256:586d8fa9b1891f4b725f587ef267abe2a1bad89d6b184520c7f07a253dd6e217"}, ] pymongo = [ - {file = "pymongo-3.11.3-cp27-cp27m-macosx_10_14_intel.whl", hash = "sha256:4d959e929cec805c2bf391418b1121590b4e7d5cb00af7b1ba521443d45a0918"}, - {file = "pymongo-3.11.3-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:9fbffc5bad4df99a509783cbd449ed0d24fcd5a450c28e7756c8f20eda3d2aa5"}, - {file = "pymongo-3.11.3-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:bd351ceb2decd23d523fc50bad631ee9ae6e97e7cdc355ce5600fe310484f96e"}, - {file = 
"pymongo-3.11.3-cp27-cp27m-win32.whl", hash = "sha256:7d2ae2f7c50adec20fde46a73465de31a6a6fbb4903240f8b7304549752ca7a1"}, - {file = "pymongo-3.11.3-cp27-cp27m-win_amd64.whl", hash = "sha256:b1aa62903a2c5768b0001632efdea2e8da6c80abdd520c2e8a16001cc9affb23"}, - {file = "pymongo-3.11.3-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:180511abfef70feb022360b35f4863dd68e08334197089201d5c52208de9ca2e"}, - {file = "pymongo-3.11.3-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:42f9ec9d77358f557fe17cc15e796c4d4d492ede1a30cba3664822cae66e97c5"}, - {file = "pymongo-3.11.3-cp34-cp34m-macosx_10_6_intel.whl", hash = "sha256:3dbc67754882d740f17809342892f0b24398770bd99d48c5cb5ba89f5f5dee4e"}, - {file = "pymongo-3.11.3-cp34-cp34m-manylinux1_i686.whl", hash = "sha256:733e1cfffc4cd99848230e2999c8a86e284c6af6746482f8ad2ad554dce14e39"}, - {file = "pymongo-3.11.3-cp34-cp34m-manylinux1_x86_64.whl", hash = "sha256:622a5157ffcd793d305387c1c9fb94185f496c8c9fd66dafb59de0807bc14ad7"}, - {file = "pymongo-3.11.3-cp34-cp34m-win32.whl", hash = "sha256:2aeb108da1ed8e066800fb447ba5ae89d560e6773d228398a87825ac3630452d"}, - {file = "pymongo-3.11.3-cp34-cp34m-win_amd64.whl", hash = "sha256:7c77801620e5e75fb9c7abae235d3cc45d212a67efa98f4972eef63e736a8daa"}, - {file = "pymongo-3.11.3-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:29390c39ca873737689a0749c9c3257aad96b323439b11279fbc0ba8626ec9c5"}, - {file = "pymongo-3.11.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:a8b02e0119d6ee381a265d8d2450a38096f82916d895fed2dfd81d4c7a54d6e4"}, - {file = "pymongo-3.11.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:28633868be21a187702a8613913e13d1987d831529358c29fc6f6670413df040"}, - {file = "pymongo-3.11.3-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:685b884fa41bd2913fd20af85866c4ff886b7cbb7e4833b918996aa5d45a04be"}, - {file = "pymongo-3.11.3-cp35-cp35m-manylinux2014_i686.whl", hash = "sha256:7cd42c66d49ffb68dea065e1c8a4323e7ceab386e660fee9863d4fa227302ba9"}, - {file = 
"pymongo-3.11.3-cp35-cp35m-manylinux2014_ppc64le.whl", hash = "sha256:950710f7370613a6bfa2ccd842b488c5b8072e83fb6b7d45d99110bf44651d06"}, - {file = "pymongo-3.11.3-cp35-cp35m-manylinux2014_s390x.whl", hash = "sha256:c7fd18d4b7939408df9315fedbdb05e179760960a92b3752498e2fcd03f24c3d"}, - {file = "pymongo-3.11.3-cp35-cp35m-manylinux2014_x86_64.whl", hash = "sha256:cc359e408712faf9ea775f4c0ec8f2bfc843afe47747a657808d9595edd34d71"}, - {file = "pymongo-3.11.3-cp35-cp35m-win32.whl", hash = "sha256:7814b2cf23aad23464859973c5cd2066ca2fd99e0b934acefbb0b728ac2525bf"}, - {file = "pymongo-3.11.3-cp35-cp35m-win_amd64.whl", hash = "sha256:e1414599a97554d451e441afb362dbee1505e4550852c0068370d843757a3fe2"}, - {file = "pymongo-3.11.3-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:0384d76b409278ddb34ac19cdc4664511685959bf719adbdc051875ded4689aa"}, - {file = "pymongo-3.11.3-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:22ee2c94fee1e391735be63aa1c9af4c69fdcb325ae9e5e4ddff770248ef60a6"}, - {file = "pymongo-3.11.3-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:db6fd53ef5f1914ad801830406440c3bfb701e38a607eda47c38adba267ba300"}, - {file = "pymongo-3.11.3-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:66b688fc139c6742057795510e3b12c4acbf90d11af1eff9689a41d9c84478d6"}, - {file = "pymongo-3.11.3-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:6a5834e392c97f19f36670e34bf9d346d733ad89ee0689a6419dd737dfa4308a"}, - {file = "pymongo-3.11.3-cp36-cp36m-manylinux2014_ppc64le.whl", hash = "sha256:87981008d565f647142869d99915cc4760b7725858da3d39ecb2a606e23f36fd"}, - {file = "pymongo-3.11.3-cp36-cp36m-manylinux2014_s390x.whl", hash = "sha256:413b18ac2222f5d961eb8d1c8dcca6c6ca176c8613636d8c13aa23abae7f7a21"}, - {file = "pymongo-3.11.3-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:610d5cbbfd026e2f6d15665af51e048e49b68363fedece2ed318cc8fe080dd94"}, - {file = "pymongo-3.11.3-cp36-cp36m-win32.whl", hash = "sha256:3873866534b6527e6863e742eb23ea2a539e3c7ee00ad3f9bec9da27dbaaff6f"}, - 
{file = "pymongo-3.11.3-cp36-cp36m-win_amd64.whl", hash = "sha256:b17e627844d86031c77147c40bf992a6e1114025a460874deeda6500d0f34862"}, - {file = "pymongo-3.11.3-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:05e2bda928a3a6bc6ddff9e5a8579d41928b75d7417b18f9a67c82bb52150ac6"}, - {file = "pymongo-3.11.3-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:19d52c60dc37520385f538d6d1a4c40bc398e0885f4ed6a36ce10b631dab2852"}, - {file = "pymongo-3.11.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:2163d736d6f62b20753be5da3dc07a188420b355f057fcbb3075b05ee6227b2f"}, - {file = "pymongo-3.11.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:b4535d98df83abebb572035754fb3d4ad09ce7449375fa09fa9ede2dbc87b62b"}, - {file = "pymongo-3.11.3-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:cd8fc35d4c0c717cc29b0cb894871555cb7137a081e179877ecc537e2607f0b9"}, - {file = "pymongo-3.11.3-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:92e2376ce3ca0e3e443b3c5c2bb5d584c7e59221edfb0035313c6306049ba55a"}, - {file = "pymongo-3.11.3-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:4ca92e15fcf02e02e7c24b448a16599b98c9d0e6a46cd85cc50804450ebf7245"}, - {file = "pymongo-3.11.3-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:5a03ae5ac85b04b2034a0689add9ff597b16d5e24066a87f6ab0e9fa67049156"}, - {file = "pymongo-3.11.3-cp37-cp37m-win32.whl", hash = "sha256:bc2eb67387b8376120a2be6cba9d23f9d6a6c3828e00fb0a64c55ad7b54116d1"}, - {file = "pymongo-3.11.3-cp37-cp37m-win_amd64.whl", hash = "sha256:5e1341276ce8b7752db9aeac6bbb0cbe82a3f6a6186866bf6b4906d8d328d50b"}, - {file = "pymongo-3.11.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4ac387ac1be71b798d1c372a924f9c30352f30e684e06f086091297352698ac0"}, - {file = "pymongo-3.11.3-cp38-cp38-manylinux1_i686.whl", hash = "sha256:728313cc0d59d1a1a004f675607dcf5c711ced3f55e75d82b3f264fd758869f3"}, - {file = "pymongo-3.11.3-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:daa44cefde19978af57ac1d50413cd86ebf2b497328e7a27832f5824bda47439"}, - 
{file = "pymongo-3.11.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:322f6cc7bf23a264151ebc5229a92600c4b55ac83c83c91c9bab1ec92c888a8d"}, - {file = "pymongo-3.11.3-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:6043d251fac27ca04ff22ed8deb5ff7a43dc18e8a4a15b4c442d2a20fa313162"}, - {file = "pymongo-3.11.3-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:66573c8c7808cce4f3b56c23cb7cad6c3d7f4c464b9016d35f5344ad743896d7"}, - {file = "pymongo-3.11.3-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:bf70097bd497089f1baabf9cbb3ec4f69c022dc7a70c41ba9c238fa4d0fff7ab"}, - {file = "pymongo-3.11.3-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:f23abcf6eca5859a2982beadfb5111f8c5e76e30ff99aaee3c1c327f814f9f10"}, - {file = "pymongo-3.11.3-cp38-cp38-win32.whl", hash = "sha256:1d559a76ae87143ad96c2ecd6fdd38e691721e175df7ced3fcdc681b4638bca1"}, - {file = "pymongo-3.11.3-cp38-cp38-win_amd64.whl", hash = "sha256:152e4ac3158b776135d8fce28d2ac06e682b885fcbe86690d66465f262ab244e"}, - {file = "pymongo-3.11.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:34c15f5798f23488e509eae82fbf749c3d17db74379a88c07c869ece1aa806b9"}, - {file = "pymongo-3.11.3-cp39-cp39-manylinux1_i686.whl", hash = "sha256:210ec4a058480b9c3869082e52b66d80c4a48eda9682d7a569a1a5a48100ea54"}, - {file = "pymongo-3.11.3-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:b44fa04720bbfd617b6aef036989c8c30435f11450c0a59136291d7b41ed647f"}, - {file = "pymongo-3.11.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:b32e4eed2ef19a20dfb57698497a9bc54e74efb2e260c003e9056c145f130dc7"}, - {file = "pymongo-3.11.3-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:5091aacbdb667b418b751157f48f6daa17142c4f9063d58e5a64c90b2afbdf9a"}, - {file = "pymongo-3.11.3-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:bb6a5777bf558f444cd4883d617546182cfeff8f2d4acd885253f11a16740534"}, - {file = "pymongo-3.11.3-cp39-cp39-manylinux2014_s390x.whl", hash = 
"sha256:980527f4ccc6644855bb68056fe7835da6d06d37776a52df5bcc1882df57c3db"}, - {file = "pymongo-3.11.3-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:65b67637f0a25ac9d25efb13c1578eb065870220ffa82f132c5b2d8e43ac39c3"}, - {file = "pymongo-3.11.3-cp39-cp39-win32.whl", hash = "sha256:f6748c447feeadda059719ef5ab1fb9d84bd370e205b20049a0e8b45ef4ad593"}, - {file = "pymongo-3.11.3-cp39-cp39-win_amd64.whl", hash = "sha256:ee42a8f850143ae7c67ea09a183a6a4ad8d053e1dbd9a1134e21a7b5c1bc6c73"}, - {file = "pymongo-3.11.3-py2.7-macosx-10.14-intel.egg", hash = "sha256:7edff02e44dd0badd749d7342e40705a398d98c5d8f7570f57cff9568c2351fa"}, - {file = "pymongo-3.11.3.tar.gz", hash = "sha256:db5098587f58fbf8582d9bda2462762b367207246d3e19623782fb449c3c5fcc"}, + {file = "pymongo-3.11.4-cp27-cp27m-macosx_10_14_intel.whl", hash = "sha256:b7efc7e7049ef366777cfd35437c18a4166bb50a5606a1c840ee3b9624b54fc9"}, + {file = "pymongo-3.11.4-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:517ba47ca04a55b1f50ee8df9fd97f6c37df5537d118fb2718952b8623860466"}, + {file = "pymongo-3.11.4-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:225c61e08fe517aede7912937939e09adf086c8e6f7e40d4c85ad678c2c2aea3"}, + {file = "pymongo-3.11.4-cp27-cp27m-win32.whl", hash = "sha256:e4e9db78b71db2b1684ee4ecc3e32c4600f18cdf76e6b9ae03e338e52ee4b168"}, + {file = "pymongo-3.11.4-cp27-cp27m-win_amd64.whl", hash = "sha256:8e0004b0393d72d76de94b4792a006cb960c1c65c7659930fbf9a81ce4341982"}, + {file = "pymongo-3.11.4-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:fedf0dee7a412ca6d1d6d92c158fe9cbaa8ea0cae90d268f9ccc0744de7a97d0"}, + {file = "pymongo-3.11.4-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:f947b359cc4769af8b49be7e37af01f05fcf15b401da2528021148e4a54426d1"}, + {file = "pymongo-3.11.4-cp34-cp34m-macosx_10_6_intel.whl", hash = "sha256:3a3498a8326111221560e930f198b495ea6926937e249f475052ffc6893a6680"}, + {file = "pymongo-3.11.4-cp34-cp34m-manylinux1_i686.whl", hash = 
"sha256:9a4f6e0b01df820ba9ed0b4e618ca83a1c089e48d4f268d0e00dcd49893d4549"}, + {file = "pymongo-3.11.4-cp34-cp34m-manylinux1_x86_64.whl", hash = "sha256:d65bac5f6724d9ea6f0b5a0f0e4952fbbf209adcf6b5583b54c54bd2fcd74dc0"}, + {file = "pymongo-3.11.4-cp34-cp34m-win32.whl", hash = "sha256:15b083d1b789b230e5ac284442d9ecb113c93f3785a6824f748befaab803b812"}, + {file = "pymongo-3.11.4-cp34-cp34m-win_amd64.whl", hash = "sha256:f08665d3cc5abc2f770f472a9b5f720a9b3ab0b8b3bb97c7c1487515e5653d39"}, + {file = "pymongo-3.11.4-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:977b1d4f868986b4ba5d03c317fde4d3b66e687d74473130cd598e3103db34fa"}, + {file = "pymongo-3.11.4-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:510cd3bfabb63a07405b7b79fae63127e34c118b7531a2cbbafc7a24fd878594"}, + {file = "pymongo-3.11.4-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:071552b065e809d24c5653fcc14968cfd6fde4e279408640d5ac58e3353a3c5f"}, + {file = "pymongo-3.11.4-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:f4ba58157e8ae33ee86fadf9062c506e535afd904f07f9be32731f4410a23b7f"}, + {file = "pymongo-3.11.4-cp35-cp35m-manylinux2014_i686.whl", hash = "sha256:b413117210fa6d92664c3d860571e8e8727c3e8f2ff197276c5d0cb365abd3ad"}, + {file = "pymongo-3.11.4-cp35-cp35m-manylinux2014_ppc64le.whl", hash = "sha256:08b8723248730599c9803ae4c97b8f3f76c55219104303c88cb962a31e3bb5ee"}, + {file = "pymongo-3.11.4-cp35-cp35m-manylinux2014_s390x.whl", hash = "sha256:8a41fdc751dc4707a4fafb111c442411816a7c225ebb5cadb57599534b5d5372"}, + {file = "pymongo-3.11.4-cp35-cp35m-manylinux2014_x86_64.whl", hash = "sha256:f664ed7613b8b18f0ce5696b146776266a038c19c5cd6efffa08ecc189b01b73"}, + {file = "pymongo-3.11.4-cp35-cp35m-win32.whl", hash = "sha256:5c36428cc4f7fae56354db7f46677fd21222fc3cb1e8829549b851172033e043"}, + {file = "pymongo-3.11.4-cp35-cp35m-win_amd64.whl", hash = "sha256:d0a70151d7de8a3194cdc906bcc1a42e14594787c64b0c1c9c975e5a2af3e251"}, + {file = "pymongo-3.11.4-cp36-cp36m-macosx_10_6_intel.whl", hash = 
"sha256:9b9298964389c180a063a9e8bac8a80ed42de11d04166b20249bfa0a489e0e0f"}, + {file = "pymongo-3.11.4-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:b2f41261b648cf5dee425f37ff14f4ad151c2f24b827052b402637158fd056ef"}, + {file = "pymongo-3.11.4-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:e02beaab433fd1104b2804f909e694cfbdb6578020740a9051597adc1cd4e19f"}, + {file = "pymongo-3.11.4-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:8898f6699f740ca93a0879ed07d8e6db02d68af889d0ebb3d13ab017e6b1af1e"}, + {file = "pymongo-3.11.4-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:62c29bc36a6d9be68fe7b5aaf1e120b4aa66a958d1e146601fcd583eb12cae7b"}, + {file = "pymongo-3.11.4-cp36-cp36m-manylinux2014_ppc64le.whl", hash = "sha256:424799c71ff435094e5fb823c40eebb4500f0e048133311e9c026467e8ccebac"}, + {file = "pymongo-3.11.4-cp36-cp36m-manylinux2014_s390x.whl", hash = "sha256:3551912f5c34d8dd7c32c6bb00ae04192af47f7b9f653608f107d19c1a21a194"}, + {file = "pymongo-3.11.4-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:5db59223ed1e634d842a053325f85f908359c6dac9c8ddce8ef145061fae7df8"}, + {file = "pymongo-3.11.4-cp36-cp36m-win32.whl", hash = "sha256:fea5cb1c63efe1399f0812532c7cf65458d38fd011be350bc5021dfcac39fba8"}, + {file = "pymongo-3.11.4-cp36-cp36m-win_amd64.whl", hash = "sha256:d4e62417e89b717a7bcd8576ac3108cd063225942cc91c5b37ff5465fdccd386"}, + {file = "pymongo-3.11.4-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:4c7e8c8e1e1918dcf6a652ac4b9d87164587c26fd2ce5dd81e73a5ab3b3d492f"}, + {file = "pymongo-3.11.4-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:38a7b5140a48fc91681cdb5cb95b7cd64640b43d19259fdd707fa9d5a715f2b2"}, + {file = "pymongo-3.11.4-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:aff3656af2add93f290731a6b8930b23b35c0c09569150130a58192b3ec6fc61"}, + {file = "pymongo-3.11.4-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:03be7ad107d252bb7325d4af6309fdd2c025d08854d35f0e7abc8bf048f4245e"}, + {file = 
"pymongo-3.11.4-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:6060794aac9f7b0644b299f46a9c6cbc0bc470bd01572f4134df140afd41ded6"}, + {file = "pymongo-3.11.4-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:73326b211e7410c8bd6a74500b1e3f392f39cf10862e243d00937e924f112c01"}, + {file = "pymongo-3.11.4-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:20d75ea11527331a2980ab04762a9d960bcfea9475c54bbeab777af880de61cd"}, + {file = "pymongo-3.11.4-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:3135dd574ef1286189f3f04a36c8b7a256376914f8cbbce66b94f13125ded858"}, + {file = "pymongo-3.11.4-cp37-cp37m-win32.whl", hash = "sha256:7c97554ea521f898753d9773891d0347ebfaddcc1dee2ad94850b163171bf1f1"}, + {file = "pymongo-3.11.4-cp37-cp37m-win_amd64.whl", hash = "sha256:a08c8b322b671857c81f4c30cd3c8df2895fd3c0e9358714f39e0ef8fb327702"}, + {file = "pymongo-3.11.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f3d851af3852f16ad4adc7ee054fd9c90a7a5063de94d815b7f6a88477b9f4c6"}, + {file = "pymongo-3.11.4-cp38-cp38-manylinux1_i686.whl", hash = "sha256:3bfc7689a1bacb9bcd2f2d5185d99507aa29f667a58dd8adaa43b5a348139e46"}, + {file = "pymongo-3.11.4-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:b8f94acd52e530a38f25e4d5bf7ddfdd4bea9193e718f58419def0d4406b58d3"}, + {file = "pymongo-3.11.4-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:e4b631688dfbdd61b5610e20b64b99d25771c6d52d9da73349342d2a0f11c46a"}, + {file = "pymongo-3.11.4-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:474e21d0e07cd09679e357d1dac76e570dab86665e79a9d3354b10a279ac6fb3"}, + {file = "pymongo-3.11.4-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:421d13523d11c57f57f257152bc4a6bb463aadf7a3918e9c96fefdd6be8dbfb8"}, + {file = "pymongo-3.11.4-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:0cabfc297f4cf921f15bc789a8fbfd7115eb9f813d3f47a74b609894bc66ab0d"}, + {file = "pymongo-3.11.4-cp38-cp38-manylinux2014_x86_64.whl", hash = 
"sha256:fe4189846448df013cd9df11bba38ddf78043f8c290a9f06430732a7a8601cce"}, + {file = "pymongo-3.11.4-cp38-cp38-win32.whl", hash = "sha256:eb4d176394c37a76e8b0afe54b12d58614a67a60a7f8c0dd3a5afbb013c01092"}, + {file = "pymongo-3.11.4-cp38-cp38-win_amd64.whl", hash = "sha256:fffff7bfb6799a763d3742c59c6ee7ffadda21abed557637bc44ed1080876484"}, + {file = "pymongo-3.11.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:13acf6164ead81c9fc2afa0e1ea6d6134352973ce2bb35496834fee057063c04"}, + {file = "pymongo-3.11.4-cp39-cp39-manylinux1_i686.whl", hash = "sha256:d360e5d5dd3d55bf5d1776964625018d85b937d1032bae1926dd52253decd0db"}, + {file = "pymongo-3.11.4-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:0aaf4d44f1f819360f9432df538d54bbf850f18152f34e20337c01b828479171"}, + {file = "pymongo-3.11.4-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:08bda7b2c522ff9f1e554570da16298271ebb0c56ab9699446aacba249008988"}, + {file = "pymongo-3.11.4-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:1a994a42f49dab5b6287e499be7d3d2751776486229980d8857ad53b8333d469"}, + {file = "pymongo-3.11.4-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:161fcd3281c42f644aa8dec7753cca2af03ce654e17d76da4f0dab34a12480ca"}, + {file = "pymongo-3.11.4-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:78f07961f4f214ea8e80be63cffd5cc158eb06cd922ffbf6c7155b11728f28f9"}, + {file = "pymongo-3.11.4-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:ad31f184dcd3271de26ab1f9c51574afb99e1b0e484ab1da3641256b723e4994"}, + {file = "pymongo-3.11.4-cp39-cp39-win32.whl", hash = "sha256:5e606846c049ed40940524057bfdf1105af6066688c0e6a1a3ce2038589bae70"}, + {file = "pymongo-3.11.4-cp39-cp39-win_amd64.whl", hash = "sha256:3491c7de09e44eded16824cb58cf9b5cc1dc6f066a0bb7aa69929d02aa53b828"}, + {file = "pymongo-3.11.4-py2.7-macosx-10.14-intel.egg", hash = "sha256:506a6dab4c7ffdcacdf0b8e70bd20eb2e77fa994519547c9d88d676400fcad58"}, + {file = "pymongo-3.11.4.tar.gz", hash = 
"sha256:539d4cb1b16b57026999c53e5aab857fe706e70ae5310cc8c232479923f932e6"}, ] pynput = [ {file = "pynput-1.7.3-py2.py3-none-any.whl", hash = "sha256:fea5777454f896bd79d35393088cd29a089f3b2da166f0848a922b1d5a807d4f"}, @@ -2107,28 +2116,28 @@ pynput = [ {file = "pynput-1.7.3.tar.gz", hash = "sha256:4e50b1a0ab86847e87e58f6d1993688b9a44f9f4c88d4712315ea8eb552ef828"}, ] pyobjc-core = [ - {file = "pyobjc-core-7.1.tar.gz", hash = "sha256:a0616d5d816b4471f8f782c3a9a8923d2cc85014d88ad4f7fec694be9e6ea349"}, - {file = "pyobjc_core-7.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f9fb45c9916f2a03ecd6b9ecde4c35d1d0f1a590ae2ea2372f9d9a360226ac1d"}, - {file = "pyobjc_core-7.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:fff8e87358c6195a2937004f279050cce3d4c02cd77acd73c5ad367307def855"}, - {file = "pyobjc_core-7.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:afb38efd3f2960eb49eb78552d465cfd025a9d6efa06cd4cd8694dafbe7c6e06"}, - {file = "pyobjc_core-7.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7cb329c4119044fe83bcb3c5d4794d636c706ff0cb7c1c77d36ef5c373100082"}, - {file = "pyobjc_core-7.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7913d7b20217c294900537faf58e5cc15942ed7af277bf05db25667d18255114"}, + {file = "pyobjc-core-7.2.tar.gz", hash = "sha256:9e9ec482d80ea030cdb1613d05a247f31eedabe6666d884d42dd890cc5fb0e05"}, + {file = "pyobjc_core-7.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:94b4d9de9d228db52dd35012096d63bdf8c1ace58ea3be1d5f6f39313cd502f2"}, + {file = "pyobjc_core-7.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:971cbd7189ae1aa03ef0d16124aa5bcd053779e0e6b6011a41c3dbd5b4ea7e88"}, + {file = "pyobjc_core-7.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9d93b20394008373d6d2856d49aaff26f4b97ff42d924a14516c8a82313ec8c0"}, + {file = "pyobjc_core-7.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:860183540d1be792c26426018139ac8ba75e85f675c59ba080ccdc52d8e74c7a"}, + {file = 
"pyobjc_core-7.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ffe61d3c2a404354daf2d895e34e38c5044453353581b3c396bf5365de26250c"}, ] pyobjc-framework-cocoa = [ - {file = "pyobjc-framework-Cocoa-7.1.tar.gz", hash = "sha256:67966152b3d38a0225176fceca2e9f56d849c8e7445548da09a00cb13155ec3e"}, - {file = "pyobjc_framework_Cocoa-7.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:bef77eafaac5eaf1d91d479d5483fd02216caa3edc27e8f5adc9af0b3fecdac3"}, - {file = "pyobjc_framework_Cocoa-7.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b2ea3582c456827dc20e648c905fdbcf8d3dfae89434f981e9b761cd07262049"}, - {file = "pyobjc_framework_Cocoa-7.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1a4050f2d776f40c2409a151c6f7896420e936934b3bdbfabedf91509637ed9b"}, - {file = "pyobjc_framework_Cocoa-7.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3f68f022f1f6d5985c418e10c6608c562fcf4bfe3714ec64fd10ce3dc6221bd4"}, - {file = "pyobjc_framework_Cocoa-7.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ecfefd4c48dae42275c18679c69f6f2fff970e711097515a0a8732fc10194018"}, + {file = "pyobjc-framework-Cocoa-7.2.tar.gz", hash = "sha256:c8b23f03dc3f4436d36c0fd006a8a084835c4f6015187df7c3aa5de8ecd5c653"}, + {file = "pyobjc_framework_Cocoa-7.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8e5dd5daa0096755937ec24c345a4b07c3fa131a457f99e0fdeeb01979178ec7"}, + {file = "pyobjc_framework_Cocoa-7.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:828d183947fc7746953fd0c9b1092cc423745ba0b49719e7b7d1e1614aaa20ec"}, + {file = "pyobjc_framework_Cocoa-7.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7e4c6d7baa0c2ab5ea5efb8836ad0b3b3976cffcfc6195c1f195e826c6eb5744"}, + {file = "pyobjc_framework_Cocoa-7.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c9a9d1d49cc5a810773c88d6de821e60c8cc41d01113cf1b9e7662938f5f7d66"}, + {file = "pyobjc_framework_Cocoa-7.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:506c2cd09f421eac92b9008a0142174c3d1d70ecd4b0e3fa2b924767995fd14e"}, ] pyobjc-framework-quartz = [ - {file = "pyobjc-framework-Quartz-7.1.tar.gz", hash = "sha256:73102c9f4dbfa13275621014785ab3b684cf03ce93a4b0b270500c795349bea9"}, - {file = "pyobjc_framework_Quartz-7.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:7207a26244f02d4534ebb007fa55a9dc7c1b7fbb490d1e89e0d62cfd175e20f3"}, - {file = "pyobjc_framework_Quartz-7.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5bc7a4fb3ea80b5af6910cc27729a0774a96327a69583fcf28057cb2ffce33ac"}, - {file = "pyobjc_framework_Quartz-7.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c0469d60d4a79fc252f74adaa8177d2c680621d858c1b8ef19c411e903e2c892"}, - {file = "pyobjc_framework_Quartz-7.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:04953c031fc35020682bd4613b9b5a9688bdb9eab7ed76fd8dcf028783568b4f"}, - {file = "pyobjc_framework_Quartz-7.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d8e0c086faf649f86386d0ed99194c6d0704b602576e2b258532b635b510b790"}, + {file = "pyobjc-framework-Quartz-7.2.tar.gz", hash = "sha256:ea554e5697bc6747a4ce793c0b0036da16622b44ff75196d6124603008922afa"}, + {file = "pyobjc_framework_Quartz-7.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dc61fe61d26f797e4335f3ffc891bcef64624c728c2603e3307b3910580b2cb8"}, + {file = "pyobjc_framework_Quartz-7.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ad8103cc38923f2708904db11a0992ea960125ce6adf7b4c7a77d8fdafd412c4"}, + {file = "pyobjc_framework_Quartz-7.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4549d17ca41f0bf62792d5bc4b4293ba9a6cc560014b3e18ba22c65e4a5030d2"}, + {file = "pyobjc_framework_Quartz-7.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:da16e4f1e13cb7b02e30fa538cbb3a356e4a694bbc2bb26d2bd100ca12a54ff6"}, + {file = "pyobjc_framework_Quartz-7.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c1f6471177a39535cd0358ae29b8f3d31fe778a21deb74105c448c4e726619d7"}, ] pyparsing = [ {file = 
"pyparsing-2.4.7-py2.py3-none-any.whl", hash = "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"}, @@ -2148,34 +2157,26 @@ pyqt5-qt5 = [ {file = "PyQt5_Qt5-5.15.2-py3-none-win_amd64.whl", hash = "sha256:750b78e4dba6bdf1607febedc08738e318ea09e9b10aea9ff0d73073f11f6962"}, ] pyqt5-sip = [ - {file = "PyQt5_sip-12.8.1-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:bb5a87b66fc1445915104ee97f7a20a69decb42f52803e3b0795fa17ff88226c"}, - {file = "PyQt5_sip-12.8.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:a29e2ac399429d3b7738f73e9081e50783e61ac5d29344e0802d0dcd6056c5a2"}, - {file = "PyQt5_sip-12.8.1-cp35-cp35m-win32.whl", hash = "sha256:0304ca9114b9817a270f67f421355075b78ff9fc25ac58ffd72c2601109d2194"}, - {file = "PyQt5_sip-12.8.1-cp35-cp35m-win_amd64.whl", hash = "sha256:84ba7746762bd223bed22428e8561aa267a229c28344c2d28c5d5d3f8970cffb"}, - {file = "PyQt5_sip-12.8.1-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:7b81382ce188d63890a0e35abe0f9bb946cabc873a31873b73583b0fc84ac115"}, - {file = "PyQt5_sip-12.8.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:b6d42250baec52a5f77de64e2951d001c5501c3a2df2179f625b241cbaec3369"}, - {file = "PyQt5_sip-12.8.1-cp36-cp36m-win32.whl", hash = "sha256:6c1ebee60f1d2b3c70aff866b7933d8d8d7646011f7c32f9321ee88c290aa4f9"}, - {file = "PyQt5_sip-12.8.1-cp36-cp36m-win_amd64.whl", hash = "sha256:34dcd29be47553d5f016ff86e89e24cbc5eebae92eb2f96fb32d2d7ba028c43c"}, - {file = "PyQt5_sip-12.8.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ed897c58acf4a3cdca61469daa31fe6e44c33c6c06a37c3f21fab31780b3b86a"}, - {file = "PyQt5_sip-12.8.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:a1b8ef013086e224b8e86c93f880f776d01b59195bdfa2a8e0b23f0480678fec"}, - {file = "PyQt5_sip-12.8.1-cp37-cp37m-win32.whl", hash = "sha256:0cd969be528c27bbd4755bd323dff4a79a8fdda28215364e6ce3e069cb56c2a9"}, - {file = "PyQt5_sip-12.8.1-cp37-cp37m-win_amd64.whl", hash = 
"sha256:c9800729badcb247765e4ffe2241549d02da1fa435b9db224845bc37c3e99cb0"}, - {file = "PyQt5_sip-12.8.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9312ec47cac4e33c11503bc1cbeeb0bdae619620472f38e2078c5a51020a930f"}, - {file = "PyQt5_sip-12.8.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:2f35e82fd7ec1e1f6716e9154721c7594956a4f5bd4f826d8c6a6453833cc2f0"}, - {file = "PyQt5_sip-12.8.1-cp38-cp38-win32.whl", hash = "sha256:da9c9f1e65b9d09e73bd75befc82961b6b61b5a3b9d0a7c832168e1415f163c6"}, - {file = "PyQt5_sip-12.8.1-cp38-cp38-win_amd64.whl", hash = "sha256:832fd60a264de4134c2824d393320838f3ab648180c9c357ec58a74524d24507"}, - {file = "PyQt5_sip-12.8.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c317ab1263e6417c498b81f5c970a9b1af7acefab1f80b4cc0f2f8e661f29fc5"}, - {file = "PyQt5_sip-12.8.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:c9d6d448c29dc6606bb7974696608f81f4316c8234f7c7216396ed110075e777"}, - {file = "PyQt5_sip-12.8.1-cp39-cp39-win32.whl", hash = "sha256:5a011aeff89660622a6d5c3388d55a9d76932f3b82c95e82fc31abd8b1d2990d"}, - {file = "PyQt5_sip-12.8.1-cp39-cp39-win_amd64.whl", hash = "sha256:f168f0a7f32b81bfeffdf003c36f25d81c97dee5eb67072a5183e761fe250f13"}, - {file = "PyQt5_sip-12.8.1.tar.gz", hash = "sha256:30e944db9abee9cc757aea16906d4198129558533eb7fadbe48c5da2bd18e0bd"}, + {file = "PyQt5_sip-12.9.0-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:d85002238b5180bce4b245c13d6face848faa1a7a9e5c6e292025004f2fd619a"}, + {file = "PyQt5_sip-12.9.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:83c3220b1ca36eb8623ba2eb3766637b19eb0ce9f42336ad8253656d32750c0a"}, + {file = "PyQt5_sip-12.9.0-cp36-cp36m-win_amd64.whl", hash = "sha256:69a3ad4259172e2b1aa9060de211efac39ddd734a517b1924d9c6c0cc4f55f96"}, + {file = "PyQt5_sip-12.9.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:42274a501ab4806d2c31659170db14c282b8313d2255458064666d9e70d96206"}, + {file = "PyQt5_sip-12.9.0-cp37-cp37m-manylinux1_x86_64.whl", hash = 
"sha256:6a8701892a01a5a2a4720872361197cc80fdd5f49c8482d488ddf38c9c84f055"}, + {file = "PyQt5_sip-12.9.0-cp37-cp37m-win_amd64.whl", hash = "sha256:4347bd81d30c8e3181e553b3734f91658cfbdd8f1a19f254777f906870974e6d"}, + {file = "PyQt5_sip-12.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c446971c360a0a1030282a69375a08c78e8a61d568bfd6dab3dcc5cf8817f644"}, + {file = "PyQt5_sip-12.9.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:fc43f2d7c438517ee33e929e8ae77132749c15909afab6aeece5fcf4147ffdb5"}, + {file = "PyQt5_sip-12.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:c5216403d4d8d857ec4a61f631d3945e44fa248aa2415e9ee9369ab7c8a4d0c7"}, + {file = "PyQt5_sip-12.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a25b9843c7da6a1608f310879c38e6434331aab1dc2fe6cb65c14f1ecf33780e"}, + {file = "PyQt5_sip-12.9.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:dd05c768c2b55ffe56a9d49ce6cc77cdf3d53dbfad935258a9e347cbfd9a5850"}, + {file = "PyQt5_sip-12.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:b09f4cd36a4831229fb77c424d89635fa937d97765ec90685e2f257e56a2685a"}, + {file = "PyQt5_sip-12.9.0.tar.gz", hash = "sha256:d3e4489d7c2b0ece9d203ae66e573939f7f60d4d29e089c9f11daa17cfeaae32"}, ] pyrsistent = [ {file = "pyrsistent-0.17.3.tar.gz", hash = "sha256:2e636185d9eb976a18a8a8e96efce62f2905fea90041958d8cc2a189756ebf3e"}, ] pytest = [ - {file = "pytest-6.2.3-py3-none-any.whl", hash = "sha256:6ad9c7bdf517a808242b998ac20063c41532a570d088d77eec1ee12b0b5574bc"}, - {file = "pytest-6.2.3.tar.gz", hash = "sha256:671238a46e4df0f3498d1c3270e5deb9b32d25134c99b7d75370a68cfbe9b634"}, + {file = "pytest-6.2.4-py3-none-any.whl", hash = "sha256:91ef2131a9bd6be8f76f1f08eac5c5317221d6ad1e143ae03894b862e8976890"}, + {file = "pytest-6.2.4.tar.gz", hash = "sha256:50bcad0a0b9c5a72c8e4e7c9855a3ad496ca6a881a3641b4260605450772c54b"}, ] pytest-cov = [ {file = "pytest-cov-2.11.1.tar.gz", hash = "sha256:359952d9d39b9f822d9d29324483e7ba04a3a17dd7d05aa6beb7ea01e359e5f7"}, @@ -2236,9 +2237,13 @@ secretstorage = [ 
{file = "SecretStorage-3.3.1-py3-none-any.whl", hash = "sha256:422d82c36172d88d6a0ed5afdec956514b189ddbfb72fefab0c8a1cee4eaf71f"}, {file = "SecretStorage-3.3.1.tar.gz", hash = "sha256:fd666c51a6bf200643495a04abb261f83229dcb6fd8472ec393df7ffc8b6f195"}, ] +semver = [ + {file = "semver-2.13.0-py2.py3-none-any.whl", hash = "sha256:ced8b23dceb22134307c1b8abfa523da14198793d9787ac838e70e29e77458d4"}, + {file = "semver-2.13.0.tar.gz", hash = "sha256:fa0fe2722ee1c3f57eac478820c3a5ae2f624af8264cbdf9000c980ff7f75e3f"}, +] six = [ - {file = "six-1.15.0-py2.py3-none-any.whl", hash = "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"}, - {file = "six-1.15.0.tar.gz", hash = "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"}, + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] snowballstemmer = [ {file = "snowballstemmer-2.1.0-py2.py3-none-any.whl", hash = "sha256:b51b447bea85f9968c13b650126a888aabd4cb4463fca868ec596826325dedc2"}, @@ -2249,8 +2254,8 @@ speedcopy = [ {file = "speedcopy-2.1.0.tar.gz", hash = "sha256:8bb1a6c735900b83901a7be84ba2175ed3887c13c6786f97dea48f2ea7d504c2"}, ] sphinx = [ - {file = "Sphinx-3.5.4-py3-none-any.whl", hash = "sha256:2320d4e994a191f4b4be27da514e46b3d6b420f2ff895d064f52415d342461e8"}, - {file = "Sphinx-3.5.4.tar.gz", hash = "sha256:19010b7b9fa0dc7756a6e105b2aacd3a80f798af3c25c273be64d7beeb482cb1"}, + {file = "Sphinx-4.0.1-py3-none-any.whl", hash = "sha256:b2566f5f339737a6ef37198c47d56de1f4a746c722bebdb2fe045c34bfd8b9d0"}, + {file = "Sphinx-4.0.1.tar.gz", hash = "sha256:cf5104777571b2b7f06fa88ee08fade24563f4a0594cf4bd17d31c47b8740b4c"}, ] sphinx-qt-documentation = [ {file = "sphinx_qt_documentation-0.3-py3-none-any.whl", hash = "sha256:bee247cb9e4fc03fc496d07adfdb943100e1103320c3e5e820e0cfa7c790d9b6"}, @@ -2328,9 
+2333,9 @@ typed-ast = [ {file = "typed_ast-1.4.3.tar.gz", hash = "sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65"}, ] typing-extensions = [ - {file = "typing_extensions-3.7.4.3-py2-none-any.whl", hash = "sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f"}, - {file = "typing_extensions-3.7.4.3-py3-none-any.whl", hash = "sha256:7cb407020f00f7bfc3cb3e7881628838e69d8f3fcab2f64742a5e76b2f841918"}, - {file = "typing_extensions-3.7.4.3.tar.gz", hash = "sha256:99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c"}, + {file = "typing_extensions-3.10.0.0-py2-none-any.whl", hash = "sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497"}, + {file = "typing_extensions-3.10.0.0-py3-none-any.whl", hash = "sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84"}, + {file = "typing_extensions-3.10.0.0.tar.gz", hash = "sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342"}, ] uritemplate = [ {file = "uritemplate-3.0.1-py2.py3-none-any.whl", hash = "sha256:07620c3f3f8eed1f12600845892b0e036a2420acf513c53f7de0abd911a5894f"}, @@ -2345,8 +2350,8 @@ wcwidth = [ {file = "wcwidth-0.2.5.tar.gz", hash = "sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83"}, ] websocket-client = [ - {file = "websocket_client-0.58.0-py2.py3-none-any.whl", hash = "sha256:44b5df8f08c74c3d82d28100fdc81f4536809ce98a17f0757557813275fbb663"}, - {file = "websocket_client-0.58.0.tar.gz", hash = "sha256:63509b41d158ae5b7f67eb4ad20fecbb4eee99434e73e140354dc3ff8e09716f"}, + {file = "websocket-client-0.59.0.tar.gz", hash = "sha256:d376bd60eace9d437ab6d7ee16f4ab4e821c9dae591e1b783c58ebd8aaf80c5c"}, + {file = "websocket_client-0.59.0-py2.py3-none-any.whl", hash = "sha256:2e50d26ca593f70aba7b13a489435ef88b8fc3b5c5643c1ce8808ff9b40f0b32"}, ] wrapt = [ {file = "wrapt-1.12.1.tar.gz", hash = "sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7"}, diff --git a/pyproject.toml 
b/pyproject.toml index 12b9c4446d..f7b5dd1426 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [tool.poetry] name = "OpenPype" -version = "3.0.0-beta2" -description = "Multi-platform open-source pipeline built around the Avalon platform, expanding it with extra features and integrations." +version = "3.0.0-rc4" +description = "Open VFX and Animation pipeline with support." authors = ["OpenPype Team "] license = "MIT License" homepage = "https://openpype.io" @@ -36,6 +36,7 @@ pyqt5 = "^5.12.2" # ideally should be replaced with PySide2 "Qt.py" = "^1.3.3" speedcopy = "^2.1" six = "^1.15" +semver = "^2.13.0" # for version resolution wsrpc_aiohttp = "^3.1.1" # websocket server pywin32 = { version = "300", markers = "sys_platform == 'win32'" } jinxed = [ @@ -97,9 +98,9 @@ url = "https://distribute.openpype.io/thirdparty/oiio_tools-2.2.0-windows.zip" hash = "fd2e00278e01e85dcee7b4a6969d1a16f13016ec16700fb0366dbb1b1f3c37ad" [openpype.thirdparty.oiio.linux] -url = "https://distribute.openpype.io/thirdparty/oiio-2.2.0-linux.tgz" -hash = "sha256:..." +url = "https://distribute.openpype.io/thirdparty/oiio_tools-2.2.12-linux.tgz" +hash = "de63a8bf7f6c45ff59ecafeba13123f710c2cbc1783ec9e0b938e980d4f5c37f" [openpype.thirdparty.oiio.darwin] url = "https://distribute.openpype.io/thirdparty/oiio-2.2.0-darwin.tgz" -hash = "sha256:..." \ No newline at end of file +hash = "sha256:..." 
diff --git a/repos/avalon-core b/repos/avalon-core index 807e8577a0..cfd4191e36 160000 --- a/repos/avalon-core +++ b/repos/avalon-core @@ -1 +1 @@ -Subproject commit 807e8577a0268580a2934ba38889911adad26eb1 +Subproject commit cfd4191e364b47de7364096f45d9d9d9a901692a diff --git a/setup.py b/setup.py index fd589e5251..c096befa34 100644 --- a/setup.py +++ b/setup.py @@ -45,7 +45,8 @@ install_requires = [ "googleapiclient", "httplib2", # Harmony implementation - "filecmp" + "filecmp", + "dns" ] includes = [] @@ -69,7 +70,11 @@ if sys.platform == "win32": "pythoncom" ]) -build_options = dict( + +icon_path = openpype_root / "igniter" / "openpype.ico" +mac_icon_path = openpype_root / "igniter" / "openpype.icns" + +build_exe_options = dict( packages=install_requires, includes=includes, excludes=excludes, @@ -78,13 +83,16 @@ build_options = dict( optimize=0 ) -icon_path = openpype_root / "igniter" / "openpype.ico" +bdist_mac_options = dict( + bundle_name="OpenPype", + iconfile=mac_icon_path +) executables = [ - Executable("start.py", base=None, - target_name="openpype_console", icon=icon_path.as_posix()), Executable("start.py", base=base, - target_name="openpype_gui", icon=icon_path.as_posix()) + target_name="openpype_gui", icon=icon_path.as_posix()), + Executable("start.py", base=None, + target_name="openpype_console", icon=icon_path.as_posix()) ] setup( @@ -93,7 +101,8 @@ setup( description="Ultimate pipeline", cmdclass={"build_sphinx": BuildDoc}, options={ - "build_exe": build_options, + "build_exe": build_exe_options, + "bdist_mac": bdist_mac_options, "build_sphinx": { "project": "OpenPype", "version": __version__, diff --git a/start.py b/start.py index a2a03f112c..660d0c9006 100644 --- a/start.py +++ b/start.py @@ -38,47 +38,47 @@ So, bootstrapping OpenPype looks like this:: .. 
code-block:: bash -+-------------------------------------------------------+ -| Determine MongoDB connection: | -| Use `OPENPYPE_MONGO`, system keyring `openPypeMongo` | -+--------------------------|----------------------------+ - .--- Found? --. +┌───────────────────────────────────────────────────────┐ +│ Determine MongoDB connection: │ +│ Use `OPENPYPE_MONGO`, system keyring `openPypeMongo` │ +└──────────────────────────┬────────────────────────────┘ + ┌───- Found? -─┐ YES NO - | | - | +------v--------------+ - | | Fire up Igniter GUI |<---------+ - | | and ask User | | - | +---------------------+ | - | | - | | -+-----------------v------------------------------------+ | -| Get location of OpenPype: | | -| 1) Test for `OPENPYPE_PATH` environment variable | | -| 2) Test `openPypePath` in registry setting | | -| 3) Test user data directory | | -| ................................................... | | -| If running from frozen code: | | -| - Use latest one found in user data dir | | -| If running from live code: | | -| - Use live code and install it to user data dir | | -| * can be overridden with `--use-version` argument | | -+-------------------------|----------------------------+ | - .-- Is OpenPype found? --. | - YES NO | - | | | - | +---------------v-----------------+ | - | | Look in `OPENPYPE_PATH`, find | | - | | latest version and install it | | - | | to user data dir. | | - | +--------------|------------------+ | - | .-- Is OpenPype found? --. 
| - | YES NO --------+ - | | - |<---------+ - | -+-------------v------------+ -| Run OpenPype | -+--------------------------+ + │ │ + │ ┌──────┴──────────────┐ + │ │ Fire up Igniter GUI ├<-────────┐ + │ │ and ask User │ │ + │ └─────────────────────┘ │ + │ │ + │ │ +┌─────────────────┴─────────────────────────────────────┐ │ +│ Get location of OpenPype: │ │ +│ 1) Test for `OPENPYPE_PATH` environment variable │ │ +│ 2) Test `openPypePath` in registry setting │ │ +│ 3) Test user data directory │ │ +│ ····················································· │ │ +│ If running from frozen code: │ │ +│ - Use latest one found in user data dir │ │ +│ If running from live code: │ │ +│ - Use live code and install it to user data dir │ │ +│ * can be overridden with `--use-version` argument │ │ +└──────────────────────────┬────────────────────────────┘ │ + ┌─- Is OpenPype found? -─┐ │ + YES NO │ + │ │ │ + │ ┌─────────────────┴─────────────┐ │ + │ │ Look in `OPENPYPE_PATH`, find │ │ + │ │ latest version and install it │ │ + │ │ to user data dir. │ │ + │ └──────────────┬────────────────┘ │ + │ ┌─- Is OpenPype found? -─┐ │ + │ YES NO -──────┘ + │ │ + ├<-───────┘ + │ +┌─────────────┴────────────┐ +│ Run OpenPype │ +└─────═══════════════──────┘ Todo: @@ -99,6 +99,8 @@ import traceback import subprocess import site from pathlib import Path +import platform + # OPENPYPE_ROOT is variable pointing to build (or code) directory # WARNING `OPENPYPE_ROOT` must be defined before igniter import @@ -110,11 +112,23 @@ if not getattr(sys, 'frozen', False): else: OPENPYPE_ROOT = os.path.dirname(sys.executable) + # FIX #1469: Certificates from certifi are not available in some + # macos builds, so connection to ftrack/mongo will fail with + # unable to verify certificate issuer error. This will add certifi + # certificates so ssl can see them. + # WARNING: this can break stuff if custom certificates are used. 
In that + # case they need to be merged to certificate bundle and SSL_CERT_FILE + # should point to them. + if not os.getenv("SSL_CERT_FILE") and platform.system().lower() == "darwin": # noqa: E501 + ssl_cert_file = Path(OPENPYPE_ROOT) / "dependencies" / "certifi" / "cacert.pem" # noqa: E501 + os.environ["SSL_CERT_FILE"] = ssl_cert_file.as_posix() + # add dependencies folder to sys.pat for frozen code frozen_libs = os.path.normpath( os.path.join(OPENPYPE_ROOT, "dependencies") ) sys.path.append(frozen_libs) + sys.path.insert(0, OPENPYPE_ROOT) # add stuff from `/dependencies` to PYTHONPATH. pythonpath = os.getenv("PYTHONPATH", "") paths = pythonpath.split(os.pathsep) @@ -123,7 +137,10 @@ else: import igniter # noqa: E402 from igniter import BootstrapRepos # noqa: E402 -from igniter.tools import get_openpype_path_from_db # noqa +from igniter.tools import ( + get_openpype_path_from_db, + validate_mongo_connection +) # noqa from igniter.bootstrap_repos import OpenPypeVersion # noqa: E402 bootstrap = BootstrapRepos() @@ -285,6 +302,10 @@ def _process_arguments() -> tuple: if return_code not in [2, 3]: sys.exit(return_code) + idx = sys.argv.index("igniter") + sys.argv.pop(idx) + sys.argv.insert(idx, "tray") + return use_version, use_staging @@ -307,25 +328,39 @@ def _determine_mongodb() -> str: # try system keyring try: openpype_mongo = bootstrap.secure_registry.get_item( - "openPypeMongo") + "openPypeMongo" + ) except ValueError: - print("*** No DB connection string specified.") - print("--- launching setup UI ...") - import igniter - igniter.open_dialog() + pass + if openpype_mongo: + result, msg = validate_mongo_connection(openpype_mongo) + if not result: + print(msg) + openpype_mongo = None + + if not openpype_mongo: + print("*** No DB connection string specified.") + print("--- launching setup UI ...") + + result = igniter.open_dialog() + if result == 0: + raise RuntimeError("MongoDB URL was not defined") + + openpype_mongo = os.getenv("OPENPYPE_MONGO") + if not 
openpype_mongo: try: openpype_mongo = bootstrap.secure_registry.get_item( "openPypeMongo") except ValueError: - raise RuntimeError("missing mongodb url") + raise RuntimeError("Missing MongoDB url") return openpype_mongo def _initialize_environment(openpype_version: OpenPypeVersion) -> None: version_path = openpype_version.path - os.environ["OPENPYPE_VERSION"] = openpype_version.version + os.environ["OPENPYPE_VERSION"] = str(openpype_version) # set OPENPYPE_REPOS_ROOT to point to currently used OpenPype version. os.environ["OPENPYPE_REPOS_ROOT"] = os.path.normpath( version_path.as_posix() @@ -382,6 +417,26 @@ def _find_frozen_openpype(use_version: str = None, openpype_version = None openpype_versions = bootstrap.find_openpype(include_zips=True, staging=use_staging) + # get local frozen version and add it to detected version so if it is + # newer it will be used instead. + local_version_str = bootstrap.get_version( + Path(os.environ["OPENPYPE_ROOT"])) + if local_version_str: + local_version = OpenPypeVersion( + version=local_version_str, + path=Path(os.environ["OPENPYPE_ROOT"])) + if local_version not in openpype_versions: + openpype_versions.append(local_version) + openpype_versions.sort() + # if latest is currently running, ditch whole list + # and run from current without installing it. + if local_version == openpype_versions[-1]: + os.environ["OPENPYPE_TRYOUT"] = "1" + openpype_versions = [] + + else: + print("!!! Warning: cannot determine current running version.") + if not os.getenv("OPENPYPE_TRYOUT"): try: # use latest one found (last in the list is latest) @@ -429,12 +484,9 @@ def _find_frozen_openpype(use_version: str = None, use_version, openpype_versions) if not version_path: - if use_version is not None: - if not openpype_version: - ... - else: - print(("!!! Specified version was not found, using " - "latest available")) + if use_version is not None and openpype_version: + print(("!!! 
Specified version was not found, using " + "latest available")) # specified version was not found so use latest detected. version_path = openpype_version.path print(f">>> Using version [ {openpype_version} ]") @@ -457,7 +509,15 @@ def _find_frozen_openpype(use_version: str = None, if openpype_version.path.is_file(): print(">>> Extracting zip file ...") - version_path = bootstrap.extract_openpype(openpype_version) + try: + version_path = bootstrap.extract_openpype(openpype_version) + except OSError as e: + print("!!! failed: {}".format(str(e))) + sys.exit(1) + else: + # cleanup zip after extraction + os.unlink(openpype_version.path) + openpype_version.path = version_path _initialize_environment(openpype_version) diff --git a/test_localsystem.txt b/test_localsystem.txt new file mode 100644 index 0000000000..dde7986af8 --- /dev/null +++ b/test_localsystem.txt @@ -0,0 +1 @@ +I have run diff --git a/tests/igniter/test_bootstrap_repos.py b/tests/igniter/test_bootstrap_repos.py index 6c70380ab6..743131acfa 100644 --- a/tests/igniter/test_bootstrap_repos.py +++ b/tests/igniter/test_bootstrap_repos.py @@ -5,72 +5,76 @@ import sys from collections import namedtuple from pathlib import Path from zipfile import ZipFile +from uuid import uuid4 import appdirs import pytest from igniter.bootstrap_repos import BootstrapRepos -from igniter.bootstrap_repos import PypeVersion -from pype.lib import OpenPypeSettingsRegistry +from igniter.bootstrap_repos import OpenPypeVersion +from igniter.user_settings import OpenPypeSettingsRegistry @pytest.fixture def fix_bootstrap(tmp_path, pytestconfig): + """This will fix BoostrapRepos with temp paths.""" bs = BootstrapRepos() bs.live_repo_dir = pytestconfig.rootpath / 'repos' bs.data_dir = tmp_path return bs -def test_pype_version(): - v1 = PypeVersion(1, 2, 3) +def test_openpype_version(printer): + """Test determination of OpenPype versions.""" + v1 = OpenPypeVersion(1, 2, 3) assert str(v1) == "1.2.3" - v2 = PypeVersion(1, 2, 3, client="x") + 
v2 = OpenPypeVersion(1, 2, 3, prerelease="x") assert str(v2) == "1.2.3-x" - assert v1 < v2 + assert v1 > v2 - v3 = PypeVersion(1, 2, 3, variant="staging") - assert str(v3) == "1.2.3-staging" + v3 = OpenPypeVersion(1, 2, 3, staging=True) + assert str(v3) == "1.2.3+staging" - v4 = PypeVersion(1, 2, 3, variant="staging", client="client") - assert str(v4) == "1.2.3-client-staging" - assert v3 < v4 - assert v1 < v4 + v4 = OpenPypeVersion(1, 2, 3, staging="True", prerelease="rc.1") + assert str(v4) == "1.2.3-rc.1+staging" + assert v3 > v4 + assert v1 > v4 + assert v4 < OpenPypeVersion(1, 2, 3, prerelease="rc.1") - v5 = PypeVersion(1, 2, 3, variant="foo", client="x") - assert str(v5) == "1.2.3-x" + v5 = OpenPypeVersion(1, 2, 3, build="foo", prerelease="x") + assert str(v5) == "1.2.3-x+foo" assert v4 < v5 - v6 = PypeVersion(1, 2, 3, variant="foo") - assert str(v6) == "1.2.3" + v6 = OpenPypeVersion(1, 2, 3, prerelease="foo") + assert str(v6) == "1.2.3-foo" - v7 = PypeVersion(2, 0, 0) + v7 = OpenPypeVersion(2, 0, 0) assert v1 < v7 - v8 = PypeVersion(0, 1, 5) + v8 = OpenPypeVersion(0, 1, 5) assert v8 < v7 - v9 = PypeVersion(1, 2, 4) + v9 = OpenPypeVersion(1, 2, 4) assert v9 > v1 - v10 = PypeVersion(1, 2, 2) + v10 = OpenPypeVersion(1, 2, 2) assert v10 < v1 - v11 = PypeVersion(1, 2, 3, path=Path("/foo/bar")) + v11 = OpenPypeVersion(1, 2, 3, path=Path("/foo/bar")) assert v10 < v11 assert v5 == v2 sort_versions = [ - PypeVersion(3, 2, 1), - PypeVersion(1, 2, 3), - PypeVersion(0, 0, 1), - PypeVersion(4, 8, 10), - PypeVersion(4, 8, 20), - PypeVersion(4, 8, 9), - PypeVersion(1, 2, 3, variant="staging"), - PypeVersion(1, 2, 3, client="client") + OpenPypeVersion(3, 2, 1), + OpenPypeVersion(1, 2, 3), + OpenPypeVersion(0, 0, 1), + OpenPypeVersion(4, 8, 10), + OpenPypeVersion(4, 8, 20), + OpenPypeVersion(4, 8, 9), + OpenPypeVersion(1, 2, 3, staging=True), + OpenPypeVersion(1, 2, 3, build="foo") ] res = sorted(sort_versions) @@ -81,57 +85,51 @@ def test_pype_version(): str_versions = [ 
"5.5.1", - "5.5.2-client", - "5.5.3-client-strange", - "5.5.4-staging", - "5.5.5-staging-client", + "5.5.2-foo", + "5.5.3-foo+strange", + "5.5.4+staging", + "5.5.5+staging-client", "5.6.3", - "5.6.3-staging" + "5.6.3+staging" ] - res_versions = [] - for v in str_versions: - res_versions.append(PypeVersion(version=v)) - + res_versions = [OpenPypeVersion(version=v) for v in str_versions] sorted_res_versions = sorted(res_versions) assert str(sorted_res_versions[0]) == str_versions[0] assert str(sorted_res_versions[-1]) == str_versions[5] - with pytest.raises(ValueError): - _ = PypeVersion() + with pytest.raises(TypeError): + _ = OpenPypeVersion() with pytest.raises(ValueError): - _ = PypeVersion(major=1) + _ = OpenPypeVersion(version="booobaa") - with pytest.raises(ValueError): - _ = PypeVersion(version="booobaa") - - v11 = PypeVersion(version="4.6.7-client-staging") + v11 = OpenPypeVersion(version="4.6.7-foo+staging") assert v11.major == 4 assert v11.minor == 6 - assert v11.subversion == 7 - assert v11.variant == "staging" - assert v11.client == "client" + assert v11.patch == 7 + assert v11.staging is True + assert v11.prerelease == "foo" def test_get_main_version(): - ver = PypeVersion(1, 2, 3, variant="staging", client="foo") + ver = OpenPypeVersion(1, 2, 3, staging=True, prerelease="foo") assert ver.get_main_version() == "1.2.3" def test_get_version_path_from_list(): versions = [ - PypeVersion(1, 2, 3, path=Path('/foo/bar')), - PypeVersion(3, 4, 5, variant="staging", path=Path("/bar/baz")), - PypeVersion(6, 7, 8, client="x", path=Path("boo/goo")) + OpenPypeVersion(1, 2, 3, path=Path('/foo/bar')), + OpenPypeVersion(3, 4, 5, staging=True, path=Path("/bar/baz")), + OpenPypeVersion(6, 7, 8, prerelease="x", path=Path("boo/goo")) ] path = BootstrapRepos.get_version_path_from_list( - "3.4.5-staging", versions) + "3.4.5+staging", versions) assert path == Path("/bar/baz") -def test_search_string_for_pype_version(printer): +def 
test_search_string_for_openpype_version(printer): strings = [ ("3.0.1", True), ("foo-3.0", False), @@ -142,106 +140,112 @@ def test_search_string_for_pype_version(printer): ] for ver_string in strings: printer(f"testing {ver_string[0]} should be {ver_string[1]}") - assert PypeVersion.version_in_str(ver_string[0])[0] == ver_string[1] + assert OpenPypeVersion.version_in_str(ver_string[0])[0] == \ + ver_string[1] @pytest.mark.slow -def test_install_live_repos(fix_bootstrap, printer): - pype_version = fix_bootstrap.create_version_from_live_code() +def test_install_live_repos(fix_bootstrap, printer, monkeypatch, pytestconfig): + monkeypatch.setenv("OPENPYPE_ROOT", pytestconfig.rootpath.as_posix()) + monkeypatch.setenv("OPENPYPE_DATABASE_NAME", str(uuid4())) + openpype_version = fix_bootstrap.create_version_from_live_code() sep = os.path.sep expected_paths = [ - f"{pype_version.path}{sep}repos{sep}avalon-core", - f"{pype_version.path}{sep}repos{sep}avalon-unreal-integration", - f"{pype_version.path}" + f"{openpype_version.path}{sep}repos{sep}avalon-core", + f"{openpype_version.path}{sep}repos{sep}avalon-unreal-integration", + f"{openpype_version.path}" ] printer("testing zip creation") - assert os.path.exists(pype_version.path), "zip archive was not created" - fix_bootstrap.add_paths_from_archive(pype_version.path) + assert os.path.exists(openpype_version.path), "zip archive was not created" + fix_bootstrap.add_paths_from_archive(openpype_version.path) for ep in expected_paths: assert ep in sys.path, f"{ep} not set correctly" - printer("testing pype imported") - del sys.modules["pype"] - import pype # noqa: F401 + printer("testing openpype imported") + try: + del sys.modules["openpype"] + except KeyError: + # wasn't imported before + pass + import openpype # noqa: F401 - # test if pype is imported from specific location in zip - assert "pype" in sys.modules.keys(), "Pype not imported" - assert sys.modules["pype"].__file__ == \ - 
f"{pype_version.path}{sep}pype{sep}__init__.py" + # test if openpype is imported from specific location in zip + assert "openpype" in sys.modules.keys(), "OpenPype not imported" + assert sys.modules["openpype"].__file__ == \ + f"{openpype_version.path}{sep}openpype{sep}__init__.py" -def test_find_pype(fix_bootstrap, tmp_path_factory, monkeypatch, printer): - - test_pype = namedtuple("Pype", "prefix version suffix type valid") +def test_find_openpype(fix_bootstrap, tmp_path_factory, monkeypatch, printer): + test_openpype = namedtuple("OpenPype", "prefix version suffix type valid") test_versions_1 = [ - test_pype(prefix="foo-v", version="5.5.1", - suffix=".zip", type="zip", valid=False), - test_pype(prefix="bar-v", version="5.5.2-client", - suffix=".zip", type="zip", valid=True), - test_pype(prefix="baz-v", version="5.5.3-client-strange", - suffix=".zip", type="zip", valid=True), - test_pype(prefix="bum-v", version="5.5.4-staging", - suffix=".zip", type="zip", valid=True), - test_pype(prefix="zum-v", version="5.5.5-client-staging", - suffix=".zip", type="zip", valid=True), - test_pype(prefix="fam-v", version="5.6.3", - suffix=".zip", type="zip", valid=True), - test_pype(prefix="foo-v", version="5.6.3-staging", - suffix=".zip", type="zip", valid=True), - test_pype(prefix="fim-v", version="5.6.3", - suffix=".zip", type="zip", valid=False), - test_pype(prefix="foo-v", version="5.6.4", - suffix=".txt", type="txt", valid=False), - test_pype(prefix="foo-v", version="5.7.1", - suffix="", type="dir", valid=False), + test_openpype(prefix="foo-v", version="5.5.1", + suffix=".zip", type="zip", valid=False), + test_openpype(prefix="bar-v", version="5.5.2-rc.1", + suffix=".zip", type="zip", valid=True), + test_openpype(prefix="baz-v", version="5.5.3-foo-strange", + suffix=".zip", type="zip", valid=True), + test_openpype(prefix="bum-v", version="5.5.4+staging", + suffix=".zip", type="zip", valid=True), + test_openpype(prefix="zum-v", version="5.5.5-foo+staging", + suffix=".zip", 
type="zip", valid=True), + test_openpype(prefix="fam-v", version="5.6.3", + suffix=".zip", type="zip", valid=True), + test_openpype(prefix="foo-v", version="5.6.3+staging", + suffix=".zip", type="zip", valid=True), + test_openpype(prefix="fim-v", version="5.6.3", + suffix=".zip", type="zip", valid=False), + test_openpype(prefix="foo-v", version="5.6.4", + suffix=".txt", type="txt", valid=False), + test_openpype(prefix="foo-v", version="5.7.1", + suffix="", type="dir", valid=False), ] test_versions_2 = [ - test_pype(prefix="foo-v", version="10.0.0", - suffix=".txt", type="txt", valid=False), - test_pype(prefix="lom-v", version="7.2.6", - suffix=".zip", type="zip", valid=True), - test_pype(prefix="bom-v", version="7.2.7-client", - suffix=".zip", type="zip", valid=True), - test_pype(prefix="woo-v", version="7.2.8-client-strange", - suffix=".zip", type="zip", valid=True), - test_pype(prefix="loo-v", version="7.2.10-client-staging", - suffix=".zip", type="zip", valid=True), - test_pype(prefix="kok-v", version="7.0.1", - suffix=".zip", type="zip", valid=True) + test_openpype(prefix="foo-v", version="10.0.0", + suffix=".txt", type="txt", valid=False), + test_openpype(prefix="lom-v", version="7.2.6", + suffix=".zip", type="zip", valid=True), + test_openpype(prefix="bom-v", version="7.2.7-rc.3", + suffix=".zip", type="zip", valid=True), + test_openpype(prefix="woo-v", version="7.2.8-foo-strange", + suffix=".zip", type="zip", valid=True), + test_openpype(prefix="loo-v", version="7.2.10-foo+staging", + suffix=".zip", type="zip", valid=True), + test_openpype(prefix="kok-v", version="7.0.1", + suffix=".zip", type="zip", valid=True) ] test_versions_3 = [ - test_pype(prefix="foo-v", version="3.0.0", - suffix=".zip", type="zip", valid=True), - test_pype(prefix="goo-v", version="3.0.1", - suffix=".zip", type="zip", valid=True), - test_pype(prefix="hoo-v", version="4.1.0", - suffix=".zip", type="zip", valid=True), - test_pype(prefix="foo-v", version="4.1.2", - suffix=".zip", 
type="zip", valid=True), - test_pype(prefix="foo-v", version="3.0.1-client", - suffix=".zip", type="zip", valid=True), - test_pype(prefix="foo-v", version="3.0.1-client-strange", - suffix=".zip", type="zip", valid=True), - test_pype(prefix="foo-v", version="3.0.1-staging", - suffix=".zip", type="zip", valid=True), - test_pype(prefix="foo-v", version="3.0.1-client-staging", - suffix=".zip", type="zip", valid=True), - test_pype(prefix="foo-v", version="3.2.0", - suffix=".zip", type="zip", valid=True) + test_openpype(prefix="foo-v", version="3.0.0", + suffix=".zip", type="zip", valid=True), + test_openpype(prefix="goo-v", version="3.0.1", + suffix=".zip", type="zip", valid=True), + test_openpype(prefix="hoo-v", version="4.1.0", + suffix=".zip", type="zip", valid=True), + test_openpype(prefix="foo-v", version="4.1.2", + suffix=".zip", type="zip", valid=True), + test_openpype(prefix="foo-v", version="3.0.1-foo", + suffix=".zip", type="zip", valid=True), + test_openpype(prefix="foo-v", version="3.0.1-foo-strange", + suffix=".zip", type="zip", valid=True), + test_openpype(prefix="foo-v", version="3.0.1+staging", + suffix=".zip", type="zip", valid=True), + test_openpype(prefix="foo-v", version="3.0.1-foo+staging", + suffix=".zip", type="zip", valid=True), + test_openpype(prefix="foo-v", version="3.2.0", + suffix=".zip", type="zip", valid=True) ] test_versions_4 = [ - test_pype(prefix="foo-v", version="10.0.0", - suffix="", type="dir", valid=True), - test_pype(prefix="lom-v", version="11.2.6", - suffix=".zip", type="dir", valid=False), - test_pype(prefix="bom-v", version="7.2.7-client", - suffix=".zip", type="zip", valid=True), - test_pype(prefix="woo-v", version="7.2.8-client-strange", - suffix=".zip", type="txt", valid=False) + test_openpype(prefix="foo-v", version="10.0.0", + suffix="", type="dir", valid=True), + test_openpype(prefix="lom-v", version="11.2.6", + suffix=".zip", type="dir", valid=False), + test_openpype(prefix="bom-v", version="7.2.7-foo", + suffix=".zip", 
type="zip", valid=True), + test_openpype(prefix="woo-v", version="7.2.8-foo-strange", + suffix=".zip", type="txt", valid=False) ] def _create_invalid_zip(path: Path): @@ -251,7 +255,7 @@ def test_find_pype(fix_bootstrap, tmp_path_factory, monkeypatch, printer): def _create_valid_zip(path: Path, version: str): with ZipFile(path, "w") as zf: zf.writestr( - "pype/version.py", f"__version__ = '{version}'\n\n") + "openpype/version.py", f"__version__ = '{version}'\n\n") def _create_invalid_dir(path: Path): path.mkdir(parents=True, exist_ok=True) @@ -259,9 +263,9 @@ def test_find_pype(fix_bootstrap, tmp_path_factory, monkeypatch, printer): fp.write("invalid") def _create_valid_dir(path: Path, version: str): - pype_path = path / "pype" - version_path = pype_path / "version.py" - pype_path.mkdir(parents=True, exist_ok=True) + openpype_path = path / "openpype" + version_path = openpype_path / "version.py" + openpype_path.mkdir(parents=True, exist_ok=True) with open(version_path, "w") as fp: fp.write(f"__version__ = '{version}'\n\n") @@ -283,15 +287,15 @@ def test_find_pype(fix_bootstrap, tmp_path_factory, monkeypatch, printer): with open(test_path, "w") as fp: fp.write("foo") - # in PYPE_PATH + # in OPENPYPE_PATH e_path = tmp_path_factory.mktemp("environ") # create files and directories for test for test_file in test_versions_1: _build_test_item(e_path, test_file) - # in pypePath registry - p_path = tmp_path_factory.mktemp("pypePath") + # in openPypePath registry + p_path = tmp_path_factory.mktemp("openPypePath") for test_file in test_versions_2: _build_test_item(p_path, test_file) @@ -310,10 +314,10 @@ def test_find_pype(fix_bootstrap, tmp_path_factory, monkeypatch, printer): for test_file in test_versions_4: _build_test_item(dir_path, test_file) - printer("testing finding Pype in given path ...") - result = fix_bootstrap.find_pype(g_path, include_zips=True) + printer("testing finding OpenPype in given path ...") + result = fix_bootstrap.find_openpype(g_path, 
include_zips=True) # we should have results as file were created - assert result is not None, "no Pype version found" + assert result is not None, "no OpenPype version found" # latest item in `result` should be latest version found. expected_path = Path( g_path / "{}{}{}".format( @@ -323,13 +327,14 @@ def test_find_pype(fix_bootstrap, tmp_path_factory, monkeypatch, printer): ) ) assert result, "nothing found" - assert result[-1].path == expected_path, "not a latest version of Pype 3" + assert result[-1].path == expected_path, ("not a latest version of " + "OpenPype 3") - monkeypatch.setenv("PYPE_PATH", e_path.as_posix()) + monkeypatch.setenv("OPENPYPE_PATH", e_path.as_posix()) - result = fix_bootstrap.find_pype(include_zips=True) + result = fix_bootstrap.find_openpype(include_zips=True) # we should have results as file were created - assert result is not None, "no Pype version found" + assert result is not None, "no OpenPype version found" # latest item in `result` should be latest version found. 
expected_path = Path( e_path / "{}{}{}".format( @@ -339,21 +344,23 @@ def test_find_pype(fix_bootstrap, tmp_path_factory, monkeypatch, printer): ) ) assert result, "nothing found" - assert result[-1].path == expected_path, "not a latest version of Pype 1" + assert result[-1].path == expected_path, ("not a latest version of " + "OpenPype 1") - monkeypatch.delenv("PYPE_PATH", raising=False) + monkeypatch.delenv("OPENPYPE_PATH", raising=False) # mock appdirs user_data_dir def mock_user_data_dir(*args, **kwargs): + """Mock local app data dir.""" return d_path.as_posix() monkeypatch.setattr(appdirs, "user_data_dir", mock_user_data_dir) fix_bootstrap.registry = OpenPypeSettingsRegistry() - fix_bootstrap.registry.set_item("pypePath", d_path.as_posix()) + fix_bootstrap.registry.set_item("openPypePath", d_path.as_posix()) - result = fix_bootstrap.find_pype(include_zips=True) + result = fix_bootstrap.find_openpype(include_zips=True) # we should have results as file were created - assert result is not None, "no Pype version found" + assert result is not None, "no OpenPype version found" # latest item in `result` should be latest version found. 
expected_path = Path( d_path / "{}{}{}".format( @@ -363,10 +370,11 @@ def test_find_pype(fix_bootstrap, tmp_path_factory, monkeypatch, printer): ) ) assert result, "nothing found" - assert result[-1].path == expected_path, "not a latest version of Pype 2" + assert result[-1].path == expected_path, ("not a latest version of " + "OpenPype 2") - result = fix_bootstrap.find_pype(e_path, include_zips=True) - assert result is not None, "no Pype version found" + result = fix_bootstrap.find_openpype(e_path, include_zips=True) + assert result is not None, "no OpenPype version found" expected_path = Path( e_path / "{}{}{}".format( test_versions_1[5].prefix, @@ -374,10 +382,11 @@ def test_find_pype(fix_bootstrap, tmp_path_factory, monkeypatch, printer): test_versions_1[5].suffix ) ) - assert result[-1].path == expected_path, "not a latest version of Pype 1" + assert result[-1].path == expected_path, ("not a latest version of " + "OpenPype 1") - result = fix_bootstrap.find_pype(dir_path, include_zips=True) - assert result is not None, "no Pype versions found" + result = fix_bootstrap.find_openpype(dir_path, include_zips=True) + assert result is not None, "no OpenPype versions found" expected_path = Path( dir_path / "{}{}{}".format( test_versions_4[0].prefix, @@ -385,4 +394,5 @@ def test_find_pype(fix_bootstrap, tmp_path_factory, monkeypatch, printer): test_versions_4[0].suffix ) ) - assert result[-1].path == expected_path, "not a latest version of Pype 4" + assert result[-1].path == expected_path, ("not a latest version of " + "OpenPype 4") diff --git a/tests/pype/lib/test_user_settings.py b/tests/openpype/lib/test_user_settings.py similarity index 95% rename from tests/pype/lib/test_user_settings.py rename to tests/openpype/lib/test_user_settings.py index 02342abbc9..2c58e1f35a 100644 --- a/tests/pype/lib/test_user_settings.py +++ b/tests/openpype/lib/test_user_settings.py @@ -1,5 +1,7 @@ +# -*- coding: utf-8 -*- +"""Test suite for User Settings.""" import pytest -from 
pype.lib import ( +from igniter.user_settings import ( IniSettingRegistry, JSONSettingRegistry, OpenPypeSecureRegistry @@ -9,9 +11,9 @@ import configparser @pytest.fixture -def secure_registry(tmpdir): +def secure_registry(): name = "pypetest_{}".format(str(uuid4())) - r = OpenPypeSecureRegistry(name, tmpdir) + r = OpenPypeSecureRegistry(name) yield r diff --git a/tools/build.ps1 b/tools/build.ps1 index 412bb111c1..5c392c355c 100644 --- a/tools/build.ps1 +++ b/tools/build.ps1 @@ -70,17 +70,23 @@ function Install-Poetry() { Write-Host ">>> " -NoNewline -ForegroundColor Green Write-Host "Installing Poetry ... " (Invoke-WebRequest -Uri https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py -UseBasicParsing).Content | python - - # add it to PATH - $env:PATH = "$($env:PATH);$($env:USERPROFILE)\.poetry\bin" } $art = @" -▒█▀▀▀█ █▀▀█ █▀▀ █▀▀▄ ▒█▀▀█ █░░█ █▀▀█ █▀▀ ▀█▀ ▀█▀ ▀█▀ -▒█░░▒█ █░░█ █▀▀ █░░█ ▒█▄▄█ █▄▄█ █░░█ █▀▀ ▒█░ ▒█░ ▒█░ -▒█▄▄▄█ █▀▀▀ ▀▀▀ ▀░░▀ ▒█░░░ ▄▄▄█ █▀▀▀ ▀▀▀ ▄█▄ ▄█▄ ▄█▄ - .---= [ by Pype Club ] =---. - https://openpype.io + . . .. . .. + _oOOP3OPP3Op_. . + .PPpo~· ·· ~2p. ·· ···· · · + ·Ppo · .pPO3Op.· · O:· · · · + .3Pp · oP3'· 'P33· · 4 ·· · · · ·· · · · + ·~OP 3PO· .Op3 : · ·· _____ _____ _____ + ·P3O · oP3oP3O3P' · · · · / /·/ /·/ / + O3:· O3p~ · ·:· · ·/____/·/____/ /____/ + 'P · 3p3· oP3~· ·.P:· · · ·· · · ·· · · · + · ': · Po' ·Opo'· .3O· . o[ by Pype Club ]]]==- - - · · + · '_ .. · . 
_OP3·· · ·https://openpype.io·· · + ~P3·OPPPO3OP~ · ·· · + · ' '· · ·· · · · ·· · "@ @@ -93,6 +99,14 @@ $current_dir = Get-Location $script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent $openpype_root = (Get-Item $script_dir).parent.FullName +$env:_INSIDE_OPENPYPE_TOOL = "1" + +# make sure Poetry is in PATH +if (-not (Test-Path 'env:POETRY_HOME')) { + $env:POETRY_HOME = "$openpype_root\.poetry" +} +$env:PATH = "$($env:PATH);$($env:POETRY_HOME)\bin" + Set-Location -Path $openpype_root $version_file = Get-Content -Path "$($openpype_root)\openpype\version.py" @@ -121,48 +135,25 @@ catch { Exit-WithCode 1 } +Write-Host ">>> " -NoNewLine -ForegroundColor green +Write-Host "Making sure submodules are up-to-date ..." +git submodule update --init --recursive + Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "Building OpenPype [ " -NoNewline -ForegroundColor white +Write-Host "OpenPype [ " -NoNewline -ForegroundColor white Write-host $openpype_version -NoNewline -ForegroundColor green -Write-Host " ] ..." -ForegroundColor white - -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "Detecting host Python ... " -NoNewline -if (-not (Get-Command "python" -ErrorAction SilentlyContinue)) { - Write-Host "!!! Python not detected" -ForegroundColor red - Exit-WithCode 1 -} -$version_command = @" -import sys -print('{0}.{1}'.format(sys.version_info[0], sys.version_info[1])) -"@ - -$p = & python -c $version_command -$env:PYTHON_VERSION = $p -$m = $p -match '(\d+)\.(\d+)' -if(-not $m) { - Write-Host "!!! Cannot determine version" -ForegroundColor red - Exit-WithCode 1 -} -# We are supporting python 3.6 and up -if(($matches[1] -lt 3) -or ($matches[2] -lt 7)) { - Write-Host "FAILED Version [ $p ] is old and unsupported" -ForegroundColor red - Exit-WithCode 1 -} -Write-Host "OK [ $p ]" -ForegroundColor green - +Write-Host " ]" -ForegroundColor white Write-Host ">>> " -NoNewline -ForegroundColor Green Write-Host "Reading Poetry ... 
" -NoNewline -if (-not (Test-Path -PathType Container -Path "$($env:USERPROFILE)\.poetry\bin")) { +if (-not (Test-Path -PathType Container -Path "$openpype_root\.poetry\bin")) { Write-Host "NOT FOUND" -ForegroundColor Yellow - Install-Poetry - - Write-Host "INSTALLED" -ForegroundColor Cyan + Write-Host "*** " -NoNewline -ForegroundColor Yellow + Write-Host "We need to install Poetry create virtual env first ..." + & "$openpype_root\tools\create_env.ps1" } else { Write-Host "OK" -ForegroundColor Green } -$env:PATH = "$($env:PATH);$($env:USERPROFILE)\.poetry\bin" Write-Host ">>> " -NoNewline -ForegroundColor green Write-Host "Cleaning cache files ... " -NoNewline @@ -173,6 +164,7 @@ Write-Host "OK" -ForegroundColor green Write-Host ">>> " -NoNewline -ForegroundColor green Write-Host "Building OpenPype ..." +$startTime = (Get-Date).Millisecond $out = & poetry run python setup.py build 2>&1 if ($LASTEXITCODE -ne 0) @@ -191,7 +183,8 @@ Write-Host ">>> " -NoNewline -ForegroundColor green Write-Host "restoring current directory" Set-Location -Path $current_dir +$endTime = (Get-Date).Millisecond Write-Host "*** " -NoNewline -ForegroundColor Cyan -Write-Host "All done. You will find OpenPype and build log in " -NoNewLine +Write-Host "All done in $($endTime - $startTime) secs. You will find OpenPype and build log in " -NoNewLine Write-Host "'.\build'" -NoNewline -ForegroundColor Green Write-Host " directory." diff --git a/tools/build.sh b/tools/build.sh index b95e2969c4..953d51bd81 100755 --- a/tools/build.sh +++ b/tools/build.sh @@ -6,11 +6,19 @@ art () { cat <<-EOF -▒█▀▀▀█ █▀▀█ █▀▀ █▀▀▄ ▒█▀▀█ █░░█ █▀▀█ █▀▀ ▀█▀ ▀█▀ ▀█▀ -▒█░░▒█ █░░█ █▀▀ █░░█ ▒█▄▄█ █▄▄█ █░░█ █▀▀ ▒█░ ▒█░ ▒█░ -▒█▄▄▄█ █▀▀▀ ▀▀▀ ▀░░▀ ▒█░░░ ▄▄▄█ █▀▀▀ ▀▀▀ ▄█▄ ▄█▄ ▄█▄ - .---= [ by Pype Club ] =---. - https://openpype.io + . . .. . .. + _oOOP3OPP3Op_. . + .PPpo~· ·· ~2p. 
·· ···· · · + ·Ppo · .pPO3Op.· · O:· · · · + .3Pp · oP3'· 'P33· · 4 ·· · · · ·· · · · + ·~OP 3PO· .Op3 : · ·· _____ _____ _____ + ·P3O · oP3oP3O3P' · · · · / /·/ /·/ / + O3:· O3p~ · ·:· · ·/____/·/____/ /____/ + 'P · 3p3· oP3~· ·.P:· · · ·· · · ·· · · · + · ': · Po' ·Opo'· .3O· . o[ by Pype Club ]]]==- - - · · + · '_ .. · . _OP3·· · ·https://openpype.io·· · + ~P3·OPPPO3OP~ · ·· · + · ' '· · ·· · · · ·· · EOF } @@ -65,7 +73,7 @@ detect_python () { local version_command version_command="import sys;print('{0}.{1}'.format(sys.version_info[0], sys.version_info[1]))" local python_version - python_version="$(python3 <<< ${version_command})" + python_version="$(python <<< ${version_command})" oIFS="$IFS" IFS=. set -- $python_version @@ -77,7 +85,7 @@ detect_python () { echo -e "${BIWhite}[${RST} ${BIGreen}$1.$2${RST} ${BIWhite}]${RST}" fi else - command -v python3 >/dev/null 2>&1 || { echo -e "${BIRed}$1.$2$ - ${BIRed}FAILED${RST} ${BIYellow}Version is old and unsupported${RST}"; return 1; } + command -v python >/dev/null 2>&1 || { echo -e "${BIRed}$1.$2$ - ${BIRed}FAILED${RST} ${BIYellow}Version is old and unsupported${RST}"; return 1; } fi } @@ -123,8 +131,7 @@ realpath () { install_poetry () { echo -e "${BIGreen}>>>${RST} Installing Poetry ..." 
command -v curl >/dev/null 2>&1 || { echo -e "${BIRed}!!!${RST}${BIYellow} Missing ${RST}${BIBlue}curl${BIYellow} command.${RST}"; return 1; } - curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python3 - - export PATH="$PATH:$HOME/.poetry/bin" + curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python - } # Main @@ -139,7 +146,15 @@ main () { pushd "$openpype_root" > /dev/null || return > /dev/null version_command="import os;exec(open(os.path.join('$openpype_root', 'openpype', 'version.py')).read());print(__version__);" - openpype_version="$(python3 <<< ${version_command})" + openpype_version="$(python <<< ${version_command})" + + _inside_openpype_tool="1" + + # make sure Poetry is in PATH + if [[ -z $POETRY_HOME ]]; then + export POETRY_HOME="$openpype_root/.poetry" + fi + export PATH="$POETRY_HOME/bin:$PATH" echo -e "${BIYellow}---${RST} Cleaning build directory ..." rm -rf "$openpype_root/build" && mkdir "$openpype_root/build" > /dev/null @@ -149,17 +164,40 @@ main () { clean_pyc echo -e "${BIGreen}>>>${RST} Reading Poetry ... \c" - if [ -f "$HOME/.poetry/bin/poetry" ]; then + if [ -f "$POETRY_HOME/bin/poetry" ]; then echo -e "${BIGreen}OK${RST}" - export PATH="$PATH:$HOME/.poetry/bin" else echo -e "${BIYellow}NOT FOUND${RST}" - install_poetry || { echo -e "${BIRed}!!!${RST} Poetry installation failed"; return; } + echo -e "${BIYellow}***${RST} We need to install Poetry and virtual env ..." + . "$openpype_root/tools/create_env.sh" || { echo -e "${BIRed}!!!${RST} Poetry installation failed"; return; } fi + echo -e "${BIGreen}>>>${RST} Making sure submodules are up-to-date ..." + git submodule update --init --recursive + echo -e "${BIGreen}>>>${RST} Building ..." 
- poetry run python3 "$openpype_root/setup.py" build > "$openpype_root/build/build.log" || { echo -e "${BIRed}!!!${RST} Build failed, see the build log."; return; } - poetry run python3 "$openpype_root/tools/build_dependencies.py" + if [[ "$OSTYPE" == "linux-gnu"* ]]; then + poetry run python "$openpype_root/setup.py" build > "$openpype_root/build/build.log" || { echo -e "${BIRed}!!!${RST} Build failed, see the build log."; return; } + elif [[ "$OSTYPE" == "darwin"* ]]; then + poetry run python "$openpype_root/setup.py" bdist_mac > "$openpype_root/build/build.log" || { echo -e "${BIRed}!!!${RST} Build failed, see the build log."; return; } + fi + poetry run python "$openpype_root/tools/build_dependencies.py" + + if [[ "$OSTYPE" == "darwin"* ]]; then + # fix code signing issue + codesign --remove-signature "$openpype_root/build/OpenPype.app/Contents/MacOS/lib/Python" + if command -v create-dmg > /dev/null 2>&1; then + create-dmg \ + --volname "OpenPype Installer" \ + --window-pos 200 120 \ + --window-size 600 300 \ + --app-drop-link 100 50 \ + "$openpype_root/build/OpenPype-Installer.dmg" \ + "$openpype_root/build/OpenPype.app" + else + echo -e "${BIYellow}!!!${RST} ${BIWhite}create-dmg${RST} command is not available." + fi + fi echo -e "${BICyan}>>>${RST} All done. You will find OpenPype and build log in \c" echo -e "${BIWhite}$openpype_root/build${RST} directory." 
diff --git a/tools/build_dependencies.py b/tools/build_dependencies.py index e49e930a70..fb52e2b5fd 100644 --- a/tools/build_dependencies.py +++ b/tools/build_dependencies.py @@ -22,6 +22,7 @@ import os import sys import site from distutils.util import get_platform +import platform from pathlib import Path import shutil import blessed @@ -76,7 +77,14 @@ _print(f"Working with: {site_pkg}", 2) build_dir = "exe.{}-{}".format(get_platform(), sys.version[0:3]) # create full path -build_dir = Path(os.path.dirname(__file__)).parent / "build" / build_dir +if platform.system().lower() == "darwin": + build_dir = Path(os.path.dirname(__file__)).parent.joinpath( + "build", + "OpenPype.app", + "Contents", + "MacOS") +else: + build_dir = Path(os.path.dirname(__file__)).parent / "build" / build_dir _print(f"Using build at {build_dir}", 2) if not build_dir.exists(): diff --git a/tools/build_win_installer.ps1 b/tools/build_win_installer.ps1 new file mode 100644 index 0000000000..4a4d011258 --- /dev/null +++ b/tools/build_win_installer.ps1 @@ -0,0 +1,140 @@ +<# +.SYNOPSIS + Helper script to build OpenPype. + +.DESCRIPTION + This script will detect Python installation, and build OpenPype to `build` + directory using existing virtual environment created by Poetry (or + by running `/tools/create_venv.ps1`). It will then shuffle dependencies in + build folder to optimize for different Python versions (2/3) in Python host. 
+ +.EXAMPLE + +PS> .\build.ps1 + +#> + +function Start-Progress { + param([ScriptBlock]$code) + $scroll = "/-\|/-\|" + $idx = 0 + $job = Invoke-Command -ComputerName $env:ComputerName -ScriptBlock { $code } -AsJob + + $origpos = $host.UI.RawUI.CursorPosition + + # $origpos.Y -= 1 + + while (($job.State -eq "Running") -and ($job.State -ne "NotStarted")) + { + $host.UI.RawUI.CursorPosition = $origpos + Write-Host $scroll[$idx] -NoNewline + $idx++ + if ($idx -ge $scroll.Length) + { + $idx = 0 + } + Start-Sleep -Milliseconds 100 + } + # It's over - clear the activity indicator. + $host.UI.RawUI.CursorPosition = $origpos + Write-Host ' ' + <# + .SYNOPSIS + Display spinner for running job + .PARAMETER code + Job to display spinner for + #> +} + + +function Exit-WithCode($exitcode) { + # Only exit this host process if it's a child of another PowerShell parent process... + $parentPID = (Get-CimInstance -ClassName Win32_Process -Filter "ProcessId=$PID" | Select-Object -Property ParentProcessId).ParentProcessId + $parentProcName = (Get-CimInstance -ClassName Win32_Process -Filter "ProcessId=$parentPID" | Select-Object -Property Name).Name + if ('powershell.exe' -eq $parentProcName) { $host.SetShouldExit($exitcode) } + + exit $exitcode +} + +function Show-PSWarning() { + if ($PSVersionTable.PSVersion.Major -lt 7) { + Write-Host "!!! " -NoNewline -ForegroundColor Red + Write-Host "You are using old version of PowerShell. $($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)" + Write-Host "Please update to at least 7.0 - " -NoNewline -ForegroundColor Gray + Write-Host "https://github.com/PowerShell/PowerShell/releases" -ForegroundColor White + Exit-WithCode 1 + } +} + +function Install-Poetry() { + Write-Host ">>> " -NoNewline -ForegroundColor Green + Write-Host "Installing Poetry ... 
" + (Invoke-WebRequest -Uri https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py -UseBasicParsing).Content | python - + # add it to PATH + $env:PATH = "$($env:PATH);$($env:USERPROFILE)\.poetry\bin" +} + +$art = @" + +▒█▀▀▀█ █▀▀█ █▀▀ █▀▀▄ ▒█▀▀█ █░░█ █▀▀█ █▀▀ ▀█▀ ▀█▀ ▀█▀ +▒█░░▒█ █░░█ █▀▀ █░░█ ▒█▄▄█ █▄▄█ █░░█ █▀▀ ▒█░ ▒█░ ▒█░ +▒█▄▄▄█ █▀▀▀ ▀▀▀ ▀░░▀ ▒█░░░ ▄▄▄█ █▀▀▀ ▀▀▀ ▄█▄ ▄█▄ ▄█▄ + .---= [ by Pype Club ] =---. + https://openpype.io + +"@ + +Write-Host $art -ForegroundColor DarkGreen + +# Enable if PS 7.x is needed. +# Show-PSWarning + +$current_dir = Get-Location +$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent +$openpype_root = (Get-Item $script_dir).parent.FullName + +Set-Location -Path $openpype_root + +$version_file = Get-Content -Path "$($openpype_root)\openpype\version.py" +$result = [regex]::Matches($version_file, '__version__ = "(?\d+\.\d+.\d+.*)"') +$openpype_version = $result[0].Groups['version'].Value +if (-not $openpype_version) { + Write-Host "!!! " -ForegroundColor yellow -NoNewline + Write-Host "Cannot determine OpenPype version." + Exit-WithCode 1 +} +$env:BUILD_VERSION = $openpype_version + +iscc + +Write-Host ">>> " -NoNewline -ForegroundColor green +Write-Host "Creating OpenPype installer ... " -ForegroundColor white + +$build_dir_command = @" +import sys +from distutils.util import get_platform +print('exe.{}-{}'.format(get_platform(), sys.version[0:3])) +"@ + +$build_dir = & python -c $build_dir_command +Write-Host "Build directory ... ${build_dir}" -ForegroundColor white +$env:BUILD_DIR = $build_dir + +if (Get-Command iscc -errorAction SilentlyContinue -ErrorVariable ProcessError) +{ + iscc "$openpype_root\inno_setup.iss" +}else { + Write-Host "!!! Cannot find Inno Setup command" -ForegroundColor red + Write-Host "!!! 
You can download it at https://jrsoftware.org/" -ForegroundColor red + Exit-WithCode 1 +} + + +Write-Host ">>> " -NoNewline -ForegroundColor green +Write-Host "restoring current directory" +Set-Location -Path $current_dir + +Write-Host "*** " -NoNewline -ForegroundColor Cyan +Write-Host "All done. You will find OpenPype installer in " -NoNewLine +Write-Host "'.\build'" -NoNewline -ForegroundColor Green +Write-Host " directory." diff --git a/tools/create_env.ps1 b/tools/create_env.ps1 index 44e1799be8..5600ae71c7 100644 --- a/tools/create_env.ps1 +++ b/tools/create_env.ps1 @@ -43,9 +43,10 @@ function Show-PSWarning() { function Install-Poetry() { Write-Host ">>> " -NoNewline -ForegroundColor Green Write-Host "Installing Poetry ... " + $env:POETRY_HOME="$openpype_root\.poetry" (Invoke-WebRequest -Uri https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py -UseBasicParsing).Content | python - # add it to PATH - $env:PATH = "$($env:PATH);$($env:USERPROFILE)\.poetry\bin" + $env:PATH = "$($env:PATH);$openpype_root\.poetry\bin" } @@ -84,19 +85,36 @@ $current_dir = Get-Location $script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent $openpype_root = (Get-Item $script_dir).parent.FullName +# make sure Poetry is in PATH +if (-not (Test-Path 'env:POETRY_HOME')) { + $env:POETRY_HOME = "$openpype_root\.poetry" +} +$env:PATH = "$($env:PATH);$($env:POETRY_HOME)\bin" + Set-Location -Path $openpype_root $art = @" -▒█▀▀▀█ █▀▀█ █▀▀ █▀▀▄ ▒█▀▀█ █░░█ █▀▀█ █▀▀ ▀█▀ ▀█▀ ▀█▀ -▒█░░▒█ █░░█ █▀▀ █░░█ ▒█▄▄█ █▄▄█ █░░█ █▀▀ ▒█░ ▒█░ ▒█░ -▒█▄▄▄█ █▀▀▀ ▀▀▀ ▀░░▀ ▒█░░░ ▄▄▄█ █▀▀▀ ▀▀▀ ▄█▄ ▄█▄ ▄█▄ - .---= [ by Pype Club ] =---. - https://openpype.io + + . . .. . .. + _oOOP3OPP3Op_. . + .PPpo~· ·· ~2p. 
·· ···· · · + ·Ppo · .pPO3Op.· · O:· · · · + .3Pp · oP3'· 'P33· · 4 ·· · · · ·· · · · + ·~OP 3PO· .Op3 : · ·· _____ _____ _____ + ·P3O · oP3oP3O3P' · · · · / /·/ /·/ / + O3:· O3p~ · ·:· · ·/____/·/____/ /____/ + 'P · 3p3· oP3~· ·.P:· · · ·· · · ·· · · · + · ': · Po' ·Opo'· .3O· . o[ by Pype Club ]]]==- - - · · + · '_ .. · . _OP3·· · ·https://openpype.io·· · + ~P3·OPPPO3OP~ · ·· · + · ' '· · ·· · · · ·· · + "@ - -Write-Host $art -ForegroundColor DarkGreen +if (-not (Test-Path 'env:_INSIDE_OPENPYPE_TOOL')) { + Write-Host $art -ForegroundColor DarkGreen +} # Enable if PS 7.x is needed. # Show-PSWarning @@ -118,7 +136,7 @@ Test-Python Write-Host ">>> " -NoNewline -ForegroundColor Green Write-Host "Reading Poetry ... " -NoNewline -if (-not (Test-Path -PathType Container -Path "$($env:USERPROFILE)\.poetry\bin")) { +if (-not (Test-Path -PathType Container -Path "$openpype_root\.poetry\bin")) { Write-Host "NOT FOUND" -ForegroundColor Yellow Install-Poetry Write-Host "INSTALLED" -ForegroundColor Cyan @@ -133,7 +151,7 @@ if (-not (Test-Path -PathType Leaf -Path "$($openpype_root)\poetry.lock")) { Write-Host ">>> " -NoNewline -ForegroundColor green Write-Host "Installing virtual environment from lock." } -& poetry install $poetry_verbosity +& poetry install --no-root $poetry_verbosity if ($LASTEXITCODE -ne 0) { Write-Host "!!! " -ForegroundColor yellow -NoNewline Write-Host "Poetry command failed." diff --git a/tools/create_env.sh b/tools/create_env.sh index 7bdb8503fd..d6a6828718 100755 --- a/tools/create_env.sh +++ b/tools/create_env.sh @@ -7,11 +7,19 @@ art () { cat <<-EOF -▒█▀▀▀█ █▀▀█ █▀▀ █▀▀▄ ▒█▀▀█ █░░█ █▀▀█ █▀▀ ▀█▀ ▀█▀ ▀█▀ -▒█░░▒█ █░░█ █▀▀ █░░█ ▒█▄▄█ █▄▄█ █░░█ █▀▀ ▒█░ ▒█░ ▒█░ -▒█▄▄▄█ █▀▀▀ ▀▀▀ ▀░░▀ ▒█░░░ ▄▄▄█ █▀▀▀ ▀▀▀ ▄█▄ ▄█▄ ▄█▄ - .---= [ by Pype Club ] =---. - https://openpype.io + . . .. . .. + _oOOP3OPP3Op_. . + .PPpo~· ·· ~2p. 
·· ···· · · + ·Ppo · .pPO3Op.· · O:· · · · + .3Pp · oP3'· 'P33· · 4 ·· · · · ·· · · · + ·~OP 3PO· .Op3 : · ·· _____ _____ _____ + ·P3O · oP3oP3O3P' · · · · / /·/ /·/ / + O3:· O3p~ · ·:· · ·/____/·/____/ /____/ + 'P · 3p3· oP3~· ·.P:· · · ·· · · ·· · · · + · ': · Po' ·Opo'· .3O· . o[ by Pype Club ]]]==- - - · · + · '_ .. · . _OP3·· · ·https://openpype.io·· · + ~P3·OPPPO3OP~ · ·· · + · ' '· · ·· · · · ·· · EOF } @@ -81,7 +89,7 @@ done detect_python () { echo -e "${BIGreen}>>>${RST} Using python \c" local version_command="import sys;print('{0}.{1}'.format(sys.version_info[0], sys.version_info[1]))" - local python_version="$(python3 <<< ${version_command})" + local python_version="$(python <<< ${version_command})" oIFS="$IFS" IFS=. set -- $python_version @@ -93,15 +101,16 @@ detect_python () { echo -e "${BIWhite}[${RST} ${BIGreen}$1.$2${RST} ${BIWhite}]${RST}" fi else - command -v python3 >/dev/null 2>&1 || { echo -e "${BIRed}$1.$2$ - ${BIRed}FAILED${RST} ${BIYellow}Version is old and unsupported${RST}"; return 1; } + command -v python >/dev/null 2>&1 || { echo -e "${BIRed}$1.$2$ - ${BIRed}FAILED${RST} ${BIYellow}Version is old and unsupported${RST}"; return 1; } fi } install_poetry () { echo -e "${BIGreen}>>>${RST} Installing Poetry ..." 
+ export POETRY_HOME="$openpype_root/.poetry" command -v curl >/dev/null 2>&1 || { echo -e "${BIRed}!!!${RST}${BIYellow} Missing ${RST}${BIBlue}curl${BIYellow} command.${RST}"; return 1; } - curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python3 - - export PATH="$PATH:$HOME/.poetry/bin" + curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python - + export PATH="$PATH:$POETRY_HOME/bin" } ############################################################################## @@ -136,19 +145,27 @@ realpath () { main () { # Main - echo -e "${BGreen}" - art - echo -e "${RST}" + if [[ -z $_inside_openpype_tool ]]; then + echo -e "${BGreen}" + art + echo -e "${RST}" + fi detect_python || return 1 # Directories openpype_root=$(realpath $(dirname $(dirname "${BASH_SOURCE[0]}"))) + # make sure Poetry is in PATH + if [[ -z $POETRY_HOME ]]; then + export POETRY_HOME="$openpype_root/.poetry" + fi + export PATH="$POETRY_HOME/bin:$PATH" + + pushd "$openpype_root" > /dev/null || return > /dev/null echo -e "${BIGreen}>>>${RST} Reading Poetry ... \c" - if [ -f "$HOME/.poetry/bin/poetry" ]; then + if [ -f "$POETRY_HOME/bin/poetry" ]; then echo -e "${BIGreen}OK${RST}" - export PATH="$PATH:$HOME/.poetry/bin" else echo -e "${BIYellow}NOT FOUND${RST}" install_poetry || { echo -e "${BIRed}!!!${RST} Poetry installation failed"; return; } @@ -160,7 +177,7 @@ main () { echo -e "${BIGreen}>>>${RST} Installing dependencies ..." fi - poetry install $poetry_verbosity || { echo -e "${BIRed}!!!${RST} Poetry environment installation failed"; return; } + poetry install --no-root $poetry_verbosity || { echo -e "${BIRed}!!!${RST} Poetry environment installation failed"; return; } echo -e "${BIGreen}>>>${RST} Cleaning cache files ..." clean_pyc @@ -169,7 +186,7 @@ main () { # cx_freeze will crash on missing __pychache__ on these but # reinstalling them solves the problem. echo -e "${BIGreen}>>>${RST} Fixing pycache bug ..." 
- poetry run python -m pip install --upgrade pip + poetry run python -m pip install --force-reinstall pip poetry run pip install --force-reinstall setuptools poetry run pip install --force-reinstall wheel poetry run python -m pip install --force-reinstall pip diff --git a/tools/create_zip.ps1 b/tools/create_zip.ps1 index d18806c40b..2fef4d216b 100644 --- a/tools/create_zip.ps1 +++ b/tools/create_zip.ps1 @@ -37,15 +37,32 @@ function Show-PSWarning() { $current_dir = Get-Location $script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent $openpype_root = (Get-Item $script_dir).parent.FullName + +$env:_INSIDE_OPENPYPE_TOOL = "1" + +# make sure Poetry is in PATH +if (-not (Test-Path 'env:POETRY_HOME')) { + $env:POETRY_HOME = "$openpype_root\.poetry" +} +$env:PATH = "$($env:PATH);$($env:POETRY_HOME)\bin" + Set-Location -Path $openpype_root $art = @" -▒█▀▀▀█ █▀▀█ █▀▀ █▀▀▄ ▒█▀▀█ █░░█ █▀▀█ █▀▀ ▀█▀ ▀█▀ ▀█▀ -▒█░░▒█ █░░█ █▀▀ █░░█ ▒█▄▄█ █▄▄█ █░░█ █▀▀ ▒█░ ▒█░ ▒█░ -▒█▄▄▄█ █▀▀▀ ▀▀▀ ▀░░▀ ▒█░░░ ▄▄▄█ █▀▀▀ ▀▀▀ ▄█▄ ▄█▄ ▄█▄ - .---= [ by Pype Club ] =---. - https://openpype.io + . . .. . .. + _oOOP3OPP3Op_. . + .PPpo~· ·· ~2p. ·· ···· · · + ·Ppo · .pPO3Op.· · O:· · · · + .3Pp · oP3'· 'P33· · 4 ·· · · · ·· · · · + ·~OP 3PO· .Op3 : · ·· _____ _____ _____ + ·P3O · oP3oP3O3P' · · · · / /·/ /·/ / + O3:· O3p~ · ·:· · ·/____/·/____/ /____/ + 'P · 3p3· oP3~· ·.P:· · · ·· · · ·· · · · + · ': · Po' ·Opo'· .3O· . o[ by Pype Club ]]]==- - - · · + · '_ .. · . _OP3·· · ·https://openpype.io·· · + ~P3·OPPPO3OP~ · ·· · + · ' '· · ·· · · · ·· · "@ @@ -63,35 +80,27 @@ if (-not $openpype_version) { Exit-WithCode 1 } -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "Detecting host Python ... " -NoNewline -if (-not (Get-Command "python" -ErrorAction SilentlyContinue)) { - Write-Host "!!! Python not detected" -ForegroundColor red - Exit-WithCode 1 +Write-Host ">>> " -NoNewline -ForegroundColor Green +Write-Host "Reading Poetry ... 
" -NoNewline +if (-not (Test-Path -PathType Container -Path "$openpype_root\.poetry\bin")) { + Write-Host "NOT FOUND" -ForegroundColor Yellow + Write-Host "*** " -NoNewline -ForegroundColor Yellow + Write-Host "We need to install Poetry create virtual env first ..." + & "$openpype_root\tools\create_env.ps1" +} else { + Write-Host "OK" -ForegroundColor Green } -$version_command = @' -import sys -print('{0}.{1}'.format(sys.version_info[0], sys.version_info[1])) -'@ -$p = & python -c $version_command -$env:PYTHON_VERSION = $p -$m = $p -match '(\d+)\.(\d+)' -if(-not $m) { - Write-Host "!!! Cannot determine version" -ForegroundColor red - Exit-WithCode 1 -} -# We are supporting python 3.6 and up -if(($matches[1] -lt 3) -or ($matches[2] -lt 7)) { - Write-Host "FAILED Version [ $p ] is old and unsupported" -ForegroundColor red - Exit-WithCode 1 -} -Write-Host "OK [ $p ]" -ForegroundColor green +Write-Host ">>> " -NoNewline -ForegroundColor green +Write-Host "Cleaning cache files ... " -NoNewline +Get-ChildItem $openpype_root -Filter "*.pyc" -Force -Recurse | Remove-Item -Force +Get-ChildItem $openpype_root -Filter "*.pyo" -Force -Recurse | Remove-Item -Force +Get-ChildItem $openpype_root -Filter "__pycache__" -Force -Recurse | Remove-Item -Force -Recurse +Write-Host "OK" -ForegroundColor green Write-Host ">>> " -NoNewline -ForegroundColor green Write-Host "Generating zip from current sources ..." -Write-Host "... 
" -NoNewline -ForegroundColor Magenta -Write-Host "arguments: " -NoNewline -ForegroundColor Gray -Write-Host $ARGS -ForegroundColor White -& poetry run python "$($openpype_root)\start.py" generate-zip $ARGS -Set-Location -Path $current_dir \ No newline at end of file +$env:PYTHONPATH="$($openpype_root);$($env:PYTHONPATH)" +$env:OPENPYPE_ROOT="$($openpype_root)" +& poetry run python "$($openpype_root)\tools\create_zip.py" $ARGS +Set-Location -Path $current_dir diff --git a/tools/create_zip.py b/tools/create_zip.py new file mode 100644 index 0000000000..32a4d27e8b --- /dev/null +++ b/tools/create_zip.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- +"""Create OpenPype version from live sources.""" +from igniter import bootstrap_repos +import click +import enlighten +import blessed +from pathlib2 import Path + + +term = blessed.Terminal() +manager = enlighten.get_manager() +last_increment = 0 + + +@click.group(invoke_without_command=True) +@click.option("--path", required=False, + help="path where to put version", + type=click.Path(exists=True)) +def main(path): + # create zip file + + progress_bar = enlighten.Counter( + total=100, desc="OpenPype ZIP", units="%", color="green") + + def progress(inc: int): + """Progress handler.""" + global last_increment + progress_bar.update(incr=inc - last_increment) + last_increment = inc + + bs = bootstrap_repos.BootstrapRepos(progress_callback=progress) + if path: + out_path = Path(path) + bs.data_dir = out_path.parent + + _print(f"Creating zip in {bs.data_dir} ...") + repo_file = bs.create_version_from_live_code() + if not repo_file: + _print("Error while creating zip file.", 1) + exit(1) + + _print(f"Created {repo_file}") + + +def _print(msg: str, message_type: int = 0) -> None: + """Print message to console. 
+ + Args: + msg (str): message to print + message_type (int): type of message (0 info, 1 error, 2 note) + + """ + if message_type == 0: + header = term.aquamarine3(">>> ") + elif message_type == 1: + header = term.orangered2("!!! ") + elif message_type == 2: + header = term.tan1("... ") + else: + header = term.darkolivegreen3("--- ") + + print("{}{}".format(header, msg)) + + +if __name__ == "__main__": + main() diff --git a/tools/create_zip.sh b/tools/create_zip.sh index 6e7f792f1d..adaf9431a7 100755 --- a/tools/create_zip.sh +++ b/tools/create_zip.sh @@ -8,11 +8,19 @@ art () { cat <<-EOF -▒█▀▀▀█ █▀▀█ █▀▀ █▀▀▄ ▒█▀▀█ █░░█ █▀▀█ █▀▀ ▀█▀ ▀█▀ ▀█▀ -▒█░░▒█ █░░█ █▀▀ █░░█ ▒█▄▄█ █▄▄█ █░░█ █▀▀ ▒█░ ▒█░ ▒█░ -▒█▄▄▄█ █▀▀▀ ▀▀▀ ▀░░▀ ▒█░░░ ▄▄▄█ █▀▀▀ ▀▀▀ ▄█▄ ▄█▄ ▄█▄ - .---= [ by Pype Club ] =---. - https://openpype.io + . . .. . .. + _oOOP3OPP3Op_. . + .PPpo~· ·· ~2p. ·· ···· · · + ·Ppo · .pPO3Op.· · O:· · · · + .3Pp · oP3'· 'P33· · 4 ·· · · · ·· · · · + ·~OP 3PO· .Op3 : · ·· _____ _____ _____ + ·P3O · oP3oP3O3P' · · · · / /·/ /·/ / + O3:· O3p~ · ·:· · ·/____/·/____/ /____/ + 'P · 3p3· oP3~· ·.P:· · · ·· · · ·· · · · + · ': · Po' ·Opo'· .3O· . o[ by Pype Club ]]]==- - - · · + · '_ .. · . _OP3·· · ·https://openpype.io·· · + ~P3·OPPPO3OP~ · ·· · + · ' '· · ·· · · · ·· · EOF } @@ -120,10 +128,30 @@ main () { # Directories openpype_root=$(realpath $(dirname $(dirname "${BASH_SOURCE[0]}"))) + + _inside_openpype_tool="1" + + # make sure Poetry is in PATH + if [[ -z $POETRY_HOME ]]; then + export POETRY_HOME="$openpype_root/.poetry" + fi + export PATH="$POETRY_HOME/bin:$PATH" + pushd "$openpype_root" > /dev/null || return > /dev/null + echo -e "${BIGreen}>>>${RST} Reading Poetry ... \c" + if [ -f "$POETRY_HOME/bin/poetry" ]; then + echo -e "${BIGreen}OK${RST}" + else + echo -e "${BIYellow}NOT FOUND${RST}" + echo -e "${BIYellow}***${RST} We need to install Poetry and virtual env ..." + . 
"$openpype_root/tools/create_env.sh" || { echo -e "${BIRed}!!!${RST} Poetry installation failed"; return; } + fi + echo -e "${BIGreen}>>>${RST} Generating zip from current sources ..." - poetry run python3 "$openpype_root/start.py" generate-zip "$@" + PYTHONPATH="$openpype_root:$PYTHONPATH" + OPENPYPE_ROOT="$openpype_root" + poetry run python3 "$openpype_root/tools/create_zip.py" "$@" } main "$@" diff --git a/tools/docker_build.sh b/tools/docker_build.sh new file mode 100755 index 0000000000..7600fe044b --- /dev/null +++ b/tools/docker_build.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash + +# Colors for terminal + +RST='\033[0m' # Text Reset +BIGreen='\033[1;92m' # Green +BIYellow='\033[1;93m' # Yellow +BIRed='\033[1;91m' # Red + +############################################################################## +# Return absolute path +# Globals: +# None +# Arguments: +# Path to resolve +# Returns: +# None +############################################################################### +realpath () { + echo $(cd $(dirname "$1"); pwd)/$(basename "$1") +} + +# Main +main () { + openpype_root=$(realpath $(dirname $(dirname "${BASH_SOURCE[0]}"))) + pushd "$openpype_root" > /dev/null || return > /dev/null + + echo -e "${BIYellow}---${RST} Cleaning build directory ..." + rm -rf "$openpype_root/build" && mkdir "$openpype_root/build" > /dev/null + + version_command="import os;exec(open(os.path.join('$openpype_root', 'openpype', 'version.py')).read());print(__version__);" + openpype_version="$(python3 <<< ${version_command})" + + echo -e "${BIGreen}>>>${RST} Running docker build ..." + docker build --pull --no-cache -t pypeclub/openpype:$openpype_version . + if [ $? -ne 0 ] ; then + echo -e "${BIRed}!!!${RST} Docker build failed." + return 1 + fi + + echo -e "${BIGreen}>>>${RST} Copying build from container ..." + echo -e "${BIYellow}---${RST} Creating container from pypeclub/openpype:$openpype_version ..." 
+ id="$(docker create -ti pypeclub/openpype:$openpype_version bash)" + if [ $? -ne 0 ] ; then + echo -e "${BIRed}!!!${RST} Cannot create just built container." + return 1 + fi + echo -e "${BIYellow}---${RST} Copying ..." + docker cp "$id:/opt/openpype/build/exe.linux-x86_64-3.7" "$openpype_root/build" + if [ $? -ne 0 ] ; then + echo -e "${BIRed}!!!${RST} Copying failed." + return 1 + fi + + echo -e "${BIGreen}>>>${RST} Fixing user ownership ..." + username="$(logname)" + chown -R $username ./build + + echo -e "${BIGreen}>>>${RST} All done, you can delete container:" + echo -e "${BIYellow}$id${RST}" +} + +return_code=0 +main || return_code=$? +exit $return_code diff --git a/tools/fetch_thirdparty_libs.ps1 b/tools/fetch_thirdparty_libs.ps1 index d1b914fac2..23f0b50c7a 100644 --- a/tools/fetch_thirdparty_libs.ps1 +++ b/tools/fetch_thirdparty_libs.ps1 @@ -14,7 +14,28 @@ PS> .\fetch_thirdparty_libs.ps1 $current_dir = Get-Location $script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent $openpype_root = (Get-Item $script_dir).parent.FullName + +$env:_INSIDE_OPENPYPE_TOOL = "1" + +# make sure Poetry is in PATH +if (-not (Test-Path 'env:POETRY_HOME')) { + $env:POETRY_HOME = "$openpype_root\.poetry" +} +$env:PATH = "$($env:PATH);$($env:POETRY_HOME)\bin" + Set-Location -Path $openpype_root + +Write-Host ">>> " -NoNewline -ForegroundColor Green +Write-Host "Reading Poetry ... " -NoNewline +if (-not (Test-Path -PathType Container -Path "$openpype_root\.poetry\bin")) { + Write-Host "NOT FOUND" -ForegroundColor Yellow + Write-Host "*** " -NoNewline -ForegroundColor Yellow + Write-Host "We need to install Poetry create virtual env first ..." 
+ & "$openpype_root\tools\create_env.ps1" +} else { + Write-Host "OK" -ForegroundColor Green +} + & poetry run python "$($openpype_root)\tools\fetch_thirdparty_libs.py" Set-Location -Path $current_dir diff --git a/tools/fetch_thirdparty_libs.sh b/tools/fetch_thirdparty_libs.sh index e305b4b3e4..31f109ba68 100755 --- a/tools/fetch_thirdparty_libs.sh +++ b/tools/fetch_thirdparty_libs.sh @@ -5,13 +5,20 @@ art () { cat <<-EOF - ____________ - /\\ ___ \\ - \\ \\ \\/_\\ \\ - \\ \\ _____/ ______ ___ ___ ___ - \\ \\ \\___/ /\\ \\ \\ \\\\ \\\\ \\ - \\ \\____\\ \\ \\_____\\ \\__\\\\__\\\\__\\ - \\/____/ \\/_____/ . PYPE Club . + + . . .. . .. + _oOOP3OPP3Op_. . + .PPpo~· ·· ~2p. ·· ···· · · + ·Ppo · .pPO3Op.· · O:· · · · + .3Pp · oP3'· 'P33· · 4 ·· · · · ·· · · · + ·~OP 3PO· .Op3 : · ·· _____ _____ _____ + ·P3O · oP3oP3O3P' · · · · / /·/ /·/ / + O3:· O3p~ · ·:· · ·/____/·/____/ /____/ + 'P · 3p3· oP3~· ·.P:· · · ·· · · ·· · · · + · ': · Po' ·Opo'· .3O· . o[ by Pype Club ]]]==- - - · · + · '_ .. · . _OP3·· · ·https://openpype.io·· · + ~P3·OPPPO3OP~ · ·· · + · ' '· · ·· · · · ·· · EOF } @@ -51,53 +58,6 @@ BICyan='\033[1;96m' # Cyan BIWhite='\033[1;97m' # White -############################################################################## -# Detect required version of python -# Globals: -# colors -# PYTHON -# Arguments: -# None -# Returns: -# None -############################################################################### -detect_python () { - echo -e "${BIGreen}>>>${RST} Using python \c" - local version_command="import sys;print('{0}.{1}'.format(sys.version_info[0], sys.version_info[1]))" - local python_version="$(python3 <<< ${version_command})" - oIFS="$IFS" - IFS=. 
- set -- $python_version - IFS="$oIFS" - if [ "$1" -ge "3" ] && [ "$2" -ge "6" ] ; then - if [ "$2" -gt "7" ] ; then - echo -e "${BIWhite}[${RST} ${BIRed}$1.$2 ${BIWhite}]${RST} - ${BIRed}FAILED${RST} ${BIYellow}Version is new and unsupported, use${RST} ${BIPurple}3.7.x${RST}"; return 1; - else - echo -e "${BIWhite}[${RST} ${BIGreen}$1.$2${RST} ${BIWhite}]${RST}" - fi - PYTHON="python3" - else - command -v python3 >/dev/null 2>&1 || { echo -e "${BIRed}$1.$2$ - ${BIRed}FAILED${RST} ${BIYellow}Version is old and unsupported${RST}"; return 1; } - fi -} - -############################################################################## -# Clean pyc files in specified directory -# Globals: -# None -# Arguments: -# Optional path to clean -# Returns: -# None -############################################################################### -clean_pyc () { - local path - path=$pype_root - echo -e "${BIGreen}>>>${RST} Cleaning pyc at [ ${BIWhite}$path${RST} ] ... \c" - find "$path" -regex '^.*\(__pycache__\|\.py[co]\)$' -delete - echo -e "${BIGreen}DONE${RST}" -} - ############################################################################## # Return absolute path # Globals: @@ -116,14 +76,31 @@ main () { echo -e "${BGreen}" art echo -e "${RST}" - detect_python || return 1 # Directories - pype_root=$(realpath $(dirname $(dirname "${BASH_SOURCE[0]}"))) - pushd "$pype_root" > /dev/null || return > /dev/null + openpype_root=$(realpath $(dirname $(dirname "${BASH_SOURCE[0]}"))) + + _inside_openpype_tool="1" + + # make sure Poetry is in PATH + if [[ -z $POETRY_HOME ]]; then + export POETRY_HOME="$openpype_root/.poetry" + fi + export PATH="$POETRY_HOME/bin:$PATH" + + echo -e "${BIGreen}>>>${RST} Reading Poetry ... \c" + if [ -f "$POETRY_HOME/bin/poetry" ]; then + echo -e "${BIGreen}OK${RST}" + else + echo -e "${BIYellow}NOT FOUND${RST}" + echo -e "${BIYellow}***${RST} We need to install Poetry and virtual env ..." + . 
"$openpype_root/tools/create_env.sh" || { echo -e "${BIRed}!!!${RST} Poetry installation failed"; return; } + fi + + pushd "$openpype_root" > /dev/null || return > /dev/null echo -e "${BIGreen}>>>${RST} Running Pype tool ..." - poetry run python3 "$pype_root/tools/fetch_thirdparty_libs.py" + poetry run python "$openpype_root/tools/fetch_thirdparty_libs.py" } main \ No newline at end of file diff --git a/tools/make_docs.ps1 b/tools/make_docs.ps1 index aa526bbdc9..01edaf9c58 100644 --- a/tools/make_docs.ps1 +++ b/tools/make_docs.ps1 @@ -16,22 +16,49 @@ PS> .\make_docs.ps1 $current_dir = Get-Location $script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent $openpype_root = (Get-Item $script_dir).parent.FullName + +$env:_INSIDE_OPENPYPE_TOOL = "1" + +# make sure Poetry is in PATH +if (-not (Test-Path 'env:POETRY_HOME')) { + $env:POETRY_HOME = "$openpype_root\.poetry" +} +$env:PATH = "$($env:PATH);$($env:POETRY_HOME)\bin" + Set-Location -Path $openpype_root $art = @" - -▒█▀▀▀█ █▀▀█ █▀▀ █▀▀▄ ▒█▀▀█ █░░█ █▀▀█ █▀▀ ▀█▀ ▀█▀ ▀█▀ -▒█░░▒█ █░░█ █▀▀ █░░█ ▒█▄▄█ █▄▄█ █░░█ █▀▀ ▒█░ ▒█░ ▒█░ -▒█▄▄▄█ █▀▀▀ ▀▀▀ ▀░░▀ ▒█░░░ ▄▄▄█ █▀▀▀ ▀▀▀ ▄█▄ ▄█▄ ▄█▄ - .---= [ by Pype Club ] =---. - https://openpype.io + . . .. . .. + _oOOP3OPP3Op_. . + .PPpo~· ·· ~2p. ·· ···· · · + ·Ppo · .pPO3Op.· · O:· · · · + .3Pp · oP3'· 'P33· · 4 ·· · · · ·· · · · + ·~OP 3PO· .Op3 : · ·· _____ _____ _____ + ·P3O · oP3oP3O3P' · · · · / /·/ /·/ / + O3:· O3p~ · ·:· · ·/____/·/____/ /____/ + 'P · 3p3· oP3~· ·.P:· · · ·· · · ·· · · · + · ': · Po' ·Opo'· .3O· . o[ by Pype Club ]]]==- - - · · + · '_ .. · . _OP3·· · ·https://openpype.io·· · + ~P3·OPPPO3OP~ · ·· · + · ' '· · ·· · · · ·· · "@ Write-Host $art -ForegroundColor DarkGreen +Write-Host ">>> " -NoNewline -ForegroundColor Green +Write-Host "Reading Poetry ... 
" -NoNewline +if (-not (Test-Path -PathType Container -Path "$openpype_root\.poetry\bin")) { + Write-Host "NOT FOUND" -ForegroundColor Yellow + Write-Host "*** " -NoNewline -ForegroundColor Yellow + Write-Host "We need to install Poetry create virtual env first ..." + & "$openpype_root\tools\create_env.ps1" +} else { + Write-Host "OK" -ForegroundColor Green +} + Write-Host "This will not overwrite existing source rst files, only scan and add new." Set-Location -Path $openpype_root Write-Host ">>> " -NoNewline -ForegroundColor green diff --git a/tools/make_docs.sh b/tools/make_docs.sh index 2ac12d3d95..9dfab26a38 100755 --- a/tools/make_docs.sh +++ b/tools/make_docs.sh @@ -7,11 +7,20 @@ art () { cat <<-EOF -▒█▀▀▀█ █▀▀█ █▀▀ █▀▀▄ ▒█▀▀█ █░░█ █▀▀█ █▀▀ ▀█▀ ▀█▀ ▀█▀ -▒█░░▒█ █░░█ █▀▀ █░░█ ▒█▄▄█ █▄▄█ █░░█ █▀▀ ▒█░ ▒█░ ▒█░ -▒█▄▄▄█ █▀▀▀ ▀▀▀ ▀░░▀ ▒█░░░ ▄▄▄█ █▀▀▀ ▀▀▀ ▄█▄ ▄█▄ ▄█▄ - .---= [ by Pype Club ] =---. - https://openpype.io + . . .. . .. + _oOOP3OPP3Op_. . + .PPpo~· ·· ~2p. ·· ···· · · + ·Ppo · .pPO3Op.· · O:· · · · + .3Pp · oP3'· 'P33· · 4 ·· · · · ·· · · · + ·~OP 3PO· .Op3 : · ·· _____ _____ _____ + ·P3O · oP3oP3O3P' · · · · / /·/ /·/ / + O3:· O3p~ · ·:· · ·/____/·/____/ /____/ + 'P · 3p3· oP3~· ·.P:· · · ·· · · ·· · · · + · ': · Po' ·Opo'· .3O· . o[ by Pype Club ]]]==- - - · · + · '_ .. · . _OP3·· · ·https://openpype.io·· · + ~P3·OPPPO3OP~ · ·· · + · ' '· · ·· · · · ·· · + EOF } @@ -71,6 +80,24 @@ main () { # Directories openpype_root=$(realpath $(dirname $(dirname "${BASH_SOURCE[0]}"))) + + _inside_openpype_tool="1" + + # make sure Poetry is in PATH + if [[ -z $POETRY_HOME ]]; then + export POETRY_HOME="$openpype_root/.poetry" + fi + export PATH="$POETRY_HOME/bin:$PATH" + + echo -e "${BIGreen}>>>${RST} Reading Poetry ... \c" + if [ -f "$POETRY_HOME/bin/poetry" ]; then + echo -e "${BIGreen}OK${RST}" + else + echo -e "${BIYellow}NOT FOUND${RST}" + echo -e "${BIYellow}***${RST} We need to install Poetry and virtual env ..." + . 
"$openpype_root/tools/create_env.sh" || { echo -e "${BIRed}!!!${RST} Poetry installation failed"; return; } + fi + pushd "$openpype_root" > /dev/null || return > /dev/null echo -e "${BIGreen}>>>${RST} Running apidoc ..." diff --git a/tools/run_documentation.ps1 b/tools/run_documentation.ps1 new file mode 100644 index 0000000000..1be3709642 --- /dev/null +++ b/tools/run_documentation.ps1 @@ -0,0 +1,32 @@ +<# +.SYNOPSIS + Helper script to run mongodb. + +.DESCRIPTION + This script will detect mongodb, add it to the PATH and launch it on specified port and db location. + +.EXAMPLE + +PS> .\run_mongo.ps1 + +#> + +$art = @" + +▒█▀▀▀█ █▀▀█ █▀▀ █▀▀▄ ▒█▀▀█ █░░█ █▀▀█ █▀▀ ▀█▀ ▀█▀ ▀█▀ +▒█░░▒█ █░░█ █▀▀ █░░█ ▒█▄▄█ █▄▄█ █░░█ █▀▀ ▒█░ ▒█░ ▒█░ +▒█▄▄▄█ █▀▀▀ ▀▀▀ ▀░░▀ ▒█░░░ ▄▄▄█ █▀▀▀ ▀▀▀ ▄█▄ ▄█▄ ▄█▄ + .---= [ by Pype Club ] =---. + https://openpype.io + +"@ + +Write-Host $art -ForegroundColor DarkGreen + +$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent +$openpype_root = (Get-Item $script_dir).parent.FullName + +cd $openpype_root/website + +yarn run start + diff --git a/tools/run_mongo.ps1 b/tools/run_mongo.ps1 index 7e43a355e5..05fc497d34 100644 --- a/tools/run_mongo.ps1 +++ b/tools/run_mongo.ps1 @@ -13,11 +13,19 @@ PS> .\run_mongo.ps1 $art = @" -▒█▀▀▀█ █▀▀█ █▀▀ █▀▀▄ ▒█▀▀█ █░░█ █▀▀█ █▀▀ ▀█▀ ▀█▀ ▀█▀ -▒█░░▒█ █░░█ █▀▀ █░░█ ▒█▄▄█ █▄▄█ █░░█ █▀▀ ▒█░ ▒█░ ▒█░ -▒█▄▄▄█ █▀▀▀ ▀▀▀ ▀░░▀ ▒█░░░ ▄▄▄█ █▀▀▀ ▀▀▀ ▄█▄ ▄█▄ ▄█▄ - .---= [ by Pype Club ] =---. - https://openpype.io + . . .. . .. + _oOOP3OPP3Op_. . + .PPpo~· ·· ~2p. ·· ···· · · + ·Ppo · .pPO3Op.· · O:· · · · + .3Pp · oP3'· 'P33· · 4 ·· · · · ·· · · · + ·~OP 3PO· .Op3 : · ·· _____ _____ _____ + ·P3O · oP3oP3O3P' · · · · / /·/ /·/ / + O3:· O3p~ · ·:· · ·/____/·/____/ /____/ + 'P · 3p3· oP3~· ·.P:· · · ·· · · ·· · · · + · ': · Po' ·Opo'· .3O· . o[ by Pype Club ]]]==- - - · · + · '_ .. · . 
_OP3·· · ·https://openpype.io·· · + ~P3·OPPPO3OP~ · ·· · + · ' '· · ·· · · · ·· · "@ diff --git a/tools/run_mongo.sh b/tools/run_mongo.sh index 1c788abcaf..7b512e67ab 100755 --- a/tools/run_mongo.sh +++ b/tools/run_mongo.sh @@ -7,11 +7,19 @@ art () { cat <<-EOF -▒█▀▀▀█ █▀▀█ █▀▀ █▀▀▄ ▒█▀▀█ █░░█ █▀▀█ █▀▀ ▀█▀ ▀█▀ ▀█▀ -▒█░░▒█ █░░█ █▀▀ █░░█ ▒█▄▄█ █▄▄█ █░░█ █▀▀ ▒█░ ▒█░ ▒█░ -▒█▄▄▄█ █▀▀▀ ▀▀▀ ▀░░▀ ▒█░░░ ▄▄▄█ █▀▀▀ ▀▀▀ ▄█▄ ▄█▄ ▄█▄ - .---= [ by Pype Club ] =---. - https://openpype.io + . . .. . .. + _oOOP3OPP3Op_. . + .PPpo~· ·· ~2p. ·· ···· · · + ·Ppo · .pPO3Op.· · O:· · · · + .3Pp · oP3'· 'P33· · 4 ·· · · · ·· · · · + ·~OP 3PO· .Op3 : · ·· _____ _____ _____ + ·P3O · oP3oP3O3P' · · · · / /·/ /·/ / + O3:· O3p~ · ·:· · ·/____/·/____/ /____/ + 'P · 3p3· oP3~· ·.P:· · · ·· · · ·· · · · + · ': · Po' ·Opo'· .3O· . o[ by Pype Club ]]]==- - - · · + · '_ .. · . _OP3·· · ·https://openpype.io·· · + ~P3·OPPPO3OP~ · ·· · + · ' '· · ·· · · · ·· · EOF } @@ -82,3 +90,4 @@ main () { echo -e "${BIGreen}>>>${RST} Detached to background." } +main diff --git a/tools/run_settings.ps1 b/tools/run_settings.ps1 index 3f99de4b4e..7477e546b3 100644 --- a/tools/run_settings.ps1 +++ b/tools/run_settings.ps1 @@ -14,6 +14,27 @@ PS> .\run_settings.ps1 $current_dir = Get-Location $script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent $openpype_root = (Get-Item $script_dir).parent.FullName + +$env:_INSIDE_OPENPYPE_TOOL = "1" + +# make sure Poetry is in PATH +if (-not (Test-Path 'env:POETRY_HOME')) { + $env:POETRY_HOME = "$openpype_root\.poetry" +} +$env:PATH = "$($env:PATH);$($env:POETRY_HOME)\bin" + Set-Location -Path $openpype_root + +Write-Host ">>> " -NoNewline -ForegroundColor Green +Write-Host "Reading Poetry ... " -NoNewline +if (-not (Test-Path -PathType Container -Path "$openpype_root\.poetry\bin")) { + Write-Host "NOT FOUND" -ForegroundColor Yellow + Write-Host "*** " -NoNewline -ForegroundColor Yellow + Write-Host "We need to install Poetry create virtual env first ..." 
+ & "$openpype_root\tools\create_env.ps1" +} else { + Write-Host "OK" -ForegroundColor Green +} + & poetry run python "$($openpype_root)\start.py" settings --dev Set-Location -Path $current_dir \ No newline at end of file diff --git a/tools/run_settings.sh b/tools/run_settings.sh index 0c8a951d7c..0287043bb6 100755 --- a/tools/run_settings.sh +++ b/tools/run_settings.sh @@ -6,11 +6,19 @@ art () { cat <<-EOF -▒█▀▀▀█ █▀▀█ █▀▀ █▀▀▄ ▒█▀▀█ █░░█ █▀▀█ █▀▀ ▀█▀ ▀█▀ ▀█▀ -▒█░░▒█ █░░█ █▀▀ █░░█ ▒█▄▄█ █▄▄█ █░░█ █▀▀ ▒█░ ▒█░ ▒█░ -▒█▄▄▄█ █▀▀▀ ▀▀▀ ▀░░▀ ▒█░░░ ▄▄▄█ █▀▀▀ ▀▀▀ ▄█▄ ▄█▄ ▄█▄ - .---= [ by Pype Club ] =---. - https://openpype.io + . . .. . .. + _oOOP3OPP3Op_. . + .PPpo~· ·· ~2p. ·· ···· · · + ·Ppo · .pPO3Op.· · O:· · · · + .3Pp · oP3'· 'P33· · 4 ·· · · · ·· · · · + ·~OP 3PO· .Op3 : · ·· _____ _____ _____ + ·P3O · oP3oP3O3P' · · · · / /·/ /·/ / + O3:· O3p~ · ·:· · ·/____/·/____/ /____/ + 'P · 3p3· oP3~· ·.P:· · · ·· · · ·· · · · + · ': · Po' ·Opo'· .3O· . o[ by Pype Club ]]]==- - - · · + · '_ .. · . _OP3·· · ·https://openpype.io·· · + ~P3·OPPPO3OP~ · ·· · + · ' '· · ·· · · · ·· · EOF } @@ -50,52 +58,6 @@ BICyan='\033[1;96m' # Cyan BIWhite='\033[1;97m' # White -############################################################################## -# Detect required version of python -# Globals: -# colors -# PYTHON -# Arguments: -# None -# Returns: -# None -############################################################################### -detect_python () { - echo -e "${BIGreen}>>>${RST} Using python \c" - local version_command="import sys;print('{0}.{1}'.format(sys.version_info[0], sys.version_info[1]))" - local python_version="$(python3 <<< ${version_command})" - oIFS="$IFS" - IFS=. 
- set -- $python_version - IFS="$oIFS" - if [ "$1" -ge "3" ] && [ "$2" -ge "6" ] ; then - if [ "$2" -gt "7" ] ; then - echo -e "${BIWhite}[${RST} ${BIRed}$1.$2 ${BIWhite}]${RST} - ${BIRed}FAILED${RST} ${BIYellow}Version is new and unsupported, use${RST} ${BIPurple}3.7.x${RST}"; return 1; - else - echo -e "${BIWhite}[${RST} ${BIGreen}$1.$2${RST} ${BIWhite}]${RST}" - fi - else - command -v python3 >/dev/null 2>&1 || { echo -e "${BIRed}$1.$2$ - ${BIRed}FAILED${RST} ${BIYellow}Version is old and unsupported${RST}"; return 1; } - fi -} - -############################################################################## -# Clean pyc files in specified directory -# Globals: -# None -# Arguments: -# Optional path to clean -# Returns: -# None -############################################################################### -clean_pyc () { - local path - path=$oepnpype_root - echo -e "${BIGreen}>>>${RST} Cleaning pyc at [ ${BIWhite}$path${RST} ] ... \c" - find "$path" -regex '^.*\(__pycache__\|\.py[co]\)$' -delete - echo -e "${BIGreen}DONE${RST}" -} - ############################################################################## # Return absolute path # Globals: @@ -111,15 +73,29 @@ realpath () { # Main main () { - echo -e "${BGreen}" - art - echo -e "${RST}" - detect_python || return 1 # Directories openpype_root=$(realpath $(dirname $(dirname "${BASH_SOURCE[0]}"))) + + _inside_openpype_tool="1" + + # make sure Poetry is in PATH + if [[ -z $POETRY_HOME ]]; then + export POETRY_HOME="$openpype_root/.poetry" + fi + export PATH="$POETRY_HOME/bin:$PATH" + pushd "$openpype_root" > /dev/null || return > /dev/null + echo -e "${BIGreen}>>>${RST} Reading Poetry ... \c" + if [ -f "$POETRY_HOME/bin/poetry" ]; then + echo -e "${BIGreen}OK${RST}" + else + echo -e "${BIYellow}NOT FOUND${RST}" + echo -e "${BIYellow}***${RST} We need to install Poetry and virtual env ..." + . 
"$openpype_root/tools/create_env.sh" || { echo -e "${BIRed}!!!${RST} Poetry installation failed"; return; } + fi + echo -e "${BIGreen}>>>${RST} Generating zip from current sources ..." poetry run python3 "$openpype_root/start.py" settings --dev } diff --git a/tools/run_tests.ps1 b/tools/run_tests.ps1 index 5070591c02..7b9a5c841d 100644 --- a/tools/run_tests.ps1 +++ b/tools/run_tests.ps1 @@ -32,11 +32,19 @@ function Show-PSWarning() { $art = @" -▒█▀▀▀█ █▀▀█ █▀▀ █▀▀▄ ▒█▀▀█ █░░█ █▀▀█ █▀▀ ▀█▀ ▀█▀ ▀█▀ -▒█░░▒█ █░░█ █▀▀ █░░█ ▒█▄▄█ █▄▄█ █░░█ █▀▀ ▒█░ ▒█░ ▒█░ -▒█▄▄▄█ █▀▀▀ ▀▀▀ ▀░░▀ ▒█░░░ ▄▄▄█ █▀▀▀ ▀▀▀ ▄█▄ ▄█▄ ▄█▄ - .---= [ by Pype Club ] =---. - https://openpype.io + . . .. . .. + _oOOP3OPP3Op_. . + .PPpo~· ·· ~2p. ·· ···· · · + ·Ppo · .pPO3Op.· · O:· · · · + .3Pp · oP3'· 'P33· · 4 ·· · · · ·· · · · + ·~OP 3PO· .Op3 : · ·· _____ _____ _____ + ·P3O · oP3oP3O3P' · · · · / /·/ /·/ / + O3:· O3p~ · ·:· · ·/____/·/____/ /____/ + 'P · 3p3· oP3~· ·.P:· · · ·· · · ·· · · · + · ': · Po' ·Opo'· .3O· . o[ by Pype Club ]]]==- - - · · + · '_ .. · . _OP3·· · ·https://openpype.io·· · + ~P3·OPPPO3OP~ · ·· · + · ' '· · ·· · · · ·· · "@ @@ -49,6 +57,14 @@ $current_dir = Get-Location $script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent $openpype_root = (Get-Item $script_dir).parent.FullName +$env:_INSIDE_OPENPYPE_TOOL = "1" + +# make sure Poetry is in PATH +if (-not (Test-Path 'env:POETRY_HOME')) { + $env:POETRY_HOME = "$openpype_root\.poetry" +} +$env:PATH = "$($env:PATH);$($env:POETRY_HOME)\bin" + Set-Location -Path $openpype_root $version_file = Get-Content -Path "$($openpype_root)\openpype\version.py" @@ -61,34 +77,20 @@ if (-not $openpype_version) { } Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "Building OpenPype [ " -NoNewline -ForegroundColor white +Write-Host "OpenPype [ " -NoNewline -ForegroundColor white Write-host $openpype_version -NoNewline -ForegroundColor green Write-Host " ] ..." 
-ForegroundColor white -Write-Host ">>> " -NoNewline -ForegroundColor green -Write-Host "Detecting host Python ... " -NoNewline -if (-not (Get-Command "python" -ErrorAction SilentlyContinue)) { - Write-Host "!!! Python not detected" -ForegroundColor red - Exit-WithCode 1 +Write-Host ">>> " -NoNewline -ForegroundColor Green +Write-Host "Reading Poetry ... " -NoNewline +if (-not (Test-Path -PathType Container -Path "$openpype_root\.poetry\bin")) { + Write-Host "NOT FOUND" -ForegroundColor Yellow + Write-Host "*** " -NoNewline -ForegroundColor Yellow + Write-Host "We need to install Poetry create virtual env first ..." + & "$openpype_root\tools\create_env.ps1" +} else { + Write-Host "OK" -ForegroundColor Green } -$version_command = @" -import sys -print('{0}.{1}'.format(sys.version_info[0], sys.version_info[1])) -"@ - -$p = & python -c $version_command -$env:PYTHON_VERSION = $p -$m = $p -match '(\d+)\.(\d+)' -if(-not $m) { - Write-Host "!!! Cannot determine version" -ForegroundColor red - Exit-WithCode 1 -} -# We are supporting python 3.6 and up -if(($matches[1] -lt 3) -or ($matches[2] -lt 7)) { - Write-Host "FAILED Version [ $p ] is old and unsupported" -ForegroundColor red - Exit-WithCode 1 -} -Write-Host "OK [ $p ]" -ForegroundColor green Write-Host ">>> " -NoNewline -ForegroundColor green Write-Host "Cleaning cache files ... " -NoNewline diff --git a/tools/run_tests.sh b/tools/run_tests.sh index 0af052ca01..3620ebc0e5 100755 --- a/tools/run_tests.sh +++ b/tools/run_tests.sh @@ -6,11 +6,19 @@ art () { cat <<-EOF -▒█▀▀▀█ █▀▀█ █▀▀ █▀▀▄ ▒█▀▀█ █░░█ █▀▀█ █▀▀ ▀█▀ ▀█▀ ▀█▀ -▒█░░▒█ █░░█ █▀▀ █░░█ ▒█▄▄█ █▄▄█ █░░█ █▀▀ ▒█░ ▒█░ ▒█░ -▒█▄▄▄█ █▀▀▀ ▀▀▀ ▀░░▀ ▒█░░░ ▄▄▄█ █▀▀▀ ▀▀▀ ▄█▄ ▄█▄ ▄█▄ - .---= [ by Pype Club ] =---. - https://openpype.io + . . .. . .. + _oOOP3OPP3Op_. . + .PPpo~· ·· ~2p. 
·· ···· · · + ·Ppo · .pPO3Op.· · O:· · · · + .3Pp · oP3'· 'P33· · 4 ·· · · · ·· · · · + ·~OP 3PO· .Op3 : · ·· _____ _____ _____ + ·P3O · oP3oP3O3P' · · · · / /·/ /·/ / + O3:· O3p~ · ·:· · ·/____/·/____/ /____/ + 'P · 3p3· oP3~· ·.P:· · · ·· · · ·· · · · + · ': · Po' ·Opo'· .3O· . o[ by Pype Club ]]]==- - - · · + · '_ .. · . _OP3·· · ·https://openpype.io·· · + ~P3·OPPPO3OP~ · ·· · + · ' '· · ·· · · · ·· · EOF } @@ -49,32 +57,6 @@ BIPurple='\033[1;95m' # Purple BICyan='\033[1;96m' # Cyan BIWhite='\033[1;97m' # White - -############################################################################## -# Detect required version of python -# Globals: -# colors -# PYTHON -# Arguments: -# None -# Returns: -# None -############################################################################### -detect_python () { - echo -e "${BIGreen}>>>${RST} Using python \c" - local version_command="import sys;print('{0}.{1}'.format(sys.version_info[0], sys.version_info[1]))" - local python_version="$(python3 <<< ${version_command})" - oIFS="$IFS" - IFS=. - set -- $python_version - IFS="$oIFS" - if [ "$1" -ge "3" ] && [ "$2" -ge "6" ] ; then - echo -e "${BIWhite}[${RST} ${BIGreen}$1.$2${RST} ${BIWhite}]${RST}" - else - command -v python3 >/dev/null 2>&1 || { echo -e "${BIRed}FAILED${RST} ${BIYellow} Version [${RST}${BICyan}$1.$2${RST}]${BIYellow} is old and unsupported${RST}"; return 1; } - fi -} - ############################################################################## # Clean pyc files in specified directory # Globals: @@ -110,10 +92,27 @@ main () { echo -e "${BGreen}" art echo -e "${RST}" - detect_python || return 1 # Directories openpype_root=$(realpath $(dirname $(dirname "${BASH_SOURCE[0]}"))) + + _inside_openpype_tool="1" + + # make sure Poetry is in PATH + if [[ -z $POETRY_HOME ]]; then + export POETRY_HOME="$openpype_root/.poetry" + fi + export PATH="$POETRY_HOME/bin:$PATH" + + echo -e "${BIGreen}>>>${RST} Reading Poetry ... 
\c" + if [ -f "$POETRY_HOME/bin/poetry" ]; then + echo -e "${BIGreen}OK${RST}" + else + echo -e "${BIYellow}NOT FOUND${RST}" + echo -e "${BIYellow}***${RST} We need to install Poetry and virtual env ..." + . "$openpype_root/tools/create_env.sh" || { echo -e "${BIRed}!!!${RST} Poetry installation failed"; return; } + fi + pushd "$openpype_root" || return > /dev/null echo -e "${BIGreen}>>>${RST} Testing OpenPype ..." @@ -123,5 +122,4 @@ main () { PYTHONPATH=$original_pythonpath } - - +main diff --git a/tools/run_tray.ps1 b/tools/run_tray.ps1 index 9485584c6f..533a791836 100644 --- a/tools/run_tray.ps1 +++ b/tools/run_tray.ps1 @@ -13,7 +13,27 @@ PS> .\run_tray.ps1 $current_dir = Get-Location $script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent $openpype_root = (Get-Item $script_dir).parent.FullName + +$env:_INSIDE_OPENPYPE_TOOL = "1" + +# make sure Poetry is in PATH +if (-not (Test-Path 'env:POETRY_HOME')) { + $env:POETRY_HOME = "$openpype_root\.poetry" +} +$env:PATH = "$($env:PATH);$($env:POETRY_HOME)\bin" + Set-Location -Path $openpype_root +Write-Host ">>> " -NoNewline -ForegroundColor Green +Write-Host "Reading Poetry ... " -NoNewline +if (-not (Test-Path -PathType Container -Path "$openpype_root\.poetry\bin")) { + Write-Host "NOT FOUND" -ForegroundColor Yellow + Write-Host "*** " -NoNewline -ForegroundColor Yellow + Write-Host "We need to install Poetry create virtual env first ..." 
+ & "$openpype_root\tools\create_env.ps1" +} else { + Write-Host "OK" -ForegroundColor Green +} + & poetry run python "$($openpype_root)\start.py" tray --debug Set-Location -Path $current_dir \ No newline at end of file diff --git a/tools/run_tray.sh b/tools/run_tray.sh index 8174f7e38a..339ff6f918 100755 --- a/tools/run_tray.sh +++ b/tools/run_tray.sh @@ -1,20 +1,6 @@ #!/usr/bin/env bash - # Run OpenPype Tray - -art () { - cat <<-EOF - -▒█▀▀▀█ █▀▀█ █▀▀ █▀▀▄ ▒█▀▀█ █░░█ █▀▀█ █▀▀ ▀█▀ ▀█▀ ▀█▀ -▒█░░▒█ █░░█ █▀▀ █░░█ ▒█▄▄█ █▄▄█ █░░█ █▀▀ ▒█░ ▒█░ ▒█░ -▒█▄▄▄█ █▀▀▀ ▀▀▀ ▀░░▀ ▒█░░░ ▄▄▄█ █▀▀▀ ▀▀▀ ▄█▄ ▄█▄ ▄█▄ - .---= [ by Pype Club ] =---. - https://openpype.io - -EOF -} - # Colors for terminal RST='\033[0m' # Text Reset @@ -50,53 +36,6 @@ BICyan='\033[1;96m' # Cyan BIWhite='\033[1;97m' # White -############################################################################## -# Detect required version of python -# Globals: -# colors -# PYTHON -# Arguments: -# None -# Returns: -# None -############################################################################### -detect_python () { - echo -e "${BIGreen}>>>${RST} Using python \c" - local version_command="import sys;print('{0}.{1}'.format(sys.version_info[0], sys.version_info[1]))" - local python_version="$(python3 <<< ${version_command})" - oIFS="$IFS" - IFS=. 
- set -- $python_version - IFS="$oIFS" - if [ "$1" -ge "3" ] && [ "$2" -ge "6" ] ; then - if [ "$2" -gt "7" ] ; then - echo -e "${BIWhite}[${RST} ${BIRed}$1.$2 ${BIWhite}]${RST} - ${BIRed}FAILED${RST} ${BIYellow}Version is new and unsupported, use${RST} ${BIPurple}3.7.x${RST}"; return 1; - else - echo -e "${BIWhite}[${RST} ${BIGreen}$1.$2${RST} ${BIWhite}]${RST}" - fi - PYTHON="python3" - else - command -v python3 >/dev/null 2>&1 || { echo -e "${BIRed}$1.$2$ - ${BIRed}FAILED${RST} ${BIYellow}Version is old and unsupported${RST}"; return 1; } - fi -} - -############################################################################## -# Clean pyc files in specified directory -# Globals: -# None -# Arguments: -# Optional path to clean -# Returns: -# None -############################################################################### -clean_pyc () { - local path - path=$openpype_root - echo -e "${BIGreen}>>>${RST} Cleaning pyc at [ ${BIWhite}$path${RST} ] ... \c" - find "$path" -regex '^.*\(__pycache__\|\.py[co]\)$' -delete - echo -e "${BIGreen}DONE${RST}" -} - ############################################################################## # Return absolute path # Globals: @@ -112,13 +51,26 @@ realpath () { # Main main () { - echo -e "${BGreen}" - art - echo -e "${RST}" - detect_python || return 1 - # Directories openpype_root=$(realpath $(dirname $(dirname "${BASH_SOURCE[0]}"))) + + _inside_openpype_tool="1" + + # make sure Poetry is in PATH + if [[ -z $POETRY_HOME ]]; then + export POETRY_HOME="$openpype_root/.poetry" + fi + export PATH="$POETRY_HOME/bin:$PATH" + + echo -e "${BIGreen}>>>${RST} Reading Poetry ... \c" + if [ -f "$POETRY_HOME/bin/poetry" ]; then + echo -e "${BIGreen}OK${RST}" + else + echo -e "${BIYellow}NOT FOUND${RST}" + echo -e "${BIYellow}***${RST} We need to install Poetry and virtual env ..." + . 
"$openpype_root/tools/create_env.sh" || { echo -e "${BIRed}!!!${RST} Poetry installation failed"; return; } + fi + pushd "$openpype_root" > /dev/null || return > /dev/null echo -e "${BIGreen}>>>${RST} Running OpenPype Tray with debug option ..." diff --git a/tools/update_submodules.ps1 b/tools/update_submodules.ps1 index d0f93d9f7e..8ecc278510 100644 --- a/tools/update_submodules.ps1 +++ b/tools/update_submodules.ps1 @@ -10,11 +10,19 @@ PS> .\update_submodules.ps1 $art = @" -▒█▀▀▀█ █▀▀█ █▀▀ █▀▀▄ ▒█▀▀█ █░░█ █▀▀█ █▀▀ ▀█▀ ▀█▀ ▀█▀ -▒█░░▒█ █░░█ █▀▀ █░░█ ▒█▄▄█ █▄▄█ █░░█ █▀▀ ▒█░ ▒█░ ▒█░ -▒█▄▄▄█ █▀▀▀ ▀▀▀ ▀░░▀ ▒█░░░ ▄▄▄█ █▀▀▀ ▀▀▀ ▄█▄ ▄█▄ ▄█▄ - .---= [ by Pype Club ] =---. - https://openpype.io + . . .. . .. + _oOOP3OPP3Op_. . + .PPpo~· ·· ~2p. ·· ···· · · + ·Ppo · .pPO3Op.· · O:· · · · + .3Pp · oP3'· 'P33· · 4 ·· · · · ·· · · · + ·~OP 3PO· .Op3 : · ·· _____ _____ _____ + ·P3O · oP3oP3O3P' · · · · / /·/ /·/ / + O3:· O3p~ · ·:· · ·/____/·/____/ /____/ + 'P · 3p3· oP3~· ·.P:· · · ·· · · ·· · · · + · ': · Po' ·Opo'· .3O· . o[ by Pype Club ]]]==- - - · · + · '_ .. · . _OP3·· · ·https://openpype.io·· · + ~P3·OPPPO3OP~ · ·· · + · ' '· · ·· · · · ·· · "@ diff --git a/tools/update_submodules.sh b/tools/update_submodules.sh index 465827bfbb..49a3d08afb 100644 --- a/tools/update_submodules.sh +++ b/tools/update_submodules.sh @@ -6,10 +6,19 @@ art () { cat <<-EOF -▒█▀▀▀█ █▀▀█ █▀▀ █▀▀▄ ▒█▀▀█ █░░█ █▀▀█ █▀▀ ▀█▀ ▀█▀ ▀█▀ -▒█░░▒█ █░░█ █▀▀ █░░█ ▒█▄▄█ █▄▄█ █░░█ █▀▀ ▒█░ ▒█░ ▒█░ -▒█▄▄▄█ █▀▀▀ ▀▀▀ ▀░░▀ ▒█░░░ ▄▄▄█ █▀▀▀ ▀▀▀ ▄█▄ ▄█▄ ▄█▄ - .---= [ by Pype Club ] =---. + . . .. . .. + _oOOP3OPP3Op_. . + .PPpo~· ·· ~2p. ·· ···· · · + ·Ppo · .pPO3Op.· · O:· · · · + .3Pp · oP3'· 'P33· · 4 ·· · · · ·· · · · + ·~OP 3PO· .Op3 : · ·· _____ _____ _____ + ·P3O · oP3oP3O3P' · · · · / /·/ /·/ / + O3:· O3p~ · ·:· · ·/____/·/____/ /____/ + 'P · 3p3· oP3~· ·.P:· · · ·· · · ·· · · · + · ': · Po' ·Opo'· .3O· . o[ by Pype Club ]]]==- - - · · + · '_ .. · . 
_OP3·· · ·https://openpype.io·· · + ~P3·OPPPO3OP~ · ·· · + · ' '· · ·· · · · ·· · EOF } diff --git a/vendor/deadline/custom/plugins/GlobalJobPreLoad.py b/vendor/deadline/custom/plugins/GlobalJobPreLoad.py index d1287dd213..5e64605271 100644 --- a/vendor/deadline/custom/plugins/GlobalJobPreLoad.py +++ b/vendor/deadline/custom/plugins/GlobalJobPreLoad.py @@ -60,7 +60,7 @@ def inject_openpype_environment(deadlinePlugin): with open(export_url) as fp: contents = json.load(fp) for key, value in contents.items(): - deadlinePlugin.SetEnvironmentVariable(key, value) + deadlinePlugin.SetProcessEnvironmentVariable(key, value) os.remove(export_url) @@ -162,4 +162,3 @@ def __main__(deadlinePlugin): inject_openpype_environment(deadlinePlugin) else: pype(deadlinePlugin) # backward compatibility with Pype2 - diff --git a/website/docs/admin_hosts_aftereffects.md b/website/docs/admin_hosts_aftereffects.md new file mode 100644 index 0000000000..dc43820465 --- /dev/null +++ b/website/docs/admin_hosts_aftereffects.md @@ -0,0 +1,39 @@ +--- +id: admin_hosts_aftereffects +title: AfterEffects Settings +sidebar_label: AfterEffects +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +## AfterEffects settings + +There is a couple of settings that could configure publishing process for **AfterEffects**. +All of them are Project based, eg. each project could have different configuration. + +Location: Settings > Project > AfterEffects + +![Harmony Project Settings](assets/admin_hosts_aftereffects_settings.png) + +## Publish plugins + +### Validate Scene Settings + +#### Skip Resolution Check for Tasks + +Set regex pattern(s) to look for in a Task name to skip resolution check against values from DB. + +#### Skip Timeline Check for Tasks + +Set regex pattern(s) to look for in a Task name to skip `frameStart`, `frameEnd` check against values from DB. 
+ +### AfterEffects Submit to Deadline + +* `Use Published scene` - Set to True (green) when Deadline should take published scene as a source instead of uploaded local one. +* `Priority` - priority of job on farm +* `Primary Pool` - here is list of pool fetched from server you can select from. +* `Secondary Pool` +* `Frames Per Task` - number of sequence division between individual tasks (chunks) +making one job on farm. + diff --git a/website/docs/admin_hosts_blender.md b/website/docs/admin_hosts_blender.md new file mode 100644 index 0000000000..0655e5341a --- /dev/null +++ b/website/docs/admin_hosts_blender.md @@ -0,0 +1,83 @@ +--- +id: admin_hosts_blender +title: Blender +sidebar_label: Blender +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +## Blender requirements +Blender integration requires to use **PySide2** module inside blender. Module is different for Blender versions and platforms so can't be bundled with OpenPype. + +### How to install + +:::info Permissions +This step requires Admin persmission. +::: + + + + + +Find python executable inside your Blender installation folder. It is usually located in **C:\\Program Files\\Blender Foundation\\Blender {version}\\{version}\\python\\bin\\python.exe** (This may differ in future blender version). + +Open Powershell or Command Prompt as Administrator and run commands below. + +*Replace `C:\Program Files\Blender Foundation\Blender 2.83\2.83\python\bin` with your path.* + +```bash +# Change directory to python executable directory. +> cd C:\Program Files\Blender Foundation\Blender 2.83\2.83\python\bin + +# Run pip install command. +> python -m pip install PySide2 +``` + + + + + +Procedure may differ based on Linux distribution and blender distribution. Some Blender distributions are using system Python in that case it is required to install PySide2 using pip to system python (Not tested). 
+ +**These instructions are for Blender using bundled python.** + +Find python executable inside your blender application. + +:::note Find python executable in Blender +You can launch Blender and in "Scripting" section enter commands to console. +```bash +>>> import bpy +>>> print(bpy.app.binary_path_python) +'/path/to/python/executable' +``` +::: + +Open terminal and run pip install command below. + +*Replace `/usr/bin/blender/2.83/python/bin/python3.7m` with your path.* +```bash +> /usr/bin/blender/2.83/python/bin/python3.7m -m pip install PySide2 +``` + +:::warning No module named pip +If you get error `No module named pip` you'll have to do few steps first. Open new terminal and run the python executable from Blender (entering full path). +```bash +# Run Python executable +> /usr/bin/blender/2.83/python/bin/python3.7m +# Python process should start +>>> import ensurepip +>>> ensurepip.bootstrap() +``` +You can close new terminal. Run pip install command above again. Now should work as expected. +::: + + + + diff --git a/website/docs/admin_hosts_harmony.md b/website/docs/admin_hosts_harmony.md new file mode 100644 index 0000000000..2c49d8ba73 --- /dev/null +++ b/website/docs/admin_hosts_harmony.md @@ -0,0 +1,51 @@ +--- +id: admin_hosts_harmony +title: ToonBoom Harmony Settings +sidebar_label: ToonBoom Harmony +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +## ToonBoom Harmony settings + +There is a couple of settings that could configure publishing process for **ToonBoom Harmony**. +All of them are Project based, eg. each project could have different configuration. + +Location: Settings > Project > Harmony + +![Harmony Project Settings](assets/admin_hosts_harmony_settings.png) + +## Publish plugins + +### Collect Palettes + +#### Allowed tasks + +Set regex pattern(s) only for task names when publishing of Palettes should occur. + +Use ".*" to publish Palettes for ALL tasks. 
+ +### Validate Scene Settings + +#### Skip Frame check for Assets with + +Set regex pattern(s) for filtering Asset name that should skip validation of `frameEnd` value from DB. + +#### Skip Resolution Check for Tasks + +Set regex pattern(s) for filtering Asset name that should skip validation or `Resolution` value from DB. + +#### Skip Timeline Check for Tasks + +Set regex pattern(s) for filtering Task name that should skip validation `frameStart`, `frameEnd` check against values from DB. + +### Harmony Submit to Deadline + +* `Use Published scene` - Set to True (green) when Deadline should take published scene as a source instead of uploaded local one. +* `Priority` - priority of job on farm +* `Primary Pool` - here is list of pool fetched from server you can select from. +* `Secondary Pool` +* `Frames Per Task` - number of sequence division between individual tasks (chunks) +making one job on farm. + diff --git a/website/docs/admin_hosts_maya.md b/website/docs/admin_hosts_maya.md new file mode 100644 index 0000000000..83c4121be9 --- /dev/null +++ b/website/docs/admin_hosts_maya.md @@ -0,0 +1,52 @@ +--- +id: admin_hosts_maya +title: Maya +sidebar_label: Maya +--- + +## Maya + +### Publish Plugins + +#### Render Settings Validator (`ValidateRenderSettings`) + +Render Settings Validator is here to make sure artists will submit renders +we correct settings. Some of these settings are needed by OpenPype but some +can be defined by TD using [OpenPype Settings UI](admin_settings). 
+ +OpenPype enforced settings include: + +- animation must be enabled in output +- render prefix must start with `maya/` to make sure renders are in +correct directory +- there must be `` or its equivalent in different renderers in +file prefix +- if multiple cameras are to be rendered, `` token must be in file prefix + +For **Vray**: +- AOV separator must be set to `_` (underscore) + +For **Redshift**: +- all AOVs must follow `/_` image file prefix +- AOV image format must be same as the one set in Output settings + +For **Renderman**: +- both image and directory prefixes must comply to `_..` and `/renders/maya//` respectively + +For **Arnold**: +- there shouldn't be `` token when merge AOVs option is turned on + + +Additional check can be added via Settings - **Project Settings > Maya > Publish plugin > ValidateRenderSettings**. +You can add as many options as you want for every supported renderer. In first field put node type and attribute +and in the second required value. + +![Settings example](assets/maya-admin_render_settings_validator.png) + +In this example we've put `aiOptions.AA_samples` in first one and `6` to second to enforce +Arnolds Camera (AA) samples to 6. + +Note that `aiOptions` is not the name of node but rather its type. For renderers there is usually +just one instance of this node type but if that is not so, validator will go through all its +instances and check the value there. Node type for **VRay** settings is `VRaySettingsNode`, for **Renderman** +it is `rmanGlobals`, for **Redshift** it is `RedshiftOptions`. 
\ No newline at end of file diff --git a/website/docs/admin_hosts_resolve.md b/website/docs/admin_hosts_resolve.md new file mode 100644 index 0000000000..d2e027205d --- /dev/null +++ b/website/docs/admin_hosts_resolve.md @@ -0,0 +1,103 @@ +--- +id: admin_hosts_resolve +title: DaVinci Resolve Setup +sidebar_label: DaVinci Resolve +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +## Resolve requirements +Due to the way resolve handles python and python scripts there are a few steps required steps needed to be done on any machine that will be using OpenPype with resolve. + +### Installing Resolve's own python 3.6 interpreter. +Resolve uses a hardcoded method to look for the python executable path. All of tho following paths are defined automatically by Python msi installer. We are using Python 3.6.2. + + + + + +`%LOCALAPPDATA%\Programs\Python\Python36` + + + + +`/opt/Python/3.6/bin` + + + + +`~/Library/Python/3.6/bin` + + + + + +### Installing PySide2 into python 3.6 for correct gui work + +OpenPype is using its own window widget inside Resolve, for that reason PySide2 has to be installed into the python 3.6 (as explained above). + + + + + +paste to any terminal of your choice + +```bash +%LOCALAPPDATA%\Programs\Python\Python36\python.exe -m pip install PySide2 +``` + + + + +paste to any terminal of your choice + +```bash +/opt/Python/3.6/bin/python -m pip install PySide2 +``` + + + + +paste to any terminal of your choice + +```bash +~/Library/Python/3.6/bin/python -m pip install PySide2 +``` + + + + +
+
+### Set Resolve's Fusion settings for Python 3.6 interpreter
+
+
+
+As it is shown in the picture below you have to go to Fusion Tab and then in Fusion menu find Fusion Settings. Go to Fusion/Script and find Default Python Version and switch to Python 3.6
+
+ +
+ +![Create menu](assets/resolve_fusion_tab.png) +![Create menu](assets/resolve_fusion_menu.png) +![Create menu](assets/resolve_fusion_script_settings.png) + +
+
\ No newline at end of file diff --git a/website/docs/admin_settings_project.md b/website/docs/admin_settings_project.md deleted file mode 100644 index a30c0f0082..0000000000 --- a/website/docs/admin_settings_project.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -id: admin_settings_project -title: Project Settings -sidebar_label: Project Settings ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - -PROJECT Settings \ No newline at end of file diff --git a/website/docs/artist_hosts_hiero.md b/website/docs/artist_hosts_hiero.md new file mode 100644 index 0000000000..4ada1fba2d --- /dev/null +++ b/website/docs/artist_hosts_hiero.md @@ -0,0 +1,193 @@ +--- +id: artist_hosts_hiero +title: Hiero +sidebar_label: Hiero / Nuke Studio +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +:::note +All the information also applies to **_Nuke Studio_**(NKS), but for simplicity we only refer to Hiero/NKS. The workflows are identical for both. We are supporting versions **`11.0`** and above. +::: + + + +## OpenPype global tools + +- [Work Files](artist_tools.md#workfiles) +- [Create](artist_tools.md#creator) +- [Load](artist_tools.md#loader) +- [Manage (Inventory)](artist_tools.md#inventory) +- [Publish](artist_tools.md#publisher) + + +## Hiero specific tools + + + +
+
+ +### Create Default Tags + +This tool will recreate all necessary OpenPype tags needed for successful publishing. It is automatically ran at start of the Hiero/NKS. Use this tool to manually re-create all the tags if you accidentaly delete them, or you want to reset them to default values. + +#### Result + +- Will create tags in Tags bin in case there were none +- Will set all tags to default values if they have been altered + +
+
+ +![Default Tags](assets/hiero_defaultTags.png) + +
+
+ +
+
+ +### Apply Colorspace Project + +This tool will set any defined colorspace definition from OpenPype `Settings / Project / Anatomy / Color Management and Output Formats / Hiero / Workfile` to Hiero `menu / Project / Edit Settings / Color Management tab` + +#### Result + +- Define corect color management settings on project + +
+
+ +![Default Tags](assets/hiero_menuColorspaceProject.png) + +
+
+ + +
+
+ +### Apply Colorspace Clips + +This tool will set any defined colorspace definition from OpenPype `Settings / Project / Anatomy / Color Management and Output Formats / Hiero / Colorspace on Inputs by regex detection` to any matching clip's source path. + +#### Result + +- Set correct `Set Media Color Transform` on each clip of active timeline if it matches defined expressions + +
+
+ +![Default Tags](assets/hiero_menuColorspaceClip.png) + +
+
+ +## Publishing Shots + + + +
+
+With OpenPype, you can use Hiero/NKS as a starting point for creating a project's **shots** as *assets* from timeline clips with its *hierarchical parents* like **episodes**, **sequences**, **folders**, and its child **tasks**. Most importantly it will create **versions** of plate *subsets*, with or without **reference video**. Publishing is naturally creating clip's **thumbnails** and assigns it to shot *asset*. Hiero is also publishing **audio** *subset* and various **soft-effects** either as retiming component as part of published plates or **color-transformations**, that will be available later on for compositor artists to use either as *viewport input-process* or *loaded nodes* in graph editor.
+



+
+### Preparing timeline for conversion to instances
+Because we don't support on-the-fly data conversion, in case of working with raw camera sources or some other formats which need to be converted for 2D/3D work, we suggest to convert those beforehand and reconform the timeline. Before any clips in timeline could be converted to publishable instances we recommend the following.
+1. Merge all tracks which are supposed to be one and are multiplied only because of editor's style
+2. Rename tracks to follow basic structure > if only one layer then `main` in case of multiple layer (elements) for one shot then `main`, and other elements for example: `bg`, `greenscreen`, `fg01`, `fg02`, `display01`, etc. please avoid using [-/_.,%&*] or spaces. These names will be later used in *subset* name creation as `{family}{trackName}` so for example **plateMain** or **plateFg01**
+3. Define correct `Set Media Color Transform` at all clips as those will be also published to metadata and used for later loading with correct color transformation.
+4. Reviewable video material which you wish to be used as preview videos on any supported Project manager platform (Ftrack) has to be added ideally to track named **review**. This can be offline edit used as reference video for 2D/3D artists. This video material can be edited to fit length of **main** timeline track or it can be one long video clip under all clips in **main** track, because OpenPype will trim this to appropriate length with use of FFMPEG. Please be aware we only support MP4(h264) or JPG sequence at the moment.
+
+ +![Create menu](assets/hiero_timelinePrep.png) + +
+
+
+### Converting timeline clips to instances
+
+Every clip on timeline which is intended to be published has to be converted to publishable instance.
+
+ +In OpenPype it is done by tagging a clip with our own metadata. Select all clips you wish to convert and `menu > OpenPype > Create`. +



+ +
+ +
+ +![Create menu](assets/hiero_menuCreate.png) + +
+ +
+
+Then choose `Create Publishable Clip` in **Instance Creator** dialogue.
+

+
+Then you can alter Subset name, but this will be changed dynamically and replaced with the timeline's track name.
+

+ +Keep **Use selection** on. +

+ +Hit **Create** +

+
+ +
+ +![Instance Creator](assets/hiero_instanceCreator.png) + +
+
+ +Dialogue `Pype publish attributes creator` will open. Here you can define instance properties. If you wish to rename clips dynamically during creation then Keep **Rename clips** ticked. +

+
+In case you wish to use *multiple elements of shots* workflow then keep **Enable vertical sync** ticked on and define correct hero track which is holding main plates, this is usually the **main** track.
+
+ +
+ +![Create menu](assets/hiero_createUIRename.png) + +
+
+ +Subset name is created dynamically if `` is selected on **Subset name**. +

+
+In case you wish to publish reviewable video as explained above then find the appropriate track from drop down menu **Use review track**. Usually named `review`
+

+ +Hover above each input field for help. +

+
+Handles can be defined here too. In case you wish to have individual clip set differently we recommend to set here the default value and later change those in the created OpenPype tag's metadata under `handleStart` and `handleEnd` properties (look below for details)
+
+ +
+ +![Create menu](assets/hiero_createUIFrames.png) + +
+
+ +After you hit **Ok** tags are added to selected clips (except clips in **review** tracks). +

+
+If you wish to change any individual property of the shot then you are able to do it here. In this example we can change `handleStart` and `handleEnd` to some other values.
+
+ +
+ +![Create menu](assets/hiero_tagHandles.png) + +
+
diff --git a/website/docs/artist_hosts_maya.md b/website/docs/artist_hosts_maya.md index d19bde7b49..fc94f20f02 100644 --- a/website/docs/artist_hosts_maya.md +++ b/website/docs/artist_hosts_maya.md @@ -715,3 +715,40 @@ Once data are marked as Redshift Proxy instance, they can be published - **OpenP Published proxy files can be loaded with OpenPype Loader. It will create mesh and attach Redshift Proxy parameters to it - Redshift will then represent proxy with bounding box. + +## Using VRay Proxies + +OpenPype support publishing, loading and using of VRay Proxy in look management. Their underlaying format +can be either vrmesh or alembic. + +:::warning vrmesh or alembic and look management +Be aware that **vrmesh** cannot be used with looks as it doesn't retain IDs necessary to map shaders to geometry. +::: + +### Creating VRay Proxy + +To create VRay Proxy, select geometry you want and - **OpenPype → Create ...** select **VRay Proxy**. Name your +subset as you want and press **Create** button. + +This will create `vrayproxy` set for your subset. You can set some options in Attribute editor, mainly if you want +export animation instead of single frame. + +![Maya - VRay Proxy Creation](assets/maya-vray_proxy.jpg) + +### Publishing VRay Proxies + +VRay Proxy can be published - **OpenPype → Publish ...**. It will publish data as VRays `vrmesh` format and as +Alembic file. + +## Using VRay Proxies + +You can load VRay Proxy using loader - **OpenPype → Loader ...** + +![Maya - VRay Proxy Creation](assets/maya-vray_proxy-loader.jpg) + +Select your subset and right-click. Select **Import VRay Proxy (vrmesh)** to import it. + +:::note +Note that even if it states `vrmesh` in descriptions, if loader finds Alembic published along (default behavior) it will +use abc file instead of vrmesh as it is more flexible and without it looks doesn't work. 
+::: diff --git a/website/docs/artist_hosts_nukestudio.md b/website/docs/artist_hosts_nukestudio.md deleted file mode 100644 index 23301f53bf..0000000000 --- a/website/docs/artist_hosts_nukestudio.md +++ /dev/null @@ -1,284 +0,0 @@ ---- -id: artist_hosts_nukestudio -title: Hiero -sidebar_label: Hiero / Nuke Studio ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -:::note -All the information also applies to **_Nuke Studio_**, but for simplicity we only refer to Hiero. The workflows are identical for both. We are supporting versions **`11.0`** and above. -::: - - -## Hiero specific tools - - - -
-
- -### Create Default Tags - -This tool will recreate all necessary OpenPype tags needed for successful publishing. It is automatically ran at start of the Hiero. Use this tool to manually re-create all the tags if you accidentaly delete them, or you want to reset them to default values. - -
-
- -![Default Tags](assets/nukestudio_defaultTags.png) - -
-
- -#### Result - -- Will create tags in Tags bin in case there were none -- Will set all tags to default values if they have been altered - -## Publishing Shots - - - -
-
- -With OpenPype, you can use Hiero as a starting point for creating project hierarchy in avalon and ftrack database (episodes, sequences, shots, folders etc.), publishing plates, reference quicktimes, audio and various soft effects that will be evailable later on for compositors and 3D artist to use. - -There are two ways to `Publish` data and create shots in database from Hiero. Use either context menu on right clicking selected clips or go to top `menu > OpenPype > Publish`. - -
-
- -![Clips naming](assets/nukestudio_basic_clipNaming.png) - -
-
- -Keep in mind that the publishing currently works on selected shots - -Shot names for all the related plates that you want to publish (subsets) has to be the same to be correctly paired together (as it is shown in image). -Note the layer **review** which contains `plateMainReview`. -This media is just h264, 1920x1080 video for that will be used as preview of the actual `plateMain` subset and will be uploaded to Ftrack. We explain how to work with review tag in [**Reviewing**](#reviewing). - - -:::important -To to successfuly publish a shot from Hiero: -1. At least one clip of your shot must be tagged with `Hierarchy`, `subset` and `handleStart/End`. -2. Your source media must be pre-cut to correct length (including handles) -::: - -### Tagging - - -OpenPype's custom tags are used for defining shot parameters and to define which clips and how they are going to be published. - -If you want to add any properties to your clips you'll need to adjust values on the given tag and then drag it onto the clip. - - -
- -![Tags basic](assets/nukestudio_tagsToClips_basic.png) - -
- -1. double click on preferable tag and drag&drop it to selected clip(s) -2. Basic set of tags on clip (usually subset: plateMain) -3. Additionally select clip and edit its parameters -4. Edit parameters here but do not touch `family` - -
-
- -:::important -Only clips with `subset` will be directly processed for publishing. -::: - -### Custom Tags Details - -#### Asset related -| Icon | Description | Editable | Options | -| ------------------- | ---------------------------------------------------------------------------------- | ------------------------------------- | ---------------------------------------------------------------------------------------- | -| ![Hierarchy][hi] | Define parent hierarchy of the shot. Usually combined with one of subset tags. | root, folder, sequence, episode, shot | example: {sequence} = name of Hiero sequence or overwrite by any text without `-` or `_` | -| ![Frame Start][fst] | Set start frame of the shot. Using `"source"` will keep original frame numbers. | number | int `number` or `"source"` | - - -#### Subsets - -| Icon | Description | Editable | Options | -| ------------------ | ------------------------------------------------------------------------------ | -------- | --------------------------------- | -| ![Review][rew] | Choose which track holds review quicktime for the given shot. | track | `"review"` or other track name | -| ![Plate Main][pmn] | Main plate subset identifier | subset | `"main"` or other | -| ![Plate FG][pfg] | Foreground plate subset identifier (comped over the main plate) | subset | `"Fg##"` or other | -| ![Plate BG][pbg] | Background plate subset identifier (comped under the main plate) | subset | `"Bg##"` or other | -| ![Plate Ref][ref] | Reference plate subset identifier | subset | `"Ref"` or other | - -#### Subset's attributes - -| Icon | Description | Editable | Options | -| ------------------ | --------------------------------------------------------------------------------- | ------------------- | ----------------------------- | -| ![Resolution][rsl] | Use source resolution instead of sequence settings. 
| none | | -| ![Retiming][rtm] | Publish retime metadata to shot if retime or time-warp found on clip | marginIn, marginOut | int `number` frame cushioning | -| ![Lens][lns] | Specify lens focal length metadata (work in progress) | focalLengthMm | int `number` | - -#### Handles - -| Icon | Description | Editable | Options | -| --------------------- | ---------------------------------------------------------------------------- | -------- | -------------------------- | -| ![Handles Start][ahs] | Handles at the start of the clip/shot | value | change to any int `number` | -| ![Handles End][ahe] | Handles at the end of a clip/shot | value | change to any int `number` | - -[hi]: assets/nks_icons/hierarchy.png - -[ahs]: assets/nks_icons/3_add_handles_start.png - -[ahe]: assets/nks_icons/1_add_handles_end.png - -[rsl]: assets/nks_icons/resolution.png - -[rtm]: assets/nks_icons/retiming.png - -[rew]: assets/nks_icons/review.png - -[pmn]: assets/nks_icons/z_layer_main.png - -[pfg]: assets/nks_icons/z_layer_fg.png - -[pbg]: assets/nks_icons/z_layer_bg.png - -[lns]: assets/nks_icons/lense1.png - -[fst]: assets/nks_icons/frame_start.png - -[ref]: assets/nks_icons/reference.png - -### Handles - -OpenPype requires handle information in shot metadata even if they are set to 0. -For this you need to add handles tags to the main clip (Should be the one with Hierarchy tag). -This way we are defining a shot property. In case you wish to have different -handles on other subsets (e.g. when plateBG is longer than plateFG) you can add handle tags with different value to this longer plate. - -If you wish to have different handles length (say 100) than one of the default tags, simply drag `start: add 10 frames` to your clip -and then go to clips tags, find the tag, then replace 10 for 100 in name and also change value to 100. 
-This is also explained following tutorial [`Extending premade handles tags`](#extending-premade-handles-tags) - -:::caution -Even if you don't need any handles you have to add `start: add 0 frames` and `end: add 0 frames` tags to the clip with Hierarchy tag. -::: - -### Retiming - -OpenPype is also able to publish retiming parameters into the database. -Any clip with **editorial**/**retime** or **TimeWarp** soft effect has to be tagged with `Retiming` tag, if you want this information preserved during publishing. - -Any animation on **TimeWarp** is also preserved and reapplied in _Nuke_. - -You can only combine **retime** and with a single **Timewarp**. - -### Reviewing - -There are two ways to publish reviewable **h264 mov** into OpenPype (and Ftrack). - - - - - - - -The first one uses the Review Tag pointing to the track that holds the reviewable quicktimes for plates. - -This tag metadata has `track` key inside that points to `review` track by default. If you drop this tag onto any publishable clip on the timeline you're telling OpenPype "you will find quicktime version of this plate on `review` track (clips must have the same name)" - -In the image on the right we dropped it to **plateMain** clip. Then we renamed the layer tha hold reviewable quicktime called `plateMainReview`. You can see that the clip names are the same. - - - -
- -![Reviewing](assets/nukestudio_reviewing.png) - -
- -1. `-review` suffix is added to publishing item label if any reviewable file is found -2. `plateMain` clip is holding the Review tag -3. layer name is `review` as it is used as default in _Review_ Tag in _track_ -4. name of clip is the same across all subsets - -
-
- - -
- - -Second way would be to add the **h264 mov 1920x1080** into the same folder -as image sequence. The name of the file has to be the same as image sequence. -Publisher will pick this file up and add it to the files list during collecting. -This will also add `"- review"` to instance label in **Publish**. - -Example: - -- img seq: `image_sequence_name.0001.exr` -- mov: `image_sequence_name.mov` - - -
- - --------------- - - -### LUT Workflow - - -
-
- -It is possible to publish Hiero soft effects for compositors to use later on. You can add the effect to a particular clip or to whole layer as shows on the picture. All clips -below the `Video 6` layer (green arrow) will be published with the **LUT** subset which combines all the colour corrections from he soft effects. Any clips above the `Video 6` layer will have no **LUT** published with them. - - -
-
- -![Reviewing](assets/nukestudio_softEffects.png) - -
-
- -Any external Lut files used in the soft effects will be copied over to `resources` of the published subset folder `lutPlateMain` (in our example). - -:::note - -
-
- -You cannot currently publish soft effects on their own because at the moment we only support soft effects as a part of other subset publishing. Image is demonstrating successful publishing. - -
-
- -![Reviewing](assets/nukestudio_lutSucess.png) - -
-
- -::: - -## Tutorials - - -### Basic publishing with soft effects - - - - -### Extending premade handles tags - - diff --git a/website/docs/artist_hosts_resolve.md b/website/docs/artist_hosts_resolve.md new file mode 100644 index 0000000000..be069eea79 --- /dev/null +++ b/website/docs/artist_hosts_resolve.md @@ -0,0 +1,216 @@ +--- +id: artist_hosts_resolve +title: DaVinci Resolve +sidebar_label: DaVinci Resolve +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +:::warning +Before you are able to start with OpenPype tools in DaVinci Resolve, installation of its own Python 3.6 interpreter and PySide 2 has to be done. Go to [Installation of python and pyside](#installation-of-python-and-pyside) link for more information +::: + + + +## OpenPype global tools + +- [Work Files](artist_tools.md#workfiles) +- [Create](artist_tools.md#creator) +- [Load](artist_tools.md#loader) +- [Manage (Inventory)](artist_tools.md#inventory) +- [Publish](artist_tools.md#publisher) + + +
+
+## Creating Shots from timeline items
+
+Before a clip can be published with [Publisher](artist_tools.md#publisher) a timeline item has to be marked with OpenPype metadata markers. This way it is converted to a publishable subset.
+
+Let's do it step by step.
+
+ + +
+
+### Color clips before opening Create menu
+
+
+Timeline video clips should be colored to `Chocolate` color for OpenPype to see it as selected for subset creation.
+
+
+ +![Create menu](assets/resolve_select_clips_timeline_chocolate.png) + +
+
+ + +### Rename timeline track names + +
+ + +
+ +To be able to work with dynamic subset name, which is based on track names it is recommended to rename those tracks to what type of plates their clips represent. Commonly used ones are `main`, `review`, `fg01`, `fg02`, `bg`, `bg01`, etc. It is completely up to you but we recommend to always have at least `main` plate. For example if a clip is on track **element** and subset family is set to **plate** then the resulting subset name will be **plateElement** + +

+
+ +
+ +![Create menu](assets/resolve_creator_subset_name.png) +The name of the resulting *subset* can be seen in the **OpenPypeData** marker. +



+
+ +
+ +![Create menu](assets/resolve_remame_track_names.png) +Simple track setup where we are only using `main` and `review` track names. + +
+
+ +![Create menu](assets/resolve_create_vertical_rename_timeline.png) +An example of used track names. The yellow frame is highlighting vertically aligned clips - which are going to be renamed and grouped together under one asset (shot) name. The concept of vertical renaming will be explained later in [Vertical Synchronization of Subset Attributes](#vertical-synchronization-of-subset-attributes). + +
+
+ + +### Create menu... + +
+
+ +After all clips which are intended to be converted to publishable instances are colored to `Chocolate` color, you can open OpenPype menu. + +
+
+ +![Create menu](assets/resolve_menu_openpype.png) + +
+ +
+ +
+
+
+After the menu widget is opened (it can take a while so be patient please :).
+
+Hit `Create ...` and then set **Use selection** to active and select the family to **Create Publishable Clips**.
+
+The Subset name can stay as it is, it is not going to be used because each clip will generate its own name.
+
+
+ +![Create menu](assets/resolve_create_clips.png) + +
+
+ +
+
+
+The new window that opens lets you define various attributes for your future subsets and shots.
+
+Set Rename clips to active if you wish to use different names of shots in pipeline than the original clip names conformed from EDL/XML.
+
+**Count sequence from** - Start of the shot numbering if `#` is used in one of the keywords
+
+**Stepping number** - Sequential gaps in the numbering
+
+As you can see in the `{shot}` key within *Shot Template Keywords* section, you can use `#` symbol to define padding of the number in sequence and where it's going to be used.
+
+
+ +![Create menu](assets/resolve_create_renaming_clips.png) + +
+
+ +
+
+
+Notice the relationship of the following sections. Keys from **Shot Template Keywords** sections will be used for formatting of templates in **Shot Hierarchy And Rename Settings** section.
+
+**Shot parent hierarchy** will be forming parents of the asset (shot) *the hidden root for this is project folder*. So for example of this template we will get resulting string `shots/sq01`
+
+**Clip name template** in context of clip sitting on track name `main` in second position `mainsq01sh020`. This is because the track key is hosting `{_track_}` which is inheriting name from timeline track name. Other allowed namespaces are:
+- `{_sequence_}`: timeline name
+- `{_clip_}`: clip name
+- `{_trackIndex_}`: position of track on timeline from bottom
+- `{_clipIndex_}`: clip position on timeline from left
+
+
+ +![Create menu](assets/resolve_create_template_filling.png) + +
+
+ +### Vertical synchronization of subset attributes + +In case you are only working with two tracks on the timeline, where the `main` track is going to be used as plates for compositors and the `review` track holds mp4 clips for offlines and web preview, **Enable vertical sync** can be deactivated. + +In a multiple tracks scenario - as mentioned [here](#rename-timeline-track-names) - it is recommended to activate **Enable vertical sync** and set the hero (driving) track to *main*. This will ensure that all of the clips corresponding to the same shots will have the same publishing parameters. + +

+ +
+ +
+ +![Create menu](assets/resolve_create_single_track_rename_hero_track.png) + +
+ +
+ +![Create menu](assets/resolve_create_vertical_rename_creator_ui.png) + +
+
+ + +## Publishing Shots + +
+
+ +Once all `Chocolate` colored clips have gone through the [creator](#create-menu), have been colored to `Pink` color and a marker has been created for each of them, it means they have been successfully converted to publishable clips. Now we can run **Publisher** - its button can be found in the OpenPype menu. + +

+
+ +
+
+ +![Create menu](assets/resolve_publish_instance_review_main.png) +Notice that the main track clips and review have been merged into one instance. And since it is the main `hero` clip, it is also holding all new shot metadata. For that reason it also creates a second instance for each with the `shot` family. This instance will create the whole shot hierarchy and pass frame range attributes to the shot (asset). + +
+
+ +
+
+ +![Create menu](assets/resolve_publish_instance_other_plateSubsets.png) +Also notice how the subset name is formed from a *track* name and *subset family* from previous steps. + +Also important is to notice the asset name in *OpenPypeData* at the marker - the name is the same for all **Vertically renamed** shots as they have been grouped together. Unfortunately Resolve does not allow renaming the clips, so the only way to know is to see it in the marker's metadata. + +
+
+ +
diff --git a/website/docs/artist_hosts_tvpaint.md b/website/docs/artist_hosts_tvpaint.md new file mode 100644 index 0000000000..19cb615158 --- /dev/null +++ b/website/docs/artist_hosts_tvpaint.md @@ -0,0 +1,208 @@ +--- +id: artist_hosts_tvpaint +title: TVPaint +sidebar_label: TVPaint +--- + +- [Work Files](artist_tools.md#workfiles) +- [Load](artist_tools.md#loader) +- [Create](artist_tools.md#creator) +- [Subset Manager](artist_tools.md#subset-manager) +- [Scene Inventory](artist_tools.md#scene-inventory) +- [Publish](artist_tools.md#publisher) +- [Library](artist_tools.md#library) + + +## Setup +When you launch TVPaint with OpenPype for the very first time it is necessary to do some additional steps. Right after the TVPaint launching a few system windows will pop up. + +![permission](assets/tvp_permission.png) + +Choose `Replace the file in the destination`. Then another window shows up. + +![permission2](assets/tvp_permission2.png) + +Click on `Continue`. + +After opening TVPaint go to the menu bar: `Windows → Plugins → OpenPype`. + +![pypewindow](assets/tvp_hidden_window.gif) + +Another TVPaint window pop up. Please press `Yes`. This window will be presented in every single TVPaint launching. Unfortunately, there is no other way how to workaround it. + +![writefile](assets/tvp_write_file.png) + +Now OpenPype Tools menu is in your TVPaint work area. + +![openpypetools](assets/tvp_openpype_menu.png) + +You can start your work. + +--- + +## Usage +In TVPaint you can find the Tools in OpenPype menu extension. The OpenPype Tools menu should be available in your work area. However, sometimes it happens that the Tools menu is hidden. You can display the extension panel by going to `Windows -> Plugins -> OpenPype`. + + +## Create +In TVPaint you can create and publish **[Reviews](#review)**, **[Render Passes](#render-pass)**, and **[Render Layers](#render-layer)**. + +You have the possibility to organize your layers by using `Color group`. 
+ +On the bottom left corner of your timeline, you will note a `Color group` button. + +![colorgroups](assets/tvp_color_groups.png) + +It allows you to choose a group by checking one of the colors of the color list. + +![colorgroups](assets/tvp_color_groups2.png) + +The timeline's animation layer can be marked by the color you pick from your Color group. Layers in the timeline with the same color are gathered into a group represents one render layer. + +![timeline](assets/tvp_timeline_color.png) + +:::important +OpenPype specifically never tries to guess what you want to publish from the scene. Therefore, you have to tell OpenPype what you want to publish. There are three ways how to publish render from the scene. +::: + +When you want to publish `review` or `render layer` or `render pass`, open the `Creator` through the Tools menu `Create` button. + +### Review + +
+
+ +`Review` renders the whole file as is and sends the resulting QuickTime to Ftrack. + +To create a reviewable quicktime of your animation: + +- select `Review` in the `Creator` +- press `Create` +- When you run [publish](#publish), the file will be rendered and converted to quicktime. + +
+
+ +![createreview](assets/tvp_create_review.png) + +
+
+ +### Render Layer + +
+
+ + +Render Layer bakes all the animation layers of one particular color group together. + +- Choose any amount of animation layers that need to be rendered together and assign them a color group. +- Select any layer of a particular color +- Go to `Creator` and choose `RenderLayer`. +- In the `Subset`, type in the name that the final published RenderLayer should have according to the naming convention in your studio. *(L10, BG, Hero, etc.)* +- Press `Create` +- When you run [publish](#publish), the whole color group will be rendered together and published as a single `RenderLayer` + +
+
+ +![createlayer](assets/tvp_create_layer.png) + +
+
+ + + + + +### Render Pass + +Render Passes are smaller individual elements of a Render Layer. A `character` render layer might +consist of multiple render passes such as `Line`, `Color` and `Shadow`. + + +
+
+Render Passes are specific because they have to belong to a particular layer. If you try to create a render pass and did not create any render layers before, an error message will pop up. + +When you want to create `RenderPass` +- choose one or several animation layers within one color group that you want to publish +- In the Creator, pick `RenderPass` +- Fill the `Subset` with the name of your pass, e.g. `Color`. +- Press `Create` + +
+
+ +![createpass](assets/tvp_create_pass.png) + +
+
+ +

+ +In this example, OpenPype will render selected animation layers within the given color group. I.e. the layers *L020_colour_fx*, *L020_colour_mouth*, and *L020_colour_eye* will be rendered as one pass belonging to the yellow RenderLayer. + +![renderpass](assets/tvp_timeline_color2.png) + + +:::note +You can check your RenderPasses and RenderLayers in [Subset Manager](#subset-manager) or you can start publishing. The publisher will show you a collection of all instances on the left side. +::: + + +--- + +## Publish + +
+
+ +Now that you have created the required instances, you can publish them via `Publish` tool. +- Click on `Publish` in OpenPype Tools menu. +- wait until all instances are collected. +- You can check on the left side whether all your instances have been created and are ready for publishing. +- Fill the comment on the bottom of the window. +- Press the `Play` button to publish + +
+
+ +![pyblish](assets/tvp_pyblish_render.png) + +
+
+ +Once the `Publisher` turns green your renders have been published. + +--- + +## Subset Manager +All created instances (render layers, passes, and reviews) will be shown as a simple list. If you don't want to publish some, right-click on the item in the list and select `Remove instance`. + +![subsetmanager](assets/tvp_subset_manager.png) + +--- + +## Load +When you want to load existing published work you can reach the `Loader` through the OpenPype Tools `Load` button. + +The supported families for TVPaint are: + +- `render` +- `image` +- `background` +- `plate` + +To load a family item, right-click on the subset you want and import their representations, switch among the versions, delete older versions, copy files, etc. + +![Loader](assets/tvp_loader.gif) + +--- + +## Scene Inventory +Scene Inventory shows you everything that you have loaded into your scene using OpenPype. You can reach it through the extension's `Scene Inventory` button. + +![sceneinventory](assets/tvp_scene_inventory.png) + +You can switch to a previous version of the file or update it to the latest or delete items. 
diff --git a/website/docs/assets/admin_hosts_aftereffects_settings.png b/website/docs/assets/admin_hosts_aftereffects_settings.png new file mode 100644 index 0000000000..9b879585f8 Binary files /dev/null and b/website/docs/assets/admin_hosts_aftereffects_settings.png differ diff --git a/website/docs/assets/admin_hosts_harmony_settings.png b/website/docs/assets/admin_hosts_harmony_settings.png new file mode 100644 index 0000000000..800a64e986 Binary files /dev/null and b/website/docs/assets/admin_hosts_harmony_settings.png differ diff --git a/website/docs/assets/hiero_createUIFrames.png b/website/docs/assets/hiero_createUIFrames.png new file mode 100644 index 0000000000..798b3efb79 Binary files /dev/null and b/website/docs/assets/hiero_createUIFrames.png differ diff --git a/website/docs/assets/hiero_createUIRename.png b/website/docs/assets/hiero_createUIRename.png new file mode 100644 index 0000000000..3c02254559 Binary files /dev/null and b/website/docs/assets/hiero_createUIRename.png differ diff --git a/website/docs/assets/hiero_defaultTags.png b/website/docs/assets/hiero_defaultTags.png new file mode 100644 index 0000000000..225ec7d484 Binary files /dev/null and b/website/docs/assets/hiero_defaultTags.png differ diff --git a/website/docs/assets/hiero_instanceCreator.png b/website/docs/assets/hiero_instanceCreator.png new file mode 100644 index 0000000000..bcda6cdd18 Binary files /dev/null and b/website/docs/assets/hiero_instanceCreator.png differ diff --git a/website/docs/assets/hiero_menuColorspaceClip.png b/website/docs/assets/hiero_menuColorspaceClip.png new file mode 100644 index 0000000000..4014da2675 Binary files /dev/null and b/website/docs/assets/hiero_menuColorspaceClip.png differ diff --git a/website/docs/assets/hiero_menuColorspaceProject.png b/website/docs/assets/hiero_menuColorspaceProject.png new file mode 100644 index 0000000000..6b8e6e1b89 Binary files /dev/null and b/website/docs/assets/hiero_menuColorspaceProject.png differ diff --git 
a/website/docs/assets/hiero_menuCreate.png b/website/docs/assets/hiero_menuCreate.png new file mode 100644 index 0000000000..cbd816a203 Binary files /dev/null and b/website/docs/assets/hiero_menuCreate.png differ diff --git a/website/docs/assets/hiero_menuDefaultTags.png b/website/docs/assets/hiero_menuDefaultTags.png new file mode 100644 index 0000000000..ba542e1019 Binary files /dev/null and b/website/docs/assets/hiero_menuDefaultTags.png differ diff --git a/website/docs/assets/hiero_menuPublish.png b/website/docs/assets/hiero_menuPublish.png new file mode 100644 index 0000000000..4259dc44e4 Binary files /dev/null and b/website/docs/assets/hiero_menuPublish.png differ diff --git a/website/docs/assets/hiero_tagHandles.png b/website/docs/assets/hiero_tagHandles.png new file mode 100644 index 0000000000..84e7012088 Binary files /dev/null and b/website/docs/assets/hiero_tagHandles.png differ diff --git a/website/docs/assets/hiero_timelinePrep.png b/website/docs/assets/hiero_timelinePrep.png new file mode 100644 index 0000000000..253bc114ee Binary files /dev/null and b/website/docs/assets/hiero_timelinePrep.png differ diff --git a/website/docs/assets/maya-admin_render_settings_validator.png b/website/docs/assets/maya-admin_render_settings_validator.png new file mode 100644 index 0000000000..8687b538b1 Binary files /dev/null and b/website/docs/assets/maya-admin_render_settings_validator.png differ diff --git a/website/docs/assets/maya-vray_proxy-loader.jpg b/website/docs/assets/maya-vray_proxy-loader.jpg new file mode 100644 index 0000000000..29632796ea Binary files /dev/null and b/website/docs/assets/maya-vray_proxy-loader.jpg differ diff --git a/website/docs/assets/maya-vray_proxy.jpg b/website/docs/assets/maya-vray_proxy.jpg new file mode 100644 index 0000000000..181b77db89 Binary files /dev/null and b/website/docs/assets/maya-vray_proxy.jpg differ diff --git a/website/docs/assets/nukestudio_basic_clipNaming.png b/website/docs/assets/nukestudio_basic_clipNaming.png 
deleted file mode 100644 index 71d623f706..0000000000 Binary files a/website/docs/assets/nukestudio_basic_clipNaming.png and /dev/null differ diff --git a/website/docs/assets/nukestudio_defaultTags.png b/website/docs/assets/nukestudio_defaultTags.png deleted file mode 100644 index 3ba15ccc17..0000000000 Binary files a/website/docs/assets/nukestudio_defaultTags.png and /dev/null differ diff --git a/website/docs/assets/nukestudio_lutSucess.png b/website/docs/assets/nukestudio_lutSucess.png deleted file mode 100644 index fa013b99b2..0000000000 Binary files a/website/docs/assets/nukestudio_lutSucess.png and /dev/null differ diff --git a/website/docs/assets/nukestudio_publishing_basic.png b/website/docs/assets/nukestudio_publishing_basic.png deleted file mode 100644 index 6592ec423c..0000000000 Binary files a/website/docs/assets/nukestudio_publishing_basic.png and /dev/null differ diff --git a/website/docs/assets/nukestudio_reviewing.png b/website/docs/assets/nukestudio_reviewing.png deleted file mode 100644 index 0d3b4170df..0000000000 Binary files a/website/docs/assets/nukestudio_reviewing.png and /dev/null differ diff --git a/website/docs/assets/nukestudio_setContext.png b/website/docs/assets/nukestudio_setContext.png deleted file mode 100644 index 8c8746a264..0000000000 Binary files a/website/docs/assets/nukestudio_setContext.png and /dev/null differ diff --git a/website/docs/assets/nukestudio_softEffects.png b/website/docs/assets/nukestudio_softEffects.png deleted file mode 100644 index 13b92801fd..0000000000 Binary files a/website/docs/assets/nukestudio_softEffects.png and /dev/null differ diff --git a/website/docs/assets/nukestudio_tagsToClips_basic.png b/website/docs/assets/nukestudio_tagsToClips_basic.png deleted file mode 100644 index fadb85342b..0000000000 Binary files a/website/docs/assets/nukestudio_tagsToClips_basic.png and /dev/null differ diff --git a/website/docs/assets/nukestudio_workfiles_openCorrect.png 
b/website/docs/assets/nukestudio_workfiles_openCorrect.png deleted file mode 100644 index e097e50d9e..0000000000 Binary files a/website/docs/assets/nukestudio_workfiles_openCorrect.png and /dev/null differ diff --git a/website/docs/assets/nukestudio_workfiles_openingLimit.png b/website/docs/assets/nukestudio_workfiles_openingLimit.png deleted file mode 100644 index d0e893f4e5..0000000000 Binary files a/website/docs/assets/nukestudio_workfiles_openingLimit.png and /dev/null differ diff --git a/website/docs/assets/resolve_clip_instances_pink_with_marker_in_middle.png b/website/docs/assets/resolve_clip_instances_pink_with_marker_in_middle.png new file mode 100644 index 0000000000..403f6e9433 Binary files /dev/null and b/website/docs/assets/resolve_clip_instances_pink_with_marker_in_middle.png differ diff --git a/website/docs/assets/resolve_create_audio_resolution.png b/website/docs/assets/resolve_create_audio_resolution.png new file mode 100644 index 0000000000..af22c7467e Binary files /dev/null and b/website/docs/assets/resolve_create_audio_resolution.png differ diff --git a/website/docs/assets/resolve_create_clips.png b/website/docs/assets/resolve_create_clips.png new file mode 100644 index 0000000000..b589bfb61e Binary files /dev/null and b/website/docs/assets/resolve_create_clips.png differ diff --git a/website/docs/assets/resolve_create_object_naming_convention.png b/website/docs/assets/resolve_create_object_naming_convention.png new file mode 100644 index 0000000000..13de366ef6 Binary files /dev/null and b/website/docs/assets/resolve_create_object_naming_convention.png differ diff --git a/website/docs/assets/resolve_create_renaming_clips.png b/website/docs/assets/resolve_create_renaming_clips.png new file mode 100644 index 0000000000..20c303e50a Binary files /dev/null and b/website/docs/assets/resolve_create_renaming_clips.png differ diff --git a/website/docs/assets/resolve_create_single_track_rename_hero_track.png 
b/website/docs/assets/resolve_create_single_track_rename_hero_track.png new file mode 100644 index 0000000000..5f68258d1d Binary files /dev/null and b/website/docs/assets/resolve_create_single_track_rename_hero_track.png differ diff --git a/website/docs/assets/resolve_create_subset_name_review_track.png b/website/docs/assets/resolve_create_subset_name_review_track.png new file mode 100644 index 0000000000..4efbff8409 Binary files /dev/null and b/website/docs/assets/resolve_create_subset_name_review_track.png differ diff --git a/website/docs/assets/resolve_create_template_filling.png b/website/docs/assets/resolve_create_template_filling.png new file mode 100644 index 0000000000..faa8c51ee3 Binary files /dev/null and b/website/docs/assets/resolve_create_template_filling.png differ diff --git a/website/docs/assets/resolve_create_vertical_rename_creator_ui.png b/website/docs/assets/resolve_create_vertical_rename_creator_ui.png new file mode 100644 index 0000000000..e163844993 Binary files /dev/null and b/website/docs/assets/resolve_create_vertical_rename_creator_ui.png differ diff --git a/website/docs/assets/resolve_create_vertical_rename_timeline.png b/website/docs/assets/resolve_create_vertical_rename_timeline.png new file mode 100644 index 0000000000..3e57db4119 Binary files /dev/null and b/website/docs/assets/resolve_create_vertical_rename_timeline.png differ diff --git a/website/docs/assets/resolve_creator_clip_marker_do_not_change.png b/website/docs/assets/resolve_creator_clip_marker_do_not_change.png new file mode 100644 index 0000000000..11cc5c4618 Binary files /dev/null and b/website/docs/assets/resolve_creator_clip_marker_do_not_change.png differ diff --git a/website/docs/assets/resolve_creator_framestart_handles.png b/website/docs/assets/resolve_creator_framestart_handles.png new file mode 100644 index 0000000000..65328fe041 Binary files /dev/null and b/website/docs/assets/resolve_creator_framestart_handles.png differ diff --git 
a/website/docs/assets/resolve_creator_subset_name.png b/website/docs/assets/resolve_creator_subset_name.png new file mode 100644 index 0000000000..4a42c5af2c Binary files /dev/null and b/website/docs/assets/resolve_creator_subset_name.png differ diff --git a/website/docs/assets/resolve_fusion_menu.png b/website/docs/assets/resolve_fusion_menu.png new file mode 100644 index 0000000000..ae1939690c Binary files /dev/null and b/website/docs/assets/resolve_fusion_menu.png differ diff --git a/website/docs/assets/resolve_fusion_script_settings.png b/website/docs/assets/resolve_fusion_script_settings.png new file mode 100644 index 0000000000..6d903b3ef4 Binary files /dev/null and b/website/docs/assets/resolve_fusion_script_settings.png differ diff --git a/website/docs/assets/resolve_fusion_tab.png b/website/docs/assets/resolve_fusion_tab.png new file mode 100644 index 0000000000..657d53cb16 Binary files /dev/null and b/website/docs/assets/resolve_fusion_tab.png differ diff --git a/website/docs/assets/resolve_menu_openpype.png b/website/docs/assets/resolve_menu_openpype.png new file mode 100644 index 0000000000..9812858072 Binary files /dev/null and b/website/docs/assets/resolve_menu_openpype.png differ diff --git a/website/docs/assets/resolve_menu_openpype_opened.png b/website/docs/assets/resolve_menu_openpype_opened.png new file mode 100644 index 0000000000..9b0e35569b Binary files /dev/null and b/website/docs/assets/resolve_menu_openpype_opened.png differ diff --git a/website/docs/assets/resolve_publish_instance_other_plateSubsets.png b/website/docs/assets/resolve_publish_instance_other_plateSubsets.png new file mode 100644 index 0000000000..fd5f857da5 Binary files /dev/null and b/website/docs/assets/resolve_publish_instance_other_plateSubsets.png differ diff --git a/website/docs/assets/resolve_publish_instance_review_main.png b/website/docs/assets/resolve_publish_instance_review_main.png new file mode 100644 index 0000000000..0cf5ed3b99 Binary files /dev/null and 
b/website/docs/assets/resolve_publish_instance_review_main.png differ diff --git a/website/docs/assets/resolve_remame_track_names.png b/website/docs/assets/resolve_remame_track_names.png new file mode 100644 index 0000000000..01174ea644 Binary files /dev/null and b/website/docs/assets/resolve_remame_track_names.png differ diff --git a/website/docs/assets/resolve_select_clips_timeline_chocolate.png b/website/docs/assets/resolve_select_clips_timeline_chocolate.png new file mode 100644 index 0000000000..b4a682e83a Binary files /dev/null and b/website/docs/assets/resolve_select_clips_timeline_chocolate.png differ diff --git a/website/docs/assets/tvp_asset_loader_actions.png b/website/docs/assets/tvp_asset_loader_actions.png new file mode 100644 index 0000000000..dbc3734e10 Binary files /dev/null and b/website/docs/assets/tvp_asset_loader_actions.png differ diff --git a/website/docs/assets/tvp_asset_loader_version.png b/website/docs/assets/tvp_asset_loader_version.png new file mode 100644 index 0000000000..b8052f3c84 Binary files /dev/null and b/website/docs/assets/tvp_asset_loader_version.png differ diff --git a/website/docs/assets/tvp_color_groups.png b/website/docs/assets/tvp_color_groups.png new file mode 100644 index 0000000000..558ff687c6 Binary files /dev/null and b/website/docs/assets/tvp_color_groups.png differ diff --git a/website/docs/assets/tvp_color_groups2.png b/website/docs/assets/tvp_color_groups2.png new file mode 100644 index 0000000000..7ab6775769 Binary files /dev/null and b/website/docs/assets/tvp_color_groups2.png differ diff --git a/website/docs/assets/tvp_create_layer.png b/website/docs/assets/tvp_create_layer.png new file mode 100644 index 0000000000..9d243da17a Binary files /dev/null and b/website/docs/assets/tvp_create_layer.png differ diff --git a/website/docs/assets/tvp_create_pass.png b/website/docs/assets/tvp_create_pass.png new file mode 100644 index 0000000000..7d226ea4b5 Binary files /dev/null and 
b/website/docs/assets/tvp_create_pass.png differ diff --git a/website/docs/assets/tvp_create_review.png b/website/docs/assets/tvp_create_review.png new file mode 100644 index 0000000000..d6e9f63428 Binary files /dev/null and b/website/docs/assets/tvp_create_review.png differ diff --git a/website/docs/assets/tvp_hidden_window.gif b/website/docs/assets/tvp_hidden_window.gif new file mode 100644 index 0000000000..b1adaa75b8 Binary files /dev/null and b/website/docs/assets/tvp_hidden_window.gif differ diff --git a/website/docs/assets/tvp_library.gif b/website/docs/assets/tvp_library.gif new file mode 100644 index 0000000000..14de86cdb5 Binary files /dev/null and b/website/docs/assets/tvp_library.gif differ diff --git a/website/docs/assets/tvp_loader.gif b/website/docs/assets/tvp_loader.gif new file mode 100644 index 0000000000..5775460372 Binary files /dev/null and b/website/docs/assets/tvp_loader.gif differ diff --git a/website/docs/assets/tvp_openpype_menu.png b/website/docs/assets/tvp_openpype_menu.png new file mode 100644 index 0000000000..cb5c2d4aac Binary files /dev/null and b/website/docs/assets/tvp_openpype_menu.png differ diff --git a/website/docs/assets/tvp_permission.png b/website/docs/assets/tvp_permission.png new file mode 100644 index 0000000000..e47d9841b6 Binary files /dev/null and b/website/docs/assets/tvp_permission.png differ diff --git a/website/docs/assets/tvp_permission2.png b/website/docs/assets/tvp_permission2.png new file mode 100644 index 0000000000..827e85db39 Binary files /dev/null and b/website/docs/assets/tvp_permission2.png differ diff --git a/website/docs/assets/tvp_pyblish.png b/website/docs/assets/tvp_pyblish.png new file mode 100644 index 0000000000..88423a3c75 Binary files /dev/null and b/website/docs/assets/tvp_pyblish.png differ diff --git a/website/docs/assets/tvp_pyblish_render.png b/website/docs/assets/tvp_pyblish_render.png new file mode 100644 index 0000000000..7d279c3aa0 Binary files /dev/null and 
b/website/docs/assets/tvp_pyblish_render.png differ diff --git a/website/docs/assets/tvp_render_pass.png b/website/docs/assets/tvp_render_pass.png new file mode 100644 index 0000000000..07790af7b2 Binary files /dev/null and b/website/docs/assets/tvp_render_pass.png differ diff --git a/website/docs/assets/tvp_scene_inventory.png b/website/docs/assets/tvp_scene_inventory.png new file mode 100644 index 0000000000..25c717b331 Binary files /dev/null and b/website/docs/assets/tvp_scene_inventory.png differ diff --git a/website/docs/assets/tvp_subset_manager.png b/website/docs/assets/tvp_subset_manager.png new file mode 100644 index 0000000000..1ffbead4ba Binary files /dev/null and b/website/docs/assets/tvp_subset_manager.png differ diff --git a/website/docs/assets/tvp_timeline_color.png b/website/docs/assets/tvp_timeline_color.png new file mode 100644 index 0000000000..6d00d4c8af Binary files /dev/null and b/website/docs/assets/tvp_timeline_color.png differ diff --git a/website/docs/assets/tvp_timeline_color2.png b/website/docs/assets/tvp_timeline_color2.png new file mode 100644 index 0000000000..e20e190471 Binary files /dev/null and b/website/docs/assets/tvp_timeline_color2.png differ diff --git a/website/docs/assets/tvp_write_file.png b/website/docs/assets/tvp_write_file.png new file mode 100644 index 0000000000..109e6badc9 Binary files /dev/null and b/website/docs/assets/tvp_write_file.png differ diff --git a/website/docs/dev_build.md b/website/docs/dev_build.md index 9523035705..14efeaa850 100644 --- a/website/docs/dev_build.md +++ b/website/docs/dev_build.md @@ -1,6 +1,6 @@ --- id: dev_build -title: Build openPYPE from source +title: Build OpenPYPE from source sidebar_label: Build --- @@ -45,12 +45,12 @@ To start OpenPype from source you need to 2) Run `.\tools\run_tray.ps1` if you have all required dependencies on your machine you should be greeted with OpenPype igniter window and once you give it your Mongo URL, with OpenPype icon in the system tray. 
-### To build openPype: +### To build OpenPype: 1) Run `.\tools\create_env.ps1` to create virtual environment in `.\venv` 2) Run `.\tools\build.ps1` to build pype executables in `.\build\` -To create distributable openPype versions, run `./tools/create_zip.ps1` - that will +To create distributable OpenPype versions, run `./tools/create_zip.ps1` - that will create zip file with name `pype-vx.x.x.zip` parsed from current pype repository and copy it to user data dir. You can specify `--path /path/to/zip` to force it into a different location. This can be used to prepare new version releases for artists in the studio environment @@ -61,7 +61,24 @@ without the need to re-build the whole package -To build pype on linux you wil need: +#### Docker +You can use Docker to build OpenPype. Just run: +```sh +sudo ./tools/docker_build.sh +``` +and you should have built OpenPype in `build` directory. It is using **Centos 7** +as a base image. + +You can pull the image: + +```sh +# replace 3.0.0 tag with version you want +docker pull pypeclub/openpype:3.0.0 +``` +See https://hub.docker.com/r/pypeclub/openpype/tag for more. + +#### Manual build +To build OpenPype on Linux you wil need: - **[curl](https://curl.se)** on systems that doesn't have one preinstalled. - Python header files installed (**python3-dev** on Ubuntu for example). @@ -143,7 +160,7 @@ pyenv local 3.7.9 To build pype on MacOS you wil need: - **[Homebrew](https://brew.sh)**, Easy way of installing everything necessary is to use. -- **[CMake](https://cmake.org/)** to build some external openPype dependencies. +- **[CMake](https://cmake.org/)** to build some external OpenPype dependencies. 
- **XCode Command Line Tools** (or some other build system) 1) Install **Homebrew**: diff --git a/website/docs/project_settings/assets/global_extract_review_letter_box.png b/website/docs/project_settings/assets/global_extract_review_letter_box.png new file mode 100644 index 0000000000..7cd9ecbdd6 Binary files /dev/null and b/website/docs/project_settings/assets/global_extract_review_letter_box.png differ diff --git a/website/docs/project_settings/assets/global_extract_review_letter_box_settings.png b/website/docs/project_settings/assets/global_extract_review_letter_box_settings.png new file mode 100644 index 0000000000..9ad9c05f43 Binary files /dev/null and b/website/docs/project_settings/assets/global_extract_review_letter_box_settings.png differ diff --git a/website/docs/project_settings/assets/global_extract_review_output_defs.png b/website/docs/project_settings/assets/global_extract_review_output_defs.png new file mode 100644 index 0000000000..0dc8329324 Binary files /dev/null and b/website/docs/project_settings/assets/global_extract_review_output_defs.png differ diff --git a/website/docs/project_settings/assets/global_extract_review_profiles.png b/website/docs/project_settings/assets/global_extract_review_profiles.png new file mode 100644 index 0000000000..1b91786ff6 Binary files /dev/null and b/website/docs/project_settings/assets/global_extract_review_profiles.png differ diff --git a/website/docs/project_settings/assets/global_integrate_new_subset_group.png b/website/docs/project_settings/assets/global_integrate_new_subset_group.png new file mode 100644 index 0000000000..92638984cc Binary files /dev/null and b/website/docs/project_settings/assets/global_integrate_new_subset_group.png differ diff --git a/website/docs/project_settings/assets/global_integrate_new_template_name_profile.png b/website/docs/project_settings/assets/global_integrate_new_template_name_profile.png new file mode 100644 index 0000000000..8529761a50 Binary files /dev/null and 
b/website/docs/project_settings/assets/global_integrate_new_template_name_profile.png differ diff --git a/website/docs/project_settings/settings_project_global.md b/website/docs/project_settings/settings_project_global.md new file mode 100644 index 0000000000..4fee57d575 --- /dev/null +++ b/website/docs/project_settings/settings_project_global.md @@ -0,0 +1,115 @@ +--- +id: settings_project_global +title: Project Global Setting +sidebar_label: Global +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +Project settings can have project specific values. Each new project is using studio values defined in **default** project but these values can be modified or overridden per project. + +:::warning Default studio values +Projects always use default project values unless they have [project override](../admin_settings#project-overrides) (orange colour). Any changes in default project may affect all existing projects. +::: + +## Profile filters + +Many of the settings are using a concept of **Profile filters** + +You can define multiple profiles to choose from for different contexts. Each filter is evaluated and a +profile with filters matching the current context the most, is used. + +You can define a profile without any filters and use it as **default**. + +Only **one or none** profile will be returned per context. + +All context filters are lists which may contain strings or Regular expressions (RegEx). +- **`hosts`** - Host from which publishing was triggered. `["maya", "nuke"]` +- **`families`** - Main family of processed subset. `["plate", "model"]` +- **`tasks`** - Currently processed task. `["modeling", "animation"]` + +:::important Filtering +Filters are optional. In case when multiple profiles match current context, a profile with a higher number of matched filters has higher priority than a profile without filters. +(Eg. order of when filter is added doesn't matter, only the precision of matching does.) 
+::: + +## Publish plugins + +Publish plugins used across all integrations. + + +### Extract Review +Plugin responsible for automatic FFmpeg conversion to a variety of formats. + +Extract review is using [profile filtering](#profile-filters) to be able to render different outputs for different situations. + +Applicable context filters: +- **`hosts`** - Host from which publishing was triggered. `["maya", "nuke"]` +- **`families`** - Main family of processed subset. `["plate", "model"]` + +![global_extract_review_profiles](assets/global_extract_review_profiles.png) + +**Output Definitions** + +Profile may generate multiple outputs from a single input. Each output must define unique name and output extension (use the extension without a dot e.g. **mp4**). All other settings of output definition are optional. + +![global_extract_review_output_defs](assets/global_extract_review_output_defs.png) +- **`Tags`** + Define what will happen to output. + +- **`FFmpeg arguments`** + These arguments are appended to ffmpeg arguments auto generated by publish plugin. Some of arguments are handled automatically like rescaling or letterboxes. + - **Video filters** additional FFmpeg filters that would be defined in `-filter:v` or `-vf` command line arguments. + - **Audio filters** additional FFmpeg filters that would be defined in `-filter:a` or `-af` command line arguments. + - **Input arguments** input definition arguments of video or image sequence - this setting has limitations as you have to know what is input. + - **Output arguments** other FFmpeg output arguments like codec definition. + +- **`Output width`** and **`Output height`** + - it is possible to rescale output to specified resolution and keep aspect ratio. + - If value is set to 0, source resolution will be used. 
+ +- **`Letter Box`** + - **Enabled** - Enable letter boxes + - **Ratio** - Ratio of letter boxes + - **Type** - **Letterbox** (horizontal bars) or **Pillarbox** (vertical bars) + - **Fill color** - Fill color of boxes (RGBA: 0-255) + - **Line Thickness** - Line thickness on the edge of box (set to `0` to turn off) + - **Line color** - Line color on the edge of box (RGBA: 0-255) + - **Example** + + ![global_extract_review_letter_box_settings](assets/global_extract_review_letter_box_settings.png) + ![global_extract_review_letter_box](assets/global_extract_review_letter_box.png) + +### IntegrateAssetNew + +Saves information for all published subsets into DB, published assets are available for other hosts, tools and tasks after. +#### Template name profiles + +Allows to select [anatomy template](admin_settings_project_anatomy.md#templates) based on context of subset being published. + +For example for `render` profile you might want to publish and store assets in different location (based on anatomy setting) than for `publish` profile. +[Profile filtering](#profile-filters) is used to select between appropriate template for each context of published subsets. + +Applicable context filters: +- **`hosts`** - Host from which publishing was triggered. `["maya", "nuke"]` +- **`tasks`** - Current task. `["modeling", "animation"]` + + ![global_integrate_new_template_name_profile](assets/global_integrate_new_template_name_profile.png) + +(This image shows use case where `render` anatomy template is used for subsets of families ['review', 'render', 'prerender'], `publish` template is chosen for all other.) + +#### Subset grouping profiles + +Published subsets might be grouped together for cleaner and easier selection in **[Loader](artist_tools.md#subset-groups)** + +Group name is chosen with use of [profile filtering](#profile-filters) + +Applicable context filters: +- **`families`** - Main family of processed subset. 
`["plate", "model"]` +- **`hosts`** - Host from which publishing was triggered. `["maya", "nuke"]` +- **`tasks`** - Current task. `["modeling", "animation"]` + + ![global_integrate_new_template_name_profile](assets/global_integrate_new_subset_group.png) + +(This image shows use case where only assets published from 'photoshop', for all families for all tasks should be marked as grouped with a capitalized name of Task where they are published from.) \ No newline at end of file diff --git a/website/sidebars.js b/website/sidebars.js index 82f063e252..c9edf5e3b7 100644 --- a/website/sidebars.js +++ b/website/sidebars.js @@ -16,13 +16,15 @@ module.exports = { collapsed: false, label: "Integrations", items: [ - "artist_hosts_nukestudio", + "artist_hosts_hiero", "artist_hosts_nuke", "artist_hosts_maya", "artist_hosts_blender", "artist_hosts_harmony", "artist_hosts_aftereffects", + "artist_hosts_resolve", "artist_hosts_photoshop", + "artist_hosts_tvpaint", "artist_hosts_unreal", { type: "category", @@ -57,7 +59,13 @@ module.exports = { "admin_settings", "admin_settings_system", "admin_settings_project_anatomy", - "admin_settings_project", + { + type: "category", + label: "Project Settings", + items: [ + "project_settings/settings_project_global" + ], + }, ], }, { @@ -71,6 +79,17 @@ module.exports = { "module_clockify" ], }, + { + type: "category", + label: "Integrations", + items: [ + "admin_hosts_blender", + "admin_hosts_maya", + "admin_hosts_resolve", + "admin_hosts_harmony", + "admin_hosts_aftereffects" + ], + }, { type: "category", label: "Releases",