Merge branch 'develop' into feature/PYPE-338-publish-representations-for-sho

# Conflicts:
#	pype/plugins/ftrack/publish/integrate_ftrack_api.py
Jakub Jezek 2019-06-05 16:04:45 +02:00
commit 075e7cbada
94 changed files with 2438 additions and 2117 deletions

8
.gitignore vendored
@@ -4,3 +4,11 @@
__pycache__/
*.py[cod]
*$py.class
# Documentation
###############
/docs/build
# Editor backup files #
#######################
*~

@@ -1,20 +1,31 @@
The base studio _config_ for [Avalon](https://getavalon.github.io/)
Pype
====
The base studio _config_ for [Avalon](https://getavalon.github.io/)
Currently this config depends on our customised avalon installation, so it won't work with vanilla avalon core. We're working on open-sourcing all of the necessary code, though. You can still get inspiration from it, or take our individual validators and scripts, which should work just fine in other pipelines.
_This configuration acts as a starting point for all pype club clients with avalon deployment._
### Code convention
Code convention
---------------
Below are some of the standard practices applied to this repository.
- **Etiquette: PEP8**
All code is written in PEP8. It is recommended you use a linter as you work; flake8 and pylint are both good options.
- **Etiquette: Napoleon docstrings**
All docstrings are written in the Google Napoleon format. See [Napoleon](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) for details.
- **Etiquette: Semantic Versioning**
This project follows [semantic versioning](http://semver.org).
- **Etiquette: Underscore means private**
Anything prefixed with an underscore is internal to wherever it is used. For example, a variable name is only ever used in the parent function or class, and a module is not for use by the end-user. In contrast, anything without an underscore is public, though not necessarily part of the API. Members of the API reside in `api.py`.
- **API: Idempotence**
A public function must be able to be called twice and produce the exact same result. This means no changing of state without restoring the previous state when finishing. For example, if a function requires changing the current selection in Autodesk Maya, it must restore the previous selection prior to completing (see the sketch below).
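A minimal sketch of how a few of these conventions combine in practice. This is illustration only: the function names are hypothetical, and the selection handling assumes Maya's `maya.cmds`:

```python
from maya import cmds


def assign_shader(nodes, shading_group):
    """Assign ``shading_group`` to ``nodes`` without side effects.

    Args:
        nodes (list): Names of the nodes to assign the shading group to.
        shading_group (str): Name of the shading group, e.g. "lambert1SG".

    """
    # Idempotence: remember the current selection ...
    previous_selection = cmds.ls(selection=True)
    try:
        cmds.select(nodes, replace=True)
        cmds.hyperShade(assign=shading_group)
    finally:
        # ... and restore it before returning, so repeated calls
        # leave the scene in exactly the same state.
        if previous_selection:
            cmds.select(previous_selection, replace=True)
        else:
            cmds.select(clear=True)


def _ensure_sg_suffix(name):
    """Internal helper; the leading underscore marks it as private."""
    return name if name.endswith("SG") else "{}SG".format(name)
```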

19
docs/Makefile Normal file
@@ -0,0 +1,19 @@
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SOURCEDIR = source
BUILDDIR = build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

35
docs/make.bat Normal file
@@ -0,0 +1,35 @@
@ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=source
set BUILDDIR=build
if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
:end
popd

222
docs/source/conf.py Normal file
@@ -0,0 +1,222 @@
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sys
import os
from pprint import pprint
from pypeapp.pypeLauncher import PypeLauncher
from pypeapp.storage import Storage
from pypeapp.deployment import Deployment
pype_setup = os.getenv('PYPE_ROOT')
d = Deployment(pype_setup)
launcher = PypeLauncher()
tools, config_path = d.get_environment_data()
os.environ['PYPE_CONFIG'] = config_path
os.environ['TOOL_ENV'] = os.path.normpath(os.path.join(config_path,
'environments'))
launcher._add_modules()
Storage().update_environment()
launcher._load_default_environments(tools=tools)
# -- Project information -----------------------------------------------------
project = 'pype'
copyright = '2019, Orbi Tools'
author = 'Orbi Tools'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'recommonmark'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# -- Options for autodoc -----------------------------------------------------
autodoc_default_flags = ['members']
autosummary_generate = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'collapse_navigation': False
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pypedoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pype.tex', 'pype Documentation',
'OrbiTools', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pype', 'pype Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pype', 'pype Documentation',
author, 'pype', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

18
docs/source/index.rst Normal file
@@ -0,0 +1,18 @@
.. pype documentation master file, created by
sphinx-quickstart on Mon May 13 17:18:23 2019.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to pype's documentation!
================================
.. toctree::
readme
modules
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

7
docs/source/modules.rst Normal file
@@ -0,0 +1,7 @@
pype
====
.. toctree::
:maxdepth: 6
pype

@@ -0,0 +1,20 @@
pype.aport package
==================
.. automodule:: pype.aport
:members:
:undoc-members:
:show-inheritance:
Submodules
----------
pype.aport.api module
---------------------
.. automodule:: pype.aport.api
:members:
:undoc-members:
:show-inheritance:

@@ -0,0 +1,20 @@
pype.avalon\_apps package
=========================
.. automodule:: pype.avalon_apps
:members:
:undoc-members:
:show-inheritance:
Submodules
----------
pype.avalon\_apps.avalon\_app module
------------------------------------
.. automodule:: pype.avalon_apps.avalon_app
:members:
:undoc-members:
:show-inheritance:

@@ -0,0 +1,36 @@
pype.clockify package
=====================
.. automodule:: pype.clockify
:members:
:undoc-members:
:show-inheritance:
Submodules
----------
pype.clockify.clockify module
-----------------------------
.. automodule:: pype.clockify.clockify
:members:
:undoc-members:
:show-inheritance:
pype.clockify.clockify\_api module
----------------------------------
.. automodule:: pype.clockify.clockify_api
:members:
:undoc-members:
:show-inheritance:
pype.clockify.widget\_settings module
-------------------------------------
.. automodule:: pype.clockify.widget_settings
:members:
:undoc-members:
:show-inheritance:

@@ -0,0 +1,36 @@
pype.ftrack.ftrack\_server package
==================================
.. automodule:: pype.ftrack.ftrack_server
:members:
:undoc-members:
:show-inheritance:
Submodules
----------
pype.ftrack.ftrack\_server.event\_server module
-----------------------------------------------
.. automodule:: pype.ftrack.ftrack_server.event_server
:members:
:undoc-members:
:show-inheritance:
pype.ftrack.ftrack\_server.event\_server\_cli module
----------------------------------------------------
.. automodule:: pype.ftrack.ftrack_server.event_server_cli
:members:
:undoc-members:
:show-inheritance:
pype.ftrack.ftrack\_server.ftrack\_server module
------------------------------------------------
.. automodule:: pype.ftrack.ftrack_server.ftrack_server
:members:
:undoc-members:
:show-inheritance:

@@ -0,0 +1,52 @@
pype.ftrack.lib package
=======================
.. automodule:: pype.ftrack.lib
:members:
:undoc-members:
:show-inheritance:
Submodules
----------
pype.ftrack.lib.avalon\_sync module
-----------------------------------
.. automodule:: pype.ftrack.lib.avalon_sync
:members:
:undoc-members:
:show-inheritance:
pype.ftrack.lib.ftrack\_action\_handler module
----------------------------------------------
.. automodule:: pype.ftrack.lib.ftrack_action_handler
:members:
:undoc-members:
:show-inheritance:
pype.ftrack.lib.ftrack\_app\_handler module
-------------------------------------------
.. automodule:: pype.ftrack.lib.ftrack_app_handler
:members:
:undoc-members:
:show-inheritance:
pype.ftrack.lib.ftrack\_base\_handler module
--------------------------------------------
.. automodule:: pype.ftrack.lib.ftrack_base_handler
:members:
:undoc-members:
:show-inheritance:
pype.ftrack.lib.ftrack\_event\_handler module
---------------------------------------------
.. automodule:: pype.ftrack.lib.ftrack_event_handler
:members:
:undoc-members:
:show-inheritance:

@@ -0,0 +1,52 @@
pype.ftrack package
===================
.. automodule:: pype.ftrack
:members:
:undoc-members:
:show-inheritance:
Subpackages
-----------
.. toctree::
pype.ftrack.ftrack_server
pype.ftrack.lib
Submodules
----------
pype.ftrack.credentials module
------------------------------
.. automodule:: pype.ftrack.credentials
:members:
:undoc-members:
:show-inheritance:
pype.ftrack.ftrack\_module module
---------------------------------
.. automodule:: pype.ftrack.ftrack_module
:members:
:undoc-members:
:show-inheritance:
pype.ftrack.login\_dialog module
--------------------------------
.. automodule:: pype.ftrack.login_dialog
:members:
:undoc-members:
:show-inheritance:
pype.ftrack.login\_tools module
-------------------------------
.. automodule:: pype.ftrack.login_tools
:members:
:undoc-members:
:show-inheritance:

@@ -0,0 +1,27 @@
pype.fusion package
===================
.. automodule:: pype.fusion
:members:
:undoc-members:
:show-inheritance:
Subpackages
-----------
.. toctree::
pype.fusion.scripts
Submodules
----------
pype.fusion.lib module
----------------------
.. automodule:: pype.fusion.lib
:members:
:undoc-members:
:show-inheritance:

@@ -0,0 +1,28 @@
pype.fusion.scripts package
===========================
.. automodule:: pype.fusion.scripts
:members:
:undoc-members:
:show-inheritance:
Submodules
----------
pype.fusion.scripts.fusion\_switch\_shot module
-----------------------------------------------
.. automodule:: pype.fusion.scripts.fusion_switch_shot
:members:
:undoc-members:
:show-inheritance:
pype.fusion.scripts.publish\_filesequence module
------------------------------------------------
.. automodule:: pype.fusion.scripts.publish_filesequence
:members:
:undoc-members:
:show-inheritance:

@@ -0,0 +1,20 @@
pype.houdini package
====================
.. automodule:: pype.houdini
:members:
:undoc-members:
:show-inheritance:
Submodules
----------
pype.houdini.lib module
-----------------------
.. automodule:: pype.houdini.lib
:members:
:undoc-members:
:show-inheritance:

52
docs/source/pype.maya.rst Normal file
@@ -0,0 +1,52 @@
pype.maya package
=================
.. automodule:: pype.maya
:members:
:undoc-members:
:show-inheritance:
Submodules
----------
pype.maya.action module
-----------------------
.. automodule:: pype.maya.action
:members:
:undoc-members:
:show-inheritance:
pype.maya.customize module
--------------------------
.. automodule:: pype.maya.customize
:members:
:undoc-members:
:show-inheritance:
pype.maya.lib module
--------------------
.. automodule:: pype.maya.lib
:members:
:undoc-members:
:show-inheritance:
pype.maya.menu module
---------------------
.. automodule:: pype.maya.menu
:members:
:undoc-members:
:show-inheritance:
pype.maya.plugin module
-----------------------
.. automodule:: pype.maya.plugin
:members:
:undoc-members:
:show-inheritance:

44
docs/source/pype.nuke.rst Normal file
@@ -0,0 +1,44 @@
pype.nuke package
=================
.. automodule:: pype.nuke
:members:
:undoc-members:
:show-inheritance:
Submodules
----------
pype.nuke.actions module
------------------------
.. automodule:: pype.nuke.actions
:members:
:undoc-members:
:show-inheritance:
pype.nuke.lib module
--------------------
.. automodule:: pype.nuke.lib
:members:
:undoc-members:
:show-inheritance:
pype.nuke.menu module
---------------------
.. automodule:: pype.nuke.menu
:members:
:undoc-members:
:show-inheritance:
pype.nuke.templates module
--------------------------
.. automodule:: pype.nuke.templates
:members:
:undoc-members:
:show-inheritance:

@@ -0,0 +1,20 @@
pype.premiere package
=====================
.. automodule:: pype.premiere
:members:
:undoc-members:
:show-inheritance:
Submodules
----------
pype.premiere.templates module
------------------------------
.. automodule:: pype.premiere.templates
:members:
:undoc-members:
:show-inheritance:

88
docs/source/pype.rst Normal file
@@ -0,0 +1,88 @@
pype package
============
.. automodule:: pype
:members:
:undoc-members:
:show-inheritance:
Subpackages
-----------
.. toctree::
pype.aport
pype.avalon_apps
pype.clockify
pype.ftrack
pype.fusion
pype.houdini
pype.maya
pype.nuke
pype.premiere
pype.scripts
pype.services
pype.standalonepublish
pype.tools
pype.widgets
Submodules
----------
pype.action module
------------------
.. automodule:: pype.action
:members:
:undoc-members:
:show-inheritance:
pype.api module
---------------
.. automodule:: pype.api
:members:
:undoc-members:
:show-inheritance:
pype.launcher\_actions module
-----------------------------
.. automodule:: pype.launcher_actions
:members:
:undoc-members:
:show-inheritance:
pype.lib module
---------------
.. automodule:: pype.lib
:members:
:undoc-members:
:show-inheritance:
pype.plugin module
------------------
.. automodule:: pype.plugin
:members:
:undoc-members:
:show-inheritance:
pype.setdress\_api module
-------------------------
.. automodule:: pype.setdress_api
:members:
:undoc-members:
:show-inheritance:
pype.templates module
---------------------
.. automodule:: pype.templates
:members:
:undoc-members:
:show-inheritance:

@@ -0,0 +1,28 @@
pype.scripts package
====================
.. automodule:: pype.scripts
:members:
:undoc-members:
:show-inheritance:
Submodules
----------
pype.scripts.fusion\_switch\_shot module
----------------------------------------
.. automodule:: pype.scripts.fusion_switch_shot
:members:
:undoc-members:
:show-inheritance:
pype.scripts.publish\_filesequence module
-----------------------------------------
.. automodule:: pype.scripts.publish_filesequence
:members:
:undoc-members:
:show-inheritance:

@@ -0,0 +1,20 @@
pype.services.idle\_manager package
===================================
.. automodule:: pype.services.idle_manager
:members:
:undoc-members:
:show-inheritance:
Submodules
----------
pype.services.idle\_manager.idle\_manager module
------------------------------------------------
.. automodule:: pype.services.idle_manager.idle_manager
:members:
:undoc-members:
:show-inheritance:

@@ -0,0 +1,17 @@
pype.services package
=====================
.. automodule:: pype.services
:members:
:undoc-members:
:show-inheritance:
Subpackages
-----------
.. toctree::
pype.services.idle_manager
pype.services.statics_server
pype.services.timers_manager

@@ -0,0 +1,20 @@
pype.services.statics\_server package
=====================================
.. automodule:: pype.services.statics_server
:members:
:undoc-members:
:show-inheritance:
Submodules
----------
pype.services.statics\_server.statics\_server module
----------------------------------------------------
.. automodule:: pype.services.statics_server.statics_server
:members:
:undoc-members:
:show-inheritance:

@@ -0,0 +1,28 @@
pype.services.timers\_manager package
=====================================
.. automodule:: pype.services.timers_manager
:members:
:undoc-members:
:show-inheritance:
Submodules
----------
pype.services.timers\_manager.timers\_manager module
----------------------------------------------------
.. automodule:: pype.services.timers_manager.timers_manager
:members:
:undoc-members:
:show-inheritance:
pype.services.timers\_manager.widget\_user\_idle module
-------------------------------------------------------
.. automodule:: pype.services.timers_manager.widget_user_idle
:members:
:undoc-members:
:show-inheritance:

@@ -0,0 +1,8 @@
pype.standalonepublish.resources package
========================================
.. automodule:: pype.standalonepublish.resources
:members:
:undoc-members:
:show-inheritance:

@@ -0,0 +1,44 @@
pype.standalonepublish package
==============================
.. automodule:: pype.standalonepublish
:members:
:undoc-members:
:show-inheritance:
Subpackages
-----------
.. toctree::
pype.standalonepublish.resources
pype.standalonepublish.widgets
Submodules
----------
pype.standalonepublish.app module
---------------------------------
.. automodule:: pype.standalonepublish.app
:members:
:undoc-members:
:show-inheritance:
pype.standalonepublish.publish module
-------------------------------------
.. automodule:: pype.standalonepublish.publish
:members:
:undoc-members:
:show-inheritance:
pype.standalonepublish.standalonepublish\_module module
-------------------------------------------------------
.. automodule:: pype.standalonepublish.standalonepublish_module
:members:
:undoc-members:
:show-inheritance:

@@ -0,0 +1,156 @@
pype.standalonepublish.widgets package
======================================
.. automodule:: pype.standalonepublish.widgets
:members:
:undoc-members:
:show-inheritance:
Submodules
----------
pype.standalonepublish.widgets.button\_from\_svgs module
--------------------------------------------------------
.. automodule:: pype.standalonepublish.widgets.button_from_svgs
:members:
:undoc-members:
:show-inheritance:
pype.standalonepublish.widgets.model\_asset module
--------------------------------------------------
.. automodule:: pype.standalonepublish.widgets.model_asset
:members:
:undoc-members:
:show-inheritance:
pype.standalonepublish.widgets.model\_filter\_proxy\_exact\_match module
------------------------------------------------------------------------
.. automodule:: pype.standalonepublish.widgets.model_filter_proxy_exact_match
:members:
:undoc-members:
:show-inheritance:
pype.standalonepublish.widgets.model\_filter\_proxy\_recursive\_sort module
---------------------------------------------------------------------------
.. automodule:: pype.standalonepublish.widgets.model_filter_proxy_recursive_sort
:members:
:undoc-members:
:show-inheritance:
pype.standalonepublish.widgets.model\_node module
-------------------------------------------------
.. automodule:: pype.standalonepublish.widgets.model_node
:members:
:undoc-members:
:show-inheritance:
pype.standalonepublish.widgets.model\_tasks\_template module
------------------------------------------------------------
.. automodule:: pype.standalonepublish.widgets.model_tasks_template
:members:
:undoc-members:
:show-inheritance:
pype.standalonepublish.widgets.model\_tree module
-------------------------------------------------
.. automodule:: pype.standalonepublish.widgets.model_tree
:members:
:undoc-members:
:show-inheritance:
pype.standalonepublish.widgets.model\_tree\_view\_deselectable module
---------------------------------------------------------------------
.. automodule:: pype.standalonepublish.widgets.model_tree_view_deselectable
:members:
:undoc-members:
:show-inheritance:
pype.standalonepublish.widgets.widget\_asset module
---------------------------------------------------
.. automodule:: pype.standalonepublish.widgets.widget_asset
:members:
:undoc-members:
:show-inheritance:
pype.standalonepublish.widgets.widget\_asset\_view module
---------------------------------------------------------
.. automodule:: pype.standalonepublish.widgets.widget_asset_view
:members:
:undoc-members:
:show-inheritance:
pype.standalonepublish.widgets.widget\_component\_item module
-------------------------------------------------------------
.. automodule:: pype.standalonepublish.widgets.widget_component_item
:members:
:undoc-members:
:show-inheritance:
pype.standalonepublish.widgets.widget\_components module
--------------------------------------------------------
.. automodule:: pype.standalonepublish.widgets.widget_components
:members:
:undoc-members:
:show-inheritance:
pype.standalonepublish.widgets.widget\_components\_list module
--------------------------------------------------------------
.. automodule:: pype.standalonepublish.widgets.widget_components_list
:members:
:undoc-members:
:show-inheritance:
pype.standalonepublish.widgets.widget\_drop\_empty module
---------------------------------------------------------
.. automodule:: pype.standalonepublish.widgets.widget_drop_empty
:members:
:undoc-members:
:show-inheritance:
pype.standalonepublish.widgets.widget\_drop\_frame module
---------------------------------------------------------
.. automodule:: pype.standalonepublish.widgets.widget_drop_frame
:members:
:undoc-members:
:show-inheritance:
pype.standalonepublish.widgets.widget\_family module
----------------------------------------------------
.. automodule:: pype.standalonepublish.widgets.widget_family
:members:
:undoc-members:
:show-inheritance:
pype.standalonepublish.widgets.widget\_family\_desc module
----------------------------------------------------------
.. automodule:: pype.standalonepublish.widgets.widget_family_desc
:members:
:undoc-members:
:show-inheritance:
pype.standalonepublish.widgets.widget\_shadow module
----------------------------------------------------
.. automodule:: pype.standalonepublish.widgets.widget_shadow
:members:
:undoc-members:
:show-inheritance:

@@ -0,0 +1,36 @@
pype.tools.assetcreator package
===============================
.. automodule:: pype.tools.assetcreator
:members:
:undoc-members:
:show-inheritance:
Submodules
----------
pype.tools.assetcreator.app module
----------------------------------
.. automodule:: pype.tools.assetcreator.app
:members:
:undoc-members:
:show-inheritance:
pype.tools.assetcreator.model module
------------------------------------
.. automodule:: pype.tools.assetcreator.model
:members:
:undoc-members:
:show-inheritance:
pype.tools.assetcreator.widget module
-------------------------------------
.. automodule:: pype.tools.assetcreator.widget
:members:
:undoc-members:
:show-inheritance:

@@ -0,0 +1,15 @@
pype.tools package
==================
.. automodule:: pype.tools
:members:
:undoc-members:
:show-inheritance:
Subpackages
-----------
.. toctree::
pype.tools.assetcreator

@@ -0,0 +1,36 @@
pype.widgets package
====================
.. automodule:: pype.widgets
:members:
:undoc-members:
:show-inheritance:
Submodules
----------
pype.widgets.message\_window module
-----------------------------------
.. automodule:: pype.widgets.message_window
:members:
:undoc-members:
:show-inheritance:
pype.widgets.popup module
-------------------------
.. automodule:: pype.widgets.popup
:members:
:undoc-members:
:show-inheritance:
pype.widgets.project\_settings module
-------------------------------------
.. automodule:: pype.widgets.project_settings
:members:
:undoc-members:
:show-inheritance:

1
docs/source/readme.rst Normal file
@@ -0,0 +1 @@
.. include:: ../../README.md

46
make_docs.bat Normal file
@@ -0,0 +1,46 @@
@echo off
echo ^>^>^> Generating pype-setup documentation, please wait ...
call "C:\Users\Public\pype_env2\Scripts\activate.bat"
setlocal enableextensions enabledelayedexpansion
set _OLD_PYTHONPATH=%PYTHONPATH%
echo ^>^>^> Adding repos path
rem add stuff in repos
call :ResolvePath repodir "..\"
for /d %%d in ( %repodir%*) do (
echo - adding path %%d
set PYTHONPATH=%%d;!PYTHONPATH!
)
echo ^>^>^> Adding python vendors path
rem add python vendor paths
call :ResolvePath vendordir "..\..\vendor\python\"
for /d %%d in ( %vendordir%*) do (
echo - adding path %%d
set PYTHONPATH=%%d;!PYTHONPATH!
)
echo ^>^>^> Setting PYPE_CONFIG
call :ResolvePath pypeconfig "..\pype-config"
set PYPE_CONFIG=%pypeconfig%
echo ^>^>^> Setting PYPE_ROOT
call :ResolvePath pyperoot "..\..\"
set PYPE_ROOT=%pyperoot%
set PYTHONPATH=%PYPE_ROOT%;%PYTHONPATH%
echo ^>^>^> Setting PYPE_ENV
set PYPE_ENV="C:\Users\Public\pype_env2"
call "docs\make.bat" clean
sphinx-apidoc -M -f -d 6 --ext-autodoc --ext-intersphinx --ext-viewcode -o docs\source pype %PYPE_ROOT%\repos\pype\pype\vendor\*
call "docs\make.bat" html
echo ^>^>^> Doing cleanup ...
set PYTHONPATH=%_OLD_PYTHONPATH%
set PYPE_CONFIG=
call "C:\Users\Public\pype_env2\Scripts\deactivate.bat"
exit /b
:ResolvePath
set %1=%~dpfn2
exit /b

@@ -1,7 +1,2 @@
from .lib import *
from .ftrack_server import *
from .ftrack_module import FtrackModule
def tray_init(tray_widget, main_widget):
return FtrackModule(main_widget, tray_widget)

@@ -106,7 +106,7 @@ class SyncToAvalon(BaseAction):
for entity in self.importable:
ftracklib.avalon_check_name(entity)
if entity['name'] in all_names:
duplicates.append("'{}'".format(e['name']))
duplicates.append("'{}'".format(entity['name']))
else:
all_names.append(entity['name'])

@@ -1,8 +1,7 @@
from .ftrack_server import FtrackServer
from . import event_server, event_server_cli
from . import event_server_cli
__all__ = [
'event_server',
'event_server_cli',
'FtrackServer'
]

@@ -1,41 +0,0 @@
import sys
from pype.ftrack import credentials, login_dialog as login_dialog
from pype.ftrack.ftrack_server import FtrackServer
from Qt import QtWidgets
from pype import api
log = api.Logger().get_logger(__name__, "ftrack-event-server")
class EventServer:
def __init__(self):
self.login_widget = login_dialog.Login_Dialog_ui(
parent=self, is_event=True
)
self.event_server = FtrackServer('event')
cred = credentials._get_credentials(True)
if 'username' in cred and 'apiKey' in cred:
self.login_widget.user_input.setText(cred['username'])
self.login_widget.api_input.setText(cred['apiKey'])
self.login_widget.setError("Credentials should be for API User")
self.login_widget.show()
def loginChange(self):
log.info("Logged successfully")
self.login_widget.close()
self.event_server.run_server()
def main():
app = QtWidgets.QApplication(sys.argv)
event = EventServer()
sys.exit(app.exec_())
if (__name__ == ('__main__')):
main()

@@ -1,114 +1,241 @@
import os
import sys
import argparse
import requests
from pype.vendor import ftrack_api
from pype.ftrack import credentials
from pype.ftrack.ftrack_server import FtrackServer
from pypeapp import Logger
log = Logger().get_logger(__name__, "ftrack-event-server-cli")
possible_yes = ['y', 'yes']
possible_no = ['n', 'no']
possible_third = ['a', 'auto']
possible_exit = ['exit']
log = Logger().get_logger('Ftrack event server', "ftrack-event-server-cli")
def ask_yes_no(third=False):
msg = "Y/N:"
if third:
msg = "Y/N/AUTO:"
log.info(msg)
response = input().lower()
if response in possible_exit:
sys.exit()
elif response in possible_yes:
return True
elif response in possible_no:
def check_url(url):
if not url:
log.error('Ftrack URL is not set!')
return None
url = url.strip('/ ')
if 'http' not in url:
if url.endswith('ftrackapp.com'):
url = 'https://' + url
else:
url = 'https://{0}.ftrackapp.com'.format(url)
try:
result = requests.get(url, allow_redirects=False)
except requests.exceptions.RequestException:
log.error('Entered Ftrack URL is not accessible!')
return None
if (result.status_code != 200 or 'FTRACK_VERSION' not in result.headers):
log.error('Entered Ftrack URL is not accessible!')
return None
log.debug('Ftrack server {} is accessible.'.format(url))
return url
def validate_credentials(url, user, api):
first_validation = True
if not user:
log.error('Ftrack Username is not set! Exiting.')
first_validation = False
if not api:
log.error('Ftrack API key is not set! Exiting.')
first_validation = False
if not first_validation:
return False
else:
all_entries = possible_no
all_entries.extend(possible_yes)
if third is True:
if response in possible_third:
return 'auto'
else:
all_entries.extend(possible_third)
all_entries.extend(possible_exit)
all_entries = ', '.join(all_entries)
log.info(
'Invalid input. Possible entries: [{}]. Try it again:'.format(
all_entries
)
try:
session = ftrack_api.Session(
server_url=url,
api_user=user,
api_key=api
)
return ask_yes_no()
session.close()
except Exception as e:
log.error(
'Can\'t log into Ftrack with used credentials:'
' Ftrack server: "{}" // Username: {} // API key: {}'.format(
url, user, api
))
return False
log.debug('Credentials Username: "{}", API key: "{}" are valid.'.format(
user, api
))
return True
def cli_login():
enter_cred = True
cred_data = credentials._get_credentials(True)
def process_event_paths(event_paths):
log.debug('Processing event paths: {}.'.format(str(event_paths)))
return_paths = []
not_found = []
if not event_paths:
return return_paths, not_found
user = cred_data.get('username', None)
key = cred_data.get('apiKey', None)
auto = cred_data.get('auto_connect', False)
if user is None or key is None:
log.info(
'Credentials are not set. Do you want to enter them now? (Y/N)'
)
if ask_yes_no() is False:
log.info("Exiting...")
return
elif credentials._check_credentials(user, key):
if auto is False:
log.info((
'Do you want to log with username {}'
' enter "auto" if want to autoconnect next time (Y/N/AUTO)'
).format(
user
))
result = ask_yes_no(True)
if result is True:
enter_cred = False
elif result == 'auto':
credentials._save_credentials(user, key, True, True)
enter_cred = False
if isinstance(event_paths, str):
event_paths = event_paths.split(os.pathsep)
for path in event_paths:
if os.path.exists(path):
return_paths.append(path)
else:
enter_cred = False
else:
log.info(
'Stored credentials are not valid.'
' Do you want enter them now?(Y/N)'
)
if ask_yes_no() is False:
log.info("Exiting...")
return
not_found.append(path)
while enter_cred:
log.info('Please enter Ftrack API User:')
user = input()
log.info('And now enter Ftrack API Key:')
key = input()
if credentials._check_credentials(user, key):
log.info(
'Credentials are valid.'
' Do you want to auto-connect next time?(Y/N)'
)
credentials._save_credentials(user, key, True, ask_yes_no())
enter_cred = False
break
else:
log.info(
'Entered credentials are not valid.'
' Do you want to try it again?(Y/N)'
)
if ask_yes_no() is False:
log.info('Exiting...')
return
return os.pathsep.join(return_paths), not_found
def run_event_server(ftrack_url, username, api_key, event_paths):
os.environ['FTRACK_SERVER'] = ftrack_url
os.environ['FTRACK_API_USER'] = username
os.environ['FTRACK_API_KEY'] = api_key
os.environ['FTRACK_EVENTS_PATH'] = event_paths
server = FtrackServer('event')
server.run_server()
def main(argv):
'''
There are 4 values necessary for the event server:
1.) Ftrack url - "studio.ftrackapp.com"
2.) Username - "my.username"
3.) API key - "apikey-long11223344-6665588-5565"
4.) Path/s to events - "X:/path/to/folder/with/events"
def main():
cli_login()
All these values can be entered with arguments or environment variables.
- arguments:
"-ftrackurl {url}"
"-ftrackuser {username}"
"-ftrackapikey {api key}"
"-ftrackeventpaths {path to events}"
- environment variables:
FTRACK_SERVER
FTRACK_API_USER
FTRACK_API_KEY
FTRACK_EVENTS_PATH
Credentials (Username & API key):
- Credentials can be stored for auto load on next start
- To *Store/Update* these values add argument "-storecred"
- They will be stored to appsdir file when login is successful
- To *Update/Override* values with environment variables you also need to:
- *don't enter argument for that value*
- add argument "-noloadcred" (currently stored credentials won't be loaded)
Order of getting values:
1.) Arguments are always used when entered.
- entered values through args have most priority! (in each case)
2.) Credentials are tried to load from appsdir file.
- skipped when credentials were entered through args or credentials
are not stored yet
- can be skipped with "-noloadcred" argument
3.) Environment variables are last source of values.
- will try to get not yet set values from environments
Best practice:
- set environment variables FTRACK_SERVER & FTRACK_EVENTS_PATH
- launch event_server_cli with args:
~/event_server_cli.py -ftrackuser "{username}" -ftrackapikey "{API key}" -storecred
- next time launch event_server_cli.py only with set environment variables
FTRACK_SERVER & FTRACK_EVENTS_PATH
'''
parser = argparse.ArgumentParser(description='Ftrack event server')
parser.add_argument(
"-ftrackurl", type=str, metavar='FTRACKURL',
help=(
"URL to ftrack server where events should handle"
" (default from environment: $FTRACK_SERVER)"
)
)
parser.add_argument(
"-ftrackuser", type=str,
help=(
"Username should be the username of the user in ftrack"
" to record operations against."
" (default from environment: $FTRACK_API_USER)"
)
)
parser.add_argument(
"-ftrackapikey", type=str,
help=(
"Should be the API key to use for authentication"
" (default from environment: $FTRACK_API_KEY)"
)
)
parser.add_argument(
"-ftrackeventpaths", nargs='+',
help=(
"List of paths where events are stored."
" (default from environment: $FTRACK_EVENTS_PATH)"
)
)
parser.add_argument(
'-storecred',
help=(
"Entered credentials will be also stored"
" to apps dir for future usage"
),
action="store_true"
)
parser.add_argument(
'-noloadcred',
help="Load creadentials from apps dir",
action="store_true"
)
ftrack_url = os.environ.get('FTRACK_SERVER')
username = os.environ.get('FTRACK_API_USER')
api_key = os.environ.get('FTRACK_API_KEY')
event_paths = os.environ.get('FTRACK_EVENTS_PATH')
kwargs, args = parser.parse_known_args(argv)
if kwargs.ftrackurl:
ftrack_url = kwargs.ftrackurl
if kwargs.ftrackeventpaths:
event_paths = kwargs.ftrackeventpaths
if not kwargs.noloadcred:
cred = credentials._get_credentials(True)
username = cred.get('username')
api_key = cred.get('apiKey')
if kwargs.ftrackuser:
username = kwargs.ftrackuser
if kwargs.ftrackapikey:
api_key = kwargs.ftrackapikey
# Check url regex and accessibility
ftrack_url = check_url(ftrack_url)
if not ftrack_url:
return 1
# Validate entered credentials
if not validate_credentials(ftrack_url, username, api_key):
return 1
# Process events path
event_paths, not_found = process_event_paths(event_paths)
if not_found:
log.warning(
'These paths were not found: {}'.format(str(not_found))
)
if not event_paths:
if not_found:
log.error('None of the entered paths are valid or accessible.')
else:
log.error('Paths to events are not set. Exiting.')
return 1
if kwargs.storecred:
credentials._save_credentials(username, api_key, True)
run_event_server(ftrack_url, username, api_key, event_paths)
if (__name__ == ('__main__')):
main()
sys.exit(main(sys.argv))

@@ -1,4 +1,5 @@
from .avalon_sync import *
from .credentials import *
from .ftrack_app_handler import *
from .ftrack_event_handler import *
from .ftrack_action_handler import *

@@ -64,6 +64,10 @@ def _clear_credentials(event=False):
def _set_env(username, apiKey):
if not username:
username = ''
if not apiKey:
apiKey = ''
os.environ['FTRACK_API_USER'] = username
os.environ['FTRACK_API_KEY'] = apiKey

@@ -340,7 +340,9 @@ class AppAction(BaseHandler):
if next_status_name is not None:
try:
query = 'Status where name is "{}"'.format(next_status_name)
query = 'Status where name is "{}"'.format(
next_status_name
)
status = session.query(query).one()
entity['status'] = status
session.commit()

@@ -1,6 +1,6 @@
import functools
import time
from pype import api as pype
from pypeapp import Logger
from pype.vendor import ftrack_api
from pype.vendor.ftrack_api import session as fa_session
@@ -31,7 +31,7 @@ class BaseHandler(object):
def __init__(self, session):
'''Expects a ftrack_api.Session instance'''
self._session = session
self.log = pype.Logger().get_logger(self.__class__.__name__)
self.log = Logger().get_logger(self.__class__.__name__)
# Using decorator
self.register = self.register_decorator(self.register)

@@ -0,0 +1,5 @@
from .ftrack_module import FtrackModule
def tray_init(tray_widget, main_widget):
return FtrackModule(main_widget, tray_widget)

@@ -6,7 +6,8 @@ from Qt import QtCore, QtGui, QtWidgets
from pype.vendor import ftrack_api
from pypeapp import style
from pype.ftrack import FtrackServer, credentials, login_dialog as login_dialog
from pype.ftrack import FtrackServer, credentials
from . import login_dialog
from pype import api as pype
@@ -16,7 +17,6 @@ log = pype.Logger().get_logger("FtrackModule", "ftrack")
class FtrackModule:
def __init__(self, main_parent=None, parent=None):
self.parent = parent
self.widget_login = login_dialog.Login_Dialog_ui(self)
self.action_server = FtrackServer('action')

@@ -1,8 +1,9 @@
import os
import requests
from Qt import QtCore, QtGui, QtWidgets
from pypeapp import style
from . import credentials, login_tools
from pype.ftrack import credentials
from . import login_tools
from Qt import QtCore, QtGui, QtWidgets
class Login_Dialog_ui(QtWidgets.QWidget):

@@ -23,11 +23,19 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
project = os.environ.get('AVALON_PROJECT', '')
asset = os.environ.get('AVALON_ASSET', '')
task = os.environ.get('AVALON_TASK', '')
task = os.environ.get('AVALON_TASK', None)
self.log.debug(task)
result = session.query('Task where\
project.full_name is "{0}" and\
name is "{1}" and\
parent.name is "{2}"'.format(project, task, asset)).one()
if task:
result = session.query('Task where\
project.full_name is "{0}" and\
name is "{1}" and\
parent.name is "{2}"'.format(project, task, asset)).one()
context.data["ftrackTask"] = result
else:
result = session.query('TypedContext where\
project.full_name is "{0}" and\
name is "{1}"'.format(project, asset)).one()
context.data["ftrackEntity"] = result
context.data["ftrackTask"] = result
self.log.info(result)

@@ -139,7 +139,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
}
if task:
assetversion_data['task'] = task
assetversion_data.update(data.get("assetversion_data", {}))
assetversion_entity = session.query(

@@ -31,60 +31,50 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
def process(self, instance):
self.log.debug('instance {}'.format(instance))
assumed_data = instance.data["assumedTemplateData"]
assumed_version = assumed_data["version"]
version_number = int(assumed_version)
if instance.data.get('version'):
version_number = int(instance.data.get('version'))
family = instance.data['family'].lower()
asset_type = ''
asset_type = ''
asset_type = self.family_mapping[family]
componentList = []
dst_list = instance.data['destination_list']
ft_session = instance.context.data["ftrackSession"]
for file in instance.data['destination_list']:
self.log.debug('file {}'.format(file))
for comp in instance.data['representations']:
self.log.debug('component {}'.format(comp))
for file in dst_list:
filename, ext = os.path.splitext(file)
self.log.debug('dest ext: ' + ext)
thumbnail = False
if ext in ['.mov']:
if not instance.data.get('startFrameReview'):
instance.data['startFrameReview'] = instance.data['startFrame']
if not instance.data.get('endFrameReview'):
instance.data['endFrameReview'] = instance.data['endFrame']
if comp.get('thumbnail'):
location = ft_session.query(
'Location where name is "ftrack.server"').one()
component_data = {
"name": "thumbnail" # Default component name is "main".
}
elif comp.get('preview'):
if not comp.get('startFrameReview'):
comp['startFrameReview'] = comp['startFrame']
if not comp.get('endFrameReview'):
comp['endFrameReview'] = comp['endFrame']
location = ft_session.query(
'Location where name is "ftrack.server"').one()
component_data = {
# Default component name is "main".
"name": "ftrackreview-mp4",
"metadata": {'ftr_meta': json.dumps({
'frameIn': int(instance.data['startFrameReview']),
'frameOut': int(instance.data['startFrameReview']),
'frameRate': 25})}
'frameIn': int(comp['startFrameReview']),
'frameOut': int(comp['endFrameReview']),
'frameRate': comp['frameRate']})}
}
elif ext in [".jpg", ".jpeg"]:
component_data = {
"name": "thumbnail" # Default component name is "main".
}
thumbnail = True
location = ft_session.query(
'Location where name is "ftrack.server"').one()
comp['thumbnail'] = False
else:
component_data = {
"name": ext[1:] # Default component name is "main".
"name": comp['name']
}
location = ft_session.query(
'Location where name is "ftrack.unmanaged"').one()
comp['thumbnail'] = False
self.log.debug('location {}'.format(location))
@@ -98,10 +88,10 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
"version": version_number,
},
"component_data": component_data,
"component_path": file,
"component_path": comp['published_path'],
'component_location': location,
"component_overwrite": False,
"thumbnail": thumbnail
"thumbnail": comp['thumbnail']
}
)

@@ -1,85 +0,0 @@
import pyblish.api
import os
import clique
import json
class IntegrateFtrackReview(pyblish.api.InstancePlugin):
"""Collect ftrack component data
Add ftrack component list to instance.
"""
order = pyblish.api.IntegratorOrder + 0.48
label = 'Integrate Ftrack Review'
families = ['review', 'ftrack']
family_mapping = {'review': 'mov'
}
def process(self, instance):
self.log.debug('instance {}'.format(instance))
#
# assumed_data = instance.data["assumedTemplateData"]
# assumed_version = assumed_data["version"]
# version_number = int(assumed_version)
# family = instance.data['family'].lower()
# asset_type = ''
#
# asset_type = self.family_mapping[family]
#
# componentList = []
#
# dst_list = instance.data['destination_list']
#
# ft_session = instance.context.data["ftrackSession"]
#
#
# for file in instance.data['destination_list']:
# self.log.debug('file {}'.format(file))
#
# for file in dst_list:
# filename, ext = os.path.splitext(file)
# self.log.debug('dest ext: ' + ext)
#
# if ext == '.mov':
# component_name = "ftrackreview-mp4"
# metadata = {'ftr_meta': json.dumps({
# 'frameIn': int(instance.data["startFrame"]),
# 'frameOut': int(instance.data["startFrame"]),
# 'frameRate': 25})}
# thumbnail = False
#
# else:
# component_name = "thumbnail"
# thumbnail = True
#
# location = ft_session.query(
# 'Location where name is "ftrack.server"').one()
#
# componentList.append({"assettype_data": {
# "short": asset_type,
# },
# "asset_data": {
# "name": instance.data["subset"],
# },
# "assetversion_data": {
# "version": version_number,
# },
# "component_data": {
# "name": component_name, # Default component name is "main".
# "metadata": metadata
# },
# "component_path": file,
# 'component_location': location,
# "component_overwrite": False,
# "thumbnail": thumbnail
# }
# )
#
#
# self.log.debug('componentsList: {}'.format(str(componentList)))
# instance.data["ftrackComponentsList"] = componentList

@@ -1,6 +1,7 @@
import os
import logging
import shutil
import clique
import errno
import pyblish.api
@@ -24,25 +25,10 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
label = "Integrate Asset"
order = pyblish.api.IntegratorOrder
families = ["animation",
"camera",
"look",
"mayaAscii",
"model",
"pointcache",
"vdbcache",
"setdress",
families = ["look",
"assembly",
"layout",
"rig",
"vrayproxy",
"yetiRig",
"yeticache",
"nukescript",
"review",
"workfile",
"scene",
"ass"]
"yeticache"]
exclude_families = ["clip"]
def process(self, instance):
@@ -53,7 +39,9 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
self.register(instance)
self.log.info("Integrating Asset in to the database ...")
self.integrate(instance)
if instance.data.get('transfer', True):
self.integrate(instance)
def register(self, instance):
# Required environment variables

@@ -1,14 +1,10 @@
import os
import pyblish.api
from avalon import (
io,
api as avalon
)
from avalon import io
import json
import logging
import clique
log = logging.getLogger("collector")
@@ -16,16 +12,11 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
"""
Collecting temp json data sent from a host context
and the path for returning json data back to the host itself.
Setting avalon session into correct context
Args:
context (obj): pyblish context session
"""
label = "Collect Context - SA Publish"
order = pyblish.api.CollectorOrder - 0.49
hosts = ["shell"]
def process(self, context):
# get json paths from os and load them
@@ -33,13 +24,12 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
input_json_path = os.environ.get("SAPUBLISH_INPATH")
output_json_path = os.environ.get("SAPUBLISH_OUTPATH")
context.data["stagingDir"] = os.path.dirname(input_json_path)
# context.data["stagingDir"] = os.path.dirname(input_json_path)
context.data["returnJsonPath"] = output_json_path
with open(input_json_path, "r") as f:
in_data = json.load(f)
project_name = in_data['project']
asset_name = in_data['asset']
family = in_data['family']
subset = in_data['subset']
@@ -63,25 +53,24 @@
"families": [family, 'ftrack'],
})
self.log.info("collected instance: {}".format(instance.data))
self.log.info("parsing data: {}".format(in_data))
instance.data["files"] = list()
instance.data['destination_list'] = list()
instance.data['representations'] = list()
instance.data['source'] = 'standalone publisher'
for component in in_data['representations']:
# instance.add(node)
component['destination'] = component['files']
collections, remainder = clique.assemble(component['files'])
if collections:
self.log.debug(collections)
instance.data['startFrame'] = component['startFrame']
instance.data['endFrame'] = component['endFrame']
instance.data['frameRate'] = component['frameRate']
instance.data["files"].append(component)
component['destination'] = component['files']
component['stagingDir'] = component['stagingDir']
component['anatomy_template'] = 'render'
if isinstance(component['files'], list):
collections, remainder = clique.assemble(component['files'])
self.log.debug("collecting sequence: {}".format(collections))
instance.data['startFrame'] = int(component['startFrame'])
instance.data['endFrame'] = int(component['endFrame'])
instance.data['frameRate'] = int(component['frameRate'])
instance.data["representations"].append(component)
# "is_thumbnail": component['thumbnail'],
# "is_preview": component['preview']
self.log.info(in_data)

@@ -179,7 +179,6 @@ class CollectFileSequences(pyblish.api.ContextPlugin):
"subset": subset,
"asset": data.get("asset", api.Session["AVALON_ASSET"]),
"stagingDir": root,
"files": [list(collection)],
"startFrame": start,
"endFrame": end,
"fps": fps,
@@ -187,6 +186,14 @@
})
instance.append(collection)
representation = {
'name': 'jpg',
'ext': '.jpg',
'files': [list(collection)],
"stagingDir": root,
}
instance.data["representations"] = [representation]
if data.get('user'):
context.data["user"] = data['user']

@@ -62,6 +62,13 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
sub_proc = subprocess.Popen(subprocess_jpeg)
sub_proc.wait()
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(jpegFile)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'jpg',
'ext': '.jpg',
'files': jpegFile,
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)

@@ -70,6 +70,13 @@ class ExtractQuicktimeEXR(pyblish.api.InstancePlugin):
sub_proc = subprocess.Popen(subprocess_mov)
sub_proc.wait()
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(movFile)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'mov',
'ext': '.mov',
'files': movFile,
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)

@@ -51,6 +51,15 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"setdress",
"layout",
"ass",
"vdbcache",
"scene",
"vrayproxy",
"render",
"imagesequence",
"review",
"nukescript",
"render",
"write",
"plates"
]
exclude_families = ["clip"]
@@ -153,10 +162,10 @@
version_data = self.create_version_data(context, instance)
repr_data = repres[-1].get('data')
version_data_instance = instance.data.get('versionData')
if repr_data:
version_data.update(repr_data)
if version_data_instance:
version_data.update(version_data_instance)
version = self.create_version(subset=subset,
version_number=next_version,
@@ -164,9 +173,7 @@
data=version_data)
self.log.debug("Creating version ...")
version_id = io.insert_one(version).inserted_id
instance.data['version'] = version['name']
# Write to disk
@@ -230,10 +237,11 @@
template = os.path.normpath(
anatomy.templates[template_name]["path"])
if isinstance(files, list):
src_collections, remainder = clique.assemble(files)
self.log.debug(
"dst_collections: {}".format(str(src_collections)))
"src_collections: {}".format(str(src_collections)))
src_collection = src_collections[0]
# Assert that each member has identical suffix
src_head = src_collection.format("{head}")
@@ -242,16 +250,15 @@
test_dest_files = list()
for i in [1, 2]:
template_data["representation"] = repre['ext']
template_data["frame"] = src_collection.format(
"{padding}") % i
anatomy_filled = anatomy.format(template_data)
test_dest_files.append(
os.path.normpath(
anatomy_filled[template_name]["path"])
)
self.log.debug(
"test_dest_files: {}".format(str(test_dest_files)))
dst_collections, remainder = clique.assemble(test_dest_files)
dst_collection = dst_collections[0]
@@ -267,11 +274,15 @@
dst_padding = dst_collection.format("{padding}") % i
dst = "{0}{1}{2}".format(dst_head, dst_padding, dst_tail)
src = os.path.join(stagingdir, src_file_name)
self.log.debug("source: {}".format(src))
instance.data["transfers"].append([src, dst])
hashes = '#' * len(dst_padding)
dst = "{0}{1}{2}".format(dst_head, hashes, dst_tail)
# for imagesequence version data
hashes = '#' * len(dst_padding)
dst = "{0}{1}{2}".format(dst_head, hashes, dst_tail)
else:
# Single file
@@ -297,17 +308,14 @@
dst = anatomy_filled[template_name]["path"]
instance.data["transfers"].append([src, dst])
# template = anatomy.templates["publish"]["path"]
repres[idx]['published_path'] = dst
data = {'path': dst, 'template': template}
representation = {
"schema": "pype:representation-2.0",
"type": "representation",
"parent": version_id,
"name": repre['name'],
"data": data,
"data": {'path': dst, 'template': template},
"dependencies": instance.data.get("dependencies", "").split(),
# Imprint shortcut to context
@ -335,6 +343,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
self.log.debug("Representation: {}".format(representations))
self.log.info("Registered {} items".format(len(representations)))
def integrate(self, instance):
"""Move the files
@ -379,6 +388,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
else:
self.log.critical("An unexpected error occurred.")
raise
shutil.copy(src, dst)
def hardlink_file(self, src, dst):
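
For reference, this is roughly how the `clique` calls in the collection branch above behave; file names and frame numbers here are made up.

```python
import clique

files = ["render.0998.exr", "render.0999.exr", "render.1000.exr"]
collections, remainder = clique.assemble(files)
col = collections[0]

head = col.format("{head}")         # "render."
tail = col.format("{tail}")         # ".exr"
padding = col.format("{padding}")   # "%04d"
print(head + padding % 998 + tail)  # render.0998.exr

# the '#'-padded path stored for image-sequence version data
hashes = "#" * len(padding % 998)
print(head + hashes + tail)         # render.####.exr
```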

View file

@ -24,7 +24,7 @@ class IntegrateFrames(pyblish.api.InstancePlugin):
label = "Integrate Frames"
order = pyblish.api.IntegratorOrder
families = ["imagesequence", "render", "write", "source"]
families = ["imagesequence", "source"]
family_targets = [".frames", ".local", ".review", "imagesequence", "render", "source"]
exclude_families = ["clip"]

View file

@ -20,7 +20,7 @@ class ExtractAlembic(pype.api.Extractor):
# Get the filename from the filename parameter
output = ropnode.evalParm("filename")
staging_dir = os.path.dirname(output)
instance.data["stagingDir"] = staging_dir
# instance.data["stagingDir"] = staging_dir
file_name = os.path.basename(output)
@ -37,7 +37,13 @@ class ExtractAlembic(pype.api.Extractor):
traceback.print_exc()
raise RuntimeError("Render failed: {0}".format(exc))
if "files" not in instance.data:
instance.data["files"] = []
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["files"].append(file_name)
representation = {
'name': 'abc',
'ext': '.abc',
'files': file_name,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)

View file

@ -35,9 +35,15 @@ class ExtractVDBCache(pype.api.Extractor):
traceback.print_exc()
raise RuntimeError("Render failed: {0}".format(exc))
if "files" not in instance.data:
instance.data["files"] = []
output = instance.data["frames"]
instance.data["files"].append(output)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'vdb',
'ext': '.vdb',
'files': output,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)

View file

@ -428,6 +428,8 @@ class CollectLook(pyblish.api.InstancePlugin):
computed_attribute = attribute
source = cmds.getAttr(attribute)
color_space_attr = "{}.colorSpace".format(node)
color_space = cmds.getAttr(color_space_attr)
# Compare with the computed file path, e.g. the one with the <UDIM>
# pattern in it, to generate some logging information about this
# difference
@ -453,4 +455,5 @@ class CollectLook(pyblish.api.InstancePlugin):
return {"node": node,
"attribute": attribute,
"source": source, # required for resources
"files": files} # required for resources
"files": files,
"color_space": color_space} # required for resources

View file

@ -37,16 +37,19 @@ class CollectMayaScene(pyblish.api.ContextPlugin):
"label": subset,
"publish": False,
"family": 'workfile',
"representation": "ma",
"setMembers": [current_file],
"stagingDir": folder
"setMembers": [current_file]
})
data['files'] = [file]
data['representations'] = [{
'name': 'ma',
'ext': '.ma',
'files': file,
"stagingDir": folder,
}]
instance.data.update(data)
self.log.info('Collected instance: {}'.format(file))
self.log.info('Scene path: {}'.format(current_file))
self.log.info('stagin Dir: {}'.format(folder))
self.log.info('subset: {}'.format(filename))
self.log.info('staging Dir: {}'.format(folder))
self.log.info('subset: {}'.format(subset))

View file

@ -77,9 +77,15 @@ class ExtractAnimation(pype.api.Extractor):
endFrame=end,
**options)
if "files" not in instance.data:
instance.data["files"] = list()
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["files"].append(filename)
representation = {
'name': 'abc',
'ext': '.abc',
'files': filename,
"stagingDir": dirname,
}
instance.data["representations"].append(representation)
self.log.info("Extracted {} to {}".format(instance, dirname))

View file

@ -21,8 +21,8 @@ class ExtractAssStandin(pype.api.Extractor):
def process(self, instance):
staging_dir = self.staging_dir(instance)
file_name = "{}.ass".format(instance.name)
file_path = os.path.join(staging_dir, file_name)
filename = "{}.ass".format(instance.name)
file_path = os.path.join(staging_dir, filename)
# Write out .ass file
self.log.info("Writing: '%s'" % file_path)
@ -37,11 +37,16 @@ class ExtractAssStandin(pype.api.Extractor):
boundingBox=True
)
if "representations" not in instance.data:
instance.data["representations"] = []
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(file_name)
representation = {
'name': 'ass',
'ext': '.ass',
'files': filename,
"stagingDir": staging_dir
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s"
% (instance.name, staging_dir))

View file

@ -33,7 +33,6 @@ class ExtractAssProxy(pype.api.Extractor):
else:
yield
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = "{0}.ma".format(instance.name)
@ -64,10 +63,15 @@ class ExtractAssProxy(pype.api.Extractor):
expressions=False,
constructionHistory=False)
if "representations" not in instance.data:
instance.data["representations"] = []
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(filename)
representation = {
'name': 'ma',
'ext': '.ma',
'files': filename,
"stagingDir": stagingdir
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))

View file

@ -70,10 +70,16 @@ class ExtractCameraAlembic(pype.api.Extractor):
with avalon.maya.suspended_refresh():
cmds.AbcExport(j=job_str, verbose=False)
if "files" not in instance.data:
instance.data["files"] = list()
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["files"].append(filename)
representation = {
'name': 'abc',
'ext': '.abc',
'files': filename,
"stagingDir": dir_path,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '{0}' to: {1}".format(
instance.name, path))

View file

@ -168,10 +168,16 @@ class ExtractCameraMayaAscii(pype.api.Extractor):
massage_ma_file(path)
if "files" not in instance.data:
instance.data["files"] = list()
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["files"].append(filename)
representation = {
'name': 'ma',
'ext': '.ma',
'files': filename,
"stagingDir": dir_path,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '{0}' to: {1}".format(
instance.name, path))

View file

@ -146,9 +146,9 @@ class ExtractFBX(pype.api.Extractor):
cmds.loadPlugin("fbxmaya", quiet=True)
# Define output path
directory = self.staging_dir(instance)
stagingDir = self.staging_dir(instance)
filename = "{0}.fbx".format(instance.name)
path = os.path.join(directory, filename)
path = os.path.join(stagingDir, filename)
# The export requires forward slashes because we need
# to format it into a string in a mel expression
@ -208,9 +208,16 @@ class ExtractFBX(pype.api.Extractor):
cmds.select(members, r=1, noExpand=True)
mel.eval('FBXExport -f "{}" -s'.format(path))
if "files" not in instance.data:
instance.data["files"] = list()
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'fbx',
'ext': '.fbx',
'files': filename,
"stagingDir": stagingDir,
}
instance.data["representations"].append(representation)
instance.data["files"].append(filename)
self.log.info("Extract FBX successful to: {0}".format(path))

View file

@ -63,14 +63,18 @@ def maketx(source, destination, *args):
"""
cmd = [
"maketx",
"-v", # verbose
"-u", # update mode
# unpremultiply before conversion (recommended when alpha present)
"--unpremult",
# use oiio-optimized settings for tile-size, planarconfig, metadata
"--oiio"
]
"maketx",
"-v", # verbose
"-u", # update mode
# unpremultiply before conversion (recommended when alpha present)
"--unpremult",
"--checknan",
# use oiio-optimized settings for tile-size, planarconfig, metadata
"--oiio",
"--colorconvert sRGB linear",
"--filter lanczos3"
]
cmd.extend(args)
cmd.extend([
"-o", destination,
@ -85,8 +89,7 @@ def maketx(source, destination, *args):
creationflags=CREATE_NO_WINDOW
)
except subprocess.CalledProcessError as exc:
print exc
print out
print(exc)
import traceback
traceback.print_exc()
raise
@ -169,16 +172,33 @@ class ExtractLook(pype.api.Extractor):
# Collect all unique files used in the resources
files = set()
files_metadata = dict()
for resource in resources:
files.update(os.path.normpath(f) for f in resource["files"])
# Preserve color space values (force value after filepath change)
# This will also trigger in the same order at end of context to
# ensure after context it's still the original value.
color_space = resource.get('color_space')
for f in resource["files"]:
files_metadata[os.path.normpath(f)] = {'color_space': color_space}
# files.update(os.path.normpath(f))
# Process the resource files
transfers = list()
hardlinks = list()
hashes = dict()
for filepath in files:
self.log.info(files)
for filepath in files_metadata:
cspace = files_metadata[filepath]['color_space']
linearise = False
if cspace == 'sRGB':
linearise = True
source, mode, hash = self._process_texture(
filepath, do_maketx, staging=dir_path
filepath, do_maketx, staging=dir_path, linearise=linearise
)
destination = self.resource_destination(
instance, source, do_maketx
@ -204,15 +224,17 @@ class ExtractLook(pype.api.Extractor):
instance, source, do_maketx
)
# Remap file node filename to destination
attr = resource['attribute']
remap[attr] = destinations[source]
# Preserve color space values (force value after filepath change)
# This will also trigger in the same order at end of context to
# ensure after context it's still the original value.
color_space_attr = resource['node'] + ".colorSpace"
remap[color_space_attr] = cmds.getAttr(color_space_attr)
color_space = cmds.getAttr(color_space_attr)
# Remap file node filename to destination
attr = resource['attribute']
remap[attr] = destinations[source]
remap[color_space_attr] = color_space
self.log.info("Finished remapping destinations ...")
@ -285,7 +307,7 @@ class ExtractLook(pype.api.Extractor):
basename + ext
)
def _process_texture(self, filepath, do_maketx, staging):
def _process_texture(self, filepath, do_maketx, staging, linearise):
"""Process a single texture file on disk for publishing.
This will:
1. Check whether it's already published, if so it will do hardlink
@ -326,6 +348,11 @@ class ExtractLook(pype.api.Extractor):
fname + ".tx"
)
if linearise:
# keep the flag and its values as separate argv tokens for subprocess
colorconvert = ["--colorconvert", "sRGB", "linear"]
else:
colorconvert = []
# Ensure folder exists
if not os.path.exists(os.path.dirname(converted)):
os.makedirs(os.path.dirname(converted))
@ -333,7 +360,7 @@ class ExtractLook(pype.api.Extractor):
self.log.info("Generating .tx file for %s .." % filepath)
maketx(filepath, converted,
# Include `source-hash` as string metadata
"-sattrib", "sourceHash", texture_hash)
"-sattrib", "sourceHash", texture_hash, colorconvert)
return converted, COPY, texture_hash
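
A minimal sketch of the hash-then-reuse behaviour the `_process_texture` docstring describes; `source_hash` is a stand-in here, as the real pipeline computes its own texture hash.

```python
import hashlib
import os

COPY = "copy"
HARDLINK = "hardlink"

def source_hash(filepath):
    # stand-in: hash the file contents
    with open(filepath, "rb") as f:
        return hashlib.sha1(f.read()).hexdigest()

def process_texture(filepath, published_by_hash):
    texture_hash = source_hash(filepath)
    existing = published_by_hash.get(texture_hash)
    if existing and os.path.exists(existing):
        # already published once: hardlink instead of copying again
        return existing, HARDLINK, texture_hash
    return filepath, COPY, texture_hash
```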

View file

@ -51,9 +51,15 @@ class ExtractMayaAsciiRaw(pype.api.Extractor):
constraints=True,
expressions=True)
if "files" not in instance.data:
instance.data["files"] = list()
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["files"].append(filename)
representation = {
'name': 'ma',
'ext': '.ma',
'files': filename,
"stagingDir": dir_path
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))

View file

@ -69,9 +69,15 @@ class ExtractModel(pype.api.Extractor):
# Store reference for integration
if "files" not in instance.data:
instance.data["files"] = list()
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["files"].append(filename)
representation = {
'name': 'ma',
'ext': '.ma',
'files': filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))

View file

@ -79,9 +79,15 @@ class ExtractAlembic(pype.api.Extractor):
endFrame=end,
**options)
if "files" not in instance.data:
instance.data["files"] = list()
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["files"].append(filename)
representation = {
'name': 'abc',
'ext': '.abc',
'files': filename,
"stagingDir": dirname
}
instance.data["representations"].append(representation)
self.log.info("Extracted {} to {}".format(instance, dirname))

View file

@ -123,9 +123,21 @@ class ExtractQuicktime(pype.api.Extractor):
self.log.error(ffmpeg_error)
raise RuntimeError(ffmpeg_error)
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(movieFile)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'mov',
'ext': '.mov',
'files': movieFile,
"stagingDir": stagingdir,
'startFrame': start,
'endFrame': end,
'frameRate': fps,
'preview': True
}
instance.data["representations"].append(representation)
@contextlib.contextmanager

View file

@ -34,9 +34,16 @@ class ExtractRig(pype.api.Extractor):
expressions=True,
constructionHistory=True)
if "files" not in instance.data:
instance.data["files"] = list()
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'ma',
'ext': '.ma',
'files': filename,
"stagingDir": dir_path
}
instance.data["representations"].append(representation)
instance.data["files"].append(filename)
self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))

View file

@ -110,9 +110,9 @@ class ExtractThumbnail(pype.api.Extractor):
"depthOfField": cmds.getAttr("{0}.depthOfField".format(camera)),
}
stagingdir = self.staging_dir(instance)
stagingDir = self.staging_dir(instance)
filename = "{0}".format(instance.name)
path = os.path.join(stagingdir, filename)
path = os.path.join(stagingDir, filename)
self.log.info("Outputting images to %s" % path)
@ -131,51 +131,19 @@ class ExtractThumbnail(pype.api.Extractor):
_, thumbnail = os.path.split(playblast)
self.log.info("file list {}".format(thumbnail))
# self.log.info("Calculating HUD data overlay")
# stagingdir = "C:/Users/milan.kolar/AppData/Local/Temp/pyblish_tmp_ucsymm"
# collected_frames = os.listdir(stagingdir)
# collections, remainder = clique.assemble(collected_frames)
# input_path = os.path.join(stagingdir, collections[0].format('{head}{padding}{tail}'))
# self.log.info("input {}".format(input_path))
if "representations" not in instance.data:
instance.data["representations"] = []
# movieFile = filename + ".mov"
# full_movie_path = os.path.join(stagingdir, movieFile)
# self.log.info("output {}".format(full_movie_path))
# fls = [os.path.join(stagingdir, filename).replace("\\","/") for f in os.listdir( dir_path ) if f.endswith(preset['compression'])]
# self.log.info("file list {}}".format(fls[0]))
representation = {
'name': 'thumbnail',
'ext': '.jpg',
'files': thumbnail,
"stagingDir": stagingDir,
"thumbnail": True
}
instance.data["representations"].append(representation)
# out, err = (
# ffmpeg
# .input(input_path, framerate=25)
# .output(full_movie_path)
# .run(overwrite_output=True)
# )
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(thumbnail)
# ftrackStrings = fStrings.annotationData()
# nData = ftrackStrings.niceData
# nData['version'] = instance.context.data('version')
# fFrame = int(pm.playbackOptions( q = True, minTime = True))
# eFrame = int(pm.playbackOptions( q = True, maxTime = True))
# nData['frame'] = [(str("{0:05d}".format(f))) for f in range(fFrame, eFrame + 1)]
# soundOfst = int(float(nData['oFStart'])) - int(float(nData['handle'])) - fFrame
# soundFile = mu.giveMePublishedAudio()
# self.log.info("SOUND offset %s" % str(soundOfst))
# self.log.info("SOUND source video to %s" % str(soundFile))
# ann = dHUD.draftAnnotate()
# if soundFile:
# ann.addAnotation(seqFls = fls, outputMoviePth = movieFullPth, annotateDataArr = nData, soundFile = soundFile, soundOffset = soundOfst)
# else:
# ann.addAnotation(seqFls = fls, outputMoviePth = movieFullPth, annotateDataArr = nData)
# for f in fls:
# os.remove(f)
# playblast = (ann.expPth).replace("\\","/")
@contextlib.contextmanager

View file

@ -54,10 +54,16 @@ class ExtractVRayProxy(pype.api.Extractor):
ignoreHiddenObjects=True,
createProxyNode=False)
if "files" not in instance.data:
instance.data["files"] = list()
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["files"].append(file_name)
representation = {
'name': 'vrmesh',
'ext': '.vrmesh',
'files': file_name,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s"
% (instance.name, staging_dir))

View file

@ -1,228 +1,226 @@
from maya import cmds
import pyblish.api
import pype.api
import pype.maya.action
import math
import maya.api.OpenMaya as om
from pymel.core import polyUVSet
import pymel.core as pm
class GetOverlappingUVs(object):
def _createBoundingCircle(self, meshfn):
""" Represent a face by center and radius
def _createBoundingCircle(self, meshfn):
""" Represent a face by center and radius
:param meshfn: MFnMesh class
:type meshfn: :class:`maya.api.OpenMaya.MFnMesh`
:returns: (center, radius)
:rtype: tuple
"""
center = []
radius = []
for i in xrange(meshfn.numPolygons): # noqa: F821
# get uvs from face
uarray = []
varray = []
for j in range(len(meshfn.getPolygonVertices(i))):
uv = meshfn.getPolygonUV(i, j)
uarray.append(uv[0])
varray.append(uv[1])
# loop through all vertices to construct edges/rays
cu = 0.0
cv = 0.0
for j in range(len(uarray)):
cu += uarray[j]
cv += varray[j]
cu /= len(uarray)
cv /= len(varray)
rsqr = 0.0
for j in range(len(varray)):
du = uarray[j] - cu
dv = varray[j] - cv
dsqr = du * du + dv * dv
rsqr = dsqr if dsqr > rsqr else rsqr
center.append(cu)
center.append(cv)
radius.append(math.sqrt(rsqr))
return center, radius
def _createRayGivenFace(self, meshfn, faceId):
""" Represent a face by a series of edges(rays), i.e.
:param meshfn: MFnMesh class
:type meshfn: :class:`maya.api.OpenMaya.MFnMesh`
:param faceId: face id
:type faceId: int
:returns: ``(True, orig, vec)``, or ``(False, None, None)``
if the face has no valid UVs
:rtype: tuple
.. code-block:: python
orig = [orig1u, orig1v, orig2u, orig2v, ... ]
vec = [vec1u, vec1v, vec2u, vec2v, ... ]
"""
orig = []
vec = []
# get uvs
:param meshfn: MFnMesh class
:type meshfn: :class:`maya.api.OpenMaya.MFnMesh`
:returns: (center, radius)
:rtype: tuple
"""
center = []
radius = []
for i in xrange(meshfn.numPolygons): # noqa: F821
# get uvs from face
uarray = []
varray = []
for i in range(len(meshfn.getPolygonVertices(faceId))):
uv = meshfn.getPolygonUV(faceId, i)
for j in range(len(meshfn.getPolygonVertices(i))):
uv = meshfn.getPolygonUV(i, j)
uarray.append(uv[0])
varray.append(uv[1])
if len(uarray) == 0 or len(varray) == 0:
return (False, None, None)
# loop through all vertices to construct edges/rays
cu = 0.0
cv = 0.0
for j in range(len(uarray)):
cu += uarray[j]
cv += varray[j]
# loop through all vertices to construct edges/rays
u = uarray[-1]
v = varray[-1]
for i in xrange(len(uarray)): # noqa: F821
orig.append(uarray[i])
orig.append(varray[i])
vec.append(u - uarray[i])
vec.append(v - varray[i])
u = uarray[i]
v = varray[i]
cu /= len(uarray)
cv /= len(varray)
rsqr = 0.0
for j in range(len(varray)):
du = uarray[j] - cu
dv = varray[j] - cv
dsqr = du * du + dv * dv
rsqr = dsqr if dsqr > rsqr else rsqr
return (True, orig, vec)
center.append(cu)
center.append(cv)
radius.append(math.sqrt(rsqr))
def _checkCrossingEdges(self,
face1Orig,
face1Vec,
face2Orig,
face2Vec):
""" Check if there are crossing edges between two faces.
Return True if there are crossing edges and False otherwise.
return center, radius
:param face1Orig: origin of face 1
:type face1Orig: tuple
:param face1Vec: face 1 edges
:type face1Vec: list
:param face2Orig: origin of face 2
:type face2Orig: tuple
:param face2Vec: face 2 edges
:type face2Vec: list
def _createRayGivenFace(self, meshfn, faceId):
""" Represent a face by a series of edges(rays), i.e.
A face is represented by a series of edges(rays), i.e.
.. code-block:: python
:param meshfn: MFnMesh class
:type meshfn: :class:`maya.api.OpenMaya.MFnMesh`
:param faceId: face id
:type faceId: int
:returns: ``(True, orig, vec)``, or ``(False, None, None)``
if the face has no valid UVs
:rtype: tuple
faceOrig[] = [orig1u, orig1v, orig2u, orig2v, ... ]
faceVec[] = [vec1u, vec1v, vec2u, vec2v, ... ]
"""
face1Size = len(face1Orig)
face2Size = len(face2Orig)
for i in xrange(0, face1Size, 2): # noqa: F821
o1x = face1Orig[i]
o1y = face1Orig[i+1]
v1x = face1Vec[i]
v1y = face1Vec[i+1]
n1x = v1y
n1y = -v1x
for j in xrange(0, face2Size, 2): # noqa: F821
# Given ray1(O1, V1) and ray2(O2, V2)
# Normal of ray1 is (V1.y, V1.x)
o2x = face2Orig[j]
o2y = face2Orig[j+1]
v2x = face2Vec[j]
v2y = face2Vec[j+1]
n2x = v2y
n2y = -v2x
.. code-block:: python
# Find t for ray2
# t = [(o1x-o2x)n1x + (o1y-o2y)n1y] /
# (v2x * n1x + v2y * n1y)
denum = v2x * n1x + v2y * n1y
# Edges are parallel if denum is close to 0.
if math.fabs(denum) < 0.000001:
continue
t2 = ((o1x-o2x) * n1x + (o1y-o2y) * n1y) / denum
if (t2 < 0.00001 or t2 > 0.99999):
continue
orig = [orig1u, orig1v, orig2u, orig2v, ... ]
vec = [vec1u, vec1v, vec2u, vec2v, ... ]
"""
orig = []
vec = []
# get uvs
uarray = []
varray = []
for i in range(len(meshfn.getPolygonVertices(faceId))):
uv = meshfn.getPolygonUV(faceId, i)
uarray.append(uv[0])
varray.append(uv[1])
# Find t for ray1
# t = [(o2x-o1x)n2x
# + (o2y-o1y)n2y] / (v1x * n2x + v1y * n2y)
denum = v1x * n2x + v1y * n2y
# Edges are parallel if denum is close to 0.
if math.fabs(denum) < 0.000001:
continue
t1 = ((o2x-o1x) * n2x + (o2y-o1y) * n2y) / denum
if len(uarray) == 0 or len(varray) == 0:
return (False, None, None)
# Edges intersect
if (t1 > 0.00001 and t1 < 0.99999):
return 1
# loop through all vertices to construct edges/rays
u = uarray[-1]
v = varray[-1]
for i in xrange(len(uarray)): # noqa: F821
orig.append(uarray[i])
orig.append(varray[i])
vec.append(u - uarray[i])
vec.append(v - varray[i])
u = uarray[i]
v = varray[i]
return 0
return (True, orig, vec)
def _getOverlapUVFaces(self, meshName):
""" Return overlapping faces
def _checkCrossingEdges(self,
face1Orig,
face1Vec,
face2Orig,
face2Vec):
""" Check if there are crossing edges between two faces.
Return True if there are crossing edges and False otherwise.
:param meshName: name of mesh
:type meshName: str
:returns: list of overlapping faces
:rtype: list
"""
faces = []
# find polygon mesh node
selList = om.MSelectionList()
selList.add(meshName)
mesh = selList.getDependNode(0)
if mesh.apiType() == om.MFn.kTransform:
dagPath = selList.getDagPath(0)
dagFn = om.MFnDagNode(dagPath)
child = dagFn.child(0)
if child.apiType() != om.MFn.kMesh:
raise Exception("Can't find polygon mesh")
mesh = child
meshfn = om.MFnMesh(mesh)
:param face1Orig: origin of face 1
:type face1Orig: tuple
:param face1Vec: face 1 edges
:type face1Vec: list
:param face2Orig: origin of face 2
:type face2Orig: tuple
:param face2Vec: face 2 edges
:type face2Vec: list
center, radius = self._createBoundingCircle(meshfn)
for i in xrange(meshfn.numPolygons): # noqa: F821
rayb1, face1Orig, face1Vec = self._createRayGivenFace(
meshfn, i)
if not rayb1:
A face is represented by a series of edges(rays), i.e.
.. code-block:: python
faceOrig[] = [orig1u, orig1v, orig2u, orig2v, ... ]
faceVec[] = [vec1u, vec1v, vec2u, vec2v, ... ]
"""
face1Size = len(face1Orig)
face2Size = len(face2Orig)
for i in xrange(0, face1Size, 2): # noqa: F821
o1x = face1Orig[i]
o1y = face1Orig[i+1]
v1x = face1Vec[i]
v1y = face1Vec[i+1]
n1x = v1y
n1y = -v1x
for j in xrange(0, face2Size, 2): # noqa: F821
# Given ray1(O1, V1) and ray2(O2, V2)
# Normal of ray1 is (V1.y, V1.x)
o2x = face2Orig[j]
o2y = face2Orig[j+1]
v2x = face2Vec[j]
v2y = face2Vec[j+1]
n2x = v2y
n2y = -v2x
# Find t for ray2
# t = [(o1x-o2x)n1x + (o1y-o2y)n1y] /
# (v2x * n1x + v2y * n1y)
denum = v2x * n1x + v2y * n1y
# Edges are parallel if denum is close to 0.
if math.fabs(denum) < 0.000001:
continue
t2 = ((o1x-o2x) * n1x + (o1y-o2y) * n1y) / denum
if (t2 < 0.00001 or t2 > 0.99999):
continue
cui = center[2*i]
cvi = center[2*i+1]
ri = radius[i]
# Exclude the degenerate face
# if(area(face1Orig) < 0.000001) continue;
# Loop through face j where j != i
for j in range(i+1, meshfn.numPolygons):
cuj = center[2*j]
cvj = center[2*j+1]
rj = radius[j]
du = cuj - cui
dv = cvj - cvi
dsqr = du * du + dv * dv
# Quick rejection if bounding circles don't overlap
if (dsqr >= (ri + rj) * (ri + rj)):
continue
rayb2, face2Orig, face2Vec = self._createRayGivenFace(
meshfn, j)
if not rayb2:
continue
# Exclude the degenerate face
# if(area(face2Orig) < 0.000001): continue;
if self._checkCrossingEdges(face1Orig,
face1Vec,
face2Orig,
face2Vec):
face1 = '%s.f[%d]' % (meshfn.name(), i)
face2 = '%s.f[%d]' % (meshfn.name(), j)
if face1 not in faces:
faces.append(face1)
if face2 not in faces:
faces.append(face2)
return faces
# Find t for ray1
# t = [(o2x-o1x)n2x
# + (o2y-o1y)n2y] / (v1x * n2x + v1y * n2y)
denum = v1x * n2x + v1y * n2y
# Edges are parallel if denum is close to 0.
if math.fabs(denum) < 0.000001:
continue
t1 = ((o2x-o1x) * n2x + (o2y-o1y) * n2y) / denum
# Edges intersect
if (t1 > 0.00001 and t1 < 0.99999):
return 1
return 0
def _getOverlapUVFaces(self, meshName):
""" Return overlapping faces
:param meshName: name of mesh
:type meshName: str
:returns: list of overlapping faces
:rtype: list
"""
faces = []
# find polygon mesh node
selList = om.MSelectionList()
selList.add(meshName)
mesh = selList.getDependNode(0)
if mesh.apiType() == om.MFn.kTransform:
dagPath = selList.getDagPath(0)
dagFn = om.MFnDagNode(dagPath)
child = dagFn.child(0)
if child.apiType() != om.MFn.kMesh:
raise Exception("Can't find polygon mesh")
mesh = child
meshfn = om.MFnMesh(mesh)
center, radius = self._createBoundingCircle(meshfn)
for i in xrange(meshfn.numPolygons): # noqa: F821
rayb1, face1Orig, face1Vec = self._createRayGivenFace(
meshfn, i)
if not rayb1:
continue
cui = center[2*i]
cvi = center[2*i+1]
ri = radius[i]
# Exclude the degenerate face
# if(area(face1Orig) < 0.000001) continue;
# Loop through face j where j != i
for j in range(i+1, meshfn.numPolygons):
cuj = center[2*j]
cvj = center[2*j+1]
rj = radius[j]
du = cuj - cui
dv = cvj - cvi
dsqr = du * du + dv * dv
# Quick rejection if bounding circles don't overlap
if (dsqr >= (ri + rj) * (ri + rj)):
continue
rayb2, face2Orig, face2Vec = self._createRayGivenFace(
meshfn, j)
if not rayb2:
continue
# Exclude the degenerate face
# if(area(face2Orig) < 0.000001): continue;
if self._checkCrossingEdges(face1Orig,
face1Vec,
face2Orig,
face2Vec):
face1 = '%s.f[%d]' % (meshfn.name(), i)
face2 = '%s.f[%d]' % (meshfn.name(), j)
if face1 not in faces:
faces.append(face1)
if face2 not in faces:
faces.append(face2)
return faces
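
A self-contained worked example of the edge-crossing test above: each segment is parametrised as `O + t*V` with `t` in (0, 1), and an intersection requires a valid `t` on both segments.

```python
import math

def segments_cross(o1, v1, o2, v2):
    # normals of each segment's direction vector
    n1 = (v1[1], -v1[0])
    n2 = (v2[1], -v2[0])

    denom = v2[0] * n1[0] + v2[1] * n1[1]
    if math.fabs(denom) < 1e-6:  # parallel edges
        return False
    t2 = ((o1[0] - o2[0]) * n1[0] + (o1[1] - o2[1]) * n1[1]) / denom
    if t2 < 1e-5 or t2 > 1 - 1e-5:
        return False

    denom = v1[0] * n2[0] + v1[1] * n2[1]
    if math.fabs(denom) < 1e-6:
        return False
    t1 = ((o2[0] - o1[0]) * n2[0] + (o2[1] - o1[1]) * n2[1]) / denom
    return 1e-5 < t1 < 1 - 1e-5

# the two diagonals of the unit square cross ...
print(segments_cross((0, 0), (1, 1), (1, 0), (-1, 1)))  # True
# ... parallel horizontal edges do not
print(segments_cross((0, 0), (1, 0), (0, 1), (1, 0)))   # False
```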
class ValidateMeshHasOverlappingUVs(pyblish.api.InstancePlugin):
@ -241,7 +239,7 @@ class ValidateMeshHasOverlappingUVs(pyblish.api.InstancePlugin):
optional = True
@classmethod
def _has_overlapping_uvs(cls, node):
def _get_overlapping_uvs(cls, node):
""" Check if mesh has overlapping UVs.
:param node: node to check
@ -251,27 +249,32 @@ class ValidateMeshHasOverlappingUVs(pyblish.api.InstancePlugin):
"""
ovl = GetOverlappingUVs()
for i, uv in enumerate(polyUVSet(node, q=1, auv=1)):
polyUVSet(node, cuv=1, uvSet=uv)
of = ovl._getOverlapUVFaces(str(node))
if of != []:
return True
return False
overlapping_faces = []
for i, uv in enumerate(pm.polyUVSet(node, q=1, auv=1)):
pm.polyUVSet(node, cuv=1, uvSet=uv)
overlapping_faces.extend(ovl._getOverlapUVFaces(str(node)))
return overlapping_faces
@classmethod
def get_invalid(cls, instance):
def get_invalid(cls, instance, compute=False):
invalid = []
for node in cmds.ls(instance, type='mesh'):
if cls._has_overlapping_uvs(node):
invalid.append(node)
if compute:
instance.data["overlapping_faces"] = []
for node in pm.ls(instance, type="mesh"):
faces = cls._get_overlapping_uvs(node)
invalid.extend(faces)
# Store values for later.
instance.data["overlapping_faces"].extend(faces)
else:
invalid.extend(instance.data["overlapping_faces"])
return invalid
def process(self, instance):
invalid = self.get_invalid(instance)
invalid = self.get_invalid(instance, compute=True)
if invalid:
raise RuntimeError("Meshes found with overlapping "
"UVs: {0}".format(invalid))
pass
raise RuntimeError(
"Meshes found with overlapping UVs: {0}".format(invalid)
)
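
A stripped-down sketch of the compute/cache split introduced in `get_invalid`: `process()` pays the scan cost once with `compute=True`, and later callers (for example a select-invalid action) read the cached result.

```python
class DummyInstance(object):
    def __init__(self):
        self.data = {}

def expensive_scan(instance):
    print("scanning mesh ...")
    return ["pCube1.f[0]", "pCube1.f[3]"]

def get_invalid(instance, compute=False):
    if compute:
        instance.data["overlapping_faces"] = expensive_scan(instance)
    return instance.data["overlapping_faces"]

inst = DummyInstance()
get_invalid(inst, compute=True)  # prints "scanning mesh ..."
print(get_invalid(inst))         # cached, no rescan
```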

View file

@ -152,7 +152,8 @@ class ValidateRigControllers(pyblish.api.InstancePlugin):
"""
import maya.cmds as mc
attributes = mc.listAttr(control, keyable=True, scalar=True)
# Support controls without any attributes returning None
attributes = mc.listAttr(control, keyable=True, scalar=True) or []
invalid = []
for attr in attributes:
plug = "{}.{}".format(control, attr)

View file

@ -76,7 +76,16 @@ class CollectNukeReads(pyblish.api.ContextPlugin):
self.log.debug("collected_frames: {}".format(label))
instance.data["files"] = [source_files]
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': ext,
'ext': "." + ext,
'files': source_files,
"stagingDir": source_dir,
}
instance.data["representations"].append(representation)
transfer = False
if "publish" in node.knobs():

View file

@ -63,16 +63,28 @@ class CollectNukeWrites(pyblish.api.ContextPlugin):
int(last_frame)
)
if "files" not in instance.data:
instance.data["files"] = list()
if "representations" not in instance.data:
instance.data["representations"] = list()
try:
collected_frames = os.listdir(output_dir)
self.log.debug("collected_frames: {}".format(label))
instance.data["files"].append(collected_frames)
representation = {
'name': ext,
'ext': ext,
'files': collected_frames,
"stagingDir": output_dir,
"anatomy_template": "render"
}
instance.data["representations"].append(representation)
except Exception:
self.log.debug("couldn't collect frames: {}".format(label))
# except Exception:
# self.log.debug("couldn't collect frames: {}".format(label))
instance.data.update({
"path": path,
"outputDir": output_dir,

View file

@ -49,18 +49,27 @@ class NukeRenderLocal(pype.api.Extractor):
# swap path back to publish path
path = node['file'].value()
node['file'].setValue(path.replace(temp_dir, output_dir))
ext = node["file_type"].value()
if "files" not in instance.data:
instance.data["files"] = list()
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["files"] = [os.listdir(temp_dir)]
collected_frames = os.listdir(temp_dir)
repre = {
'name': ext,
'ext': ext,
'files': collected_frames,
"stagingDir": temp_dir,
"anatomy_template": "render"
}
instance.data["representations"].append(repre)
self.log.info("Extracted instance '{0}' to: {1}".format(
instance.name,
output_dir
temp_dir
))
collections, remainder = clique.assemble(*instance.data['files'])
collections, remainder = clique.assemble(collected_frames)
self.log.info('collections: {}'.format(str(collections)))
if collections:

View file

@ -50,10 +50,10 @@ class ExtractDataForReview(pype.api.Extractor):
def transcode_mov(self, instance):
collection = instance.data["collection"]
staging_dir = instance.data["stagingDir"].replace("\\", "/")
stagingDir = instance.data["stagingDir"].replace("\\", "/")
file_name = collection.format("{head}mov")
review_mov = os.path.join(staging_dir, file_name).replace("\\", "/")
review_mov = os.path.join(stagingDir, file_name).replace("\\", "/")
self.log.info("transcoding review mov: {0}".format(review_mov))
if instance.data.get("baked_colorspace_movie"):
@ -75,18 +75,33 @@ class ExtractDataForReview(pype.api.Extractor):
instance.data["baked_colorspace_movie"]))
os.remove(instance.data["baked_colorspace_movie"])
instance.data["files"].append(file_name)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'mov',
'ext': 'mov',
'files': file_name,
"stagingDir": stagingDir,
"anatomy_template": "render",
"thumbnail": False,
"preview": True,
'startFrameReview': instance.data['startFrame'],
'endFrameReview': instance.data['endFrame'],
'frameRate': instance.context.data["framerate"]
}
instance.data["representations"].append(representation)
def render_review_representation(self,
instance,
representation="mov"):
assert instance.data['files'], "Instance data files should't be empty!"
assert instance.data['representations'][0]['files'], "Instance data files should't be empty!"
import nuke
temporary_nodes = []
staging_dir = instance.data["stagingDir"].replace("\\", "/")
self.log.debug("StagingDir `{0}`...".format(staging_dir))
stagingDir = instance.data["stagingDir"].replace("\\", "/")
self.log.debug("StagingDir `{0}`...".format(stagingDir))
collection = instance.data.get("collection", None)
@ -108,7 +123,7 @@ class ExtractDataForReview(pype.api.Extractor):
node = previous_node = nuke.createNode("Read")
node["file"].setValue(
os.path.join(staging_dir, fname).replace("\\", "/"))
os.path.join(stagingDir, fname).replace("\\", "/"))
node["first"].setValue(first_frame)
node["origfirst"].setValue(first_frame)
@ -147,7 +162,7 @@ class ExtractDataForReview(pype.api.Extractor):
if representation in "mov":
file = fhead + "baked.mov"
path = os.path.join(staging_dir, file).replace("\\", "/")
path = os.path.join(stagingDir, file).replace("\\", "/")
self.log.debug("Path: {}".format(path))
instance.data["baked_colorspace_movie"] = path
write_node["file"].setValue(path)
@ -155,22 +170,39 @@ class ExtractDataForReview(pype.api.Extractor):
write_node["raw"].setValue(1)
write_node.setInput(0, previous_node)
temporary_nodes.append(write_node)
thumbnail = False
preview = True
elif representation in "jpeg":
file = fhead + "jpeg"
path = os.path.join(staging_dir, file).replace("\\", "/")
path = os.path.join(stagingDir, file).replace("\\", "/")
instance.data["thumbnail"] = path
write_node["file"].setValue(path)
write_node["file_type"].setValue("jpeg")
write_node["raw"].setValue(1)
write_node.setInput(0, previous_node)
temporary_nodes.append(write_node)
thumbnail = True
preview = False
# retime to the middle frame for the single-frame thumbnail render
first_frame = int(last_frame) / 2
last_frame = int(last_frame) / 2
# add into files for integration as representation
instance.data["files"].append(file)
if "representations" not in instance.data:
instance.data["representations"] = []
repre = {
'name': representation,
'ext': representation,
'files': file,
"stagingDir": stagingDir,
"anatomy_template": "render",
"thumbnail": thumbnail,
"preview": preview
}
instance.data["representations"].append(repre)
# Render frames
nuke.execute(write_node.name(), int(first_frame), int(last_frame))

View file

@ -19,16 +19,22 @@ class ExtractScript(pype.api.Extractor):
current_script = instance.context.data["currentFile"]
# Define extract output file path
dir_path = self.staging_dir(instance)
stagingdir = self.staging_dir(instance)
filename = "{0}".format(instance.data["name"])
path = os.path.join(dir_path, filename)
path = os.path.join(stagingdir, filename)
self.log.info("Performing extraction..")
shutil.copy(current_script, path)
if "files" not in instance.data:
instance.data["files"] = list()
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["files"].append(filename)
representation = {
'name': 'nk',
'ext': '.nk',
'files': filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))

View file

@ -30,33 +30,33 @@ class ValidatePrerenderedFrames(pyblish.api.InstancePlugin):
hosts = ["nuke"]
actions = [RepairCollectionAction]
def process(self, instance):
self.log.debug('instance.data["files"]: {}'.format(instance.data['files']))
assert instance.data.get('files'), "no frames were collected, you need to render them"
for repre in instance.data.get('representations'):
collections, remainder = clique.assemble(*instance.data['files'])
self.log.info('collections: {}'.format(str(collections)))
assert repre.get('files'), "no frames were collected, you need to render them"
collection = collections[0]
collections, remainder = clique.assemble(repre["files"])
self.log.info('collections: {}'.format(str(collections)))
frame_length = instance.data["endFrame"] \
- instance.data["startFrame"] + 1
collection = collections[0]
if frame_length is not 1:
assert len(collections) == 1, "There are multiple collections in the folder"
assert collection.is_contiguous(), "Some frames appear to be missing"
frame_length = instance.data["endFrame"] \
- instance.data["startFrame"] + 1
assert remainder is not None, "There are some extra files in folder"
if frame_length != 1:
assert len(collections) == 1, "There are multiple collections in the folder"
assert collection.is_contiguous(), "Some frames appear to be missing"
self.log.info('frame_length: {}'.format(frame_length))
self.log.info('len(collection.indexes): {}'.format(
len(collection.indexes)))
assert remainder is not None, "There are some extra files in folder"
assert len(
collection.indexes
) == frame_length, ("{} missing frames. Use "
"repair to render all frames".format(__name__))
self.log.info('frame_length: {}'.format(frame_length))
self.log.info('len(collection.indexes): {}'.format(
len(collection.indexes)))
instance.data['collection'] = collection
assert len(
collection.indexes
) == frame_length, ("{} missing frames. Use "
"repair to render all frames".format(__name__))
instance.data['collection'] = collection
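
The contiguity and frame-count asserts above rely on `clique`; an illustration with a deliberately missing frame:

```python
import clique

frames = ["beauty.0998.exr", "beauty.0999.exr", "beauty.1001.exr"]
collections, remainder = clique.assemble(frames)
col = collections[0]

print(col.is_contiguous())  # False: frame 1000 is missing
print(sorted(col.indexes))  # [998, 999, 1001]
print(col.holes().indexes)  # {1000}
```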

View file

@ -1,40 +0,0 @@
import os
import pyblish.api
try:
import ftrack_api_old as ftrack_api
except Exception:
import ftrack_api
class CollectFtrackApi(pyblish.api.ContextPlugin):
""" Collects an ftrack session and the current task id. """
order = pyblish.api.CollectorOrder
label = "Collect Ftrack Api"
def process(self, context):
# Collect session
session = ftrack_api.Session()
context.data["ftrackSession"] = session
# Collect task
project = os.environ.get('AVALON_PROJECT', '')
asset = os.environ.get('AVALON_ASSET', '')
task = os.environ.get('AVALON_TASK', None)
if task:
result = session.query('Task where\
project.full_name is "{0}" and\
name is "{1}" and\
parent.name is "{2}"'.format(project, task, asset)).one()
context.data["ftrackTask"] = result
else:
result = session.query('TypedContext where\
project.full_name is "{0}" and\
name is "{1}"'.format(project, asset)).one()
context.data["ftrackEntity"] = result
self.log.info(result)

View file

@ -1,17 +0,0 @@
import pype.api as pype
from pypeapp import Anatomy
import pyblish.api
class CollectTemplates(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
order = pyblish.api.CollectorOrder
label = "Collect Templates"
def process(self, context):
# pype.load_data_from_templates()
context.data['anatomy'] = Anatomy()
self.log.info("Anatomy templates collected...")

View file

@ -1,12 +0,0 @@
import pyblish.api
from avalon import api
class CollectTime(pyblish.api.ContextPlugin):
"""Store global time at the time of publish"""
label = "Collect Current Time"
order = pyblish.api.CollectorOrder
def process(self, context):
context.data["time"] = api.time()

View file

@ -1,448 +0,0 @@
import os
import logging
import shutil
import errno
import pyblish.api
from avalon import api, io
from avalon.vendor import filelink
import clique
log = logging.getLogger(__name__)
class IntegrateAsset(pyblish.api.InstancePlugin):
"""Resolve any dependency issies
This plug-in resolves any paths which, if not updated might break
the published file.
The order of families is important, when working with lookdev you want to
first publish the texture, update the texture paths in the nodes and then
publish the shading network. Same goes for file dependent assets.
"""
label = "Integrate Asset"
order = pyblish.api.IntegratorOrder
families = ["animation",
"camera",
"look",
"mayaAscii",
"model",
"pointcache",
"vdbcache",
"setdress",
"assembly",
"layout",
"rig",
"vrayproxy",
"yetiRig",
"yeticache",
"nukescript",
# "review",
"workfile",
"scene",
"ass"]
exclude_families = ["clip"]
def process(self, instance):
if [ef for ef in self.exclude_families
if instance.data["family"] in ef]:
return
self.register(instance)
self.log.info("Integrating Asset in to the database ...")
self.integrate(instance)
def register(self, instance):
# Required environment variables
PROJECT = api.Session["AVALON_PROJECT"]
ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"]
LOCATION = api.Session["AVALON_LOCATION"]
context = instance.context
# Atomicity
#
# Guarantee atomic publishes - each asset contains
# an identical set of members.
# __
# / o
# / \
# | o |
# \ /
# o __/
#
assert all(result["success"] for result in context.data["results"]), (
"Atomicity not held, aborting.")
# Assemble
#
# |
# v
# ---> <----
# ^
# |
#
# stagingdir = instance.data.get("stagingDir")
# assert stagingdir, ("Incomplete instance \"%s\": "
# "Missing reference to staging area." % instance)
# extra check if stagingDir actually exists and is available
# self.log.debug("Establishing staging directory @ %s" % stagingdir)
# Ensure at least one file is set up for transfer in staging dir.
files = instance.data.get("files", [])
assert files, "Instance has no files to transfer"
assert isinstance(files, (list, tuple)), (
"Instance 'files' must be a list, got: {0}".format(files)
)
project = io.find_one({"type": "project"})
asset = io.find_one({"type": "asset",
"name": ASSET,
"parent": project["_id"]})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one({"type": "version",
"parent": subset["_id"]},
{"name": True},
sort=[("name", -1)])
next_version = 1
if latest_version is not None:
next_version += latest_version["name"]
self.log.info("Verifying version from assumed destination")
# assumed_data = instance.data["assumedTemplateData"]
# assumed_version = assumed_data["version"]
# if assumed_version != next_version:
# raise AttributeError("Assumed version 'v{0:03d}' does not match"
# "next version in database "
# "('v{1:03d}')".format(assumed_version,
# next_version))
self.log.debug("Next version: v{0:03d}".format(next_version))
version_data = self.create_version_data(context, instance)
version = self.create_version(subset=subset,
version_number=next_version,
locations=[LOCATION],
data=version_data)
self.log.debug("Creating version ...")
version_id = io.insert_one(version).inserted_id
instance.data['version'] = version['name']
# Write to disk
# _
# | |
# _| |_
# ____\ /
# |\ \ / \
# \ \ v \
# \ \________.
# \|________|
#
root = api.registered_root()
hierarchy = ""
parents = io.find_one({
"type": 'asset',
"name": ASSET
})['data']['parents']
if parents and len(parents) > 0:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*parents)
template_data = {"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
"silo": asset['silo'],
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": int(version["name"]),
"hierarchy": hierarchy}
template_publish = project["config"]["template"]["publish"]
anatomy = instance.context.data['anatomy']
# Find the representations to transfer amongst the files
# Each should be a single representation (as such, a single extension)
representations = []
destination_list = []
if 'transfers' not in instance.data:
instance.data['transfers'] = []
for idx, repre in enumerate(instance.data["representations"]):
# Collection
# _______
# |______|\
# | |\|
# | ||
# | ||
# | ||
# |_______|
#
files = repre['files']
if len(files) > 1:
src_collections, remainder = clique.assemble(files)
self.log.debug("dst_collections: {}".format(str(src_collections)))
src_collection = src_collections[0]
# Assert that each member has identical suffix
src_head = src_collection.format("{head}")
src_tail = ext = src_collection.format("{tail}")
test_dest_files = list()
for i in [1, 2]:
template_data["representation"] = src_tail[1:]
template_data["frame"] = src_collection.format(
"{padding}") % i
anatomy_filled = anatomy.format(template_data)
test_dest_files.append(anatomy_filled["publish"]["path"])
dst_collections, remainder = clique.assemble(test_dest_files)
dst_collection = dst_collections[0]
dst_head = dst_collection.format("{head}")
dst_tail = dst_collection.format("{tail}")
instance.data["representations"][idx]['published_path'] = dst_collection.format()
for i in src_collection.indexes:
src_padding = src_collection.format("{padding}") % i
src_file_name = "{0}{1}{2}".format(
src_head, src_padding, src_tail)
dst_padding = dst_collection.format("{padding}") % i
dst = "{0}{1}{2}".format(dst_head, dst_padding, dst_tail)
# src = os.path.join(stagingdir, src_file_name)
src = src_file_name
self.log.debug("source: {}".format(src))
instance.data["transfers"].append([src, dst])
else:
# Single file
# _______
# | |\
# | |
# | |
# | |
# |_______|
#
fname = files[0]
# assert not os.path.isabs(fname), (
# "Given file name is a full path"
# )
# _, ext = os.path.splitext(fname)
template_data["representation"] = repre['representation']
# src = os.path.join(stagingdir, fname)
src = fname
anatomy_filled = anatomy.format(template_data)
dst = anatomy_filled["publish"]["path"]
instance.data["transfers"].append([src, dst])
template = anatomy.templates["publish"]["path"]
instance.data["representations"][idx]['published_path'] = dst
representation = {
"schema": "pype:representation-2.0",
"type": "representation",
"parent": version_id,
"name": repre['representation'],
"data": {'path': dst, 'template': template},
"dependencies": instance.data.get("dependencies", "").split(),
# Imprint shortcut to context
# for performance reasons.
"context": {
"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
# 'task': api.Session["AVALON_TASK"],
"silo": asset['silo'],
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": version["name"],
"hierarchy": hierarchy,
# "representation": repre['representation']
}
}
destination_list.append(dst)
instance.data['destination_list'] = destination_list
representations.append(representation)
self.log.info("Registering {} items".format(len(representations)))
io.insert_many(representations)
def integrate(self, instance):
"""Move the files
Through `instance.data["transfers"]`
Args:
instance: the instance to integrate
"""
transfers = instance.data.get("transfers", list())
for src, dest in transfers:
self.log.info("Copying file .. {} -> {}".format(src, dest))
self.copy_file(src, dest)
# Produce hardlinked copies
# Note: hardlink can only be produced between two files on the same
# server/disk and editing one of the two will edit both files at once.
# As such it is recommended to only make hardlinks between static files
# to ensure publishes remain safe and non-edited.
hardlinks = instance.data.get("hardlinks", list())
for src, dest in hardlinks:
self.log.info("Hardlinking file .. {} -> {}".format(src, dest))
self.hardlink_file(src, dest)
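
The hardlink caveat in the comment above can be demonstrated with the standard library alone (illustrative paths):

```python
import os
import tempfile

tmp = tempfile.mkdtemp()
src = os.path.join(tmp, "model_v001.ma")
dst = os.path.join(tmp, "model_link.ma")

with open(src, "w") as f:
    f.write("payload")

os.link(src, dst)  # two directory entries, one inode
print(os.stat(src).st_ino == os.stat(dst).st_ino)  # True
# editing either path edits both, hence: hardlink only static files
```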
def copy_file(self, src, dst):
""" Copy given source to destination
Arguments:
src (str): the source file which needs to be copied
dst (str): the destination of the source file
Returns:
None
"""
dirname = os.path.dirname(dst)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
self.log.critical("An unexpected error occurred.")
raise
shutil.copy(src, dst)
def hardlink_file(self, src, dst):
dirname = os.path.dirname(dst)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
self.log.critical("An unexpected error occurred.")
raise
filelink.create(src, dst, filelink.HARDLINK)
def get_subset(self, asset, instance):
subset = io.find_one({"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]})
if subset is None:
subset_name = instance.data["subset"]
self.log.info("Subset '%s' not found, creating.." % subset_name)
_id = io.insert_one({
"schema": "avalon-core:subset-2.0",
"type": "subset",
"name": subset_name,
"data": {},
"parent": asset["_id"]
}).inserted_id
subset = io.find_one({"_id": _id})
return subset
def create_version(self, subset, version_number, locations, data=None):
""" Copy given source to destination
Args:
subset (dict): the registered subset of the asset
version_number (int): the version number
locations (list): the currently registered locations
Returns:
dict: collection of data to create a version
"""
# Imprint currently registered location
version_locations = [location for location in locations if
location is not None]
return {"schema": "avalon-core:version-2.0",
"type": "version",
"parent": subset["_id"],
"name": version_number,
"locations": version_locations,
"data": data}
def create_version_data(self, context, instance):
"""Create the data collection for the version
Args:
context: the current context
instance: the current instance being published
Returns:
dict: the required information with instance.data as key
"""
families = []
current_families = instance.data.get("families", list())
instance_family = instance.data.get("family", None)
if instance_family is not None:
families.append(instance_family)
families += current_families
self.log.debug("Registered root: {}".format(api.registered_root()))
# # create relative source path for DB
# try:
# source = instance.data['source']
# except KeyError:
# source = context.data["currentFile"]
#
# relative_path = os.path.relpath(source, api.registered_root())
# source = os.path.join("{root}", relative_path).replace("\\", "/")
source = "standalone"
# self.log.debug("Source: {}".format(source))
version_data = {"families": families,
"time": context.data["time"],
"author": context.data["user"],
"source": source,
"comment": context.data.get("comment"),
"machine": context.data.get("machine"),
"fps": context.data.get("fps")}
# Include optional data if present in
optionals = [
"startFrame", "endFrame", "step", "handles", "sourceHashes"
]
for key in optionals:
if key in instance.data:
version_data[key] = instance.data[key]
return version_data

View file

@ -1,315 +0,0 @@
import os
import sys
import pyblish.api
import clique
class IntegrateFtrackApi(pyblish.api.InstancePlugin):
""" Commit components to server. """
order = pyblish.api.IntegratorOrder+0.499
label = "Integrate Ftrack Api"
families = ["ftrack"]
def query(self, entitytype, data):
""" Generate a query expression from data supplied.
If a value is not a string, we'll add the id of the entity to the
query.
Args:
entitytype (str): The type of entity to query.
data (dict): The data to identify the entity.
exclusions (list): All keys to exclude from the query.
Returns:
str: String query to use with "session.query"
"""
queries = []
if sys.version_info[0] < 3:
for key, value in data.iteritems():
if not isinstance(value, (basestring, int)):
self.log.info("value: {}".format(value))
if "id" in value.keys():
queries.append(
"{0}.id is \"{1}\"".format(key, value["id"])
)
else:
queries.append("{0} is \"{1}\"".format(key, value))
else:
for key, value in data.items():
if not isinstance(value, (str, int)):
self.log.info("value: {}".format(value))
if "id" in value.keys():
queries.append(
"{0}.id is \"{1}\"".format(key, value["id"])
)
else:
queries.append("{0} is \"{1}\"".format(key, value))
query = (
"select id from " + entitytype + " where " + " and ".join(queries)
)
self.log.debug(query)
return query
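
Example of what the query builder produces (a standalone mirror of the Python 3 branch above, with made-up values):

```python
def build_query(entitytype, data):
    # values that are dicts with an "id" key match by id;
    # plain strings and ints match by equality
    queries = []
    for key, value in data.items():
        if not isinstance(value, (str, int)):
            if "id" in value.keys():
                queries.append('{0}.id is "{1}"'.format(key, value["id"]))
        else:
            queries.append('{0} is "{1}"'.format(key, value))
    return "select id from " + entitytype + " where " + " and ".join(queries)

print(build_query("Asset", {"name": "sh010", "type": {"id": "abc-123"}}))
# select id from Asset where name is "sh010" and type.id is "abc-123"
```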
def process(self, instance):
session = instance.context.data["ftrackSession"]
if instance.context.data.get("ftrackTask"):
task = instance.context.data["ftrackTask"]
name = task['full_name']
parent = task["parent"]
elif instance.context.data.get("ftrackEntity"):
task = None
name = instance.context.data.get("ftrackEntity")['name']
parent = instance.context.data.get("ftrackEntity")
info_msg = "Created new {entity_type} with data: {data}"
info_msg += ", metadata: {metadata}."
# Iterate over components and publish
for data in instance.data.get("ftrackComponentsList", []):
# AssetType
# Get existing entity.
assettype_data = {"short": "upload"}
assettype_data.update(data.get("assettype_data", {}))
self.log.debug("data: {}".format(data))
assettype_entity = session.query(
self.query("AssetType", assettype_data)
).first()
# Create a new entity if none exits.
if not assettype_entity:
assettype_entity = session.create("AssetType", assettype_data)
self.log.debug(
"Created new AssetType with data: ".format(assettype_data)
)
# Asset
# Get existing entity.
asset_data = {
"name": name,
"type": assettype_entity,
"parent": parent,
}
asset_data.update(data.get("asset_data", {}))
asset_entity = session.query(
self.query("Asset", asset_data)
).first()
self.log.info("asset entity: {}".format(asset_entity))
# Extracting metadata, and adding after entity creation. This is
# due to a ftrack_api bug where you can't add metadata on creation.
asset_metadata = asset_data.pop("metadata", {})
# Create a new entity if none exits.
if not asset_entity:
asset_entity = session.create("Asset", asset_data)
self.log.debug(
info_msg.format(
entity_type="Asset",
data=asset_data,
metadata=asset_metadata
)
)
# Adding metadata
existing_asset_metadata = asset_entity["metadata"]
existing_asset_metadata.update(asset_metadata)
asset_entity["metadata"] = existing_asset_metadata
# AssetVersion
# Get existing entity.
assetversion_data = {
"version": 0,
"asset": asset_entity,
}
if task:
assetversion_data['task'] = task
assetversion_data.update(data.get("assetversion_data", {}))
assetversion_entity = session.query(
self.query("AssetVersion", assetversion_data)
).first()
# Extracting metadata, and adding after entity creation. This is
# due to a ftrack_api bug where you can't add metadata on creation.
assetversion_metadata = assetversion_data.pop("metadata", {})
# Create a new entity if none exits.
if not assetversion_entity:
assetversion_entity = session.create(
"AssetVersion", assetversion_data
)
self.log.debug(
info_msg.format(
entity_type="AssetVersion",
data=assetversion_data,
metadata=assetversion_metadata
)
)
# Adding metadata
existing_assetversion_metadata = assetversion_entity["metadata"]
existing_assetversion_metadata.update(assetversion_metadata)
assetversion_entity["metadata"] = existing_assetversion_metadata
# Have to commit the version and asset, because location can't
# determine the final location without.
session.commit()
# Component
# Get existing entity.
component_data = {
"name": "main",
"version": assetversion_entity
}
component_data.update(data.get("component_data", {}))
component_entity = session.query(
self.query("Component", component_data)
).first()
component_overwrite = data.get("component_overwrite", False)
location = data.get("component_location", session.pick_location())
# Overwrite existing component data if requested.
if component_entity and component_overwrite:
origin_location = session.query(
"Location where name is \"ftrack.origin\""
).one()
# Removing existing members from location
components = list(component_entity.get("members", []))
components += [component_entity]
for component in components:
for loc in component["component_locations"]:
if location["id"] == loc["location_id"]:
location.remove_component(
component, recursive=False
)
# Deleting existing members on component entity
for member in component_entity.get("members", []):
session.delete(member)
del(member)
session.commit()
# Reset members in memory
if "members" in component_entity.keys():
component_entity["members"] = []
# Add components to origin location
try:
collection = clique.parse(data["component_path"])
except ValueError:
# Assume its a single file
# Changing file type
name, ext = os.path.splitext(data["component_path"])
component_entity["file_type"] = ext
origin_location.add_component(
component_entity, data["component_path"]
)
else:
# Changing file type
component_entity["file_type"] = collection.format("{tail}")
# Create member components for sequence.
for member_path in collection:
size = 0
try:
size = os.path.getsize(member_path)
except OSError:
pass
name = collection.match(member_path).group("index")
member_data = {
"name": name,
"container": component_entity,
"size": size,
"file_type": os.path.splitext(member_path)[-1]
}
component = session.create(
"FileComponent", member_data
)
origin_location.add_component(
component, member_path, recursive=False
)
component_entity["members"].append(component)
# Add components to location.
location.add_component(
component_entity, origin_location, recursive=True
)
data["component"] = component_entity
msg = "Overwriting Component with path: {0}, data: {1}, "
msg += "location: {2}"
self.log.info(
msg.format(
data["component_path"],
component_data,
location
)
)
# Extract metadata and add it after entity creation. This is due to
# an ftrack_api bug where metadata can't be set on creation.
component_metadata = component_data.pop("metadata", {})
# Create new component if none exists.
new_component = False
if not component_entity:
component_entity = assetversion_entity.create_component(
data["component_path"],
data=component_data,
location=location
)
data["component"] = component_entity
msg = "Created new Component with path: {0}, data: {1}"
msg += ", metadata: {2}, location: {3}"
self.log.info(
msg.format(
data["component_path"],
component_data,
component_metadata,
location
)
)
new_component = True
# Adding metadata
existing_component_metadata = component_entity["metadata"]
existing_component_metadata.update(component_metadata)
component_entity["metadata"] = existing_component_metadata
# if component_data['name'] == 'ftrackreview-mp4-mp4':
#     assetversion_entity["thumbnail_id"]
# Setting assetversion thumbnail
if data.get("thumbnail", False):
assetversion_entity["thumbnail_id"] = component_entity["id"]
# Inform user about no changes to the database.
if (component_entity and not component_overwrite and
not new_component):
data["component"] = component_entity
self.log.info(
"Found existing component, and no request to overwrite. "
"Nothing has been changed."
)
else:
# Commit changes.
session.commit()
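For reference, the `data` dictionary this registration consumes is assembled by `IntegrateFtrackInstance` below; a hedged example of its shape (all values are placeholders):

data = {
    "assettype_data": {"short": "render"},
    "asset_data": {"name": "renderMain"},
    "assetversion_data": {"version": 3},
    "component_data": {"name": "main"},
    "component_path": "/projects/show/publish/render.%04d.exr [1001-1100]",
    # Optional keys and the defaults applied above:
    # "component_location": session.pick_location(),
    # "component_overwrite": False,
    # "thumbnail": False,
}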

View file

@ -1,101 +0,0 @@
import pyblish.api
import os
import json
class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
"""Collect ftrack component data
Add ftrack component list to instance.
"""
order = pyblish.api.IntegratorOrder + 0.48
label = 'Integrate Ftrack Component'
families = ["ftrack"]
family_mapping = {'camera': 'cam',
'look': 'look',
'mayaAscii': 'scene',
'model': 'geo',
'rig': 'rig',
'setdress': 'setdress',
'pointcache': 'cache',
'write': 'img',
'render': 'render',
'nukescript': 'comp',
'review': 'mov'}
def process(self, instance):
self.log.debug('instance {}'.format(instance))
version_number = 0
if instance.data.get('version'):
version_number = int(instance.data.get('version'))
family = instance.data['family'].lower()
asset_type = self.family_mapping[family]
componentList = []
ft_session = instance.context.data["ftrackSession"]
components = instance.data['representations']
for comp in components:
self.log.debug('component {}'.format(comp))
# filename, ext = os.path.splitext(file)
# self.log.debug('dest ext: ' + ext)
# ext = comp['Context']
if comp['thumbnail']:
location = ft_session.query(
'Location where name is "ftrack.server"').one()
component_data = {
"name": "thumbnail" # Default component name is "main".
}
elif comp['preview']:
if not comp.get('startFrameReview'):
comp['startFrameReview'] = comp['startFrame']
if not comp.get('endFrameReview'):
comp['endFrameReview'] = instance.data['endFrame']
location = ft_session.query(
'Location where name is "ftrack.server"').one()
component_data = {
# Default component name is "main".
"name": "ftrackreview-mp4",
"metadata": {'ftr_meta': json.dumps({
'frameIn': int(comp['startFrameReview']),
'frameOut': int(comp['endFrameReview']),
'frameRate': float(comp['frameRate'])})}
}
else:
component_data = {
"name": comp['representation'] # Default component name is "main".
}
location = ft_session.query(
'Location where name is "ftrack.unmanaged"').one()
self.log.debug('location {}'.format(location))
componentList.append({"assettype_data": {
"short": asset_type,
},
"asset_data": {
"name": instance.data["subset"],
},
"assetversion_data": {
"version": version_number,
},
"component_data": component_data,
"component_path": comp['published_path'],
'component_location': location,
"component_overwrite": False,
"thumbnail": comp['thumbnail']
}
)
self.log.debug('componentsList: {}'.format(str(componentList)))
instance.data["ftrackComponentsList"] = componentList

View file

@ -1,436 +0,0 @@
import os
import logging
import shutil
import clique
import errno
import pyblish.api
from avalon import api, io
log = logging.getLogger(__name__)
class IntegrateFrames(pyblish.api.InstancePlugin):
"""Resolve any dependency issies
This plug-in resolves any paths which, if not updated might break
the published file.
The order of families is important, when working with lookdev you want to
first publish the texture, update the texture paths in the nodes and then
publish the shading network. Same goes for file dependent assets.
"""
label = "Integrate Frames"
order = pyblish.api.IntegratorOrder
families = [
"imagesequence",
"render",
"write",
"source",
'review']
family_targets = [".frames", ".local", ".review", "review", "imagesequence", "render", "source"]
exclude_families = ["clip"]
def process(self, instance):
if [ef for ef in self.exclude_families
if instance.data["family"] in ef]:
return
families = [f for f in instance.data["families"]
for search in self.family_targets
if search in f]
if not families:
return
self.register(instance)
# self.log.info("Integrating Asset in to the database ...")
# self.log.info("instance.data: {}".format(instance.data))
if instance.data.get('transfer', True):
self.integrate(instance)
def register(self, instance):
# Required environment variables
PROJECT = api.Session["AVALON_PROJECT"]
ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"]
LOCATION = api.Session["AVALON_LOCATION"]
context = instance.context
# Atomicity
#
# Guarantee atomic publishes - each asset contains
# an identical set of members.
# __
# / o
# / \
# | o |
# \ /
# o __/
#
assert all(result["success"] for result in context.data["results"]), (
"Atomicity not held, aborting.")
# Assemble
#
# |
# v
# ---> <----
# ^
# |
#
# stagingdir = instance.data.get("stagingDir")
# assert stagingdir, ("Incomplete instance \"%s\": "
# "Missing reference to staging area." % instance)
# extra check if stagingDir actually exists and is available
# self.log.debug("Establishing staging directory @ %s" % stagingdir)
project = io.find_one({"type": "project"})
asset = io.find_one({"type": "asset",
"name": ASSET,
"parent": project["_id"]})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one({"type": "version",
"parent": subset["_id"]},
{"name": True},
sort=[("name", -1)])
next_version = 1
if latest_version is not None:
next_version += latest_version["name"]
self.log.info("Verifying version from assumed destination")
# assumed_data = instance.data["assumedTemplateData"]
# assumed_version = assumed_data["version"]
# if assumed_version != next_version:
# raise AttributeError("Assumed version 'v{0:03d}' does not match"
# "next version in database "
# "('v{1:03d}')".format(assumed_version,
# next_version))
if instance.data.get('version'):
next_version = int(instance.data.get('version'))
instance.data['version'] = next_version
self.log.debug("Next version: v{0:03d}".format(next_version))
version_data = self.create_version_data(context, instance)
version = self.create_version(subset=subset,
version_number=next_version,
locations=[LOCATION],
data=version_data)
self.log.debug("Creating version ...")
version_id = io.insert_one(version).inserted_id
# Write to disk
# _
# | |
# _| |_
# ____\ /
# |\ \ / \
# \ \ v \
# \ \________.
# \|________|
#
root = api.registered_root()
hierarchy = ""
parents = io.find_one({"type": 'asset', "name": ASSET})[
'data']['parents']
if parents:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*parents)
template_data = {"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
"silo": asset['silo'],
"task": api.Session["AVALON_TASK"],
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": int(version["name"]),
"hierarchy": hierarchy}
# template_publish = project["config"]["template"]["publish"]
anatomy = instance.context.data['anatomy']
# Find the representations to transfer amongst the files
# Each should be a single representation (as such, a single extension)
representations = []
destination_list = []
if 'transfers' not in instance.data:
instance.data['transfers'] = []
# for repre in instance.data["representations"]:
for idx, repre in enumerate(instance.data["representations"]):
# Collection
# _______
# |______|\
# | |\|
# | ||
# | ||
# | ||
# |_______|
#
files = repre['files']
if len(files) > 1:
src_collections, remainder = clique.assemble(files)
self.log.debug("dst_collections: {}".format(str(src_collections)))
src_collection = src_collections[0]
# Assert that each member has identical suffix
src_head = src_collection.format("{head}")
src_tail = ext = src_collection.format("{tail}")
test_dest_files = list()
for i in [1, 2]:
template_data["representation"] = repre['representation']
template_data["frame"] = src_collection.format(
"{padding}") % i
anatomy_filled = anatomy.format(template_data)
test_dest_files.append(anatomy_filled["render"]["path"])
dst_collections, remainder = clique.assemble(test_dest_files)
dst_collection = dst_collections[0]
dst_head = dst_collection.format("{head}")
dst_tail = dst_collection.format("{tail}")
instance.data["representations"][idx]['published_path'] = dst_collection.format()
for i in src_collection.indexes:
src_padding = src_collection.format("{padding}") % i
src_file_name = "{0}{1}{2}".format(
src_head, src_padding, src_tail)
dst_padding = dst_collection.format("{padding}") % i
dst = "{0}{1}{2}".format(dst_head, dst_padding, dst_tail)
# src = os.path.join(stagingdir, src_file_name)
src = src_file_name
self.log.debug("source: {}".format(src))
instance.data["transfers"].append([src, dst])
else:
# Single file
# _______
# | |\
# | |
# | |
# | |
# |_______|
#
template_data.pop("frame", None)
fname = files[0]
self.log.info("fname: {}".format(fname))
# assert not os.path.isabs(fname), (
# "Given file name is a full path"
# )
# _, ext = os.path.splitext(fname)
template_data["representation"] = repre['representation']
# src = os.path.join(stagingdir, fname)
src = fname
anatomy_filled = anatomy.format(template_data)
dst = anatomy_filled["render"]["path"]
instance.data["transfers"].append([src, dst])
instance.data["representations"][idx]['published_path'] = dst
if repre['ext'] not in ["jpeg", "jpg", "mov", "mp4", "wav"]:
template_data["frame"] = "#" * int(anatomy_filled["render"]["padding"])
anatomy_filled = anatomy.format(template_data)
path_to_save = anatomy_filled["render"]["path"]
template = anatomy.templates["render"]["path"]
self.log.debug("path_to_save: {}".format(path_to_save))
representation = {
"schema": "pype:representation-2.0",
"type": "representation",
"parent": version_id,
"name": repre['representation'],
"data": {'path': path_to_save, 'template': template},
"dependencies": instance.data.get("dependencies", "").split(),
# Imprint shortcut to context
# for performance reasons.
"context": {
"root": root,
"project": {
"name": PROJECT,
"code": project['data']['code']
},
"task": api.Session["AVALON_TASK"],
"silo": asset['silo'],
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": int(version["name"]),
"hierarchy": hierarchy,
"representation": repre['representation']
}
}
destination_list.append(dst)
instance.data['destination_list'] = destination_list
representations.append(representation)
self.log.info("Registering {} items".format(len(representations)))
io.insert_many(representations)
def integrate(self, instance):
"""Move the files
Through `instance.data["transfers"]`
Args:
instance: the instance to integrate
"""
transfers = instance.data["transfers"]
for src, dest in transfers:
src = os.path.normpath(src)
dest = os.path.normpath(dest)
# Skip if the source path is already a substring of the destination.
if src in dest:
continue
self.log.info("Copying file .. {} -> {}".format(src, dest))
self.copy_file(src, dest)
def copy_file(self, src, dst):
""" Copy given source to destination
Arguments:
src (str): the source file which needs to be copied
dst (str): the destination of the source file
Returns:
None
"""
dirname = os.path.dirname(dst)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
self.log.critical("An unexpected error occurred.")
raise
shutil.copy(src, dst)
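On Python 3 the errno guard above can be collapsed into `exist_ok`; a minimal equivalent sketch:

import os
import shutil


def copy_file(src, dst):
    # exist_ok=True replaces the try/except around os.makedirs().
    os.makedirs(os.path.dirname(dst), exist_ok=True)
    shutil.copy(src, dst)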
def get_subset(self, asset, instance):
subset = io.find_one({"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]})
if subset is None:
subset_name = instance.data["subset"]
self.log.info("Subset '%s' not found, creating.." % subset_name)
_id = io.insert_one({
"schema": "pype:subset-2.0",
"type": "subset",
"name": subset_name,
"data": {},
"parent": asset["_id"]
}).inserted_id
subset = io.find_one({"_id": _id})
return subset
def create_version(self, subset, version_number, locations, data=None):
""" Copy given source to destination
Args:
subset (dict): the registered subset of the asset
version_number (int): the version number
locations (list): the currently registered locations
data (dict): additional data to store on the version
Returns:
dict: collection of data to create a version
"""
# Imprint currently registered location
version_locations = [location for location in locations if
location is not None]
return {"schema": "pype:version-2.0",
"type": "version",
"parent": subset["_id"],
"name": version_number,
"locations": version_locations,
"data": data}
def create_version_data(self, context, instance):
"""Create the data collection for the version
Args:
context: the current context
instance: the current instance being published
Returns:
dict: the required information with instance.data as key
"""
families = []
current_families = instance.data.get("families", list())
instance_family = instance.data.get("family", None)
if instance_family is not None:
families.append(instance_family)
families += current_families
# try:
# source = instance.data['source']
# except KeyError:
# source = context.data["currentFile"]
#
# relative_path = os.path.relpath(source, api.registered_root())
# source = os.path.join("{root}", relative_path).replace("\\", "/")
source = "standalone"
version_data = {"families": families,
"time": context.data["time"],
"author": context.data["user"],
"source": source,
"comment": context.data.get("comment")}
# Include optional data if present in
optionals = ["startFrame", "endFrame", "step",
"handles", "colorspace", "fps", "outputDir"]
for key in optionals:
if key in instance.data:
version_data[key] = instance.data.get(key, None)
return version_data
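`register()` above leans on `clique.assemble` to split a flat file list into frame collections and loose files; a short sketch of that call (file names are illustrative):

import clique

files = [
    "beauty.0001.exr",
    "beauty.0002.exr",
    "beauty.0003.exr",
    "notes.txt",
]
collections, remainder = clique.assemble(files)
collection = collections[0]
print(collection.format("{head}"))     # "beauty."
print(collection.format("{padding}"))  # "%04d"
print(collection.format("{tail}"))     # ".exr"
print(remainder)                       # ["notes.txt"]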

238
pype/scripts/otio_burnin.py Normal file
View file

@ -0,0 +1,238 @@
import os
import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins
from pypeapp.lib import config
from pype import api as pype
# FFmpeg in PATH is required
log = pype.Logger().get_logger("BurninWrapper", "burninwrap")
class ModifiedBurnins(ffmpeg_burnins.Burnins):
TOP_CENTERED = ffmpeg_burnins.TOP_CENTERED
BOTTOM_CENTERED = ffmpeg_burnins.BOTTOM_CENTERED
TOP_LEFT = ffmpeg_burnins.TOP_LEFT
BOTTOM_LEFT = ffmpeg_burnins.BOTTOM_LEFT
TOP_RIGHT = ffmpeg_burnins.TOP_RIGHT
BOTTOM_RIGHT = ffmpeg_burnins.BOTTOM_RIGHT
options_init = {
'opacity': 1,
'x_offset': 5,
'y_offset': 5,
'bg_padding': 5,
'bg_opacity': 0.5,
'font_size': 42
}
def __init__(self, source, streams=None, options_init=None):
super().__init__(source, streams)
if options_init:
self.options_init.update(options_init)
def add_text(self, text, align, options=None):
"""
Adding static text to a filter.
:param str text: text to apply to the drawtext
:param enum align: alignment, must use provided enum flags
:param dict options: recommended to use TextOptions
"""
if not options:
options = ffmpeg_burnins.TextOptions(**self.options_init)
self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT)
def add_frame_numbers(self, align, options=None, start_frame=None):
"""
Convenience method to create the frame number expression.
:param enum align: alignment, must use provided enum flags
:param dict options: recommended to use FrameNumberOptions
"""
if not options:
options = ffmpeg_burnins.FrameNumberOptions(**self.options_init)
if start_frame:
options['frame_offset'] = start_frame
options['expression'] = r'%%{eif\:n+%d\:d}' % options['frame_offset']
text = str(int(self.end_frame + options['frame_offset']))
self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT)
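For clarity, the `eif` expression built above counts from FFmpeg's zero-based input frame index `n`; with `start_frame=1001` it expands like this:

# options['expression'] = r'%%{eif\:n+%d\:d}' % 1001
# after the drawtext template's own %-substitution this becomes
#   %{eif\:n+1001\:d}
# so frame n=0 is drawn as 1001, n=1 as 1002, and so on.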
def add_timecode(self, align, options=None, start_frame=None):
"""
Convenience method to create the timecode expression.
:param enum align: alignment, must use provided enum flags
:param dict options: recommended to use TimeCodeOptions
"""
if not options:
options = ffmpeg_burnins.TimeCodeOptions(**self.options_init)
if start_frame:
options['frame_offset'] = start_frame
timecode = ffmpeg_burnins._frames_to_timecode(
options['frame_offset'],
self.frame_rate
)
options = options.copy()
if not options.get('fps'):
options['fps'] = self.frame_rate
self._add_burnin(
timecode.replace(':', r'\:'),
align,
options,
ffmpeg_burnins.TIMECODE
)
def _add_burnin(self, text, align, options, draw):
"""
Generic method for building the filter flags.
:param str text: text to apply to the drawtext
:param enum align: alignment, must use provided enum flags
:param dict options:
"""
resolution = self.resolution
data = {
'text': options.get('expression') or text,
'color': options['font_color'],
'size': options['font_size']
}
data.update(options)
data.update(ffmpeg_burnins._drawtext(align, resolution, text, options))
if 'font' in data and ffmpeg_burnins._is_windows():
data['font'] = data['font'].replace(os.sep, r'\\' + os.sep)
data['font'] = data['font'].replace(':', r'\:')
self.filters['drawtext'].append(draw % data)
if options.get('bg_color') is not None:
box = ffmpeg_burnins.BOX % {
'border': options['bg_padding'],
'color': options['bg_color'],
'opacity': options['bg_opacity']
}
self.filters['drawtext'][-1] += ':%s' % box
def command(self, output=None, args=None, overwrite=False):
"""
Generate the entire FFMPEG command.
:param str output: output file
:param str args: additional FFMPEG arguments
:param bool overwrite: overwrite the output if it exists
:returns: completed command
:rtype: str
"""
output = output or ''
if overwrite:
output = '-y {}'.format(output)
filters = ''
if self.filter_string:
filters = '-vf "{}"'.format(self.filter_string)
return (ffmpeg_burnins.FFMPEG % {
'input': self.source,
'output': output,
'args': '%s ' % args if args else '',
'filters': filters
}).strip()
def example(input_path, output_path):
options_init = {
'opacity': 1,
'x_offset': 10,
'y_offset': 10,
'bg_padding': 10,
'bg_opacity': 0.5,
'font_size': 52
}
# First frame in burnin
start_frame = 2000
# Options init sets burnin look
burnin = ModifiedBurnins(input_path, options_init=options_init)
# Static text
burnin.add_text('My Text', ModifiedBurnins.TOP_CENTERED)
# Frame number
burnin.add_frame_numbers(ModifiedBurnins.TOP_RIGHT, start_frame=start_frame)
# Timecode
burnin.add_timecode(ModifiedBurnins.TOP_LEFT, start_frame=start_frame)
# Start render (overwrite output file if exist)
burnin.render(output_path, overwrite=True)
def example_with_presets(input_path, output_path, data):
presets = config.get_presets().get('tools', {}).get('burnins', {})
options_init = presets.get('options')
burnin = ModifiedBurnins(input_path, options_init=options_init)
start_frame = data.get("start_frame")
for align_text, preset in presets.get('burnins', {}).items():
align_map = {
    'TOP_LEFT': ModifiedBurnins.TOP_LEFT,
    'TOP_CENTERED': ModifiedBurnins.TOP_CENTERED,
    'TOP_RIGHT': ModifiedBurnins.TOP_RIGHT,
    'BOTTOM_LEFT': ModifiedBurnins.BOTTOM_LEFT,
    'BOTTOM_CENTERED': ModifiedBurnins.BOTTOM_CENTERED,
    'BOTTOM_RIGHT': ModifiedBurnins.BOTTOM_RIGHT,
}
align = align_map.get(align_text)
bi_func = preset.get('function')
if not bi_func:
log.error(
'Missing function for burnin! '
'Burnins are not created!'
)
return
if (
bi_func in ['frame_numbers', 'timecode'] and
not start_frame
):
log.error(
'start_frame is not set in entered data! '
'Burnins are not created!'
)
return
if bi_func == 'frame_numbers':
burnin.add_frame_numbers(align, start_frame=start_frame)
elif bi_func == 'timecode':
burnin.add_timecode(align, start_frame=start_frame)
elif bi_func == 'text':
if not preset.get('text'):
log.error('Text is not set for text function burnin!')
return
text = preset['text'].format(**data)
burnin.add_text(text, align)
else:
log.error(
'Unknown function for burnins {}'.format(bi_func)
)
return
burnin.render(output_path, overwrite=True)
'''
# TODO: implement image sequence
# Changes so OpenTimelineIo burnins is possible to render from image sequence.
#
# before input:
# # -start_number is number of first frame / -r is fps
# -start_number 375 -r 25
# before output:
# # -c: set output codec (h264, ...)
# -c:v libx264
#
#
# ffmpeg -loglevel panic -i image_sequence -vf "drawtext=text='Test':x=w/2-tw/2:y=0:fontcolor=white@1.0:fontsize=42:fontfile='C\:\\\WINDOWS\\\Fonts\\\arial.ttf':box=1:boxborderw=5:boxcolor=black@0.5,drawtext=text='%{eif\:n+1001\:d}':x=0:y=0:fontcolor=white@1.0:fontsize=42:fontfile='C\:\\\WINDOWS\\\Fonts\\\arial.ttf':box=1:boxborderw=5:boxcolor=black@0.5" C:\Users\jakub.trllo\Desktop\Tests\files\mov\render\test_output.mov'
# ffmpeg -loglevel panic -start_number 375 -r 25 -i "C:\Users\jakub.trllo\Desktop\Tests\files\exr\int_c022_lighting_v001_main_AO.%04d.exr" -vf "drawtext=text='Test':x=w/2-tw/2:y=0:fontcolor=white@1.0:fontsize=42:fontfile='C\:\\\WINDOWS\\\Fonts\\\arial.ttf':box=1:boxborderw=5:boxcolor=black@0.5,drawtext=text='%{eif\:n+1001\:d}':x=0:y=0:fontcolor=white@1.0:fontsize=42:fontfile='C\:\\\WINDOWS\\\Fonts\\\arial.ttf':box=1:boxborderw=5:boxcolor=black@0.5,colormatrix=bt601:bt709" -c:v libx264 "output_path.mov"
'''
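If `render()` is not wanted, the string returned by `command()` can be run directly; a sketch assuming `ffmpeg` is on PATH and `burnin` was built as in `example()` above (POSIX shell quoting):

import shlex
import subprocess

cmd = burnin.command(output="/tmp/with_burnins.mov", overwrite=True)
subprocess.check_call(shlex.split(cmd))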

View file

@ -16,21 +16,21 @@ import pyblish.api
# Registers Global pyblish plugins
# pype.install()
pype.install()
# Registers Standalone pyblish plugins
PUBLISH_PATH = os.path.sep.join(
[pype.PLUGINS_DIR, 'standalonepublish', 'publish']
)
pyblish.api.register_plugin_path(PUBLISH_PATH)
# # Registers Standalone pyblish plugins
# PUBLISH_PATH = os.path.sep.join(
# [pype.PLUGINS_DIR, 'ftrack', 'publish']
# [pype.PLUGINS_DIR, 'standalonepublish', 'publish']
# )
# pyblish.api.register_plugin_path(PUBLISH_PATH)
# Registers Standalone pyblish plugins
PUBLISH_PATH = os.path.sep.join(
[pype.PLUGINS_DIR, 'ftrack', 'publish']
)
pyblish.api.register_plugin_path(PUBLISH_PATH)
def set_context(project, asset, app):
def set_context(project, asset, task, app):
''' Sets context for pyblish (must be done before pyblish is launched)
:param project: Name of `Project` where instance should be published
:type project: str
@ -41,6 +41,11 @@ def set_context(project, asset, app):
io.Session["AVALON_PROJECT"] = project
os.environ["AVALON_ASSET"] = asset
io.Session["AVALON_ASSET"] = asset
if not task:
task = ''
os.environ["AVALON_TASK"] = task
io.Session["AVALON_TASK"] = task
io.install()

View file

@ -117,7 +117,7 @@ class ComponentsWidget(QtWidgets.QWidget):
try:
data = self.parent_widget.collect_data()
publish.set_context(
data['project'], data['asset'], 'standalonepublish'
data['project'], data['asset'], data['task'], 'standalonepublish'
)
result = publish.publish(data)
# Clear widgets from components list if publishing was successful

View file

@ -241,14 +241,18 @@ class DropDataFrame(QtWidgets.QFrame):
def get_file_data(self, data):
filepath = data['files'][0]
ext = data['ext']
ext = data['ext'].lower()
output = {}
probe_data = self.load_data_with_probe(filepath)
file_info = None
if 'file_info' in data:
file_info = data['file_info']
if (
ext in self.presets['extensions']['image_file'] or
ext in self.presets['extensions']['video_file']
):
probe_data = self.load_data_with_probe(filepath)
if 'frameRate' not in data:
# default value
frameRate = 25
@ -268,11 +272,8 @@ class DropDataFrame(QtWidgets.QFrame):
output['startFrame'] = startFrame
output['endFrame'] = endFrame
file_info = None
if 'file_info' in data:
file_info = data['file_info']
elif ext in ['.mov']:
file_info = probe_data.get('codec_name')
if (ext == '.mov') and (not file_info):
file_info = probe_data.get('codec_name')
output['file_info'] = file_info
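`load_data_with_probe` is presumably an ffprobe wrapper; a hedged sketch of fetching `codec_name` the same way (assumes `ffprobe` is on PATH; the real widget's helper may differ):

import json
import subprocess


def probe_codec_name(filepath):
    """Return the codec name of the first video stream via ffprobe."""
    out = subprocess.check_output([
        "ffprobe", "-v", "error",
        "-select_streams", "v:0",
        "-show_entries", "stream=codec_name",
        "-of", "json",
        filepath,
    ])
    return json.loads(out)["streams"][0]["codec_name"]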