Merge remote-tracking branch 'origin/develop' into feature/PYPE-316-muster-render-support
@@ -39,6 +39,7 @@ from .templates import (
)
from .lib import (
    get_handle_irregular,
    get_project_data,
    get_asset_data,
    modified_environ,
@@ -67,6 +68,7 @@ __all__ = [
    "reset_data_from_templates",

    # get contextual data
    "get_handle_irregular",
    "get_project_data",
    "get_asset_data",
    "get_project_name",
@@ -1,7 +1,2 @@
from .lib import *
from .ftrack_server import *
-from .ftrack_module import FtrackModule
-
-
-def tray_init(tray_widget, main_widget):
-    return FtrackModule(main_widget, tray_widget)
@@ -106,7 +106,7 @@ class SyncToAvalon(BaseAction):
        for entity in self.importable:
            ftracklib.avalon_check_name(entity)
            if entity['name'] in all_names:
-                duplicates.append("'{}'".format(e['name']))
+                duplicates.append("'{}'".format(entity['name']))
            else:
                all_names.append(entity['name'])
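The fix above replaces an undefined name `e` with the loop variable `entity`; the loop itself is the usual seen-list duplicate check. A minimal standalone sketch of the same pattern (entity dicts assumed):

    all_names = []
    duplicates = []
    for entity in [{'name': 'sh010'}, {'name': 'sh020'}, {'name': 'sh010'}]:
        if entity['name'] in all_names:
            duplicates.append("'{}'".format(entity['name']))
        else:
            all_names.append(entity['name'])
    print(duplicates)  # -> ["'sh010'"]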
@@ -1,8 +1,7 @@
from .ftrack_server import FtrackServer
-from . import event_server, event_server_cli
+from . import event_server_cli

__all__ = [
-    'event_server',
    'event_server_cli',
    'FtrackServer'
]
@@ -1,41 +0,0 @@
import sys
from pype.ftrack import credentials, login_dialog as login_dialog
from pype.ftrack.ftrack_server import FtrackServer
from Qt import QtWidgets
from pype import api

log = api.Logger().get_logger(__name__, "ftrack-event-server")


class EventServer:
    def __init__(self):
        self.login_widget = login_dialog.Login_Dialog_ui(
            parent=self, is_event=True
        )
        self.event_server = FtrackServer('event')

        cred = credentials._get_credentials(True)

        if 'username' in cred and 'apiKey' in cred:
            self.login_widget.user_input.setText(cred['username'])
            self.login_widget.api_input.setText(cred['apiKey'])

        self.login_widget.setError("Credentials should be for API User")

        self.login_widget.show()

    def loginChange(self):
        log.info("Logged successfully")

        self.login_widget.close()
        self.event_server.run_server()


def main():
    app = QtWidgets.QApplication(sys.argv)
    event = EventServer()
    sys.exit(app.exec_())


if (__name__ == ('__main__')):
    main()
@@ -1,114 +1,241 @@
import os
import sys
import argparse
import requests
from pype.vendor import ftrack_api
from pype.ftrack import credentials
from pype.ftrack.ftrack_server import FtrackServer
from pypeapp import Logger

-log = Logger().get_logger(__name__, "ftrack-event-server-cli")
+log = Logger().get_logger('Ftrack event server', "ftrack-event-server-cli")

-possible_yes = ['y', 'yes']
-possible_no = ['n', 'no']
-possible_third = ['a', 'auto']
-possible_exit = ['exit']
-
-
-def ask_yes_no(third=False):
-    msg = "Y/N:"
-    if third:
-        msg = "Y/N/AUTO:"
-    log.info(msg)
-    response = input().lower()
-    if response in possible_exit:
-        sys.exit()
-    elif response in possible_yes:
-        return True
-    elif response in possible_no:
-        return False
-    else:
-        all_entries = possible_no
-        all_entries.extend(possible_yes)
-        if third is True:
-            if response in possible_third:
-                return 'auto'
-            else:
-                all_entries.extend(possible_third)
-        all_entries.extend(possible_exit)
-        all_entries = ', '.join(all_entries)
-        log.info(
-            'Invalid input. Possible entries: [{}]. Try it again:'.format(
-                all_entries
-            )
-        )
-        return ask_yes_no()
-
-
-def cli_login():
-    enter_cred = True
-    cred_data = credentials._get_credentials(True)
-
-    user = cred_data.get('username', None)
-    key = cred_data.get('apiKey', None)
-    auto = cred_data.get('auto_connect', False)
-    if user is None or key is None:
-        log.info(
-            'Credentials are not set. Do you want to enter them now? (Y/N)'
-        )
-        if ask_yes_no() is False:
-            log.info("Exiting...")
-            return
-    elif credentials._check_credentials(user, key):
-        if auto is False:
-            log.info((
-                'Do you want to log with username {}'
-                ' enter "auto" if want to autoconnect next time (Y/N/AUTO)'
-            ).format(
-                user
-            ))
-            result = ask_yes_no(True)
-            if result is True:
-                enter_cred = False
-            elif result == 'auto':
-                credentials._save_credentials(user, key, True, True)
-                enter_cred = False
-        else:
-            enter_cred = False
-    else:
-        log.info(
-            'Stored credentials are not valid.'
-            ' Do you want enter them now?(Y/N)'
-        )
-        if ask_yes_no() is False:
-            log.info("Exiting...")
-            return
-
-    while enter_cred:
-        log.info('Please enter Ftrack API User:')
-        user = input()
-        log.info('And now enter Ftrack API Key:')
-        key = input()
-        if credentials._check_credentials(user, key):
-            log.info(
-                'Credentials are valid.'
-                ' Do you want to auto-connect next time?(Y/N)'
-            )
-            credentials._save_credentials(user, key, True, ask_yes_no())
-            enter_cred = False
-            break
-        else:
-            log.info(
-                'Entered credentials are not valid.'
-                ' Do you want to try it again?(Y/N)'
-            )
-            if ask_yes_no() is False:
-                log.info('Exiting...')
-                return
-
-
-def main():
-    cli_login()
+def check_url(url):
+    if not url:
+        log.error('Ftrack URL is not set!')
+        return None
+
+    url = url.strip('/ ')
+
+    if 'http' not in url:
+        if url.endswith('ftrackapp.com'):
+            url = 'https://' + url
+        else:
+            url = 'https://{0}.ftrackapp.com'.format(url)
+    try:
+        result = requests.get(url, allow_redirects=False)
+    except requests.exceptions.RequestException:
+        log.error('Entered Ftrack URL is not accessible!')
+        return None
+
+    if (result.status_code != 200 or 'FTRACK_VERSION' not in result.headers):
+        log.error('Entered Ftrack URL is not accessible!')
+        return None
+
+    log.debug('Ftrack server {} is accessible.'.format(url))
+
+    return url
+
+
+def validate_credentials(url, user, api):
+    first_validation = True
+    if not user:
+        log.error('Ftrack Username is not set! Exiting.')
+        first_validation = False
+    if not api:
+        log.error('Ftrack API key is not set! Exiting.')
+        first_validation = False
+    if not first_validation:
+        return False
+
+    try:
+        session = ftrack_api.Session(
+            server_url=url,
+            api_user=user,
+            api_key=api
+        )
+        session.close()
+    except Exception as e:
+        log.error(
+            'Can\'t log into Ftrack with used credentials:'
+            ' Ftrack server: "{}" // Username: {} // API key: {}'.format(
+                url, user, api
+        ))
+        return False
+
+    log.debug('Credentials Username: "{}", API key: "{}" are valid.'.format(
+        user, api
+    ))
+    return True
+
+
+def process_event_paths(event_paths):
+    log.debug('Processing event paths: {}.'.format(str(event_paths)))
+    return_paths = []
+    not_found = []
+    if not event_paths:
+        return return_paths, not_found
+
+    if isinstance(event_paths, str):
+        event_paths = event_paths.split(os.pathsep)
+
+    for path in event_paths:
+        if os.path.exists(path):
+            return_paths.append(path)
+        else:
+            not_found.append(path)
+
+    return os.pathsep.join(return_paths), not_found
+
+
+def run_event_server(ftrack_url, username, api_key, event_paths):
+    os.environ['FTRACK_SERVER'] = ftrack_url
+    os.environ['FTRACK_API_USER'] = username
+    os.environ['FTRACK_API_KEY'] = api_key
+    os.environ['FTRACK_EVENTS_PATH'] = event_paths
+
+    server = FtrackServer('event')
+    server.run_server()
+
+
+def main(argv):
+    '''
+    There are 4 values necessary for the event server:
+    1.) Ftrack url - "studio.ftrackapp.com"
+    2.) Username - "my.username"
+    3.) API key - "apikey-long11223344-6665588-5565"
+    4.) Path/s to events - "X:/path/to/folder/with/events"
+
+    All these values can be entered with arguments or environment variables.
+    - arguments:
+        "-ftrackurl {url}"
+        "-ftrackuser {username}"
+        "-ftrackapikey {api key}"
+        "-ftrackeventpaths {path to events}"
+    - environment variables:
+        FTRACK_SERVER
+        FTRACK_API_USER
+        FTRACK_API_KEY
+        FTRACK_EVENTS_PATH
+
+    Credentials (Username & API key):
+    - Credentials can be stored for auto load on next start
+    - To *Store/Update* these values add argument "-storecred"
+        - They will be stored to the appsdir file when login is successful
+    - To *Update/Override* values with environment variables you also need to:
+        - *not* enter an argument for that value
+        - add argument "-noloadcred" (currently stored credentials won't be loaded)
+
+    Order of getting values:
+    1.) Arguments are always used when entered.
+        - values entered through args have the highest priority (in each case)
+    2.) Credentials are loaded from the appsdir file.
+        - skipped when credentials were entered through args or credentials
+            are not stored yet
+        - can be skipped with the "-noloadcred" argument
+    3.) Environment variables are the last source of values.
+        - will try to get values not yet set from the environment
+
+    Best practice:
+    - set environment variables FTRACK_SERVER & FTRACK_EVENTS_PATH
+    - launch event_server_cli with args:
+    ~/event_server_cli.py -ftrackuser "{username}" -ftrackapikey "{API key}" -storecred
+    - next time launch event_server_cli.py only with set environment variables
+        FTRACK_SERVER & FTRACK_EVENTS_PATH
+    '''
+    parser = argparse.ArgumentParser(description='Ftrack event server')
+    parser.add_argument(
+        "-ftrackurl", type=str, metavar='FTRACKURL',
+        help=(
+            "URL of the ftrack server where events should be handled"
+            " (default from environment: $FTRACK_SERVER)"
+        )
+    )
+    parser.add_argument(
+        "-ftrackuser", type=str,
+        help=(
+            "Username should be the username of the user in ftrack"
+            " to record operations against."
+            " (default from environment: $FTRACK_API_USER)"
+        )
+    )
+    parser.add_argument(
+        "-ftrackapikey", type=str,
+        help=(
+            "Should be the API key to use for authentication"
+            " (default from environment: $FTRACK_API_KEY)"
+        )
+    )
+    parser.add_argument(
+        "-ftrackeventpaths", nargs='+',
+        help=(
+            "List of paths where events are stored."
+            " (default from environment: $FTRACK_EVENTS_PATH)"
+        )
+    )
+    parser.add_argument(
+        '-storecred',
+        help=(
+            "Entered credentials will also be stored"
+            " to the apps dir for future usage"
+        ),
+        action="store_true"
+    )
+    parser.add_argument(
+        '-noloadcred',
+        help="Don't load credentials from the apps dir",
+        action="store_true"
+    )
+
+    ftrack_url = os.environ.get('FTRACK_SERVER')
+    username = os.environ.get('FTRACK_API_USER')
+    api_key = os.environ.get('FTRACK_API_KEY')
+    event_paths = os.environ.get('FTRACK_EVENTS_PATH')
+
+    kwargs, args = parser.parse_known_args(argv)
+
+    if kwargs.ftrackurl:
+        ftrack_url = kwargs.ftrackurl
+
+    if kwargs.ftrackeventpaths:
+        event_paths = kwargs.ftrackeventpaths
+
+    if not kwargs.noloadcred:
+        cred = credentials._get_credentials(True)
+        username = cred.get('username')
+        api_key = cred.get('apiKey')
+
+    if kwargs.ftrackuser:
+        username = kwargs.ftrackuser
+
+    if kwargs.ftrackapikey:
+        api_key = kwargs.ftrackapikey
+
+    # Check url regex and accessibility
+    ftrack_url = check_url(ftrack_url)
+    if not ftrack_url:
+        return 1
+
+    # Validate entered credentials
+    if not validate_credentials(ftrack_url, username, api_key):
+        return 1
+
+    # Process events path
+    event_paths, not_found = process_event_paths(event_paths)
+    if not_found:
+        log.warning(
+            'These paths were not found: {}'.format(str(not_found))
+        )
+    if not event_paths:
+        if not_found:
+            log.error('None of the entered paths is valid or accessible.')
+        else:
+            log.error('Paths to events are not set. Exiting.')
+        return 1
+
+    if kwargs.storecred:
+        credentials._save_credentials(username, api_key, True)
+
+    run_event_server(ftrack_url, username, api_key, event_paths)


if (__name__ == ('__main__')):
-    main()
+    sys.exit(main(sys.argv))
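For illustration, the URL normalization that the new check_url performs before the network request (studio name assumed; only the string handling is shown, no request is made):

    url = 'mystudio'
    if 'http' not in url:
        if url.endswith('ftrackapp.com'):
            url = 'https://' + url
        else:
            url = 'https://{0}.ftrackapp.com'.format(url)
    print(url)  # -> 'https://mystudio.ftrackapp.com'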
@@ -1,4 +1,5 @@
from .avalon_sync import *
from .credentials import *
from .ftrack_app_handler import *
from .ftrack_event_handler import *
from .ftrack_action_handler import *
@@ -64,6 +64,10 @@ def _clear_credentials(event=False):


def _set_env(username, apiKey):
    if not username:
        username = ''
    if not apiKey:
        apiKey = ''
    os.environ['FTRACK_API_USER'] = username
    os.environ['FTRACK_API_KEY'] = apiKey
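A minimal check of the added _set_env helper (username assumed, API key deliberately missing), restated here as a self-contained sketch:

    import os

    def _set_env(username, apiKey):
        if not username:
            username = ''
        if not apiKey:
            apiKey = ''
        os.environ['FTRACK_API_USER'] = username
        os.environ['FTRACK_API_KEY'] = apiKey

    _set_env('john.doe', None)
    print(repr(os.environ['FTRACK_API_KEY']))  # -> '' (never None)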
@@ -1,6 +1,6 @@
import functools
import time
-from pype import api as pype
+from pypeapp import Logger
from pype.vendor import ftrack_api
from pype.vendor.ftrack_api import session as fa_session
@@ -31,7 +31,7 @@ class BaseHandler(object):
    def __init__(self, session):
        '''Expects a ftrack_api.Session instance'''
        self._session = session
-        self.log = pype.Logger().get_logger(self.__class__.__name__)
+        self.log = Logger().get_logger(self.__class__.__name__)

        # Using decorator
        self.register = self.register_decorator(self.register)
pype/ftrack/tray/__init__.py (new file, 5 lines)
@@ -0,0 +1,5 @@
from .ftrack_module import FtrackModule


def tray_init(tray_widget, main_widget):
    return FtrackModule(main_widget, tray_widget)
@@ -6,7 +6,8 @@ from Qt import QtCore, QtGui, QtWidgets

from pype.vendor import ftrack_api
from pypeapp import style
-from pype.ftrack import FtrackServer, credentials, login_dialog as login_dialog
+from pype.ftrack import FtrackServer, credentials
+from . import login_dialog

from pype import api as pype
@@ -16,7 +17,6 @@ log = pype.Logger().get_logger("FtrackModule", "ftrack")

class FtrackModule:
    def __init__(self, main_parent=None, parent=None):

        self.parent = parent
        self.widget_login = login_dialog.Login_Dialog_ui(self)
        self.action_server = FtrackServer('action')
@@ -1,8 +1,9 @@
import os
import requests
-from Qt import QtCore, QtGui, QtWidgets
from pypeapp import style
-from . import credentials, login_tools
+from pype.ftrack import credentials
+from . import login_tools
+from Qt import QtCore, QtGui, QtWidgets


class Login_Dialog_ui(QtWidgets.QWidget):
@@ -15,6 +15,13 @@ import avalon
log = logging.getLogger(__name__)


def get_handle_irregular(asset):
    data = asset["data"]
    handle_start = data.get("handle_start", 0)
    handle_end = data.get("handle_end", 0)
    return (handle_start, handle_end)


def add_tool_to_environment(tools):
    """
    Adds dynamic environment variables to the os environment.
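A usage sketch of the new get_handle_irregular helper (the asset document shape is assumed from the data keys queried above):

    asset = {"data": {"handle_start": 15, "handle_end": 10}}  # assumed document

    def get_handle_irregular(asset):
        data = asset["data"]
        handle_start = data.get("handle_start", 0)
        handle_end = data.get("handle_end", 0)
        return (handle_start, handle_end)

    print(get_handle_irregular(asset))         # -> (15, 10)
    print(get_handle_irregular({"data": {}}))  # -> (0, 0) fallback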
@@ -114,16 +114,7 @@ def on_init(_):
    )
    safe_deferred(override_component_mask_commands)

-    launch_workfiles = True
-    try:
-        presets = config.get_presets()
-        launch_workfiles = presets['tools']['workfiles']['start_on_app_launch']
-    except KeyError:
-        log.info(
-            "Workfiles app start on launch configuration was not found."
-            " Defaulting to False."
-        )
-        launch_workfiles = False
+    launch_workfiles = os.environ.get("WORKFILES_STARTUP")

    if launch_workfiles:
        safe_deferred(launch_workfiles_app)
pype/nuke/lib.py (116 lines changed)
@@ -1,6 +1,5 @@
import os
import sys
-import os
from collections import OrderedDict
from pprint import pprint
from avalon import api, io, lib
@@ -196,14 +195,19 @@ def create_write_node(name, data):
    except Exception as e:
        log.error("problem with resolving anatomy template: {}".format(e))

+    fpath = str(anatomy_filled["render"]["path"]).replace("\\", "/")
+
+    # create directory
+    os.makedirs(os.path.dirname(fpath), 0o766)
+
    _data = OrderedDict({
-        "file": str(anatomy_filled["render"]["path"]).replace("\\", "/")
+        "file": fpath
    })

    # adding dataflow template
    {_data.update({k: v})
     for k, v in nuke_dataflow_writes.items()
-     if k not in ["id", "previous"]}
+     if k not in ["_id", "_previous"]}

    # adding dataflow template
    {_data.update({k: v})
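The set comprehension above is used purely for its _data.update side effect; an equivalent, more explicit sketch of the same filtering (nuke_dataflow_writes contents assumed):

    nuke_dataflow_writes = {"_id": "x", "_previous": "y", "file_type": "exr"}  # assumed
    _data = {}
    for k, v in nuke_dataflow_writes.items():
        if k not in ["_id", "_previous"]:
            _data[k] = v
    print(_data)  # -> {'file_type': 'exr'}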
@@ -335,6 +339,58 @@ def set_colorspace():
                "contact your supervisor!")


def reset_frame_range_handles():
    """Set frame range to current asset"""

    fps = float(api.Session.get("AVALON_FPS", 25))
    nuke.root()["fps"].setValue(fps)
    name = api.Session["AVALON_ASSET"]
    asset = io.find_one({"name": name, "type": "asset"})

    if "data" not in asset:
        msg = "Asset {} doesn't have any 'data' set".format(name)
        log.warning(msg)
        nuke.message(msg)
        return
    data = asset["data"]

    missing_cols = []
    check_cols = ["fstart", "fend", "handle_start", "handle_end"]

    for col in check_cols:
        if col not in data:
            missing_cols.append(col)

    if len(missing_cols) > 0:
        missing = ", ".join(missing_cols)
        msg = "'{}' are not set for asset '{}'!".format(missing, name)
        log.warning(msg)
        nuke.message(msg)
        return

    # get handles values
    handles = avalon.nuke.get_handles(asset)
    handle_start, handle_end = pype.get_handle_irregular(asset)

    log.info("__ handles: `{}`".format(handles))
    log.info("__ handle_start: `{}`".format(handle_start))
    log.info("__ handle_end: `{}`".format(handle_end))

    edit_in = int(asset["data"]["fstart"]) - handles - handle_start
    edit_out = int(asset["data"]["fend"]) + handles + handle_end

    nuke.root()["first_frame"].setValue(edit_in)
    nuke.root()["last_frame"].setValue(edit_out)

    # setting active viewers
    vv = nuke.activeViewer().node()
    vv['frame_range_lock'].setValue(True)
    vv['frame_range'].setValue('{0}-{1}'.format(
        int(asset["data"]["fstart"]),
        int(asset["data"]["fend"]))
    )


def get_avalon_knob_data(node):
    import toml
    try:
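A quick numeric check of the edit-range arithmetic in reset_frame_range_handles (asset values assumed):

    fstart, fend = 1001, 1100           # assumed asset data
    handles = 10                        # symmetric handles
    handle_start, handle_end = 15, 10   # irregular handles

    edit_in = fstart - handles - handle_start   # -> 976
    edit_out = fend + handles + handle_end      # -> 1120
    print(edit_in, edit_out)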
@@ -451,33 +507,33 @@ def make_format(**args):
# TODO: below functions are WIP and need to be checked where they are used
# ------------------------------------


-def update_frame_range(start, end, root=None):
-    """Set Nuke script start and end frame range
-
-    Args:
-        start (float, int): start frame
-        end (float, int): end frame
-        root (object, Optional): root object from nuke's script
-
-    Returns:
-        None
-
-    """
-
-    knobs = {
-        "first_frame": start,
-        "last_frame": end
-    }
-
-    with avalon.nuke.viewer_update_and_undo_stop():
-        for key, value in knobs.items():
-            if root:
-                root[key].setValue(value)
-            else:
-                nuke.root()[key].setValue(value)
-
+#
+#
+# def update_frame_range(start, end, root=None):
+#     """Set Nuke script start and end frame range
+#
+#     Args:
+#         start (float, int): start frame
+#         end (float, int): end frame
+#         root (object, Optional): root object from nuke's script
+#
+#     Returns:
+#         None
+#
+#     """
+#
+#     knobs = {
+#         "first_frame": start,
+#         "last_frame": end
+#     }
+#
+#     with avalon.nuke.viewer_update_and_undo_stop():
+#         for key, value in knobs.items():
+#             if root:
+#                 root[key].setValue(value)
+#             else:
+#                 nuke.root()[key].setValue(value)
+#
+# #
# def get_additional_data(container):
#     """Get Nuke's related data for the container
#
@@ -17,5 +17,17 @@ def install():
    menu.removeItem(rm_item[1].name())
    menu.addCommand(rm_item[1].name(), lib.reset_resolution, index=rm_item[0])

    # replace reset frame range from avalon core with pype's
    name = "Reset Frame Range"
    rm_item = [(i, item)
               for i, item in enumerate(menu.items())
               if name in item.name()][0]
    menu.removeItem(rm_item[1].name())
    menu.addCommand(
        rm_item[1].name(),
        lib.reset_frame_range_handles,
        index=rm_item[0])

    # add colorspace menu item
-    menu.addCommand("Set colorspace...", lib.set_colorspace, index=rm_item[0]+1)
+    menu.addCommand("Set colorspace...", lib.set_colorspace,
+                    index=rm_item[0] + 1)
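For context, the rm_item lookup used above is a plain enumerate-and-match; a stripped-down sketch with mocked menu item names:

    items = ["Reset Resolution", "Reset Frame Range", "Workfiles..."]  # mocked
    name = "Reset Frame Range"
    rm_item = [(i, item) for i, item in enumerate(items) if name in item][0]
    print(rm_item)  # -> (1, 'Reset Frame Range'): index to re-insert at, item to replace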
@@ -5,17 +5,16 @@ import sys
# Pyblish libraries
import pyblish.api

-# Host libraries
-import hiero
-
-from pypeapp import Logger
-log = Logger().get_logger(__name__, "nukestudio")
-
import avalon.api as avalon
import pype.api as pype

from avalon.vendor.Qt import (QtWidgets, QtGui)

+# Host libraries
+import hiero
+
+from pypeapp import Logger
+log = Logger().get_logger(__name__, "nukestudio")
+
cached_process = None
@@ -72,8 +71,11 @@ def reload_config():
    import importlib

    for module in (
        "avalon",
        "avalon.lib",
        "avalon.pipeline",
        "pyblish",
        "pyblish_lite",
        "pypeapp",
        "{}.api".format(AVALON_CONFIG),
        "{}.templates".format(AVALON_CONFIG),
@@ -95,26 +95,26 @@ def install():
            'icon': QIcon('icons:Position.png')
        },
        "separator",
-        {
-            'action': QAction('Create...', None),
-            'function': creator.show,
-            'icon': QIcon('icons:ColorAdd.png')
-        },
-        {
-            'action': QAction('Load...', None),
-            'function': cbloader.show,
-            'icon': QIcon('icons:CopyRectangle.png')
-        },
+        # {
+        #     'action': QAction('Create...', None),
+        #     'function': creator.show,
+        #     'icon': QIcon('icons:ColorAdd.png')
+        # },
+        # {
+        #     'action': QAction('Load...', None),
+        #     'function': cbloader.show,
+        #     'icon': QIcon('icons:CopyRectangle.png')
+        # },
        {
            'action': QAction('Publish...', None),
            'function': publish.show,
            'icon': QIcon('icons:Output.png')
        },
-        {
-            'action': QAction('Manage...', None),
-            'function': cbsceneinventory.show,
-            'icon': QIcon('icons:ModifyMetaData.png')
-        },
+        # {
+        #     'action': QAction('Manage...', None),
+        #     'function': cbsceneinventory.show,
+        #     'icon': QIcon('icons:ModifyMetaData.png')
+        # },
        {
            'action': QAction('Library...', None),
            'function': libraryloader.show,
@@ -1,6 +1,6 @@
-from hiero.core import *
-from hiero.ui import *
-import ft_utils
+import hiero.core
+import hiero.ui

import re
import os
@@ -11,31 +11,30 @@ def create_nk_script_clips(script_lst, seq=None):
    [{
        'path': 'P:/Jakub_testy_pipeline/test_v01.nk',
        'name': 'test',
-        'timeline_frame_in': 10,
        'handles': 10,
-        'source_start': 0,
-        'source_end': 54,
+        'handleStart': 15,  # added asymmetrically to handles
+        'handleEnd': 10,  # added asymmetrically to handles
+        'timelineIn': 16,
+        'startFrame': 991,
+        'endFrame': 1023,
        'task': 'Comp-tracking',
        'work_dir': 'VFX_PR',
        'shot': '00010'
    }]
    '''
-    env = ft_utils.Env()
-    proj = projects()[-1]
-
+    proj = hiero.core.projects()[-1]
    root = proj.clipsBin()

    if not seq:
-        seq = Sequence('NewSequences')
-        root.addItem(BinItem(seq))
+        seq = hiero.core.Sequence('NewSequences')
+        root.addItem(hiero.core.BinItem(seq))
    # todo will need to define this better
    # track = seq[1]  # lazy example to get a destination# track
    clips_lst = []
    for nk in script_lst:
-        task_short = env.task_codes[nk['task']]
-        script_file = task_short
        task_path = '/'.join([nk['work_dir'], nk['shot'], nk['task']])
        bin = create_bin_in_project(task_path, proj)
-        task_path += script_file

        if nk['task'] not in seq.videoTracks():
            track = hiero.core.VideoTrack(nk['task'])
@@ -44,33 +43,63 @@ def create_nk_script_clips(script_lst, seq=None):
            track = seq.tracks(nk['task'])

        # create clip media
-        print nk['path']
-        media = MediaSource(nk['path'])
-        print media
-        source = Clip(media)
-        print source
+        print("__ path: `{}`".format(nk['path']))
+
+        media = hiero.core.MediaSource(nk['path'])
+        media_in = int(media.startTime() or 0)
+        media_duration = int(media.duration() or 0)
+
+        handle_start = nk.get("handleStart") or nk['handles']
+        handle_end = nk.get("handleEnd") or nk['handles']
+
+        if media_in:
+            source_in = media_in + handle_start
+        else:
+            source_in = nk['startFrame'] + handle_start
+
+        if media_duration:
+            source_out = (media_in + media_duration - 1) - handle_end
+        else:
+            source_out = nk['endFrame'] - handle_end
+
+        print("__ media: `{}`".format(media))
+        print("__ media_in: `{}`".format(media_in))
+        print("__ media_duration : `{}`".format(media_duration))
+        print("__ source_in: `{}`".format(source_in))
+        print("__ source_out : `{}`".format(source_out))
+
+        source = hiero.core.Clip(media)
+        print("__ source : `{}`".format(source))
+        print("__ source.sourceIn(): `{}`".format(source.sourceIn()))

        name = os.path.basename(os.path.splitext(nk['path'])[0])
-        split_name = split_by_client_version(name, env)[0] or name
-        print split_name
-        # print source
+        split_name = split_by_client_version(name)[0] or name
+
+        print("__ split_name: `{}`".format(split_name))

        # add to bin as clip item
        items_in_bin = [b.name() for b in bin.items()]
        if split_name not in items_in_bin:
-            binItem = BinItem(source)
+            binItem = hiero.core.BinItem(source)
            bin.addItem(binItem)
-            print bin.items()
+
+            print("__ bin.items(): `{}`".format(bin.items()))

        new_source = [
            item for item in bin.items() if split_name in item.name()
        ][0].items()[0].item()
-        print new_source
+
+        print("__ new_source: `{}`".format(new_source))

        # add to track as clip item
-        trackItem = TrackItem(split_name, TrackItem.kVideo)
+        trackItem = hiero.core.TrackItem(split_name, hiero.core.TrackItem.kVideo)
        trackItem.setSource(new_source)
-        trackItem.setSourceIn(nk['source_start'] + nk['handles'])
-        trackItem.setSourceOut(nk['source_end'] - nk['handles'])
-        trackItem.setTimelineIn(nk['source_start'] + nk['timeline_frame_in'])
-        trackItem.setTimelineOut(
-            (nk['source_end'] - (nk['handles'] * 2)) + nk['timeline_frame_in'])
+        trackItem.setSourceIn(source_in)
+        trackItem.setSourceOut(source_out)
+        trackItem.setTimelineIn(nk['timelineIn'])
+        trackItem.setTimelineOut(nk['timelineIn'] + (source_out - source_in))
        track.addTrackItem(trackItem)
        clips_lst.append(trackItem)
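A numeric walk-through of the new source/timeline range math (media values assumed from the docstring example):

    media_in, media_duration = 991, 33   # assumed clip metadata
    handle_start, handle_end = 15, 10
    timeline_in = 16

    source_in = media_in + handle_start                        # -> 1006
    source_out = (media_in + media_duration - 1) - handle_end  # -> 1013
    timeline_out = timeline_in + (source_out - source_in)      # -> 23
    print(source_in, source_out, timeline_out)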
@@ -86,7 +115,7 @@ def create_bin_in_project(bin_name='', project=''):

    if not project:
        # get the last loaded project
-        project = projects()[0]
+        project = hiero.core.projects()[-1]
    if not bin_name:
        return None
    if '/' in bin_name:

@@ -103,7 +132,7 @@ def create_bin_in_project(bin_name='', project=''):
            bin = [bin for bin in clipsBin.bins() if b in bin.name()][0]
            done_bin_lst.append(bin)
        else:
-            create_bin = Bin(b)
+            create_bin = hiero.core.Bin(b)
            clipsBin.addItem(create_bin)
            done_bin_lst.append(create_bin)

@@ -115,7 +144,7 @@ def create_bin_in_project(bin_name='', project=''):
                ][0]
                done_bin_lst.append(bin)
            else:
-                create_bin = hiero.core.Bin(b)
+                create_bin = hiero.core.Bin(b)
                done_bin_lst[i - 1].addItem(create_bin)
                done_bin_lst.append(create_bin)
@@ -127,22 +156,33 @@ def create_bin_in_project(bin_name='', project=''):
            ][0]
            done_bin_lst.append(bin)
        else:
-            create_bin = Bin(b)
+            create_bin = hiero.core.Bin(b)
            done_bin_lst[i - 1].addItem(create_bin)
            done_bin_lst.append(create_bin)
    # print [bin.name() for bin in clipsBin.bins()]
    return done_bin_lst[-1]


-def split_by_client_version(string, env=None):
-    if not env:
-        env = ft_utils.Env()
-
-    client_letter, client_digits = env.get_version_type('client')
-    regex = "[/_.]" + client_letter + "\d+"
+def split_by_client_version(string):
+    regex = r"[/_.]v\d+"
    try:
        matches = re.findall(regex, string, re.IGNORECASE)
        return string.split(matches[0])
-    except Exception, e:
-        print e
+    except Exception as e:
+        print(e)
        return None


script_lst = [{
    'path': 'C:/Users/hubert/_PYPE_testing/projects/D001_projectx/episodes/ep120/ep120sq01/120sh020/publish/plates/platesMain/v023/prjx_120sh020_platesMain_v023.nk',
    'name': '120sh020_platesMain',
    'handles': 10,
    'handleStart': 10,
    'handleEnd': 10,
    'timelineIn': 16,
    'startFrame': 991,
    'endFrame': 1023,
    'task': 'platesMain',
    'work_dir': 'shots',
    'shot': '120sh020'
}]
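How the simplified version regex behaves (file name taken from the example data above):

    import re

    name = "prjx_120sh020_platesMain_v023"
    matches = re.findall(r"[/_.]v\d+", name, re.IGNORECASE)
    print(matches)                    # -> ['_v023']
    print(name.split(matches[0])[0])  # -> 'prjx_120sh020_platesMain'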
@@ -37,11 +37,11 @@ def update_tag(tag, value):
    """

    tag.setNote(value['note'])
-    tag.setIcon(value['icon']['path'])
+    tag.setIcon(str(value['icon']['path']))
    mtd = tag.metadata()
    pres_mtd = value.get('metadata', None)
    if pres_mtd:
-        [mtd.setValue("tag.{}".format(k), v)
+        [mtd.setValue("tag.{}".format(str(k)), str(v))
         for k, v in pres_mtd.items()]

    return tag
@@ -2,27 +2,6 @@ import os
import pyblish.api
import logging

-if not os.environ.get("FTRACK_API_KEY"):
-    import appdirs
-    import json
-
-    config_path = os.path.normpath(appdirs.user_data_dir('pype-app', 'pype'))
-    action_file_name = 'ftrack_cred.json'
-    action_fpath = os.path.join(config_path, action_file_name)
-
-    validation = False
-    credentials = {}
-    try:
-        file = open(action_fpath, 'r')
-        credentials = json.load(file)
-    except Exception:
-        raise Exception("Ftrack credentials are missing ...")
-    else:
-        file.close()
-
-    os.environ['FTRACK_API_USER'] = credentials["username"]
-    os.environ['FTRACK_API_KEY'] = credentials["apiKey"]
-
try:
    import ftrack_api_old as ftrack_api
except Exception:
@@ -37,6 +16,8 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):

    def process(self, context):

+        ftrack_log = logging.getLogger('ftrack_api')
+        ftrack_log.setLevel(logging.WARNING)
        # Collect session
        session = ftrack_api.Session()
        context.data["ftrackSession"] = session
@@ -56,7 +56,15 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
    def process(self, instance):

        session = instance.context.data["ftrackSession"]
-        if instance.context.data.get("ftrackTask"):
+        if instance.data.get("ftrackTask"):
            task = instance.data["ftrackTask"]
            name = task
            parent = task["parent"]
+        elif instance.data.get("ftrackEntity"):
+            task = None
+            name = instance.data.get("ftrackEntity")['name']
+            parent = instance.data.get("ftrackEntity")
+        elif instance.context.data.get("ftrackTask"):
+            task = instance.context.data["ftrackTask"]
+            name = task
+            parent = task["parent"]
@@ -25,7 +25,9 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
        'write': 'img',
        'render': 'render',
        'nukescript': 'comp',
-        'review': 'mov'}
+        'review': 'mov',
+        'plate': 'img'
+    }

    def process(self, instance):
        self.log.debug('instance {}'.format(instance))
@@ -25,7 +25,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
    }
    """

-    order = pyblish.api.IntegratorOrder
+    order = pyblish.api.IntegratorOrder - 0.04
    label = 'Integrate Hierarchy To Ftrack'
    families = ["clip"]
    optional = False

@@ -45,7 +45,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
    def import_to_ftrack(self, input_data, parent=None):
        for entity_name in input_data:
            entity_data = input_data[entity_name]
-            entity_type = entity_data['entity_type'].capitalize()
+            entity_type = entity_data['entity_type']

            if entity_type.lower() == 'project':
                query = 'Project where full_name is "{}"'.format(entity_name)
@@ -82,11 +82,12 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
                i for i in self.context[:] if i.data['asset'] in entity['name']]
            for key in custom_attributes:
                assert (key in entity['custom_attributes']), (
-                    'Missing custom attribute')
+                    'Missing custom attribute key: `{0}` in attrs: `{1}`'.format(
+                        key, entity['custom_attributes'].keys()))

                entity['custom_attributes'][key] = custom_attributes[key]

            for instance in instances:
                instance.data['ftrackShotId'] = entity['id']
+                instance.data['ftrackEntity'] = entity

            self.session.commit()
@@ -108,7 +109,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
        for task in tasks_to_create:
            self.create_task(
                name=task,
-                task_type=task.capitalize(),
+                task_type=task,
                parent=entity
            )
        self.session.commit()
@@ -14,6 +14,8 @@ class CleanUp(pyblish.api.InstancePlugin):
    order = pyblish.api.IntegratorOrder + 10
    label = "Clean Up"
    exclude_families = ["clip"]
    optional = True
    active = True

    def process(self, instance):
        if [ef for ef in self.exclude_families
@@ -9,16 +9,18 @@ class CollectAssumedDestination(pyblish.api.ContextPlugin):

    label = "Collect Assumed Destination"
    order = pyblish.api.CollectorOrder + 0.498
-    exclude_families = ["clip"]
+    exclude_families = ["plate"]

    def process(self, context):

        for instance in context:
            if [ef for ef in self.exclude_families
                    if ef in instance.data["family"]]:
                self.log.info("Ignoring instance: {}".format(instance))
                return
            self.process_item(instance)

    def process_item(self, instance):
        if [ef for ef in self.exclude_families
                if instance.data["family"] in ef]:
            return

        self.create_destination_template(instance)
@@ -9,6 +9,23 @@ class CollectPresets(api.ContextPlugin):
    label = "Collect Presets"

    def process(self, context):
-        context.data["presets"] = config.get_presets()
-        # self.log.debug(context.data["presets"])
+        presets = config.get_presets()
+        try:
+            # try if it is not in projects custom directory
+            # `{PYPE_PROJECT_CONFIGS}/[PROJECT_NAME]/init.json`
+            # init.json defines preset names to be used
+            p_init = presets["init"]
+            presets["colorspace"] = presets["colorspace"][p_init["colorspace"]]
+            presets["dataflow"] = presets["dataflow"][p_init["dataflow"]]
+        except KeyError:
+            self.log.warning("No projects custom preset available...")
+            presets["colorspace"] = presets["colorspace"]["default"]
+            presets["dataflow"] = presets["dataflow"]["default"]
+            self.log.info(
+                "Presets `colorspace` and `dataflow` loaded from `default`..."
+            )
+
+        context.data["presets"] = presets
+
+        self.log.info(context.data["presets"])
+        return
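For context, a sketch of the per-project init.json lookup the collector performs; the preset names here are hypothetical:

    # assumed contents of {PYPE_PROJECT_CONFIGS}/[PROJECT_NAME]/init.json
    p_init = {"colorspace": "project_aces", "dataflow": "project_exr"}

    presets = {
        "colorspace": {"default": {}, "project_aces": {"working": "ACEScg"}},
        "dataflow": {"default": {}, "project_exr": {"file_type": "exr"}},
    }
    presets["colorspace"] = presets["colorspace"][p_init["colorspace"]]
    presets["dataflow"] = presets["dataflow"][p_init["dataflow"]]
    print(presets["colorspace"])  # -> {'working': 'ACEScg'}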
pype/plugins/global/publish/collect_project_data.py (new file, 17 lines)
@@ -0,0 +1,17 @@
import pyblish.api
import pype.api as pype


class CollectProjectData(pyblish.api.ContextPlugin):
    """Collecting project data from avalon db"""

    label = "Collect Project Data"
    order = pyblish.api.CollectorOrder - 0.1
    hosts = ["nukestudio"]

    def process(self, context):
        # get project data from avalon db
        context.data["projectData"] = pype.get_project_data()

        return
@@ -13,14 +13,14 @@ class IntegrateAssumedDestination(pyblish.api.InstancePlugin):

    def process(self, instance):

-        self.create_destination_template(instance)
+        anatomy = instance.context.data['anatomy']
+
+        self.create_destination_template(instance, anatomy)

        template_data = instance.data["assumedTemplateData"]
        # template = instance.data["template"]

-        anatomy = instance.context.data['anatomy']
        # self.log.info(anatomy.templates)
        anatomy_filled = anatomy.format(template_data)

        # self.log.info(anatomy_filled)
        mock_template = anatomy_filled["publish"]["path"]
@@ -30,7 +30,7 @@ class IntegrateAssumedDestination(pyblish.api.InstancePlugin):
            "resources")

        # Clean the path
-        mock_destination = os.path.abspath(os.path.normpath(mock_destination))
+        mock_destination = os.path.abspath(os.path.normpath(mock_destination)).replace("\\", "/")

        # Define resource destination and transfers
        resources = instance.data.get("resources", list())

@@ -38,7 +38,7 @@ class IntegrateAssumedDestination(pyblish.api.InstancePlugin):
        for resource in resources:

            # Add destination to the resource
-            source_filename = os.path.basename(resource["source"])
+            source_filename = os.path.basename(resource["source"]).replace("\\", "/")
            destination = os.path.join(mock_destination, source_filename)

            # Force forward slashes to fix issue with software unable
@@ -53,13 +53,13 @@ class IntegrateAssumedDestination(pyblish.api.InstancePlugin):
            files = resource['files']
            for fsrc in files:
                fname = os.path.basename(fsrc)
-                fdest = os.path.join(mock_destination, fname)
+                fdest = os.path.join(mock_destination, fname).replace("\\", "/")
                transfers.append([fsrc, fdest])

        instance.data["resources"] = resources
        instance.data["transfers"] = transfers

-    def create_destination_template(self, instance):
+    def create_destination_template(self, instance, anatomy):
        """Create a filepath based on the current data available

        Example template:
@@ -77,12 +77,13 @@ class IntegrateAssumedDestination(pyblish.api.InstancePlugin):
        self.log.info(subset_name)
        asset_name = instance.data["asset"]
        project_name = api.Session["AVALON_PROJECT"]
+        a_template = anatomy.templates

        project = io.find_one({"type": "project",
                               "name": project_name},
                              projection={"config": True, "data": True})

-        template = project["config"]["template"]["publish"]
+        template = a_template['publish']['path']
        # anatomy = instance.context.data['anatomy']

        asset = io.find_one({"type": "asset",
@@ -112,10 +113,12 @@ class IntegrateAssumedDestination(pyblish.api.InstancePlugin):
        if instance.data.get('version'):
            version_number = int(instance.data.get('version'))

+        padding = int(a_template['render']['padding'])
+
        hierarchy = asset['data']['parents']
        if hierarchy:
            # hierarchy = os.path.sep.join(hierarchy)
-            hierarchy = os.path.join(*hierarchy)
+            hierarchy = "/".join(hierarchy)

        template_data = {"root": api.Session["AVALON_PROJECTS"],
                         "project": {"name": project_name,
@@ -124,6 +127,7 @@ class IntegrateAssumedDestination(pyblish.api.InstancePlugin):
                         "family": instance.data['family'],
                         "asset": asset_name,
                         "subset": subset_name,
+                         "frame": ('#' * padding),
                         "version": version_number,
                         "hierarchy": hierarchy,
                         "representation": "TEMP"}
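A quick check of the hierarchy join and the frame placeholder introduced above (values assumed):

    hierarchy = "/".join(['episodes', 'ep120', 'ep120sq01'])
    print(hierarchy)       # -> 'episodes/ep120/ep120sq01' on every platform

    padding = 4            # assumed from the anatomy 'render' template
    print('#' * padding)   # -> '####' frame placeholder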
@@ -14,6 +14,7 @@ class IntegrateHierarchyToAvalon(pyblish.api.ContextPlugin):

    def process(self, context):
        if "hierarchyContext" not in context.data:
+            self.log.info('skipping IntegrateHierarchyToAvalon')
            return

        self.db = io
@@ -60,7 +60,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
        "nukescript",
        "render",
        "write",
-        "plates"
+        "rig",
+        "plate"
    ]
    exclude_families = ["clip"]
@@ -163,6 +164,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
        self.log.debug("Next version: v{0:03d}".format(next_version))

        version_data = self.create_version_data(context, instance)
+
+        version_data_instance = instance.data.get('versionData')
+
+        if version_data_instance:
+            version_data.update(version_data_instance)
+
        version = self.create_version(subset=subset,
                                      version_number=next_version,
                                      locations=[LOCATION],
|
@ -253,13 +260,15 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
|
|||
os.path.normpath(
|
||||
anatomy_filled[template_name]["path"])
|
||||
)
|
||||
self.log.debug(
|
||||
"test_dest_files: {}".format(str(test_dest_files)))
|
||||
|
||||
dst_collections, remainder = clique.assemble(test_dest_files)
|
||||
dst_collection = dst_collections[0]
|
||||
dst_head = dst_collection.format("{head}")
|
||||
dst_tail = dst_collection.format("{tail}")
|
||||
|
||||
instance.data["representations"][idx]['published_path'] = dst_collection.format() # noqa E01
|
||||
repre['published_path'] = dst_collection.format()
|
||||
|
||||
for i in src_collection.indexes:
|
||||
src_padding = src_collection.format("{padding}") % i
|
||||
|
|
@@ -270,9 +279,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
                dst = "{0}{1}{2}".format(dst_head, dst_padding, dst_tail)
                self.log.debug("destination: `{}`".format(dst))
                src = os.path.join(stagingdir, src_file_name)
-                self.log.debug("source: `{}`".format(src))
+                self.log.debug("source: {}".format(src))
                instance.data["transfers"].append([src, dst])

+            # for imagesequence version data
+            hashes = '#' * len(dst_padding)
+            dst = os.path.normpath("{0}{1}{2}".format(
+                dst_head, hashes, dst_tail))
+
        else:
            # Single file
            #  _______
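To make the collection handling concrete, a small sketch with the clique library this plugin uses (file names assumed):

    import clique

    files = ['render.1001.exr', 'render.1002.exr', 'render.1003.exr']  # assumed
    collections, remainder = clique.assemble(files)
    dst_collection = collections[0]
    dst_head = dst_collection.format('{head}')   # -> 'render.'
    dst_tail = dst_collection.format('{tail}')   # -> '.exr'
    hashes = '#' * 4
    print('{0}{1}{2}'.format(dst_head, hashes, dst_tail))  # -> 'render.####.exr'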
@@ -293,10 +307,13 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):

            src = os.path.join(stagingdir, fname)
            anatomy_filled = anatomy.format(template_data)
-            dst = anatomy_filled[template_name]["path"]
+            dst = os.path.normpath(
+                anatomy_filled[template_name]["path"])

            instance.data["transfers"].append([src, dst])
-            instance.data["representations"][idx]['published_path'] = dst
+
+            repre['published_path'] = dst
+            self.log.debug("__ dst: {}".format(dst))

        representation = {
            "schema": "pype:representation-2.0",
@@ -322,14 +339,20 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
                    "representation": repre['ext']
                }
            }

            self.log.debug("__ _representation: {}".format(representation))
            destination_list.append(dst)
            self.log.debug("__ destination_list: {}".format(destination_list))
            instance.data['destination_list'] = destination_list
            representations.append(representation)
            self.log.debug("__ representations: {}".format(representations))

        self.log.info("Registering {} items".format(len(representations)))

        self.log.debug("__ representations: {}".format(representations))
        for rep in instance.data["representations"]:
            self.log.debug("__ represNAME: {}".format(rep['name']))
            self.log.debug("__ represPATH: {}".format(rep['published_path']))
        io.insert_many(representations)
        # self.log.debug("Representation: {}".format(representations))
        self.log.info("Registered {} items".format(len(representations)))

    def integrate(self, instance):
        """Move the files
@@ -343,7 +366,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
        transfers = instance.data.get("transfers", list())

        for src, dest in transfers:
-            self.log.info("Copying file .. {} -> {}".format(src, dest))
+            self.log.debug("Copying file .. {} -> {}".format(src, dest))
            self.copy_file(src, dest)

        # Produce hardlinked copies
@@ -353,7 +376,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
        # to ensure publishes remain safe and non-edited.
        hardlinks = instance.data.get("hardlinks", list())
        for src, dest in hardlinks:
-            self.log.info("Hardlinking file .. {} -> {}".format(src, dest))
+            self.log.debug("Hardlinking file .. {} -> {}".format(src, dest))
            self.hardlink_file(src, dest)

    def copy_file(self, src, dst):
@@ -36,8 +36,7 @@ class RigLoader(pype.maya.plugin.ReferenceLoader):
                              groupReference=True,
                              groupName=groupName)

-        # cmds.makeIdentity(groupName, apply=False, rotate=True,
-        #                   translate=True, scale=True)
+        cmds.xform(groupName, pivots=(0, 0, 0))

        presets = config.get_presets(project=os.environ['AVALON_PROJECT'])
        colors = presets['plugins']['maya']['load']['colors']
@@ -1,228 +1,226 @@
from maya import cmds

import pyblish.api
import pype.api
import pype.maya.action
import math
import maya.api.OpenMaya as om
from pymel.core import polyUVSet
import pymel.core as pm


class GetOverlappingUVs(object):

    def _createBoundingCircle(self, meshfn):
        """ Represent a face by center and radius

        :param meshfn: MFnMesh class
        :type meshfn: :class:`maya.api.OpenMaya.MFnMesh`
        :returns: (center, radius)
        :rtype: tuple
        """
        center = []
        radius = []
        for i in xrange(meshfn.numPolygons):  # noqa: F821
            # get uvs from face
            uarray = []
            varray = []
            for j in range(len(meshfn.getPolygonVertices(i))):
                uv = meshfn.getPolygonUV(i, j)
                uarray.append(uv[0])
                varray.append(uv[1])

            # loop through all vertices to construct edges/rays
            cu = 0.0
            cv = 0.0
            for j in range(len(uarray)):
                cu += uarray[j]
                cv += varray[j]

            cu /= len(uarray)
            cv /= len(varray)
            rsqr = 0.0
            for j in range(len(varray)):
                du = uarray[j] - cu
                dv = varray[j] - cv
                dsqr = du * du + dv * dv
                rsqr = dsqr if dsqr > rsqr else rsqr

            center.append(cu)
            center.append(cv)
            radius.append(math.sqrt(rsqr))

        return center, radius

    def _createRayGivenFace(self, meshfn, faceId):
        """ Represent a face by a series of edges(rays), i.e.

        :param meshfn: MFnMesh class
        :type meshfn: :class:`maya.api.OpenMaya.MFnMesh`
        :param faceId: face id
        :type faceId: int
        :returns: False if no valid uv's.
                  ""(True, orig, vec)"" or ""(False, None, None)""
        :rtype: tuple

        .. code-block:: python

            orig = [orig1u, orig1v, orig2u, orig2v, ... ]
            vec = [vec1u, vec1v, vec2u, vec2v, ... ]
        """
        orig = []
        vec = []
        # get uvs
        uarray = []
        varray = []
        for i in range(len(meshfn.getPolygonVertices(faceId))):
            uv = meshfn.getPolygonUV(faceId, i)
            uarray.append(uv[0])
            varray.append(uv[1])

        if len(uarray) == 0 or len(varray) == 0:
            return (False, None, None)

        # loop through all vertices to construct edges/rays
        u = uarray[-1]
        v = varray[-1]
        for i in xrange(len(uarray)):  # noqa: F821
            orig.append(uarray[i])
            orig.append(varray[i])
            vec.append(u - uarray[i])
            vec.append(v - varray[i])
            u = uarray[i]
            v = varray[i]

        return (True, orig, vec)

    def _checkCrossingEdges(self,
                            face1Orig,
                            face1Vec,
                            face2Orig,
                            face2Vec):
        """ Check if there are crossing edges between two faces.
        Return True if there are crossing edges and False otherwise.

        :param face1Orig: origin of face 1
        :type face1Orig: tuple
        :param face1Vec: face 1 edges
        :type face1Vec: list
        :param face2Orig: origin of face 2
        :type face2Orig: tuple
        :param face2Vec: face 2 edges
        :type face2Vec: list

        A face is represented by a series of edges(rays), i.e.
        .. code-block:: python

            faceOrig[] = [orig1u, orig1v, orig2u, orig2v, ... ]
            faceVec[] = [vec1u, vec1v, vec2u, vec2v, ... ]
        """
        face1Size = len(face1Orig)
        face2Size = len(face2Orig)
        for i in xrange(0, face1Size, 2):  # noqa: F821
            o1x = face1Orig[i]
            o1y = face1Orig[i+1]
            v1x = face1Vec[i]
            v1y = face1Vec[i+1]
            n1x = v1y
            n1y = -v1x
            for j in xrange(0, face2Size, 2):  # noqa: F821
                # Given ray1(O1, V1) and ray2(O2, V2)
                # Normal of ray1 is (V1.y, V1.x)
                o2x = face2Orig[j]
                o2y = face2Orig[j+1]
                v2x = face2Vec[j]
                v2y = face2Vec[j+1]
                n2x = v2y
                n2y = -v2x

                # Find t for ray2
                # t = [(o1x-o2x)n1x + (o1y-o2y)n1y] /
                #     (v2x * n1x + v2y * n1y)
                denum = v2x * n1x + v2y * n1y
                # Edges are parallel if denum is close to 0.
                if math.fabs(denum) < 0.000001:
                    continue
                t2 = ((o1x-o2x) * n1x + (o1y-o2y) * n1y) / denum
                if (t2 < 0.00001 or t2 > 0.99999):
                    continue

                # Find t for ray1
                # t = [(o2x-o1x)n2x
                #      + (o2y-o1y)n2y] / (v1x * n2x + v1y * n2y)
                denum = v1x * n2x + v1y * n2y
                # Edges are parallel if denum is close to 0.
                if math.fabs(denum) < 0.000001:
                    continue
                t1 = ((o2x-o1x) * n2x + (o2y-o1y) * n2y) / denum

                # Edges intersect
                if (t1 > 0.00001 and t1 < 0.99999):
                    return 1

        return 0

    def _getOverlapUVFaces(self, meshName):
        """ Return overlapping faces

        :param meshName: name of mesh
        :type meshName: str
        :returns: list of overlapping faces
        :rtype: list
        """
        faces = []
        # find polygon mesh node
        selList = om.MSelectionList()
        selList.add(meshName)
        mesh = selList.getDependNode(0)
        if mesh.apiType() == om.MFn.kTransform:
            dagPath = selList.getDagPath(0)
            dagFn = om.MFnDagNode(dagPath)
            child = dagFn.child(0)
            if child.apiType() != om.MFn.kMesh:
                raise Exception("Can't find polygon mesh")
            mesh = child
        meshfn = om.MFnMesh(mesh)

        center, radius = self._createBoundingCircle(meshfn)
        for i in xrange(meshfn.numPolygons):  # noqa: F821
            rayb1, face1Orig, face1Vec = self._createRayGivenFace(
                meshfn, i)
            if not rayb1:
                continue
            cui = center[2*i]
            cvi = center[2*i+1]
            ri = radius[i]
            # Exclude the degenerate face
            # if(area(face1Orig) < 0.000001) continue;
            # Loop through face j where j != i
            for j in range(i+1, meshfn.numPolygons):
                cuj = center[2*j]
                cvj = center[2*j+1]
                rj = radius[j]
                du = cuj - cui
                dv = cvj - cvi
                dsqr = du * du + dv * dv
                # Quick rejection if bounding circles don't overlap
                if (dsqr >= (ri + rj) * (ri + rj)):
                    continue

                rayb2, face2Orig, face2Vec = self._createRayGivenFace(
                    meshfn, j)
                if not rayb2:
                    continue
                # Exclude the degenerate face
                # if(area(face2Orig) < 0.000001): continue;
                if self._checkCrossingEdges(face1Orig,
                                            face1Vec,
                                            face2Orig,
                                            face2Vec):
                    face1 = '%s.f[%d]' % (meshfn.name(), i)
                    face2 = '%s.f[%d]' % (meshfn.name(), j)
                    if face1 not in faces:
                        faces.append(face1)
                    if face2 not in faces:
                        faces.append(face2)
        return faces
face1Vec,
|
||||
face2Orig,
|
||||
face2Vec):
|
||||
face1 = '%s.f[%d]' % (meshfn.name(), i)
|
||||
face2 = '%s.f[%d]' % (meshfn.name(), j)
|
||||
if face1 not in faces:
|
||||
faces.append(face1)
|
||||
if face2 not in faces:
|
||||
faces.append(face2)
|
||||
return faces
|
||||
|
||||
|
||||
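The crossing test above is the standard parametric segment-intersection check: for edges O1 + t1*V1 and O2 + t2*V2 it projects onto each edge's normal and reports a crossing only when both parameters fall strictly inside (0, 1). A minimal, self-contained sketch of the same math on plain tuples (illustration only, not repository code):

    import math

    def edges_cross(o1, v1, o2, v2):
        # Same math as _checkCrossingEdges, for a single pair of edges.
        n1 = (v1[1], -v1[0])  # normal of edge 1
        n2 = (v2[1], -v2[0])  # normal of edge 2

        denum = v2[0] * n1[0] + v2[1] * n1[1]
        if math.fabs(denum) < 0.000001:  # edges are parallel
            return False
        t2 = ((o1[0] - o2[0]) * n1[0] + (o1[1] - o2[1]) * n1[1]) / denum
        if t2 < 0.00001 or t2 > 0.99999:
            return False

        denum = v1[0] * n2[0] + v1[1] * n2[1]
        if math.fabs(denum) < 0.000001:
            return False
        t1 = ((o2[0] - o1[0]) * n2[0] + (o2[1] - o1[1]) * n2[1]) / denum
        return 0.00001 < t1 < 0.99999

    # Two unit-square diagonals cross; two parallel edges do not.
    print(edges_cross((0, 0), (1, 1), (1, 0), (-1, 1)))  # True
    print(edges_cross((0, 0), (1, 0), (0, 1), (1, 0)))   # False
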
class ValidateMeshHasOverlappingUVs(pyblish.api.InstancePlugin):

@@ -241,7 +239,7 @@ class ValidateMeshHasOverlappingUVs(pyblish.api.InstancePlugin):
    optional = True

    @classmethod
    def _has_overlapping_uvs(cls, node):
    def _get_overlapping_uvs(cls, node):
        """ Check if mesh has overlapping UVs.

        :param node: node to check

@@ -251,27 +249,32 @@ class ValidateMeshHasOverlappingUVs(pyblish.api.InstancePlugin):
        """
        ovl = GetOverlappingUVs()

        for i, uv in enumerate(polyUVSet(node, q=1, auv=1)):
            polyUVSet(node, cuv=1, uvSet=uv)
            of = ovl._getOverlapUVFaces(str(node))
            if of != []:
                return True
        return False
        overlapping_faces = []
        for i, uv in enumerate(pm.polyUVSet(node, q=1, auv=1)):
            pm.polyUVSet(node, cuv=1, uvSet=uv)
            overlapping_faces.extend(ovl._getOverlapUVFaces(str(node)))

        return overlapping_faces

    @classmethod
    def get_invalid(cls, instance):
    def get_invalid(cls, instance, compute=False):
        invalid = []

        for node in cmds.ls(instance, type='mesh'):
            if cls._has_overlapping_uvs(node):
                invalid.append(node)
        if compute:
            instance.data["overlapping_faces"] = []
            for node in pm.ls(instance, type="mesh"):
                faces = cls._get_overlapping_uvs(node)
                invalid.extend(faces)
                # Store values for later.
                instance.data["overlapping_faces"].extend(faces)
        else:
            invalid.extend(instance.data["overlapping_faces"])

        return invalid

    def process(self, instance):

        invalid = self.get_invalid(instance)
        invalid = self.get_invalid(instance, compute=True)
        if invalid:
            raise RuntimeError("Meshes found with overlapping "
                               "UVs: {0}".format(invalid))
            pass
            raise RuntimeError(
                "Meshes found with overlapping UVs: {0}".format(invalid)
            )

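The new `compute` flag lets the expensive UV scan run once during `process` and caches its result in `instance.data["overlapping_faces"]`, so later callers can re-read the cached list instead of rescanning every mesh. A small standalone sketch of that compute-once pattern (illustrative names only, not repository code):

    def get_invalid(data, compute=False):
        if compute:
            # expensive scan, result cached for later callers
            data["overlapping_faces"] = ["mesh.f[3]", "mesh.f[7]"]
        return data.get("overlapping_faces", [])

    data = {}
    print(get_invalid(data, compute=True))  # scans and caches
    print(get_invalid(data))                # reuses the cache
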
@@ -152,7 +152,8 @@ class ValidateRigControllers(pyblish.api.InstancePlugin):
        """
        import maya.cmds as mc

        attributes = mc.listAttr(control, keyable=True, scalar=True)
        # Support controls without any attributes returning None
        attributes = mc.listAttr(control, keyable=True, scalar=True) or []
        invalid = []
        for attr in attributes:
            plug = "{}.{}".format(control, attr)

@@ -162,6 +163,10 @@ class ValidateRigControllers(pyblish.api.InstancePlugin):
            if locked:
                continue

            # Ignore proxy connections.
            if cmds.addAttr(plug, query=True, usedAsProxy=True):
                continue

            # Check for incoming connections
            if cmds.listConnections(plug, source=True, destination=False):
                invalid.append(plug)

@@ -75,7 +75,7 @@ def loader_shift(node, frame, relative=True):
class LoadSequence(api.Loader):
    """Load image sequence into Nuke"""

    families = ["write", "source"]
    families = ["write", "source", "plate"]
    representations = ["exr", "dpx"]

    label = "Load sequence"

@@ -94,6 +94,18 @@ class LoadSequence(api.Loader):

        first = version_data.get("startFrame", None)
        last = version_data.get("endFrame", None)
        handles = version_data.get("handles", None)
        handle_start = version_data.get("handleStart", None)
        handle_end = version_data.get("handleEnd", None)

        # fix handle start and end if none are available
        if not handle_start and not handle_end:
            handle_start = handles
            handle_end = handles

        # create handles offset
        first -= handle_start
        last += handle_end

        # Fallback to asset name when namespace is None
        if namespace is None:

@@ -117,25 +129,23 @@ class LoadSequence(api.Loader):
        if colorspace is not None:
            r["colorspace"].setValue(str(colorspace))

        # Set global in point to start frame (if in version.data)
        start = context["version"]["data"].get("startFrame", None)
        if start is not None:
            loader_shift(r, start, relative=True)
            r["origfirst"].setValue(first)
            r["first"].setValue(first)
            r["origlast"].setValue(last)
            r["last"].setValue(last)
        loader_shift(r, first, relative=True)
        r["origfirst"].setValue(first)
        r["first"].setValue(first)
        r["origlast"].setValue(last)
        r["last"].setValue(last)

        # add additional metadata from the version to imprint to Avalon knob
        add_keys = ["startFrame", "endFrame", "handles",
                    "source", "colorspace", "author", "fps", "version"]
                    "source", "colorspace", "author", "fps", "version",
                    "handleStart", "handleEnd"]

        data_imprint = {}
        for k in add_keys:
            if k is 'version':
                data_imprint.update({k: context["version"]['name']})
            else:
                data_imprint.update({k: context["version"]['data'][k]})
                data_imprint.update({k: context["version"]['data'].get(k, str(None))})

        data_imprint.update({"objectName": read_name})

@@ -186,12 +196,28 @@ class LoadSequence(api.Loader):

        max_version = max(versions)

        start = version["data"].get("startFrame")
        if start is None:
        version_data = version.get("data", {})

        first = version_data.get("startFrame", None)
        last = version_data.get("endFrame", None)
        handles = version_data.get("handles", 0)
        handle_start = version_data.get("handleStart", 0)
        handle_end = version_data.get("handleEnd", 0)

        if first is None:
            log.warning("Missing start frame for updated version, "
                        "assuming starts at frame 0 for: "
                        "{} ({})".format(node['name'].value(), representation))
            start = 0
            first = 0

        # fix handle start and end if none are available
        if not handle_start and not handle_end:
            handle_start = handles
            handle_end = handles

        # create handles offset
        first -= handle_start
        last += handle_end

        # Update the loader's path whilst preserving some values
        with preserve_trim(node):

@@ -199,24 +225,26 @@ class LoadSequence(api.Loader):
            log.info("__ node['file']: {}".format(node["file"]))

        # Set the global in to the start frame of the sequence
        global_in_changed = loader_shift(node, start, relative=False)
        if global_in_changed:
            # Log this change to the user
            log.debug("Changed '{}' global in:"
                      " {:d}".format(node['name'].value(), start))
        loader_shift(node, first, relative=True)
        node["origfirst"].setValue(first)
        node["first"].setValue(first)
        node["origlast"].setValue(last)
        node["last"].setValue(last)

        updated_dict = {}
        updated_dict.update({
            "representation": str(representation["_id"]),
            "startFrame": start,
            "endFrame": version["data"].get("endFrame"),
            "startFrame": version_data.get("startFrame"),
            "endFrame": version_data.get("endFrame"),
            "version": version.get("name"),
            "colorspace": version["data"].get("colorspace"),
            "source": version["data"].get("source"),
            "handles": version["data"].get("handles"),
            "fps": version["data"].get("fps"),
            "author": version["data"].get("author"),
            "outputDir": version["data"].get("outputDir"),
            "colorspace": version_data.get("colorspace"),
            "source": version_data.get("source"),
            "handles": version_data.get("handles"),
            "handleStart": version_data.get("handleStart"),
            "handleEnd": version_data.get("handleEnd"),
            "fps": version_data.get("fps"),
            "author": version_data.get("author"),
            "outputDir": version_data.get("outputDir"),
        })

        # change color of node

@@ -1,45 +0,0 @@
from pyblish import api


class CollectClipSubsets(api.InstancePlugin):
    """Collect Subsets from selected Clips, Tags, Preset."""

    order = api.CollectorOrder + 0.01
    label = "Collect Subsets"
    hosts = ["nukestudio"]
    families = ['clip']

    def process(self, instance):
        tags = instance.data.get('tags', None)
        presets = instance.context.data['presets'][
            instance.context.data['host']]
        if tags:
            self.log.info(tags)

        if presets:
            self.log.info(presets)

        # get presets and tags
        # iterate tags and get task family
        # iterate tags and get host family
        # iterate tags and get handles family

        instance = instance.context.create_instance(instance_name)

        instance.data.update({
            "subset": subset_name,
            "stagingDir": staging_dir,
            "task": task,
            "representation": ext[1:],
            "host": host,
            "asset": asset_name,
            "label": label,
            "name": name,
            # "hierarchy": hierarchy,
            # "parents": parents,
            "family": family,
            "families": [families, 'ftrack'],
            "publish": True,
            # "files": files_list
        })
        instances.append(instance)

pype/plugins/nukestudio/_unused/extract_plates.py (new file, 238 lines)
@@ -0,0 +1,238 @@
from pyblish import api
import pype


class ExtractPlates(pype.api.Extractor):
    """Extracts plates"""

    order = api.ExtractorOrder
    label = "Extract Plates"
    hosts = ["nukestudio"]
    families = ["encode"]

    def process(self, instance):
        import os
        import hiero.core
        # from hiero.ui.nuke_bridge import FnNsFrameServer

        # add to representations
        if not instance.data.get("representations"):
            instance.data["representations"] = list()

        version_data = dict()
        context = instance.context
        anatomy = context.data.get("anatomy", None)
        padding = int(anatomy.templates['render']['padding'])

        name = instance.data["subset"]
        asset = instance.data["asset"]
        track = instance.data["track"]
        family = instance.data["family"]
        families = instance.data["families"]
        attrs = instance.data["attributes"]
        version = instance.data["version"]

        # staging dir creation
        self.log.debug("creating staging dir")
        self.staging_dir(instance)

        staging_dir = instance.data['stagingDir']

        Nuke_writer = hiero.core.nuke.ScriptWriter()

        item = instance.data["item"]

        # get handles
        handles = int(instance.data["handles"])
        handle_start = int(instance.data["handleStart"])
        handle_end = int(instance.data["handleEnd"])

        # get timeline frames
        timeline_in = int(instance.data["timelineIn"])
        timeline_out = int(instance.data["timelineOut"])

        # frame-ranges with handles
        timeline_frame_start = int(instance.data["timelineInHandles"])
        timeline_frame_end = int(instance.data["timelineOutHandles"])

        # creating comp frame range
        frame_start = int(instance.data["startFrame"])
        frame_end = int(instance.data["endFrame"])

        # get colorspace
        colorspace = instance.context.data["colorspace"]

        # get fps from instance
        fps = int(instance.data["fps"])

        # test output
        self.log.debug("__ handles: {}".format(handles))
        self.log.debug("__ handle_start: {}".format(handle_start))
        self.log.debug("__ handle_end: {}".format(handle_end))
        self.log.debug("__ timeline_in: {}".format(timeline_in))
        self.log.debug("__ timeline_out: {}".format(timeline_out))
        self.log.debug("__ timeline_frame_start: {}".format(
            timeline_frame_start))
        self.log.debug("__ timeline_frame_end: {}".format(timeline_frame_end))
        self.log.debug("__ frame_start: {}".format(frame_start))
        self.log.debug("__ frame_end: {}".format(frame_end))
        self.log.debug("__ colorspace: {}".format(colorspace))
        self.log.debug("__ track: {}".format(track))
        self.log.debug("__ fps: {}".format(fps))

        # Generate Nuke script
        write_name = "Write_out"

        # root node
        root_node = hiero.core.nuke.RootNode(
            frame_start,
            frame_end,
            fps=fps
        )

        root_node.addProjectSettings(colorspace)

        # create write node and link it to root node
        Nuke_writer.addNode(root_node)
        '''TrackItem.addToNukeScript(script=, firstFrame=None, additionalNodes=[], additionalNodesCallback=None, includeRetimes=False, retimeMethod=None, startHandle=None, endHandle=None, colourTransform=None, offset=0, nodeLabel=None, includeAnnotations=False, includeEffects=True, outputToSequenceFormat=False)'''
        item.addToNukeScript(
            script=Nuke_writer,
            firstFrame=frame_start,
            includeRetimes=attrs["includeRetimes"],
            retimeMethod=attrs["retimeMethod"],
            startHandle=handle_start,
            endHandle=handle_end,
            includeEffects=attrs["includeEffects"],
            includeAnnotations=attrs["includeAnnotations"]
        )

        write_knobs = attrs["nodes"]["write"]["attributes"]

        # TODO: take template from anatomy
        nukescript_file = "{asset}_{name}_v{version}.{ext}".format(
            asset=asset,
            name=name,
            version=version,
            ext="nk"
        )
        nukescript_path = os.path.join(
            staging_dir, nukescript_file
        )

        # TODO: take template from anatomy
        output_file = "{asset}_{name}_v{version}.%0{padding}d.{ext}".format(
            asset=asset,
            name=name,
            version=version,
            padding=padding,
            ext=write_knobs["file_type"]
        )
        output_path = os.path.join(
            staging_dir, output_file
        )

        write_node = hiero.core.nuke.WriteNode(output_path.replace("\\", "/"))
        write_node.setKnob("name", write_name)
        write_node.setKnob("file_type", write_knobs["file_type"])
        for knob, value in write_knobs.items():
            write_node.setKnob(knob, value)

        Nuke_writer.addNode(write_node)

        Nuke_writer.writeToDisk(nukescript_path)

        # test prints
        self.log.debug("__ output_file: {}".format(output_file))
        self.log.debug("__ output_path: {}".format(output_path))
        self.log.debug("__ nukescript_file: {}".format(nukescript_file))
        self.log.debug("__ nukescript_path: {}".format(nukescript_path))
        self.log.debug("__ write_knobs: {}".format(write_knobs))
        self.log.debug("__ write_name: {}".format(write_name))
        self.log.debug("__ Nuke_writer: {}".format(Nuke_writer))

        # create rendering arguments for FnNsFrameServer
        _args = [
            nukescript_path,
            "{}-{}".format(frame_start, frame_end),
            write_name,
            ["main"]
        ]

        # add to data of representation
        version_data.update({
            "handles": handles,
            "handleStart": handle_start,
            "handleEnd": handle_end,
            "timelineIn": timeline_in,
            "timelineOut": timeline_out,
            "timelineInHandles": timeline_frame_start,
            "timelineOutHandles": timeline_frame_end,
            "compFrameIn": frame_start,
            "compFrameOut": frame_end,
            "fps": fps,
            "colorspace": write_knobs["colorspace"],
            "nukeScriptFileName": nukescript_file,
            "nukeWriteFileName": output_file,
            "nukeWriteName": write_name,
            "FnNsFrameServer_renderFrames_args": str(_args),
            "family": family,
            "families": families,
            "asset": asset,
            "subset": name,
            "track": track,
            "version": int(version)
        })
        instance.data["versionData"] = version_data

        # adding representation for nukescript
        nk_representation = {
            'files': nukescript_file,
            'stagingDir': staging_dir,
            'name': "nk",
            'ext': "nk",
        }
        instance.data["representations"].append(nk_representation)

        # adding representation for plates
        plates_representation = {
            'files': [output_file % i for i in range(
                frame_start, (frame_end + 1), 1)],
            'stagingDir': staging_dir,
            'name': write_knobs["file_type"],
            'ext': write_knobs["file_type"],
        }
        instance.data["representations"].append(plates_representation)

        # adding checking file to context for ExtractPlateCheck(context) plugin
        context.data["platesCheck"] = os.path.join(
            staging_dir, output_file % frame_end
        )

        if not context.data.get("frameServerRenderQueue"):
            context.data["frameServerRenderQueue"] = list()

        # add to render queue list
        context.data["frameServerRenderQueue"].append(_args)

        # test prints
        self.log.debug("__ before family: {}".format(family))
        self.log.debug("__ before families: {}".format(families))

        # this is just a workaround because the 'clip' family is filtered
        instance.data["family"] = families[-1]
        instance.data["families"].append(family)

        # testing families
        family = instance.data["family"]
        families = instance.data["families"]

        # test prints version_data
        self.log.debug("__ version_data: {}".format(version_data))
        self.log.debug("__ nk_representation: {}".format(nk_representation))
        self.log.debug("__ plates_representation: {}".format(
            plates_representation))
        self.log.debug("__ after family: {}".format(family))
        self.log.debug("__ after families: {}".format(families))

        # # this will do FnNsFrameServer
        # FnNsFrameServer.renderFrames(*_args)

pype/plugins/nukestudio/_unused/extract_plates_waiting.py (new file, 30 lines)
@@ -0,0 +1,30 @@
from pyblish import api
import os
import time


class ExtractPlateCheck(api.ContextPlugin):
    """Wait until the exported plates show up on disk."""

    order = api.ExtractorOrder + 0.01
    label = "Plates Export Waiting"
    hosts = ["nukestudio"]
    families = ["encode"]

    def process(self, context):

        plate_path = context.data.get("platesCheck", None)

        self.log.info("Checking plate: `{}`".format(plate_path))

        if not plate_path:
            return

        while not os.path.exists(plate_path):
            self.log.info("Waiting for plates to be rendered")
            time.sleep(5)

        if os.path.isfile(plate_path):
            self.log.info("Plates were rendered: `{}`".format(plate_path))
        else:
            raise ValueError("%s isn't a file!" % plate_path)

@@ -7,7 +7,7 @@ class ExtractTasks(api.InstancePlugin):
    order = api.ExtractorOrder
    label = "Tasks"
    hosts = ["nukestudio"]
    families = ["trackItem.task"]
    families = ["clip"]
    optional = True

    def filelink(self, src, dst):

@@ -0,0 +1,26 @@
- tags get tasks

- collect_subset(instance):
  - gets presets for subset by tasks
  - creates instances for comp .nk, plates (main instance converted to plates)
  - add families:
    - .nk compositing script [workfile, ftrack]
    - plates [plates]
    - audio [audio]

- extract_submit_frameserver(instance)
  - families [plates]
  - adds .nk script created only for encoding (plates write), no color correction
  - adds .nk script created only for encoding (mov write)
  - adds .nk script created only for encoding (jpg, thumbnail)
  - _______
  - from hiero.ui.nuke_bridge import FnNsFrameServer
  - FnNsFrameServer.renderFrames(nks, "1-10", "Write_exr", ["main"]) (see the sketch below)
  - dict(script(str), framerange(str), writeNode(str), views(list))

# next step ########################################################
- submit_exporting_task(instance)
  - families [workfile]
  - create compositing scripts
  - create inventory containers for Reads
  - create publishable write nodes

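As the notes say, `FnNsFrameServer.renderFrames` takes the script path, a frame-range string, the write node name, and a list of views. A hedged sketch of such a submission (the path and node name are placeholders, and the call assumes a NukeStudio session where the frame-server bridge is available):

    from hiero.ui.nuke_bridge import FnNsFrameServer

    script = "C:/temp/shot010_plates_v001.nk"  # exported .nk script
    frame_range = "1001-1010"                  # "first-last" as a string
    write_node = "Write_out"                   # write node to render
    views = ["main"]

    FnNsFrameServer.renderFrames(script, frame_range, write_node, views)
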
@@ -1,17 +1,19 @@
import os
from pyblish import api


class CollectClips(api.ContextPlugin):
    """Collect all Track items selection."""

    order = api.CollectorOrder
    order = api.CollectorOrder + 0.01
    label = "Collect Clips"
    hosts = ["nukestudio"]

    def process(self, context):
        projectdata = context.data["projectData"]
        version = context.data.get("version", "001")
        data = {}
        for item in context.data.get("selection", []):
            self.log.info("__ item: {}".format(item))
            # Skip audio track items
            # Try/Except is to handle item types, like EffectTrackItem
            try:

@@ -21,8 +23,25 @@ class CollectClips(api.ContextPlugin):
            except:
                continue

            data[item.name()] = {
            track = item.parent()
            source = item.source().mediaSource()
            source_path = source.firstpath()
            instance_name = "{0}_{1}".format(track.name(), item.name())

            try:
                head, padding, ext = os.path.basename(source_path).split('.')
                source_first_frame = int(padding)
            except:
                source_first_frame = 0

            data[instance_name] = {
                "item": item,
                "source": source,
                "sourcePath": source_path,
                "track": track.name(),
                "sourceFirst": source_first_frame,
                "sourceIn": int(item.sourceIn()),
                "sourceOut": int(item.sourceOut()),
                "startFrame": int(item.timelineIn()),
                "endFrame": int(item.timelineOut())
            }

@@ -31,11 +50,20 @@ class CollectClips(api.ContextPlugin):
            family = "clip"
            context.create_instance(
                name=key,
                subset="{0}{1}".format(family, 'Default'),
                asset=value["item"].name(),
                item=value["item"],
                source=value["source"],
                sourcePath=value["sourcePath"],
                family=family,
                families=[],
                sourceFirst=value["sourceFirst"],
                sourceIn=value["sourceIn"],
                sourceOut=value["sourceOut"],
                startFrame=value["startFrame"],
                endFrame=value["endFrame"],
                handles=0
                handles=projectdata['handles'],
                handleStart=0,
                handleEnd=0,
                version=version,
                track=value["track"]
            )

@@ -1,14 +1,17 @@
import pyblish.api

import pype.api as pype


class CollectCurrentFile(pyblish.api.ContextPlugin):
    """Inject the current working file into context"""

    order = pyblish.api.CollectorOrder
    order = pyblish.api.CollectorOrder - 0.1

    def process(self, context):
        """Todo, inject the current working file"""

        project = context.data('activeProject')
        context.set_data('currentFile', value=project.path())
        context.data["currentFile"] = path = project.path()
        context.data["version"] = pype.get_version_from_path(path)
        self.log.info("currentFile: {}".format(context.data["currentFile"]))
        self.log.info("version: {}".format(context.data["version"]))

pype/plugins/nukestudio/publish/collect_handles.py (new file, 44 lines)
@@ -0,0 +1,44 @@
import json
from pyblish import api


class CollectClipHandles(api.ContextPlugin):
    """Collect handles from all instances and add them to assetsShared."""

    order = api.CollectorOrder + 0.1025
    label = "Collect Handles"
    hosts = ["nukestudio"]
    families = ['clip']

    def process(self, context):
        assets_shared = context.data.get("assetsShared")
        assert assets_shared, "Context data missing `assetsShared` key"

        # find all main type instances and add their handles to assets shared
        instances = context[:]
        for instance in instances:
            # get handles
            handles = int(instance.data["handles"])
            handle_start = int(instance.data["handleStart"])
            handle_end = int(instance.data["handleEnd"])

            if instance.data.get("main"):
                name = instance.data["asset"]
                if assets_shared.get(name):
                    self.log.debug("Adding to shared assets: `{}`".format(
                        instance.data["name"]))
                    assets_shared[name].update({
                        "handles": handles,
                        "handleStart": handle_start,
                        "handleEnd": handle_end
                    })

        for instance in instances:
            if not instance.data.get("main"):
                self.log.debug("Synchronize handles on: `{}`".format(
                    instance.data["name"]))
                name = instance.data["asset"]
                s_asset_data = assets_shared.get(name)
                instance.data["handles"] = s_asset_data["handles"]
                instance.data["handleStart"] = s_asset_data["handleStart"]
                instance.data["handleEnd"] = s_asset_data["handleEnd"]

@@ -13,7 +13,7 @@ class CollectHierarchyInstance(pyblish.api.InstancePlugin):
    """

    label = "Collect Hierarchy Clip"
    order = pyblish.api.CollectorOrder + 0.1
    order = pyblish.api.CollectorOrder + 0.101
    families = ["clip"]

    def convert_to_entity(self, key, value):

@@ -37,11 +37,15 @@ class CollectHierarchyInstance(pyblish.api.InstancePlugin):
        clip = instance.data["item"]
        asset = instance.data.get("asset")

        # create asset_names conversion table
        if not context.data.get("assetsShared"):
            context.data["assetsShared"] = dict()

        # build data for inner nukestudio project property
        data = {
            "sequence": context.data['activeSequence'].name().replace(' ', '_'),
            "track": clip.parent().name().replace(' ', '_'),
            "shot": asset
            "clip": asset
        }
        self.log.debug("__ data: {}".format(data))

@@ -65,16 +69,24 @@ class CollectHierarchyInstance(pyblish.api.InstancePlugin):

        # if shot in template then remove it
        if "shot" in template.lower():
            instance.data["asset"] = [
                t for t in template.split('/')][-1]
            template = "/".join([t for t in template.split('/')][0:-1])

        # take template from Tag.note and break it into parts
        patern = re.compile(r"^\{([a-z]*?)\}")
        par_split = [patern.findall(t)[0]
        template_split = template.split("/")
        patern = re.compile(r"\{([a-z]*?)\}")
        par_split = [patern.findall(t)
                     for t in template.split("/")]

        # format all {} in two layers
        for k, v in t_metadata.items():
            new_k = k.split(".")[1]

            # ignore all help strings
            if 'help' in k:
                continue
            # self.log.info("__ new_k: `{}`".format(new_k))
            try:
                # first try all data and context data to
                # add to individual properties

@@ -82,9 +94,6 @@ class CollectHierarchyInstance(pyblish.api.InstancePlugin):
                    **dict(context.data, **data))
                d_metadata[new_k] = new_v

                if 'shot' in new_k:
                    instance.data["asset"] = new_v

                # create parents
                # find matching index of order
                p_match_i = [i for i, p in enumerate(par_split)

@@ -92,29 +101,46 @@ class CollectHierarchyInstance(pyblish.api.InstancePlugin):

                # if any is matching then convert to entity_types
                if p_match_i:
                    parent = self.convert_to_entity(new_k, new_v)
                    parent = self.convert_to_entity(
                        new_k, template_split[p_match_i[0]])
                    parents.insert(p_match_i[0], parent)
            except Exception:
                d_metadata[new_k] = v

        # create new shot asset name
        instance.data["asset"] = instance.data["asset"].format(
            **d_metadata)
        self.log.debug("__ instance.data[asset]: {}".format(instance.data["asset"]))

        # lastly fill those individual properties into
        # the format string with collected data
        parents = [{"entityName": p["entityName"].format(
            **d_metadata), "entityType": p["entityType"]}
            for p in parents]
        self.log.debug("__ parents: {}".format(parents))

        hierarchy = template.format(
            **d_metadata)
        self.log.debug("__ hierarchy: {}".format(hierarchy))

        # check if hierarchy attribute is already created
        # it should not be so return warning if it is
        hd = instance.data.get("hierarchy")
        self.log.info("__ hd: {}".format(hd))
        assert not hd, "Only one Hierarchy Tag is \
allowed. Clip: `{}`".format(asset)

        assetsShared = {
            asset: {
                "asset": instance.data["asset"],
                "hierarchy": hierarchy,
                "parents": parents
            }}
        self.log.debug("__ assetsShared: {}".format(assetsShared))
        # add formatted hierarchy path into instance data
        instance.data["hierarchy"] = hierarchy
        instance.data["parents"] = parents
        self.log.debug("__ hierarchy.format: {}".format(hierarchy))
        self.log.debug("__ parents: {}".format(parents))
        self.log.debug("__ d_metadata: {}".format(d_metadata))
        context.data["assetsShared"].update(
            assetsShared)


class CollectHierarchyContext(pyblish.api.ContextPlugin):

@@ -123,42 +149,92 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
    '''

    label = "Collect Hierarchy Context"
    order = pyblish.api.CollectorOrder + 0.101
    order = pyblish.api.CollectorOrder + 0.102

    def update_dict(self, ex_dict, new_dict):
        for key in ex_dict:
            if key in new_dict and isinstance(ex_dict[key], dict):
                new_dict[key] = self.update_dict(ex_dict[key], new_dict[key])
            else:
                new_dict[key] = ex_dict[key]
                if ex_dict.get(key) and new_dict.get(key):
                    continue
                else:
                    new_dict[key] = ex_dict[key]

        return new_dict

    def process(self, context):
        instances = context[:]
        # create hierarchyContext attr if context has none
        self.log.debug("__ instances: {}".format(context[:]))

        temp_context = {}
        for instance in instances:
            if 'projectfile' in instance.data.get('family', ''):
                continue

            name = instance.data["asset"]

            # get handles
            handles = int(instance.data["handles"])
            handle_start = int(instance.data["handleStart"] + handles)
            handle_end = int(instance.data["handleEnd"] + handles)

            # get source frames
            source_first = int(instance.data["sourceFirst"])
            source_in = int(instance.data["sourceIn"])
            source_out = int(instance.data["sourceOut"])

            instance.data['startFrame'] = int(
                source_first + source_in - handle_start)
            instance.data['endFrame'] = int(
                (source_first + source_out + handle_end))

            # inject assetsShared to other plates types
            assets_shared = context.data.get("assetsShared")

            if assets_shared:
                s_asset_data = assets_shared.get(name)
                if s_asset_data:
                    self.log.debug("__ s_asset_data: {}".format(s_asset_data))
                    name = instance.data["asset"] = s_asset_data["asset"]
                    instance.data["parents"] = s_asset_data["parents"]
                    instance.data["hierarchy"] = s_asset_data["hierarchy"]

            self.log.debug("__ instance.data[parents]: {}".format(instance.data["parents"]))
            self.log.debug("__ instance.data[hierarchy]: {}".format(instance.data["hierarchy"]))
            self.log.debug("__ instance.data[name]: {}".format(instance.data["name"]))
            if "main" not in instance.data["name"].lower():
                continue

            in_info = {}
            name = instance.data["asset"]
            # suppose that all instances are Shots
            in_info['entity_type'] = 'Shot'

            # get custom attributes of the shot
            in_info['custom_attributes'] = {
                'fend': instance.data['endFrame'],
                'fstart': instance.data['startFrame'],
                'handles': int(instance.data.get('handles')),
                'fend': int(
                    (source_first + source_out)),
                'fstart': int(
                    source_first + source_in),
                'fps': context.data["framerate"]
            }

            handle_start = instance.data.get('handleStart')
            handle_end = instance.data.get('handleEnd')
            self.log.debug("__ handle_start: {}".format(handle_start))
            self.log.debug("__ handle_end: {}".format(handle_end))

            if handle_start and handle_end:
                in_info['custom_attributes'].update({
                    "handle_start": handle_start,
                    "handle_end": handle_end
                })

            in_info['tasks'] = instance.data['tasks']

            parents = instance.data.get('parents', [])
            self.log.debug("__ in_info: {}".format(in_info))

            actual = {name: in_info}

@@ -171,7 +247,6 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
            actual = next_dict

            temp_context = self.update_dict(temp_context, actual)
            self.log.debug(temp_context)

        # TODO: 100% sure way of getting the project! Will be Name or Code?
        project_name = avalon.Session["AVALON_PROJECT"]

pype/plugins/nukestudio/publish/collect_plates.py (new file, 210 lines)
@@ -0,0 +1,210 @@
from pyblish import api
import pype


class CollectPlates(api.InstancePlugin):
    """Collect plates"""

    order = api.CollectorOrder + 0.49
    label = "Collect Plates"
    hosts = ["nukestudio"]
    families = ["plate"]

    def process(self, instance):
        import os

        # add to representations
        if not instance.data.get("representations"):
            instance.data["representations"] = list()

        version_data = dict()
        context = instance.context
        anatomy = context.data.get("anatomy", None)
        padding = int(anatomy.templates['render']['padding'])

        name = instance.data["subset"]
        asset = instance.data["asset"]
        track = instance.data["track"]
        family = instance.data["family"]
        families = instance.data["families"]
        version = instance.data["version"]
        source_path = instance.data["sourcePath"]
        source_file = os.path.basename(source_path)

        # staging dir creation
        staging_dir = os.path.dirname(
            source_path)

        item = instance.data["item"]

        # get handles
        handles = int(instance.data["handles"])
        handle_start = int(instance.data["handleStart"])
        handle_end = int(instance.data["handleEnd"])

        # get source frames
        source_in = int(instance.data["sourceIn"])
        source_out = int(instance.data["sourceOut"])

        # get comp frame range
        frame_start = int(instance.data["startFrame"])
        frame_end = int(instance.data["endFrame"])

        # get source frames with handles
        source_in_h = int(instance.data["sourceInH"])
        source_out_h = int(instance.data["sourceOutH"])

        # get timeline frames
        timeline_in = int(instance.data["timelineIn"])
        timeline_out = int(instance.data["timelineOut"])

        # frame-ranges with handles
        timeline_frame_start = int(instance.data["timelineInHandles"])
        timeline_frame_end = int(instance.data["timelineOutHandles"])

        # get colorspace
        colorspace = item.sourceMediaColourTransform()

        # get fps
        fps = float(str(instance.data["fps"]))

        # test output
        self.log.debug("__ handles: {}".format(handles))
        self.log.debug("__ handle_start: {}".format(handle_start))
        self.log.debug("__ handle_end: {}".format(handle_end))
        self.log.debug("__ frame_start: {}".format(frame_start))
        self.log.debug("__ frame_end: {}".format(frame_end))
        self.log.debug("__ f duration: {}".format(frame_end - frame_start + 1))
        self.log.debug("__ source_in: {}".format(source_in))
        self.log.debug("__ source_out: {}".format(source_out))
        self.log.debug("__ s duration: {}".format(source_out - source_in + 1))
        self.log.debug("__ source_in_h: {}".format(source_in_h))
        self.log.debug("__ source_out_h: {}".format(source_out_h))
        self.log.debug("__ sh duration: {}".format(source_out_h - source_in_h + 1))
        self.log.debug("__ timeline_in: {}".format(timeline_in))
        self.log.debug("__ timeline_out: {}".format(timeline_out))
        self.log.debug("__ t duration: {}".format(timeline_out - timeline_in + 1))
        self.log.debug("__ timeline_frame_start: {}".format(
            timeline_frame_start))
        self.log.debug("__ timeline_frame_end: {}".format(timeline_frame_end))
        self.log.debug("__ colorspace: {}".format(colorspace))
        self.log.debug("__ track: {}".format(track))
        self.log.debug("__ fps: {}".format(fps))
        self.log.debug("__ source_file: {}".format(source_file))
        self.log.debug("__ staging_dir: {}".format(staging_dir))

        self.log.debug("__ before family: {}".format(family))
        self.log.debug("__ before families: {}".format(families))

        # this is just a workaround because the 'clip' family is filtered
        instance.data["family"] = families[-1]
        instance.data["families"].append(family)

        # add to data of representation
        version_data.update({
            "handles": handles,
            "handleStart": handle_start,
            "handleEnd": handle_end,
            "sourceIn": source_in,
            "sourceOut": source_out,
            "startFrame": frame_start,
            "endFrame": frame_end,
            "timelineIn": timeline_in,
            "timelineOut": timeline_out,
            "timelineInHandles": timeline_frame_start,
            "timelineOutHandles": timeline_frame_end,
            "fps": fps,
            "colorspace": colorspace,
            "families": [f for f in families if 'ftrack' not in f],
            "asset": asset,
            "subset": name,
            "track": track,
            "version": int(version)
        })
        instance.data["versionData"] = version_data

        try:
            head, padding, ext = source_file.split('.')
            source_first_frame = int(padding)
            padding = len(padding)
            file = "{head}.%0{padding}d.{ext}".format(
                head=head,
                padding=padding,
                ext=ext
            )
            start_frame = source_first_frame
            end_frame = source_first_frame + source_out
            files = [file % i for i in range(
                (source_first_frame + source_in_h),
                ((source_first_frame + source_out_h) + 1), 1)]
        except Exception as e:
            self.log.debug("Exception in file: {}".format(e))
            head, ext = source_file.split('.')
            files = source_file
            start_frame = source_in_h
            end_frame = source_out_h

        mov_file = head + ".mov"
        mov_path = os.path.normpath(os.path.join(staging_dir, mov_file))
        if os.path.exists(mov_path):
            # adding mov into the representations
            self.log.debug("__ mov_path: {}".format(mov_path))
            plates_mov_representation = {
                'files': mov_file,
                'stagingDir': staging_dir,
                'startFrame': 0,
                'endFrame': source_out - source_in + 1,
                'step': 1,
                'frameRate': fps,
                'preview': True,
                'thumbnail': False,
                'name': "preview",
                'ext': "mov",
            }
            instance.data["representations"].append(
                plates_mov_representation)

        thumb_file = head + ".png"
        thumb_path = os.path.join(staging_dir, thumb_file)
        self.log.debug("__ thumb_path: {}".format(thumb_path))
        thumbnail = item.thumbnail(source_in).save(
            thumb_path,
            format='png'
        )
        self.log.debug("__ thumbnail: {}".format(thumbnail))

        thumb_representation = {
            'files': thumb_file,
            'stagingDir': staging_dir,
            'name': "thumbnail",
            'thumbnail': True,
            'ext': "png"
        }
        instance.data["representations"].append(
            thumb_representation)

        # adding representation for plates
        plates_representation = {
            'files': files,
            'stagingDir': staging_dir,
            'name': ext,
            'ext': ext,
            'startFrame': start_frame,
            'endFrame': end_frame,
        }
        instance.data["representations"].append(plates_representation)

        # testing families
        family = instance.data["family"]
        families = instance.data["families"]

        # test prints version_data
        self.log.debug("__ version_data: {}".format(version_data))
        self.log.debug("__ plates_representation: {}".format(
            plates_representation))
        self.log.debug("__ after family: {}".format(family))
        self.log.debug("__ after families: {}".format(families))

        # # this will do FnNsFrameServer
        # FnNsFrameServer.renderFrames(*_args)

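The try/except in collect_plates.py above relies on sources named `head.frame.ext`; the width of the frame token becomes the `%0Nd` template used to expand the plate file list. A standalone illustration of that round trip (the file name is hypothetical):

    source_file = "shot010_plateMain.0995.exr"  # hypothetical source name

    head, padding, ext = source_file.split('.')
    template = "{head}.%0{width}d.{ext}".format(
        head=head, width=len(padding), ext=ext)

    first = int(padding)
    # expand a few frames the way collect_plates builds `files`
    files = [template % i for i in range(first, first + 3)]
    print(files)
    # ['shot010_plateMain.0995.exr', 'shot010_plateMain.0996.exr',
    #  'shot010_plateMain.0997.exr']
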
@@ -5,7 +5,7 @@ import hiero
class CollectSequence(api.ContextPlugin):
    """Collect all Track items selection."""

    order = api.CollectorOrder
    order = api.CollectorOrder - 0.01
    label = "Collect Sequence"
    hosts = ["nukestudio"]

pype/plugins/nukestudio/publish/collect_subsets.py (new file, 210 lines)
@@ -0,0 +1,210 @@
from pyblish import api
from copy import deepcopy


class CollectClipSubsets(api.InstancePlugin):
    """Collect Subsets from selected Clips, Tags, Preset."""

    order = api.CollectorOrder + 0.103
    label = "Collect Subsets"
    hosts = ["nukestudio"]
    families = ['clip']

    def process(self, instance):
        context = instance.context

        asset_name = instance.data["asset"]

        # get all subsets from tags and match them with nks_presets >
        # > looks to rules for tasks, subsets, representations
        subsets_collection = self.get_subsets_from_presets(instance)

        # iterate through subsets and create instances
        for subset, attrs in subsets_collection.items():
            self.log.info((subset, attrs))
            # create families
            item = instance.data["item"]
            family = instance.data["family"]
            families = attrs["families"] + [str(subset)]
            task = attrs["task"]
            subset = "{0}{1}".format(
                subset,
                instance.data.get("subsetType") or "Default")
            instance_name = "{0}_{1}_{2}".format(asset_name, task, subset)
            self.log.info("Creating instance with name: {}".format(
                instance_name))

            # get handles
            handles = int(instance.data["handles"])
            handle_start = int(instance.data["handleStart"] + handles)
            handle_end = int(instance.data["handleEnd"] + handles)

            # get source frames
            source_first = int(instance.data["sourceFirst"])
            source_in = int(instance.data["sourceIn"])
            source_out = int(instance.data["sourceOut"])

            # frame-ranges with handles
            source_in_h = source_in - handle_start
            source_out_h = source_out + handle_end

            # get timeline frames
            timeline_in = int(item.timelineIn())
            timeline_out = int(item.timelineOut())

            # frame-ranges with handles
            timeline_frame_start = timeline_in - handle_start
            timeline_frame_end = timeline_out + handle_end

            # creating comp frame range
            frame_start = instance.data["frameStart"]
            frame_end = frame_start + (source_out - source_in)

            # get sequence from context, and fps
            sequence = context.data["activeSequence"]
            fps = sequence.framerate()

            context.create_instance(
                name=instance_name,
                subset=subset,
                asset=asset_name,
                track=instance.data.get("track"),
                item=item,
                task=task,
                sourcePath=instance.data.get("sourcePath"),
                family=family,
                families=families,
                sourceFirst=source_first,
                sourceIn=source_in,
                sourceOut=source_out,
                sourceInH=source_in_h,
                sourceOutH=source_out_h,
                frameStart=frame_start,
                startFrame=frame_start,
                endFrame=frame_end,
                timelineIn=timeline_in,
                timelineOut=timeline_out,
                timelineInHandles=timeline_frame_start,
                timelineOutHandles=timeline_frame_end,
                fps=fps,
                handles=instance.data["handles"],
                handleStart=handle_start,
                handleEnd=handle_end,
                attributes=attrs,
                version=instance.data["version"],
                hierarchy=instance.data.get("hierarchy", None),
                parents=instance.data.get("parents", None),
                publish=True
            )

        # removing original instance
        context.remove(instance)

    def get_subsets_from_presets(self, instance):

        family = instance.data["family"]
        # get presets and tags
        tag_tasks = instance.data["tasks"]
        presets = instance.context.data['presets']
        nks_presets = presets[instance.context.data['host']]
        family_default_preset = nks_presets["asset_default"].get(family)

        if family_default_preset:
            frame_start = family_default_preset.get("fstart", 1)
            instance.data["frameStart"] = int(frame_start)

        # get specific presets
        pr_host_tasks = deepcopy(
            nks_presets["rules_tasks"]).get("hostTasks", None)
        pr_host_subsets = deepcopy(
            nks_presets["rules_tasks"]).get("hostSubsets", None)

        subsets_collect = dict()
        # iterate tags and collect subset properties from presets
        for task in tag_tasks:
            self.log.info("__ task: {}".format(task))
            try:
                # get host for task
                host = None
                host = [h for h, tasks in pr_host_tasks.items()
                        if task in tasks][0]
            except IndexError:
                pass

            try:
                # get subsets for task
                subsets = None
                subsets = pr_host_subsets[host]
            except KeyError:
                pass

            if not subsets:
                continue

            # get subsets for task
            for sub in subsets:
                # get specific presets
                pr_subsets = deepcopy(nks_presets["rules_subsets"])
                pr_representations = deepcopy(
                    nks_presets["rules_representations"])

                # initialise collection dictionary
                subs_data = dict()

                # gets subset properties
                subs_data[sub] = None
                subs_data[sub] = pr_subsets.get(sub, None)

                # gets representation if in keys
                if subs_data[sub] and (
                    "representation" in subs_data[sub].keys()
                ):
                    repr_name = subs_data[sub]["representation"]

                    # overwrite representation key with values from preset
                    subs_data[sub]["representation"] = pr_representations[
                        repr_name
                    ]
                    subs_data[sub]["representation"]["name"] = repr_name

                # gets nodes and presets data if in keys
                # gets nodes if any
                if subs_data[sub] and (
                    "nodes" in subs_data[sub].keys()
                ):
                    # iterate through each node
                    for k in subs_data[sub]["nodes"]:
                        pr_node = k
                        pr_family = subs_data[sub]["nodes"][k]["family"]

                        # create attribute dict for later filling
                        subs_data[sub]["nodes"][k]["attributes"] = dict()

                        # iterate presets for the node
                        for p, path in subs_data[sub]["nodes"][k][
                                "presets"].items():

                            # adds node type and family for preset path
                            nPath = path + [pr_node, pr_family]

                            # create a basic iternode to be walked through
                            # until presets are found at the end
                            iternode = presets[p]
                            for part in nPath:
                                iternode = iternode[part]

                            iternode = {k: v for k, v in iternode.items()
                                        if not k.startswith("_")}
                            # adds found preset to attributes of the node
                            subs_data[sub]["nodes"][k][
                                "attributes"].update(iternode)

                        # removes preset key
                        subs_data[sub]["nodes"][k].pop("presets")

                # add all into dictionary
                self.log.info("__ subs_data[sub]: {}".format(subs_data[sub]))
                subs_data[sub]["task"] = task.lower()
                subsets_collect.update(subs_data)

        return subsets_collect

pype/plugins/nukestudio/publish/collect_tag_handles.py (new file, 47 lines)
@@ -0,0 +1,47 @@
import json
from pyblish import api


class CollectClipTagHandles(api.InstancePlugin):
    """Collect Handles from selected track items."""

    order = api.CollectorOrder + 0.012
    label = "Collect Tag Handles"
    hosts = ["nukestudio"]
    families = ['clip']

    def process(self, instance):
        # gets tags
        tags = instance.data["tags"]

        for t in tags:
            t_metadata = dict(t["metadata"])
            t_family = t_metadata.get("tag.family", "")

            # gets only handles family tags and collect labels
            if "handles" in t_family:
                # gets value of handles
                t_value = int(t_metadata.get("tag.value", ""))

                # gets arguments if there are any
                t_args = t_metadata.get("tag.args", "")

                # distribute handles
                if not t_args:
                    # add handles to both sides
                    instance.data['handles'] = t_value
                    self.log.info("Collected Handles: `{}`".format(
                        instance.data['handles']))
                else:
                    t_args = json.loads(t_args.replace("'", "\""))
                    # add in start
                    if 'start' in t_args['where']:
                        instance.data["handleStart"] += t_value
                        self.log.info("Collected Handle Start: `{}`".format(
                            instance.data["handleStart"]))

                    # add in end
                    if 'end' in t_args['where']:
                        instance.data["handleEnd"] += t_value
                        self.log.info("Collected Handle End: `{}`".format(
                            instance.data["handleEnd"]))

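CollectClipTagHandles expects tag metadata in roughly the following shape; `tag.args` is JSON (single quotes are tolerated via the `replace`) and its `where` field names the side or sides the handle value applies to. A hypothetical payload and how the plugin reads it (the exact field values are an assumption, not taken from the repository):

    import json

    # Hypothetical tag metadata, shaped the way the plugin reads it.
    t_metadata = {
        "tag.family": "handles",
        "tag.value": "12",
        "tag.args": "{'where': 'start, end'}",
    }

    t_args = json.loads(t_metadata["tag.args"].replace("'", "\""))
    if 'start' in t_args['where']:
        print("add 12 frames at clip start")
    if 'end' in t_args['where']:
        print("add 12 frames at clip end")
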
@@ -4,7 +4,7 @@ from pyblish import api
class CollectClipTagTasks(api.InstancePlugin):
    """Collect Tags from selected track items."""

    order = api.CollectorOrder + 0.006
    order = api.CollectorOrder + 0.012
    label = "Collect Tag Tasks"
    hosts = ["nukestudio"]
    families = ['clip']

pype/plugins/nukestudio/publish/collect_tag_types.py (new file, 37 lines)
@@ -0,0 +1,37 @@
from pyblish import api


class CollectClipTagTypes(api.InstancePlugin):
    """Collect Types from Tags of selected track items."""

    order = api.CollectorOrder + 0.012
    label = "Collect Plate Type from Tag"
    hosts = ["nukestudio"]
    families = ['clip']

    def process(self, instance):
        # gets tags
        tags = instance.data["tags"]

        subset_names = list()
        for t in tags:
            t_metadata = dict(t["metadata"])
            t_family = t_metadata.get("tag.family", "")

            # gets only plate family tags and collect labels
            if "plate" in t_family:
                t_type = t_metadata.get("tag.type", "")
                t_order = t_metadata.get("tag.order", "")
                subset_type = "{0}{1}".format(
                    t_type.capitalize(), t_order)
                subset_names.append(subset_type)

                if "main" in t_type:
                    instance.data["main"] = True

        if subset_names:
            instance.data["subsetType"] = subset_names[0]

            self.log.info("Collected Plate Types from Tags: `{}`".format(
                instance.data["subsetType"]))
        return

@@ -4,7 +4,7 @@ from pyblish import api
class CollectClipTags(api.InstancePlugin):
    """Collect Tags from selected track items."""

    order = api.CollectorOrder + 0.005
    order = api.CollectorOrder + 0.011
    label = "Collect Tags"
    hosts = ["nukestudio"]
    families = ['clip']

pype/plugins/nukestudio/publish/extract_audio.py (new file, 62 lines)
@ -0,0 +1,62 @@
|
|||
from pyblish import api
import pype


class ExtractAudioFile(pype.api.Extractor):
    """Extracts audio subset file"""

    order = api.ExtractorOrder
    label = "Extract Subset Audio"
    hosts = ["nukestudio"]
    families = ["clip", "audio"]
    match = api.Intersection
    optional = True
    active = False

    def process(self, instance):
        import os

        from hiero.exporters.FnExportUtil import writeSequenceAudioWithHandles

        item = instance.data["item"]
        context = instance.context

        self.log.debug("creating staging dir")
        self.staging_dir(instance)

        staging_dir = instance.data["stagingDir"]

        # get handles from context
        handles = instance.data["handles"]
        handle_start = instance.data["handleStart"] + handles
        handle_end = instance.data["handleEnd"] + handles

        # get sequence from context
        sequence = context.data["activeSequence"]

        # path to wav file
        audio_file = os.path.join(
            staging_dir, "{0}.wav".format(instance.data["subset"])
        )

        # export audio to disk
        writeSequenceAudioWithHandles(
            audio_file,
            sequence,
            item.timelineIn(),
            item.timelineOut(),
            handle_start,
            handle_end
        )

        # add to representations
        if not instance.data.get("representations"):
            instance.data["representations"] = list()

        representation = {
            'files': [audio_file],
            'stagingDir': staging_dir,
            'name': "wav",
            'ext': ".wav"
        }

        instance.data["representations"].append(representation)
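The handle arithmetic above adds the global `handles` value on top of the per-side offsets before calling writeSequenceAudioWithHandles; a tiny sketch with hypothetical values:

# Hypothetical instance data, mirroring the keys read above.
data = {"handles": 10, "handleStart": 5, "handleEnd": 0}

handle_start = data["handleStart"] + data["handles"]  # 15 frames before the in point
handle_end = data["handleEnd"] + data["handles"]      # 10 frames after the out point
print(handle_start, handle_end)  # 15 10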
@@ -1,101 +0,0 @@
from pyblish import api


class ExtractReview(api.InstancePlugin):
    """Extracts movie for review"""

    order = api.ExtractorOrder
    label = "NukeStudio Review"
    optional = True
    hosts = ["nukestudio"]
    families = ["review"]

    def process(self, instance):
        import os
        import time

        import hiero.core
        from hiero.exporters.FnExportUtil import writeSequenceAudioWithHandles

        nukeWriter = hiero.core.nuke.ScriptWriter()

        item = instance.data["item"]

        handles = instance.data["handles"]

        sequence = item.parent().parent()

        output_path = os.path.abspath(
            os.path.join(
                instance.context.data["currentFile"], "..", "workspace"
            )
        )

        # Generate audio
        audio_file = os.path.join(
            output_path, "{0}.wav".format(instance.data["name"])
        )

        writeSequenceAudioWithHandles(
            audio_file,
            sequence,
            item.timelineIn(),
            item.timelineOut(),
            handles,
            handles
        )

        # Generate Nuke script
        root_node = hiero.core.nuke.RootNode(
            item.timelineIn() - handles,
            item.timelineOut() + handles,
            fps=sequence.framerate()
        )

        root_node.addProjectSettings(instance.context.data["colorspace"])

        nukeWriter.addNode(root_node)

        item.addToNukeScript(
            script=nukeWriter,
            includeRetimes=True,
            retimeMethod="Frame",
            startHandle=handles,
            endHandle=handles
        )

        movie_path = os.path.join(
            output_path, "{0}.mov".format(instance.data["name"])
        )
        write_node = hiero.core.nuke.WriteNode(movie_path.replace("\\", "/"))
        self.log.info("__ write_node: {0}".format(write_node))
        write_node.setKnob("file_type", "mov")
        write_node.setKnob(
            "colorspace", instance.context.data["colorspace"]["lutSettingFloat"])
        write_node.setKnob("meta_codec", "ap4h")
        write_node.setKnob("mov64_codec", "ap4h")
        write_node.setKnob("mov64_bitrate", 400000)
        write_node.setKnob("mov64_bitrate_tolerance", 40000000)
        write_node.setKnob("mov64_quality_min", 2)
        write_node.setKnob("mov64_quality_max", 31)
        write_node.setKnob("mov64_gop_size", 12)
        write_node.setKnob("mov64_b_frames", 0)
        write_node.setKnob("raw", True)
        write_node.setKnob("mov64_audiofile", audio_file.replace("\\", "/"))
        write_node.setKnob("mov32_fps", sequence.framerate())
        nukeWriter.addNode(write_node)

        nukescript_path = movie_path.replace(".mov", ".nk")
        nukeWriter.writeToDisk(nukescript_path)

        process = hiero.core.nuke.executeNukeScript(
            nukescript_path,
            open(movie_path.replace(".mov", ".log"), "w")
        )

        while process.poll() is None:
            time.sleep(0.5)

        assert os.path.exists(movie_path), "Creating review failed."

        instance.data["output_path"] = movie_path
        instance.data["review_family"] = "mov"
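The removed extractor blocked on the spawned Nuke render with a poll loop; the same pattern, self-contained (the command here is a stand-in for the Nuke process):

import subprocess
import sys
import time

# Stand-in for hiero.core.nuke.executeNukeScript(...) above.
process = subprocess.Popen([sys.executable, "-c", "print('rendering')"])

while process.poll() is None:  # poll() returns None while the process runs
    time.sleep(0.5)

print("exit code:", process.returncode)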
@@ -9,6 +9,40 @@ log = pype.Logger().get_logger("BurninWrapper", "burninwrap")


class ModifiedBurnins(ffmpeg_burnins.Burnins):
    '''
    This is a modification of the OTIO FFmpeg Burnin adapter.
    - requires FFmpeg in PATH

    Offers 6 positions for burnin text. Each can be set with:
    - static text
    - frames
    - timecode

    Options - dictionary which sets the final look.
    - Datatype explanation:
        <color> string format must be supported by FFmpeg.
            Examples: "#000000", "0x000000", "black"
        <font> must be accessible by FFmpeg = name of a font registered in the
            system or path to a font file.
            Examples: "Arial", "C:/Windows/Fonts/arial.ttf"

    - Possible keys:
        "opacity" - opacity of text - <float, Range:0-1>
        "bg_opacity" - opacity of background (box around text) - <float, Range:0-1>
        "bg_color" - background color - <color>
        "bg_padding" - background padding in pixels - <int>
        "x_offset" - offsets burnin horizontally by entered pixels from border - <int>
        "y_offset" - offsets burnin vertically by entered pixels from border - <int>
            - x_offset & y_offset should be set at least to the same value as bg_padding!!
        "font" - font family for text - <font>
        "font_size" - font size in pixels - <int>
        "font_color" - color of text - <color>
        "frame_offset" - default start frame - <int>
            - required IF start frame is not set when using frames or timecode burnins

    General options can be set on initialization through the "options_init" arg.
    They can be overridden per burnin when a burnin is added.
    '''
    TOP_CENTERED = ffmpeg_burnins.TOP_CENTERED
    BOTTOM_CENTERED = ffmpeg_burnins.BOTTOM_CENTERED
    TOP_LEFT = ffmpeg_burnins.TOP_LEFT
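A sketch of an "options_init" dict matching the keys documented above (all values are illustrative, not defaults):

# Illustrative values only; any FFmpeg-supported color/font string works.
options_init = {
    "opacity": 1.0,
    "bg_opacity": 0.5,
    "bg_color": "black",
    "bg_padding": 5,
    "x_offset": 5,   # at least bg_padding, per the note above
    "y_offset": 5,
    "font": "Arial",
    "font_size": 42,
    "font_color": "white",
}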
@@ -162,13 +196,79 @@ def example(input_path, output_path):
    burnin.render(output_path, overwrite=True)


-def example_with_presets(input_path, output_path, data):
+def burnins_from_data(input_path, output_path, data, overwrite=True):
    '''
    This method adds burnins to a video/image file based on preset settings.
    The output extension MUST be the same as the input. (mov -> mov, avi -> avi, ...)

    :param input_path: full path to the input file where burnins should be added
    :type input_path: str
    :param output_path: full path to the output file where the result will be rendered
    :type output_path: str
    :param data: data required for burnin settings (more info below)
    :type data: dict
    :param overwrite: output will be overwritten if it already exists, defaults to True
    :type overwrite: bool

    Presets must be set separately. Should be a dict with 2 keys:
    - "options" - sets the look of the burnins - colors, opacity, ... (more info: ModifiedBurnins doc)
        - *OPTIONAL* default values are used when not included
    - "burnins" - contains a dictionary with burnin settings
        - *OPTIONAL* burnins won't be added (easier is not to use this)
        - each key of "burnins" represents an alignment; there are 6 possibilities:
            TOP_LEFT        TOP_CENTERED        TOP_RIGHT
            BOTTOM_LEFT     BOTTOM_CENTERED     BOTTOM_RIGHT
        - the value for each key is a dict which should contain "function", saying
          what kind of burnin it is:
            "text", "timecode" or "frame_numbers"
        - a "text" key with content is also required when the "text" function is used

    Which *data* keys are required depends on the presets.
    - "start_frame" - required when the "timecode" or "frame_numbers" function is used
    - "start_frame_tc" - when "timecode" should start with a different frame
    - *keys for static text*

    EXAMPLE:
    preset = {
        "options": {*OPTIONS FOR LOOK*},
        "burnins": {
            "TOP_LEFT": {
                "function": "text",
                "text": "static_text"
            },
            "TOP_RIGHT": {
                "function": "text",
                "text": "{shot}"
            },
            "BOTTOM_LEFT": {
                "function": "timecode"
            },
            "BOTTOM_RIGHT": {
                "function": "frame_numbers"
            }
        }
    }

    For this preset we'll need at least this data:
    data = {
        "start_frame": 1001,
        "shot": "sh0010"
    }

    When the timecode should start from 1, the data needs:
    data = {
        "start_frame": 1001,
        "start_frame_tc": 1,
        "shot": "sh0010"
    }
    '''
    presets = config.get_presets().get('tools', {}).get('burnins', {})
    options_init = presets.get('options')

    burnin = ModifiedBurnins(input_path, options_init=options_init)

    start_frame = data.get("start_frame")
    start_frame_tc = data.get('start_frame_tc', start_frame)
    for align_text, preset in presets.get('burnins', {}).items():
        align = None
        if align_text == 'TOP_LEFT':
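The truncated loop above dispatches `align_text` through an if/elif chain; an equivalent sketch using a lookup table (this assumes all six alignment constants exist on the class - three are visible in the excerpt, the other three are an assumption):

# Sketch only: assumes TOP_RIGHT, BOTTOM_LEFT and BOTTOM_RIGHT constants
# exist alongside the three shown above.
ALIGN_MAP = {
    'TOP_LEFT': ModifiedBurnins.TOP_LEFT,
    'TOP_CENTERED': ModifiedBurnins.TOP_CENTERED,
    'TOP_RIGHT': ModifiedBurnins.TOP_RIGHT,
    'BOTTOM_LEFT': ModifiedBurnins.BOTTOM_LEFT,
    'BOTTOM_CENTERED': ModifiedBurnins.BOTTOM_CENTERED,
    'BOTTOM_RIGHT': ModifiedBurnins.BOTTOM_RIGHT,
}
align = ALIGN_MAP.get(align_text)  # replaces the if/elif chain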
@@ -205,8 +305,8 @@ def example_with_presets(input_path, output_path, data):
        if bi_func == 'frame_numbers':
            burnin.add_frame_numbers(align, start_frame=start_frame)
        elif bi_func == 'timecode':
-            burnin.add_timecode(align, start_frame=start_frame)
+            burnin.add_timecode(align, start_frame=start_frame_tc)
        elif bi_func == 'text':
            if not preset.get('text'):
                log.error('Text is not set for text function burnin!')
                return
@@ -218,7 +318,7 @@ def example_with_presets(input_path, output_path, data):
            )
            return

-    burnin.render(output_path, overwrite=True)
+    burnin.render(output_path, overwrite=overwrite)


'''
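A hedged usage sketch of the renamed entry point (the paths and data values are placeholders; the presets themselves are read from the studio config inside the function, as shown above):

# Hypothetical call; input/output paths are placeholders.
data = {
    "start_frame": 1001,
    "shot": "sh0010",
}
burnins_from_data(
    "/path/to/input.mov",
    "/path/to/output.mov",  # extension must match the input
    data,
    overwrite=True,
)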
BIN  setup/nukestudio/hiero_plugin_path/Icons/1_add_handles_end.png (new file, 199 KiB)
BIN  setup/nukestudio/hiero_plugin_path/Icons/2_add_handles.png (new file, 215 KiB)
BIN  setup/nukestudio/hiero_plugin_path/Icons/3_add_handles_start.png (new file, 194 KiB)
BIN  setup/nukestudio/hiero_plugin_path/Icons/4_2D.png (new file, 70 KiB)
BIN  setup/nukestudio/hiero_plugin_path/Icons/layers.psd (new file)
BIN  setup/nukestudio/hiero_plugin_path/Icons/lense.png (new file, 103 KiB)
BIN  setup/nukestudio/hiero_plugin_path/Icons/lense1.png (new file, 124 KiB)
BIN  setup/nukestudio/hiero_plugin_path/Icons/z_layer_bg.png (new file, 87 KiB)
BIN  setup/nukestudio/hiero_plugin_path/Icons/z_layer_fg.png (new file, 74 KiB)
BIN  setup/nukestudio/hiero_plugin_path/Icons/z_layer_main.png (new file, 62 KiB)
BIN  (several additional icon files modified or removed; names not captured in this view)