Merge branch '2.0/sync_from_1.0' into 2.0/develop

# Conflicts:
#	pype/ftrack/lib/ftrack_app_handler.py
Milan Kolar 2019-04-12 19:00:33 +02:00
commit b72368e1d7
20 changed files with 1311 additions and 361 deletions

View file

@@ -1,13 +1,15 @@
import logging
import os
import argparse
import sys
import errno
import logging
import argparse
import re
import json
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction
import json
from pype import api as pype
from pype import api as pype, lib as pypelib
from avalon import lib as avalonlib
from avalon.tools.libraryloader.io_nonsingleton import DbConnector
class CreateFolders(BaseAction):
@@ -25,111 +27,312 @@ class CreateFolders(BaseAction):
'https://cdn1.iconfinder.com/data/icons/hawcons/32/'
'698620-icon-105-folder-add-512.png'
)
db = DbConnector()
def discover(self, session, entities, event):
''' Validation '''
not_allowed = ['assetversion']
if len(entities) != 1:
return False
if entities[0].entity_type.lower() in not_allowed:
return False
return True
def getShotAsset(self, entity):
if entity not in self.importable:
if entity['object_type']['name'] != 'Task':
self.importable.add(entity)
def interface(self, session, entities, event):
if event['data'].get('values', {}):
return
entity = entities[0]
without_interface = True
for child in entity['children']:
if child['object_type']['name'].lower() != 'task':
without_interface = False
break
self.without_interface = without_interface
if without_interface:
return
title = 'Create folders'
if entity['children']:
children = entity['children']
for child in children:
self.getShotAsset(child)
entity_name = entity['name']
msg = (
'<h2>Do you want to create folders also'
' for all children of "{}"?</h2>'
)
if entity.entity_type.lower() == 'project':
entity_name = entity['full_name']
msg = msg.replace(' also', '')
msg += '<h3>(Project root won\'t be created if not checked)</h3>'
items = []
item_msg = {
'type': 'label',
'value': msg.format(entity_name)
}
item_label = {
'type': 'label',
'value': 'With all children entities'
}
item = {
'name': 'children_included',
'type': 'boolean',
'value': False
}
items.append(item_msg)
items.append(item_label)
items.append(item)
if len(items) == 0:
return {
'success': False,
'message': 'Didn\'t find any running jobs'
}
else:
return {
'items': items,
'title': title
}
def launch(self, session, entities, event):
'''Callback method for custom action.'''
with_childrens = True
if self.without_interface is False:
if 'values' not in event['data']:
return
with_childrens = event['data']['values']['children_included']
entity = entities[0]
if entity.entity_type.lower() == 'project':
proj = entity
else:
proj = entity['project']
project_name = proj['full_name']
project_code = proj['name']
if entity.entity_type.lower() == 'project' and not with_childrens:
return {
'success': True,
'message': 'Nothing was created'
}
data = {
"root": os.environ["AVALON_PROJECTS"],
"project": {
"name": project_name,
"code": project_code
}
}
all_entities = []
all_entities.append(entity)
if with_childrens:
all_entities = self.get_notask_children(entity)
#######################################################################
# JOB SETTINGS
userId = event['source']['user']['id']
user = session.query('User where id is ' + userId).one()
job = session.create('Job', {
'user': user,
'status': 'running',
'data': json.dumps({
'description': 'Creating Folders.'
})
})
av_project = None
try:
self.importable = set([])
# self.importable = []
self.Anatomy = pype.Anatomy
project = entities[0]['project']
paths_collected = set([])
# get all child entities separately/unique
for entity in entities:
self.getShotAsset(entity)
for ent in self.importable:
self.log.info("{}".format(ent['name']))
for entity in self.importable:
print(entity['name'])
anatomy = pype.Anatomy
parents = entity['link']
hierarchy_names = []
for p in parents[1:-1]:
hierarchy_names.append(p['name'])
if hierarchy_names:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*hierarchy_names)
template_data = {"project": {"name": project['full_name'],
"code": project['name']},
"asset": entity['name'],
"hierarchy": hierarchy}
for task in entity['children']:
if task['object_type']['name'] == 'Task':
self.log.info('child: {}'.format(task['name']))
template_data['task'] = task['name']
anatomy_filled = anatomy.format(template_data)
paths_collected.add(anatomy_filled.work.folder)
paths_collected.add(anatomy_filled.publish.folder)
for path in paths_collected:
self.log.info(path)
try:
os.makedirs(path)
except OSError as error:
if error.errno != errno.EEXIST:
raise
job['status'] = 'done'
session.commit()
except ValueError as ve:
job['status'] = 'failed'
session.commit()
message = str(ve)
self.log.error('Error during syncToAvalon: {}'.format(message))
self.db.install()
self.db.Session['AVALON_PROJECT'] = project_name
av_project = self.db.find_one({'type': 'project'})
template_work = av_project['config']['template']['work']
template_publish = av_project['config']['template']['publish']
self.db.uninstall()
except Exception:
job['status'] = 'failed'
session.commit()
anatomy = pype.Anatomy
template_work = anatomy.avalon.work
template_publish = anatomy.avalon.publish
#######################################################################
collected_paths = []
presets = self.get_presets()
for entity in all_entities:
if entity.entity_type.lower() == 'project':
continue
ent_data = data.copy()
asset_name = entity['name']
ent_data['asset'] = asset_name
parents = entity['link']
hierarchy_names = [p['name'] for p in parents[1:-1]]
hierarchy = ''
if hierarchy_names:
hierarchy = os.path.sep.join(hierarchy_names)
ent_data['hierarchy'] = hierarchy
tasks_created = False
if entity['children']:
for child in entity['children']:
if child['object_type']['name'].lower() != 'task':
continue
tasks_created = True
task_type_name = child['type']['name'].lower()
task_data = ent_data.copy()
task_data['task'] = child['name']
possible_apps = presets.get(task_type_name, [])
template_work_created = False
template_publish_created = False
apps = []
for app in possible_apps:
try:
app_data = avalonlib.get_application(app)
app_dir = app_data['application_dir']
except ValueError:
app_dir = app
apps.append(app_dir)
# Template work
if '{app}' in template_work:
for app in apps:
template_work_created = True
app_data = task_data.copy()
app_data['app'] = app
collected_paths.append(
self.compute_template(
template_work, app_data
)
)
if template_work_created is False:
collected_paths.append(
self.compute_template(template_work, task_data)
)
# Template publish
if '{app}' in template_publish:
for app in apps:
template_publish_created = True
app_data = task_data.copy()
app_data['app'] = app
collected_paths.append(
self.compute_template(
template_publish, app_data, True
)
)
if template_publish_created is False:
collected_paths.append(
self.compute_template(
template_publish, task_data, True
)
)
if not tasks_created:
# create path for entity
collected_paths.append(
self.compute_template(template_work, ent_data)
)
collected_paths.append(
self.compute_template(template_publish, ent_data)
)
if len(collected_paths) > 0:
self.log.info('Creating folders:')
for path in set(collected_paths):
self.log.info(path)
if not os.path.exists(path):
os.makedirs(path)
return {
'success': True,
'message': 'Created Folders Successfully!'
}
def get_notask_children(self, entity):
output = []
if entity.get('object_type', {}).get(
'name', entity.entity_type
).lower() == 'task':
return output
else:
output.append(entity)
if entity['children']:
for child in entity['children']:
output.extend(self.get_notask_children(child))
return output
def get_presets(self):
fpath_items = [pypelib.get_presets_path(), 'tools', 'sw_folders.json']
filepath = os.path.normpath(os.path.sep.join(fpath_items))
presets = dict()
try:
with open(filepath) as data_file:
presets = json.load(data_file)
except Exception as e:
self.log.warning('Wasn\'t able to load presets: {}'.format(e))
return dict(presets)
def template_format(self, template, data):
partial_data = PartialDict(data)
# remove subdict items from string (like 'project[name]')
subdict = PartialDict()
count = 1
store_pattern = 5*'_'+'{:0>3}'
regex_pattern = r"\{\w*\[[^\}]*\]\}"
matches = re.findall(regex_pattern, template)
for match in matches:
key = store_pattern.format(count)
subdict[key] = match
template = template.replace(match, '{'+key+'}')
count += 1
# solve filling keys with optional keys
solved = self._solve_with_optional(template, partial_data)
# try to solve subdict and replace them back to string
for k, v in subdict.items():
try:
v = v.format_map(data)
except (KeyError, TypeError):
pass
subdict[k] = v
return solved.format_map(subdict)
def _solve_with_optional(self, template, data):
# Remove optional missing keys
pattern = re.compile(r"(<.*?[^{0]*>)[^0-9]*?")
invalid_optionals = []
for group in pattern.findall(template):
try:
group.format(**data)
except KeyError:
invalid_optionals.append(group)
for group in invalid_optionals:
template = template.replace(group, "")
solved = template.format_map(data)
# solving after format optional in second round
for catch in re.compile(r"(<.*?[^{0]*>)[^0-9]*?").findall(solved):
if "{" in catch:
# remove all optional
solved = solved.replace(catch, "")
else:
# Remove optional symbols
solved = solved.replace(catch, catch[1:-1])
return solved
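# A small worked example (hypothetical template and data, not part of the
# action) of the optional "<...>" syntax resolved by _solve_with_optional:
#   template = "{asset}<_{task}>/work"
#   data = {"asset": "sh010"}                     ->  "sh010/work"
#   data = {"asset": "sh010", "task": "comp"}     ->  "sh010_comp/work"
# Angle-bracket groups are dropped when a key inside them is missing;
# otherwise only the brackets themselves are stripped.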
def compute_template(self, template, data, task=False):
first_result = self.template_format(template, data)
if first_result == first_result.split('{')[0]:
return os.path.normpath(first_result)
if task:
return os.path.normpath(first_result.split('{')[0])
index = first_result.index('{')
regex = r'\{\w*[^\}]*\}'
match = re.findall(regex, first_result[index:])[0]
without_missing = template.split(match)[0].split('}')
output_items = []
for part in without_missing:
if '{' in part:
output_items.append(part + '}')
return os.path.normpath(
self.template_format(''.join(output_items), data)
)
class PartialDict(dict):
def __getitem__(self, item):
out = super().__getitem__(item)
if isinstance(out, dict):
return '{'+item+'}'
return out
def __missing__(self, key):
return '{'+key+'}'
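# A minimal sketch of why PartialDict is used by template_format: missing
# keys survive str.format_map() instead of raising KeyError, so templates
# can be resolved in several passes. Values here are hypothetical:
#   data = PartialDict({"root": "/projects", "asset": "sh010"})
#   "{root}/{asset}/{task}".format_map(data)  ->  "/projects/sh010/{task}"
# Keys whose value is a dict (e.g. 'project') are likewise kept as
# '{project}', so 'project[name]'-style keys can be handled separately.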
def register(session, **kw):
'''Register plugin. Called when used as a plugin.'''

View file

@@ -0,0 +1,262 @@
import os
import sys
import re
import argparse
import logging
import json
import ftrack_api
from pype import lib as pypelib
from pype.ftrack import BaseAction
class CreateProjectFolders(BaseAction):
'''Create project folder structure action.'''
#: Action identifier.
identifier = 'create.project.folders'
#: Action label.
label = 'Create Project Folders'
#: Action description.
description = 'Creates folder structure'
#: roles that are allowed to register this action
role_list = ['Pypeclub', 'Administrator']
icon = (
'https://cdn2.iconfinder.com/data/icons/'
'buttons-9/512/Button_Add-01.png'
)
pattern_array = re.compile(r'\[.*\]')
pattern_ftrack = r'.*\[[.]*ftrack[.]*'
pattern_ent_ftrack = r'ftrack\.[^.,\],\s,]*'
project_root_key = '__project_root__'
def discover(self, session, entities, event):
''' Validation '''
return True
def launch(self, session, entities, event):
entity = entities[0]
if entity.entity_type.lower() == 'project':
project = entity
else:
project = entity['project']
presets = self.load_presets()
try:
# Get paths based on presets
basic_paths = self.get_path_items(presets)
self.create_folders(basic_paths, entity)
self.create_ftrack_entities(basic_paths, project)
except Exception as e:
session.rollback()
return {
'success': False,
'message': str(e)
}
return True
def get_ftrack_paths(self, paths_items):
all_ftrack_paths = []
for path_items in paths_items:
ftrack_path_items = []
is_ftrack = False
for item in reversed(path_items):
if item == self.project_root_key:
continue
if is_ftrack:
ftrack_path_items.append(item)
elif re.match(self.pattern_ftrack, item):
ftrack_path_items.append(item)
is_ftrack = True
ftrack_path_items = list(reversed(ftrack_path_items))
if ftrack_path_items:
all_ftrack_paths.append(ftrack_path_items)
return all_ftrack_paths
def compute_ftrack_items(self, in_list, keys):
if len(keys) == 0:
return in_list
key = keys[0]
exist = None
for index, subdict in enumerate(in_list):
if key in subdict:
exist = index
break
if exist is not None:
in_list[exist][key] = self.compute_ftrack_items(
in_list[exist][key], keys[1:]
)
else:
in_list.append({key: self.compute_ftrack_items([], keys[1:])})
return in_list
def translate_ftrack_items(self, paths_items):
main = []
for path_items in paths_items:
main = self.compute_ftrack_items(main, path_items)
return main
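# A worked example (hypothetical, simplified names without the '[ftrack...]'
# markers) of what translate_ftrack_items produces from the path lists
# returned by get_ftrack_paths:
#   [['assets', 'characters'], ['assets', 'props'], ['shots']]
# is folded by compute_ftrack_items into a tree of single-key dicts, so that
# sibling folders share one parent entity:
#   [{'assets': [{'characters': []}, {'props': []}]}, {'shots': []}]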
def create_ftrack_entities(self, basic_paths, project_ent):
only_ftrack_items = self.get_ftrack_paths(basic_paths)
ftrack_paths = self.translate_ftrack_items(only_ftrack_items)
for separation in ftrack_paths:
parent = project_ent
self.trigger_creation(separation, parent)
def trigger_creation(self, separation, parent):
for item, subvalues in separation.items():
matches = re.findall(self.pattern_array, item)
ent_type = 'Folder'
if len(matches) == 0:
name = item
else:
match = matches[0]
name = item.replace(match, '')
ent_type_match = re.findall(self.pattern_ent_ftrack, match)
if len(ent_type_match) > 0:
ent_type_split = ent_type_match[0].split('.')
if len(ent_type_split) == 2:
ent_type = ent_type_split[1]
new_parent = self.create_ftrack_entity(name, ent_type, parent)
if subvalues:
for subvalue in subvalues:
self.trigger_creation(subvalue, new_parent)
def create_ftrack_entity(self, name, ent_type, parent):
for children in parent['children']:
if children['name'] == name:
return children
data = {
'name': name,
'parent_id': parent['id']
}
if parent.entity_type.lower() == 'project':
data['project_id'] = parent['id']
else:
data['project_id'] = parent['project']['id']
new_ent = self.session.create(ent_type, data)
self.session.commit()
return new_ent
def load_presets(self):
preset_items = [
pypelib.get_presets_path(),
'tools',
'project_folder_structure.json'
]
filepath = os.path.sep.join(preset_items)
# Load folder structure template from presets
presets = dict()
try:
with open(filepath) as data_file:
presets = json.load(data_file)
except Exception as e:
msg = 'Unable to load Folder structure preset'
self.log.warning(msg)
return {
'success': False,
'message': msg
}
return presets
def get_path_items(self, in_dict):
output = []
for key, value in in_dict.items():
if not value:
output.append(key)
else:
paths = self.get_path_items(value)
for path in paths:
if isinstance(path, str):
output.append([key, path])
else:
p = [key]
p.extend(path)
output.append(p)
return output
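# A worked example of get_path_items on a hypothetical preset dict (the real
# structure comes from project_folder_structure.json and is studio specific):
#   {
#       "__project_root__": {
#           "assets [ftrack.Folder]": {"characters": {}, "props": {}},
#           "editorial": {}
#       }
#   }
# would return:
#   [['__project_root__', 'assets [ftrack.Folder]', 'characters'],
#    ['__project_root__', 'assets [ftrack.Folder]', 'props'],
#    ['__project_root__', 'editorial']]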
def compute_paths(self, basic_paths_items, project_root):
output = []
for path_items in basic_paths_items:
clean_items = []
for path_item in path_items:
matches = re.findall(self.pattern_array, path_item)
if len(matches) > 0:
path_item = path_item.replace(matches[0], '')
if path_item == self.project_root_key:
path_item = project_root
clean_items.append(path_item)
output.append(os.path.normpath(os.path.sep.join(clean_items)))
return output
def create_folders(self, basic_paths, entity):
# Set project root folder
if entity.entity_type.lower() == 'project':
project_name = entity['full_name']
else:
project_name = entity['project']['full_name']
project_root_items = [os.environ['AVALON_PROJECTS'], project_name]
project_root = os.path.sep.join(project_root_items)
full_paths = self.compute_paths(basic_paths, project_root)
# Create folders
for path in full_paths:
if os.path.exists(path):
continue
os.makedirs(path.format(project_root=project_root))
def register(session, **kw):
'''Register plugin. Called when used as a plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
CreateProjectFolders(session).register()
def main(arguments=None):
'''Set up logging and register action.'''
if arguments is None:
arguments = []
parser = argparse.ArgumentParser()
# Allow setting of logging level from arguments.
loggingLevels = {}
for level in (
logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR, logging.CRITICAL
):
loggingLevels[logging.getLevelName(level).lower()] = level
parser.add_argument(
'-v', '--verbosity',
help='Set the logging output verbosity.',
choices=loggingLevels.keys(),
default='info'
)
namespace = parser.parse_args(arguments)
# Set up basic logging
logging.basicConfig(level=loggingLevels[namespace.verbosity])
session = ftrack_api.Session()
register(session)
# Wait for events
logging.info(
'Registered actions and listening for events. Use Ctrl-C to abort.'
)
session.event_hub.wait()
if __name__ == '__main__':
raise SystemExit(main(sys.argv[1:]))

View file

@@ -1,155 +0,0 @@
import os
import sys
import json
import argparse
import logging
import ftrack_api
from avalon import lib as avalonlib
from avalon.tools.libraryloader.io_nonsingleton import DbConnector
from pype import lib as pypelib
from pype.ftrack import BaseAction
class CreateSWFolders(BaseAction):
'''Edit meta data action.'''
#: Action identifier.
identifier = 'create.sw.folders'
#: Action label.
label = 'Create SW Folders'
#: Action description.
description = 'Creates folders for all SW in project'
def __init__(self, session):
super().__init__(session)
self.avalon_db = DbConnector()
self.avalon_db.install()
def discover(self, session, entities, event):
''' Validation '''
return True
def launch(self, session, entities, event):
if len(entities) != 1:
self.log.warning(
'There are more entities in selection!'
)
return False
entity = entities[0]
if entity.entity_type.lower() != 'task':
self.log.warning(
'Selected entity is not Task!'
)
return False
asset = entity['parent']
project = asset['project']
project_name = project["full_name"]
self.avalon_db.Session['AVALON_PROJECT'] = project_name
av_project = self.avalon_db.find_one({'type': 'project'})
av_asset = self.avalon_db.find_one({
'type': 'asset',
'name': asset['name']
})
templates = av_project["config"]["template"]
template = templates.get("work", None)
if template is None:
return False
data = {
"root": os.environ["AVALON_PROJECTS"],
"project": {
"name": project_name,
"code": project["name"]
},
"hierarchy": av_asset['data']['hierarchy'],
"asset": asset['name'],
"task": entity['name'],
}
apps = []
if '{app}' in template:
# Apps in project
for app in av_project['data']['applications']:
app_data = avalonlib.get_application(app)
app_dir = app_data['application_dir']
if app_dir not in apps:
apps.append(app_dir)
# Apps in presets
path_items = [pypelib.get_presets_path(), 'tools', 'sw_folders.json']
filepath = os.path.sep.join(path_items)
presets = dict()
try:
with open(filepath) as data_file:
presets = json.load(data_file)
except Exception as e:
self.log.warning('Wasn\'t able to load presets')
preset_apps = presets.get(project_name, presets.get('__default__', []))
for app in preset_apps:
if app not in apps:
apps.append(app)
# Create folders for apps
for app in apps:
data['app'] = app
self.log.info('Created folder for app {}'.format(app))
path = os.path.normpath(template.format(**data))
if os.path.exists(path):
continue
os.makedirs(path)
return True
def register(session, **kw):
'''Register plugin. Called when used as an plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
CreateSWFolders(session).register()
def main(arguments=None):
'''Set up logging and register action.'''
if arguments is None:
arguments = []
parser = argparse.ArgumentParser()
# Allow setting of logging level from arguments.
loggingLevels = {}
for level in (
logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR, logging.CRITICAL
):
loggingLevels[logging.getLevelName(level).lower()] = level
parser.add_argument(
'-v', '--verbosity',
help='Set the logging output verbosity.',
choices=loggingLevels.keys(),
default='info'
)
namespace = parser.parse_args(arguments)
# Set up basic logging
logging.basicConfig(level=loggingLevels[namespace.verbosity])
session = ftrack_api.Session()
register(session)
# Wait for events
logging.info(
'Registered actions and listening for events. Use Ctrl-C to abort.'
)
session.event_hub.wait()
if __name__ == '__main__':
raise SystemExit(main(sys.argv[1:]))

View file

@@ -0,0 +1,519 @@
from pype.ftrack import BaseAction
import os
import sys
import json
import subprocess
import ftrack_api
import logging
import operator
import re
from pype import lib as pypelib
from app.api import Logger
log = Logger.getLogger(__name__)
class RVAction(BaseAction):
""" Launch RV action """
identifier = "rv.launch.action"
label = "rv"
description = "rv Launcher"
icon = "https://img.icons8.com/color/48/000000/circled-play.png"
type = 'Application'
def __init__(self, session):
""" Constructor
:param session: ftrack Session
:type session: :class:`ftrack_api.Session`
"""
super().__init__(session)
self.rv_path = None
self.config_data = None
# RV_HOME should be set if properly installed
if os.environ.get('RV_HOME'):
self.rv_path = os.path.join(
os.environ.get('RV_HOME'),
'bin',
'rv'
)
else:
# if not, fallback to config file location
self.load_config_data()
self.set_rv_path()
if self.rv_path is None:
return
self.allowed_types = self.config_data.get(
'file_ext', ["img", "mov", "exr"]
)
def discover(self, session, entities, event):
"""Return available actions based on *event*. """
selection = event["data"].get("selection", [])
if len(selection) != 1:
return False
entityType = selection[0].get("entityType", None)
if entityType in ["assetversion", "task"]:
return True
return False
def load_config_data(self):
path_items = [pypelib.get_presets_path(), 'rv', 'config.json']
filepath = os.path.sep.join(path_items)
data = dict()
try:
with open(filepath) as data_file:
data = json.load(data_file)
except Exception as e:
log.warning(
'Failed to load data from RV presets file ({})'.format(e)
)
self.config_data = data
def set_rv_path(self):
self.rv_path = self.config_data.get("rv_path")
def register(self):
assert (self.rv_path is not None), (
'RV is not installed'
' or paths in presets are not set correctly'
)
super().register()
def interface(self, session, entities, event):
if event['data'].get('values', {}):
return
entity = entities[0]
versions = []
entity_type = entity.entity_type.lower()
if entity_type == "assetversion":
if (
entity[
'components'
][0]['file_type'][1:] in self.allowed_types
):
versions.append(entity)
else:
master_entity = entity
if entity_type == "task":
master_entity = entity['parent']
for asset in master_entity['assets']:
for version in asset['versions']:
# Get only AssetVersion of selected task
if (
entity_type == "task" and
version['task']['id'] != entity['id']
):
continue
# Get only components with allowed type
filetype = version['components'][0]['file_type']
if filetype[1:] in self.allowed_types:
versions.append(version)
if len(versions) < 1:
return {
'success': False,
'message': 'There are no Asset Versions to open.'
}
items = []
base_label = "v{0} - {1} - {2}"
default_component = self.config_data.get(
'default_component', None
)
last_available = None
select_value = None
for version in versions:
for component in version['components']:
label = base_label.format(
str(version['version']).zfill(3),
version['asset']['type']['name'],
component['name']
)
try:
location = component[
'component_locations'
][0]['location']
file_path = location.get_filesystem_path(component)
except Exception:
file_path = component[
'component_locations'
][0]['resource_identifier']
if os.path.isdir(os.path.dirname(file_path)):
last_available = file_path
if component['name'] == default_component:
select_value = file_path
items.append(
{'label': label, 'value': file_path}
)
if len(items) == 0:
return {
'success': False,
'message': (
'There are no Asset Versions with accessible path.'
)
}
item = {
'label': 'Items to view',
'type': 'enumerator',
'name': 'path',
'data': sorted(
items,
key=operator.itemgetter('label'),
reverse=True
)
}
if select_value is not None:
item['value'] = select_value
else:
item['value'] = last_available
return {'items': [item]}
def launch(self, session, entities, event):
"""Callback method for RV action."""
# Launching application
if "values" not in event["data"]:
return
filename = event['data']['values']['path']
fps = entities[0].get('custom_attributes', {}).get('fps', None)
cmd = []
# change frame number to padding string for RV to play sequence
try:
frame = re.findall(r'(\d+).', filename)[-1]
except IndexError:
# we didn't detect a frame number
pass
else:
padding = '#' * len(frame)
pos = filename.rfind(frame)
filename = filename[:pos] + padding + filename[
filename.rfind('.'):]
# RV path
cmd.append(os.path.normpath(self.rv_path))
if fps is not None:
cmd.append("-fps {}".format(int(fps)))
cmd.append(os.path.normpath(filename))
log.info('Running rv: {}'.format(' '.join(cmd)))
try:
# Run RV with these commands
subprocess.Popen(' '.join(cmd), shell=True)
except Exception as e:
return {
'success': False,
'message': 'Failed to launch RV: {}'.format(e)
}
return True
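# A worked example (hypothetical file name) of the frame-to-padding
# substitution done in launch() above, so RV plays the whole sequence:
#   'plate_v001.1001.exr'  ->  last frame-like group found: '1001'
#   -> 'plate_v001.' + '####' + '.exr'  ->  'plate_v001.####.exr'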
def register(session):
"""Register hooks."""
if not isinstance(session, ftrack_api.session.Session):
return
RVAction(session).register()
def main(arguments=None):
'''Set up logging and register action.'''
if arguments is None:
arguments = []
import argparse
parser = argparse.ArgumentParser()
# Allow setting of logging level from arguments.
loggingLevels = {}
for level in (
logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING,
logging.ERROR, logging.CRITICAL
):
loggingLevels[logging.getLevelName(level).lower()] = level
parser.add_argument(
'-v', '--verbosity',
help='Set the logging output verbosity.',
choices=loggingLevels.keys(),
default='info'
)
namespace = parser.parse_args(arguments)
# Set up basic logging
logging.basicConfig(level=loggingLevels[namespace.verbosity])
session = ftrack_api.Session()
register(session)
# Wait for events
logging.info(
'Registered actions and listening for events. Use Ctrl-C to abort.'
)
session.event_hub.wait()
if __name__ == '__main__':
raise SystemExit(main(sys.argv[1:]))
"""
Usage: RV movie and image sequence viewer
One File: rv foo.jpg
This Directory: rv .
Other Directory: rv /path/to/dir
Image Sequence w/Audio: rv [ in.#.tif in.wav ]
Stereo w/Audio: rv [ left.#.tif right.#.tif in.wav ]
Stereo Movies: rv [ left.mov right.mov ]
Stereo Movie (from rvio): rv stereo.mov
Cuts Sequenced: rv cut1.mov cut2.#.exr cut3.mov
Stereo Cuts Sequenced: rv [ l1.mov r1.mov ] [ l2.mov r2.mov ]
Forced Anamorphic: rv [ -pa 2.0 fullaperture.#.dpx ]
Compare: rv -wipe a.exr b.exr
Difference: rv -diff a.exr b.exr
Slap Comp Over: rv -over a.exr b.exr
Tile Images: rv -tile *.jpg
Cache + Play Movie: rv -l -play foo.mov
Cache Images to Examine: rv -c big.#.exr
Fullscreen on 2nd monitor: rv -fullscreen -screen 1
Select Source View: rv [ in.exr -select view right ]
Select Source Layer: rv [ in.exr -select layer light1.diffuse ]
(single-view source)
Select Source Layer: rv [ in.exr -select layer left,light1.diffuse ]
(multi-view source)
Select Source Channel: rv [ in.exr -select channel R ]
(single-view, single-layer source)
Select Source Channel: rv [ in.exr -select channel left,Diffuse,R ]
(multi-view, multi-layer source)
Image Sequence Numbering
Frames 1 to 100 no padding: image.1-100@.jpg
Frames 1 to 100 padding 4: image.1-100#.jpg -or- image.1-100@@@@.jpg
Frames 1 to 100 padding 5: image.1-100@@@@@.jpg
Frames -100 to -200 padding 4: image.-100--200#jpg
printf style padding 4: image.%04d.jpg
printf style w/range: image.%04d.jpg 1-100
printf no padding w/range: image.%d.jpg 1-100
Complicated no pad 1 to 100: image_887f1-100@_982.tif
Stereo pair (left,right): image.#.%V.tif
Stereo pair (L,R): image.#.%v.tif
All Frames, padding 4: image.#.jpg
All Frames in Sequence: image.*.jpg
All Frames in Directory: /path/to/directory
All Frames in current dir: .
Per-source arguments (inside [ and ] restricts to that source only)
-pa %f Per-source pixel aspect ratio
-ro %d Per-source range offset
-rs %d Per-source range start
-fps %f Per-source or global fps
-ao %f Per-source audio offset in seconds
-so %f Per-source stereo relative eye offset
-rso %f Per-source stereo right eye offset
-volume %f Per-source or global audio volume (default=1)
-fcdl %S Per-source file CDL
-lcdl %S Per-source look CDL
-flut %S Per-source file LUT
-llut %S Per-source look LUT
-pclut %S Per-source pre-cache software LUT
-cmap %S Per-source channel mapping
(channel names, separated by ',')
-select %S %S Per-source view/layer/channel selection
-crop %d %d %d %d Per-source crop (xmin, ymin, xmax, ymax)
-uncrop %d %d %d %d Per-source uncrop (width, height, xoffset, yoffset)
-in %d Per-source cut-in frame
-out %d Per-source cut-out frame
-noMovieAudio Disable source movie's baked-in audio
-inparams ... Source specific input parameters
... Input sequence patterns, images, movies, or directories
-c Use region frame cache
-l Use look-ahead cache
-nc Use no caching
-s %f Image scale reduction
-ns Nuke style sequence notation
(deprecated and ignored -- no longer needed)
-noRanges No separate frame ranges
(i.e. 1-10 will be considered a file)
-sessionType %S Session type (sequence, stack) (deprecated, use -view)
-stereo %S Stereo mode
(hardware, checker, scanline, anaglyph, lumanaglyph,
left, right, pair, mirror, hsqueezed, vsqueezed)
-stereoSwap %d Swap left and right eyes stereo display
(0 == no, 1 == yes, default=0)
-vsync %d Video Sync (1 = on, 0 = off, default = 1)
-comp %S Composite mode
(over, add, difference, replace, topmost)
-layout %S Layout mode (packed, row, column, manual)
-over Same as -comp over -view defaultStack
-diff Same as -comp difference -view defaultStack
-replace Same as -comp replace -view defaultStack
-topmost Same as -comp topmost -view defaultStack
-layer Same as -comp topmost -view defaultStack, with strict
frame ranges
-tile Same as -layout packed -view defaultLayout
-wipe Same as -over with wipes enabled
-view %S Start with a particular view
-noSequence Don't contract files into sequences
-inferSequence Infer sequences from one file
-autoRetime %d Automatically retime conflicting media fps in
sequences and stacks (1 = on, 0 = off, default = 1)
-rthreads %d Number of reader threads (default=1)
-fullscreen Start in fullscreen mode
-present Start in presentation mode (using presentation device)
-presentAudio %d Use presentation audio device in presentation mode
(1 = on, 0 = off)
-presentDevice %S Presentation mode device
-presentVideoFormat %S Presentation mode override video format
(device specific)
-presentDataFormat %S Presentation mode override data format
(device specific)
-screen %d Start on screen (0, 1, 2, ...)
-noBorders No window manager decorations
-geometry %d %d [%d %d] Start geometry X, Y, W, H
-fitMedia Fit the window to the first media shown
-init %S Override init script
-nofloat Turn off floating point by default
-maxbits %d Maximum default bit depth (default=32)
-gamma %f Set display gamma (default=1)
-sRGB Display using linear -> sRGB conversion
-rec709 Display using linear -> Rec 709 conversion
-dlut %S Apply display LUT
-brightness %f Set display relative brightness in stops (default=0)
-resampleMethod %S Resampling method
(area, linear, cubic, nearest, default=area)
-eval %S Evaluate Mu expression at every session start
-pyeval %S Evaluate Python expression at every session start
-nomb Hide menu bar on start up
-play Play on startup
-playMode %d Playback mode (0=Context dependent, 1=Play all frames,
2=Realtime, default=0)
-loopMode %d Playback loop mode
(0=Loop, 1=Play Once, 2=Ping-Pong, default=0)
-cli Mu command line interface
-vram %f VRAM usage limit in Mb, default = 64.000000
-cram %f Max region cache RAM usage in Gb,
(6.4Gb available, default 1Gb)
-lram %f Max look-ahead cache RAM usage in Gb,
(6.4Gb available, default 0.2Gb)
-noPBO Prevent use of GL PBOs for pixel transfer
-prefetch Prefetch images for rendering
-useAppleClientStorage Use APPLE_client_storage extension
-useThreadedUpload Use threading for texture uploading/downloading
if possible
-bwait %f Max buffer wait time in cached seconds, default 5.0
-lookback %f Percentage of the lookahead cache reserved for
frames behind the playhead, default 25
-yuv Assume YUV hardware conversion
-noaudio Turn off audio
-audiofs %d Use fixed audio frame size
(results are hardware dependant ... try 512)
-audioCachePacket %d Audio cache packet size in samples (default=2048)
-audioMinCache %f Audio cache min size in seconds (default=0.300000)
-audioMaxCache %f Audio cache max size in seconds (default=0.600000)
-audioModule %S Use specific audio module
-audioDevice %S Use specific audio device
-audioRate %f Use specific output audio rate (default=ask hardware)
-audioPrecision %d Use specific output audio precision (default=16)
-audioNice %d Close audio device when not playing
(may cause problems on some hardware) default=0
-audioNoLock %d Do not use hardware audio/video syncronization
(use software instead, default=0)
-audioPreRoll %d Preroll audio on device open (Linux only; default=0)
-audioGlobalOffset %f Global audio offset in seconds
-audioDeviceLatency %f Audio device latency compensation in milliseconds
-bg %S Background pattern (default=black, white, grey18,
grey50, checker, crosshatch)
-formats Show all supported image and movie formats
-apple Use Quicktime and NSImage libraries (on OS X)
-cinalt Use alternate Cineon/DPX readers
-exrcpus %d EXR thread count (default=0)
-exrRGBA EXR Always read as RGBA (default=false)
-exrInherit EXR guess channel inheritance (default=false)
-exrNoOneChannel EXR never use one channel planar images (default=false)
-exrIOMethod %d [%d] EXR I/O Method (0=standard, 1=buffered, 2=unbuffered,
3=MemoryMap, 4=AsyncBuffered, 5=AsyncUnbuffered,
default=1) and optional chunk size (default=61440)
-exrReadWindowIsDisplayWindow
EXR read window is display window (default=false)
-exrReadWindow %d EXR Read Window Method (0=Data, 1=Display,
2=Union, 3=Data inside Display, default=3)
-jpegRGBA Make JPEG four channel RGBA on read
(default=no, use RGB or YUV)
-jpegIOMethod %d [%d] JPEG I/O Method (0=standard, 1=buffered,
2=unbuffered, 3=MemoryMap, 4=AsyncBuffered,
5=AsyncUnbuffered, default=1) and optional
chunk size (default=61440)
-cinpixel %S Cineon pixel storage (default=RGB8_PLANAR)
-cinchroma Use Cineon chromaticity values
(for default reader only)
-cinIOMethod %d [%d] Cineon I/O Method (0=standard, 1=buffered,
2=unbuffered, 3=MemoryMap, 4=AsyncBuffered,
5=AsyncUnbuffered, default=1) and optional
chunk size (default=61440)
-dpxpixel %S DPX pixel storage (default=RGB8_PLANAR)
-dpxchroma Use DPX chromaticity values (for default reader only)
-dpxIOMethod %d [%d] DPX I/O Method (0=standard, 1=buffered, 2=unbuffered,
3=MemoryMap, 4=AsyncBuffered, 5=AsyncUnbuffered,
default=1) and optional chunk size (default=61440)
-tgaIOMethod %d [%d] TARGA I/O Method (0=standard, 1=buffered,
2=unbuffered, 3=MemoryMap, 4=AsyncBuffered,
5=AsyncUnbuffered, default=1)
and optional chunk size (default=61440)
-tiffIOMethod %d [%d] TIFF I/O Method (0=standard, 1=buffered,
2=unbuffered, 3=MemoryMap, 4=AsyncBuffered,
5=AsyncUnbuffered, default=1) and optional
chunk size (default=61440)
-lic %S Use specific license file
-noPrefs Ignore preferences
-resetPrefs Reset preferences to default values
-qtcss %S Use QT style sheet for UI
-qtstyle %S Use QT style
-qtdesktop %d QT desktop aware, default=1 (on)
-xl Aggressively absorb screen space for large media
-mouse %d Force tablet/stylus events to be treated as a
mouse events, default=0 (off)
-network Start networking
-networkPort %d Port for networking
-networkHost %S Alternate host/address for incoming connections
-networkTag %S Tag to mark automatically saved port file
-networkConnect %S [%d] Start networking and connect to host at port
-networkPerm %d Default network connection permission
(0=Ask, 1=Allow, 2=Deny, default=0)
-reuse %d Try to re-use the current session for
incoming URLs (1 = reuse session,
0 = new session, default = 1)
-nopackages Don't load any packages at startup (for debugging)
-encodeURL Encode the command line as
an rvlink URL, print, and exit
-bakeURL Fully bake the command line as an
rvlink URL, print, and exit
-sendEvent ... Send external events e.g. -sendEvent 'name' 'content'
-flags ... Arbitrary flags (flag, or 'name=value')
for use in Mu code
-debug ... Debug category
-version Show RV version number
-strictlicense Exit rather than consume an rv license if no rvsolo
licenses are available
-prefsPath %S Alternate path to preferences directory
-sleep %d Sleep (in seconds) before starting to
allow attaching debugger
"""

View file

@@ -204,21 +204,36 @@ class AppAction(BaseHandler):
application = avalonlib.get_application(os.environ["AVALON_APP_NAME"])
data = {
"root": os.environ.get("PYPE_STUDIO_PROJECTS_PATH"),
"project": {"name": entity['project']['full_name'],
"code": entity['project']['name']},
"task": entity['name'],
"asset": entity['parent']['name'],
"app": application["application_dir"],
"hierarchy": hierarchy}
try:
anatomy_filled = anatomy.format(data)
# anatomy = anatomy.format(data)
except Exception as e:
self.log.error(
"{0} Error in anatomy.format: {1}".format(__name__, e)
"root": os.environ.get("PYPE_STUDIO_PROJECTS_PATH"),
"project": {
"name": entity['project']['full_name'],
"code": entity['project']['name']
},
"task": entity['name'],
"asset": entity['parent']['name'],
"app": application["application_dir"],
"hierarchy": hierarchy,
}
av_project = database[project_name].find_one({"type": 'project'})
templates = None
if av_project:
work_template = av_project.get('config', {}).get('template', {}).get(
'work', None
)
os.environ["AVALON_WORKDIR"] = anatomy_filled['work']['folder']
work_template = None
try:
work_template = work_template.format(**data)
except Exception:
try:
anatomy = anatomy.format(data)
work_template = anatomy["work"]["path"]
except Exception as e:
self.log.error(
"{0} Error in anatomy.format: {1}".format(__name__, e)
)
os.environ["AVALON_WORKDIR"] = os.path.normpath(work_template)
# collect all parents from the task
parents = []

View file

@@ -40,6 +40,9 @@ INVENTORY_PATH = os.path.join(PLUGINS_DIR, "nuke", "inventory")
self = sys.modules[__name__]
self.nLogger = None
if os.getenv("PYBLISH_GUI", None):
pyblish.register_gui(os.getenv("PYBLISH_GUI", None))
# class NukeHandler(Logger.logging.Handler):
# '''
@@ -93,13 +96,15 @@ def reload_config():
"{}.templates".format(AVALON_CONFIG),
"{}.nuke.actions".format(AVALON_CONFIG),
"{}.nuke.templates".format(AVALON_CONFIG),
"{}.nuke.menu".format(AVALON_CONFIG)
"{}.nuke.menu".format(AVALON_CONFIG),
"{}.nuke.lib".format(AVALON_CONFIG),
):
log.info("Reloading module: {}...".format(module))
module = importlib.import_module(module)
try:
module = importlib.import_module(module)
reload(module)
except Exception:
except Exception as e:
log.warning("Cannot reload module: {}".format(e))
importlib.reload(module)

View file

@@ -1,4 +1,6 @@
import os
import sys
import os
from collections import OrderedDict
from pprint import pprint
from avalon.vendor.Qt import QtGui
@@ -6,6 +8,10 @@ from avalon import api, io, lib
import avalon.nuke
import pype.api as pype
import nuke
from .templates import (
get_dataflow,
get_colorspace
)
from pypeapp import Logger
log = Logger().get_logger(__name__, "nuke")
@@ -60,21 +66,54 @@ def version_up_script():
nukescripts.script_and_write_nodes_version_up()
def get_render_path(node):
data = dict()
data['avalon'] = get_avalon_knob_data(node)
data_preset = {
"class": data['avalon']['family'],
"preset": data['avalon']['families']
}
nuke_dataflow_writes = get_dataflow(**data_preset)
nuke_colorspace_writes = get_colorspace(**data_preset)
application = lib.get_application(os.environ["AVALON_APP_NAME"])
data.update({
"application": application,
"nuke_dataflow_writes": nuke_dataflow_writes,
"nuke_colorspace_writes": nuke_colorspace_writes
})
anatomy_filled = format_anatomy(data)
return anatomy_filled.render.path.replace("\\", "/")
def format_anatomy(data):
from .templates import (
get_anatomy
)
file = script_name()
anatomy = get_anatomy()
# TODO: perhaps should be in try!
padding = anatomy.render.padding
version = data.get("version", None)
if not version:
file = script_name()
data["version"] = pype.get_version_from_path(file)
data.update({
"subset": data["avalon"]["subset"],
"asset": data["avalon"]["asset"],
"task": str(pype.get_task()).lower(),
"family": data["avalon"]["family"],
"project": {"name": pype.get_project_name(),
"code": pype.get_project_code()},
"representation": data["nuke_dataflow_writes"].file_type,
"app": data["application"]["application_dir"],
"hierarchy": pype.get_hierarchy(),
"frame": "#"*padding,
"version": pype.get_version_from_path(file)
"frame": "#" * padding,
})
# log.info("format_anatomy:anatomy: {}".format(anatomy))
@@ -86,24 +125,19 @@ def script_name():
def create_write_node(name, data):
from .templates import (
get_dataflow,
get_colorspace
)
nuke_dataflow_writes = get_dataflow(**data)
nuke_colorspace_writes = get_colorspace(**data)
application = lib.get_application(os.environ["AVALON_APP_NAME"])
try:
anatomy_filled = format_anatomy({
"subset": data["avalon"]["subset"],
"asset": data["avalon"]["asset"],
"task": pype.get_task(),
"family": data["avalon"]["family"],
"project": {"name": pype.get_project_name(),
"code": pype.get_project_code()},
"representation": nuke_dataflow_writes.file_type,
"app": application["application_dir"],
data.update({
"application": application,
"nuke_dataflow_writes": nuke_dataflow_writes,
"nuke_colorspace_writes": nuke_colorspace_writes
})
anatomy_filled = format_anatomy(data)
except Exception as e:
log.error("problem with resolving anatomy tepmlate: {}".format(e))
@@ -136,7 +170,6 @@ def create_write_node(name, data):
add_rendering_knobs(instance)
return instance
def add_rendering_knobs(node):
if "render" not in node.knobs():
knob = nuke.Boolean_Knob("render", "Render")
@@ -403,3 +436,37 @@ def get_additional_data(container):
]
return {"color": QtGui.QColor().fromRgbF(rgba[0], rgba[1], rgba[2])}
def get_write_node_template_attr(node):
''' Gets all defined data from presets
'''
# get avalon data from node
data = dict()
data['avalon'] = get_avalon_knob_data(node)
data_preset = {
"class": data['avalon']['family'],
"preset": data['avalon']['families']
}
# get template data
nuke_dataflow_writes = get_dataflow(**data_preset)
nuke_colorspace_writes = get_colorspace(**data_preset)
# collecting correct data
correct_data = OrderedDict({
"file": get_render_path(node)
})
# adding dataflow template
correct_data.update({
k: v for k, v in nuke_dataflow_writes.items()
if k not in ["id", "previous"]
})
# adding colorspace template
correct_data.update({k: v for k, v in nuke_colorspace_writes.items()})
# fix badly encoded data
return avalon.nuke.lib.fix_data_for_node_create(correct_data)

View file

@@ -1,3 +0,0 @@
import nuke
n = nuke.createNode("Constant")
print(n)

View file

@@ -38,6 +38,9 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
assumed_data = instance.data["assumedTemplateData"]
assumed_version = assumed_data["version"]
version_number = int(assumed_version)
if instance.data.get('version'):
version_number = int(instance.data.get('version'))
family = instance.data['family'].lower()
asset_type = ''

View file

@@ -15,7 +15,7 @@ class ExtractQuicktimeEXR(pyblish.api.InstancePlugin):
publish the shading network. Same goes for file dependent assets.
"""
label = "Extract Quicktime EXR"
label = "Extract Quicktime"
order = pyblish.api.ExtractorOrder
families = ["imagesequence", "render", "write", "source"]
hosts = ["shell"]

View file

@@ -5,7 +5,7 @@ try:
except Exception:
pass
log = Logger.getLogger(__name__, "clockify_start")
log = Logger().get_logger(__name__, "clockify_start")
class ClockifyStart(api.Action):

View file

@@ -4,7 +4,7 @@ try:
except Exception:
pass
from pype.api import Logger
log = Logger.getLogger(__name__, "clockify_sync")
log = Logger().get_logger(__name__, "clockify_sync")
class ClockifySync(api.Action):

View file

@@ -0,0 +1,36 @@
import os
import pyblish.api
import pype.utils
@pyblish.api.log
class RepairNukeWriteNodeVersionAction(pyblish.api.Action):
label = "Repair"
on = "failed"
icon = "wrench"
def process(self, context, plugin):
import pype.nuke.lib as nukelib
instances = pype.utils.filter_instances(context, plugin)
for instance in instances:
node = instance[0]
render_path = nukelib.get_render_path(node)
self.log.info("render_path: {}".format(render_path))
node['file'].setValue(render_path.replace("\\", "/"))
class ValidateVersionMatch(pyblish.api.InstancePlugin):
"""Checks if write version matches workfile version"""
label = "Validate Version Match"
order = pyblish.api.ValidatorOrder
actions = [RepairNukeWriteNodeVersionAction]
hosts = ["nuke"]
families = ['write']
def process(self, instance):
assert instance.data['version'] == instance.context.data['version'], "\
Version in write doesn't match version of the workfile"

View file

@@ -1,56 +0,0 @@
import os
import pyblish.api
import pype.utils
@pyblish.api.log
class RepairNukeWriteNodeAction(pyblish.api.Action):
label = "Repair"
on = "failed"
icon = "wrench"
def process(self, context, plugin):
instances = pype.utils.filter_instances(context, plugin)
for instance in instances:
if "create_directories" in instance[0].knobs():
instance[0]['create_directories'].setValue(True)
else:
path, file = os.path.split(instance[0].data['outputFilename'])
self.log.info(path)
if not os.path.exists(path):
os.makedirs(path)
if "metadata" in instance[0].knobs().keys():
instance[0]["metadata"].setValue("all metadata")
class ValidateNukeWriteNode(pyblish.api.InstancePlugin):
""" Validates file output. """
order = pyblish.api.ValidatorOrder
optional = True
families = ["write.render"]
label = "Write Node"
actions = [RepairNukeWriteNodeAction]
hosts = ["nuke"]
def process(self, instance):
# Validate output directory exists, if not creating directories.
# The existence of the knob is queried because previous version
# of Nuke did not have this feature.
if "create_directories" in instance[0].knobs():
msg = "Use Create Directories"
assert instance[0].knobs()['create_directories'].value() is True, msg
else:
path, file = os.path.split(instance.data['outputFilename'])
msg = "Output directory doesn't exist: \"{0}\"".format(path)
assert os.path.exists(path), msg
# Validate metadata knob
if "metadata" in instance[0].knobs().keys():
msg = "Metadata needs to be set to \"all metadata\"."
assert instance[0]["metadata"].value() == "all metadata", msg

View file

@@ -26,7 +26,7 @@ class CrateWriteRender(avalon.nuke.Creator):
name = "WriteRender"
label = "Create Write Render"
hosts = ["nuke"]
family = "write"
family = "{}_write".format(preset)
families = preset
icon = "sign-out"
@@ -69,7 +69,7 @@ class CrateWritePrerender(avalon.nuke.Creator):
name = "WritePrerender"
label = "Create Write Prerender"
hosts = ["nuke"]
family = "write"
family = "{}_write".format(preset)
families = preset
icon = "sign-out"
@@ -112,7 +112,7 @@ class CrateWriteStill(avalon.nuke.Creator):
name = "WriteStill"
label = "Create Write Still"
hosts = ["nuke"]
family = "write"
family = "{}_write".format(preset)
families = preset
icon = "image"

View file

@@ -8,7 +8,7 @@ class ValidateScript(pyblish.api.InstancePlugin):
order = pyblish.api.ValidatorOrder + 0.1
families = ["nukescript"]
label = "Check nukescript settings"
label = "Check script settings"
hosts = ["nuke"]
def process(self, instance):

View file

@@ -1,15 +0,0 @@
import pyblish.api
class ValidateVersionMatch(pyblish.api.InstancePlugin):
"""Checks if write version matches workfile version"""
label = "Validate Version Match"
order = pyblish.api.ValidatorOrder
hosts = ["nuke"]
families = ['render.frames']
def process(self, instance):
assert instance.data['version'] == instance.context.data['version'], "\
Version in write doesn't match version of the workfile"

View file

@@ -0,0 +1,69 @@
import os
import pyblish.api
import pype.utils
import pype.nuke.lib as nukelib
import avalon.nuke
@pyblish.api.log
class RepairNukeWriteNodeAction(pyblish.api.Action):
label = "Repair"
on = "failed"
icon = "wrench"
def process(self, context, plugin):
instances = pype.utils.filter_instances(context, plugin)
for instance in instances:
node = instance[0]
correct_data = nukelib.get_write_node_template_attr(node)
for k, v in correct_data.items():
node[k].setValue(v)
self.log.info("Node attributes were fixed")
class ValidateNukeWriteNode(pyblish.api.InstancePlugin):
""" Validates file output. """
order = pyblish.api.ValidatorOrder
optional = True
families = ["write"]
label = "Write Node"
actions = [RepairNukeWriteNodeAction]
hosts = ["nuke"]
def process(self, instance):
node = instance[0]
correct_data = nukelib.get_write_node_template_attr(node)
check = []
for k, v in correct_data.items():
if k == 'file':
padding = len(v.split('#'))
ref_path = avalon.nuke.lib.get_node_path(v, padding)
n_path = avalon.nuke.lib.get_node_path(node[k].value(), padding)
mismatch = False
for i, p in enumerate(ref_path):
if str(n_path[i]) not in str(p):
mismatch = True
break
if mismatch:
check.append([k, v, node[k].value()])
else:
if str(node[k].value()) not in str(v):
check.append([k, v, node[k].value()])
self.log.info(check)
msg = "Node's attribute `{0}` is not correct!\n" \
"\nCorrect: `{1}` \n\nWrong: `{2}` \n\n"
if check:
print_msg = ""
for item in check:
print_msg += msg.format(item[0], item[1], item[2])
print_msg += "`RMB` click to the validator and `A` to fix!"
assert not check, print_msg