Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-24 21:04:40 +01:00)
Merge branch 'develop' into 3.0/poetry
This commit is contained in: commit 1e6f699a03
205 changed files with 781 additions and 874 deletions
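Most of this commit moves the host integrations into an "api" subpackage (for example pype.hosts.maya becomes pype.hosts.maya.api) and updates imports across the plugins accordingly. A minimal sketch of that import pattern, assuming a pype checkout at this revision; the module paths are taken from the hunks below:

# Before this commit: helpers imported from the host package root
# import pype.hosts.maya.plugin
# from pype.hosts.maya import lib

# After this commit: helpers live in the host's api subpackage
import pype.hosts.maya.api.plugin
from pype.hosts.maya.api import lib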
@@ -20,16 +20,23 @@ class AfterEffectsPrelaunchHook(PreLaunchHook):
         while self.launch_context.launch_args:
             remainders.append(self.launch_context.launch_args.pop(0))
 
+        workfile_path = self.data["last_workfile_path"]
+        if not os.path.exists(workfile_path):
+            workfile_path = ""
+
         new_launch_args = [
             self.python_executable(),
             "-c",
             (
                 "import avalon.aftereffects;"
-                "avalon.aftereffects.launch(\"{}\")"
-            ).format(aftereffects_executable)
+                "avalon.aftereffects.launch(\"{}\", \"{}\")"
+            ).format(
+                aftereffects_executable.replace("\\", "\\\\"),
+                workfile_path.replace("\\", "\\\\")
+            )
         ]
 
-        # Append as whole list as these areguments should not be separated
+        # Append as whole list as these arguments should not be separated
        self.launch_context.launch_args.append(new_launch_args)
 
         if remainders:
@@ -26,7 +26,7 @@ class HarmonyPrelaunchHook(PreLaunchHook):
             (
                 "import avalon.harmony;"
                 "avalon.harmony.launch(\"{}\")"
-            ).format(harmony_executable.replace("\\", "/"))
+            ).format(harmony_executable.replace("\\", "\\\\"))
         ]
 
         # Append as whole list as these areguments should not be separated
@@ -20,13 +20,20 @@ class PhotoshopPrelaunchHook(PreLaunchHook):
         while self.launch_context.launch_args:
             remainders.append(self.launch_context.launch_args.pop(0))
 
+        workfile_path = self.data["last_workfile_path"]
+        if not os.path.exists(workfile_path):
+            workfile_path = ""
+
         new_launch_args = [
             self.python_executable(),
             "-c",
             (
                 "import avalon.photoshop;"
-                "avalon.photoshop.launch(\"{}\")"
-            ).format(photoshop_executable)
+                "avalon.photoshop.launch(\"{}\", \"{}\")"
+            ).format(
+                photoshop_executable.replace("\\", "\\\\"),
+                workfile_path.replace("\\", "\\\\")
+            )
         ]
 
         # Append as whole list as these areguments should not be separated
@@ -1,131 +0,0 @@
import os
import pype.lib
from pype.api import Logger, Anatomy
import shutil
import getpass
import avalon.api


class PhotoshopPrelaunch(pype.lib.PypeHook):
    """This hook will check for the existence of PyWin

    PyWin is a requirement for the Photoshop integration.
    """
    project_code = None
    host_name = "photoshop"

    def __init__(self, logger=None):
        if not logger:
            self.log = Logger().get_logger(self.__class__.__name__)
        else:
            self.log = logger

        self.signature = "( {} )".format(self.__class__.__name__)

    def execute(self, *args, env: dict = None) -> bool:
        output = pype.lib._subprocess(["pip", "install", "pywin32==227"])
        self.log.info(output)

        workfile_path = self.get_workfile_plath(env, self.host_name)

        # adding compulsory environment var for openting file
        env["PYPE_WORKFILE_PATH"] = workfile_path

        return True

    def get_anatomy_filled(self, workdir, project_name, asset_name,
                           task_name, host_name, extension):
        dbcon = avalon.api.AvalonMongoDB()
        dbcon.install()
        dbcon.Session["AVALON_PROJECT"] = project_name
        project_document = dbcon.find_one({"type": "project"})
        asset_document = dbcon.find_one({
            "type": "asset",
            "name": asset_name
        })
        dbcon.uninstall()

        asset_doc_parents = asset_document["data"].get("parents")
        hierarchy = "/".join(asset_doc_parents)

        data = {
            "project": {
                "name": project_document["name"],
                "code": project_document["data"].get("code")
            },
            "task": task_name,
            "asset": asset_name,
            "app": host_name,
            "hierarchy": hierarchy
        }
        anatomy = Anatomy(project_name)
        file_template = anatomy.templates["work"]["file"]
        data.update({
            "version": 1,
            "user": os.environ.get("PYPE_USERNAME") or getpass.getuser(),
            "ext": extension
        })

        return avalon.api.last_workfile(
            workdir, file_template, data,
            avalon.api.HOST_WORKFILE_EXTENSIONS[host_name], True
        )

    def get_workfile_plath(self, env, host_name):
        # get context variables
        project_name = env["AVALON_PROJECT"]
        asset_name = env["AVALON_ASSET"]
        task_name = env["AVALON_TASK"]
        workdir = env["AVALON_WORKDIR"]
        extension = avalon.api.HOST_WORKFILE_EXTENSIONS[host_name][0]
        template_env_key = "{}_TEMPLATE".format(host_name.upper())

        # get workfile path
        workfile_path = self.get_anatomy_filled(
            workdir, project_name, asset_name, task_name, host_name, extension)

        # create workdir if doesn't exist
        os.makedirs(workdir, exist_ok=True)
        self.log.info("Work dir is: `{}`".format(workdir))

        # get last version of workfile
        workfile_last = env.get("AVALON_LAST_WORKFILE")
        self.log.debug("_ workfile_last: `{}`".format(workfile_last))

        if workfile_last:
            workfile = workfile_last
            workfile_path = os.path.join(workdir, workfile)

        # copy workfile from template if doesnt exist any on path
        if not os.path.isfile(workfile_path):
            # try to get path from environment or use default
            # from `pype.hosts.<host_name>` dir
            template_path = env.get(template_env_key) or os.path.join(
                env.get("PYPE_MODULE_ROOT"),
                "pype/hosts/{}/template{}".format(host_name, extension)
            )

            # try to get template from project config folder
            proj_config_path = os.path.join(
                env["PYPE_PROJECT_CONFIGS"], project_name)
            if os.path.exists(proj_config_path):

                template_file = None
                for f in os.listdir(proj_config_path):
                    if extension in os.path.splitext(f):
                        template_file = f

                if template_file:
                    template_path = os.path.join(
                        proj_config_path, template_file)
            self.log.info(
                "Creating workfile from template: `{}`".format(template_path))

            # copy template to new destinantion
            shutil.copy2(
                os.path.normpath(template_path),
                os.path.normpath(workfile_path)
            )

        self.log.info("Workfile to open: `{}`".format(workfile_path))
        return workfile_path
BIN
pype/hosts/aftereffects/template.aep
Normal file
Binary file not shown.
@@ -1 +0,0 @@
-kwargs = None
1
pype/hosts/celaction/api/__init__.py
Normal file
@@ -0,0 +1 @@
+kwargs = None
@@ -11,18 +11,19 @@ import pyblish.util
 
 from pype.api import Logger
 import pype
-from pype.hosts import celaction
+import pype.hosts.celaction
+from pype.hosts.celaction import api as celaction
 
 log = Logger().get_logger("Celaction_cli_publisher")
 
 publish_host = "celaction"
 
-PUBLISH_PATH = os.path.join(pype.PLUGINS_DIR, publish_host, "publish")
-
-PUBLISH_PATHS = [
-    PUBLISH_PATH,
-    os.path.join(pype.PLUGINS_DIR, "ftrack", "publish")
-]
+HOST_DIR = os.path.dirname(os.path.abspath(pype.hosts.celaction.__file__))
+PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
+PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
+LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
+CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
+INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
 
 
 def cli():
@@ -2,7 +2,7 @@ import os
 import shutil
 import winreg
 from pype.lib import PreLaunchHook
-from pype.hosts import celaction
+from pype.hosts.celaction import api as celaction
 
 
 class CelactionPrelaunchHook(PreLaunchHook):
@@ -37,7 +37,7 @@ class CelactionPrelaunchHook(PreLaunchHook):
             "Software\\CelAction\\CelAction2D\\User Settings", 0,
             winreg.KEY_ALL_ACCESS)
 
-        # TODO: change to root path and pyblish standalone to premiere way
+        # TODO: change to pype executable
         pype_root_path = os.getenv("PYPE_SETUP_PATH")
         path = os.path.join(pype_root_path, "pype.bat")
 
@ -94,11 +94,12 @@ class CelactionPrelaunchHook(PreLaunchHook):
|
|||
if not os.path.exists(workfile_path):
|
||||
# TODO add ability to set different template workfile path via
|
||||
# settings
|
||||
pype_celaction_dir = os.path.dirname(
|
||||
pype_celaction_dir = os.path.dirname(os.path.dirname(
|
||||
os.path.abspath(celaction.__file__)
|
||||
)
|
||||
))
|
||||
template_path = os.path.join(
|
||||
pype_celaction_dir,
|
||||
"resources",
|
||||
"celaction_template_scene.scn"
|
||||
)
|
||||
|
||||
|
|
@ -1,126 +1,126 @@
|
|||
import os
|
||||
import collections
|
||||
|
||||
import pyblish.api
|
||||
from avalon import io
|
||||
|
||||
from pprint import pformat
|
||||
|
||||
|
||||
class AppendCelactionAudio(pyblish.api.ContextPlugin):
|
||||
|
||||
label = "Colect Audio for publishing"
|
||||
order = pyblish.api.CollectorOrder + 0.1
|
||||
|
||||
def process(self, context):
|
||||
self.log.info('Collecting Audio Data')
|
||||
asset_doc = context.data["assetEntity"]
|
||||
|
||||
# get all available representations
|
||||
subsets = self.get_subsets(
|
||||
asset_doc,
|
||||
representations=["audio", "wav"]
|
||||
)
|
||||
self.log.info(f"subsets is: {pformat(subsets)}")
|
||||
|
||||
if not subsets.get("audioMain"):
|
||||
raise AttributeError("`audioMain` subset does not exist")
|
||||
|
||||
reprs = subsets.get("audioMain", {}).get("representations", [])
|
||||
self.log.info(f"reprs is: {pformat(reprs)}")
|
||||
|
||||
repr = next((r for r in reprs), None)
|
||||
if not repr:
|
||||
raise "Missing `audioMain` representation"
|
||||
self.log.info(f"represetation is: {repr}")
|
||||
|
||||
audio_file = repr.get('data', {}).get('path', "")
|
||||
|
||||
if os.path.exists(audio_file):
|
||||
context.data["audioFile"] = audio_file
|
||||
self.log.info(
|
||||
'audio_file: {}, has been added to context'.format(audio_file))
|
||||
else:
|
||||
self.log.warning("Couldn't find any audio file on Ftrack.")
|
||||
|
||||
def get_subsets(self, asset_doc, representations):
|
||||
"""
|
||||
Query subsets with filter on name.
|
||||
|
||||
The method will return all found subsets and its defined version
|
||||
and subsets. Version could be specified with number. Representation
|
||||
can be filtered.
|
||||
|
||||
Arguments:
|
||||
asset_doct (dict): Asset (shot) mongo document
|
||||
representations (list): list for all representations
|
||||
|
||||
Returns:
|
||||
dict: subsets with version and representaions in keys
|
||||
"""
|
||||
|
||||
# Query all subsets for asset
|
||||
subset_docs = io.find({
|
||||
"type": "subset",
|
||||
"parent": asset_doc["_id"]
|
||||
})
|
||||
# Collect all subset ids
|
||||
subset_ids = [
|
||||
subset_doc["_id"]
|
||||
for subset_doc in subset_docs
|
||||
]
|
||||
|
||||
# Check if we found anything
|
||||
assert subset_ids, (
|
||||
"No subsets found. Check correct filter. "
|
||||
"Try this for start `r'.*'`: asset: `{}`"
|
||||
).format(asset_doc["name"])
|
||||
|
||||
# Last version aggregation
|
||||
pipeline = [
|
||||
# Find all versions of those subsets
|
||||
{"$match": {
|
||||
"type": "version",
|
||||
"parent": {"$in": subset_ids}
|
||||
}},
|
||||
# Sorting versions all together
|
||||
{"$sort": {"name": 1}},
|
||||
# Group them by "parent", but only take the last
|
||||
{"$group": {
|
||||
"_id": "$parent",
|
||||
"_version_id": {"$last": "$_id"},
|
||||
"name": {"$last": "$name"}
|
||||
}}
|
||||
]
|
||||
last_versions_by_subset_id = dict()
|
||||
for doc in io.aggregate(pipeline):
|
||||
doc["parent"] = doc["_id"]
|
||||
doc["_id"] = doc.pop("_version_id")
|
||||
last_versions_by_subset_id[doc["parent"]] = doc
|
||||
|
||||
version_docs_by_id = {}
|
||||
for version_doc in last_versions_by_subset_id.values():
|
||||
version_docs_by_id[version_doc["_id"]] = version_doc
|
||||
|
||||
repre_docs = io.find({
|
||||
"type": "representation",
|
||||
"parent": {"$in": list(version_docs_by_id.keys())},
|
||||
"name": {"$in": representations}
|
||||
})
|
||||
repre_docs_by_version_id = collections.defaultdict(list)
|
||||
for repre_doc in repre_docs:
|
||||
version_id = repre_doc["parent"]
|
||||
repre_docs_by_version_id[version_id].append(repre_doc)
|
||||
|
||||
output_dict = {}
|
||||
for version_id, repre_docs in repre_docs_by_version_id.items():
|
||||
version_doc = version_docs_by_id[version_id]
|
||||
subset_id = version_doc["parent"]
|
||||
subset_doc = last_versions_by_subset_id[subset_id]
|
||||
# Store queried docs by subset name
|
||||
output_dict[subset_doc["name"]] = {
|
||||
"representations": repre_docs,
|
||||
"version": version_doc
|
||||
}
|
||||
|
||||
return output_dict
@@ -1,23 +1,23 @@
 import pyblish.api
-from pype.hosts import celaction
+from pype.hosts.celaction import api as celaction
 
 
 class CollectCelactionCliKwargs(pyblish.api.Collector):
     """ Collects all keyword arguments passed from the terminal """
 
     label = "Collect Celaction Cli Kwargs"
     order = pyblish.api.Collector.order - 0.1
 
     def process(self, context):
         kwargs = celaction.kwargs.copy()
 
         self.log.info("Storing kwargs: %s" % kwargs)
         context.set_data("kwargs", kwargs)
 
         # get kwargs onto context data as keys with values
         for k, v in kwargs.items():
             self.log.info(f"Setting `{k}` to instance.data with value: `{v}`")
             if k in ["frameStart", "frameEnd"]:
                 context.data[k] = kwargs[k] = int(v)
             else:
                 context.data[k] = v
|
|
@ -1,96 +1,96 @@
|
|||
import os
|
||||
from avalon import api
|
||||
import pyblish.api
|
||||
|
||||
|
||||
class CollectCelactionInstances(pyblish.api.ContextPlugin):
|
||||
""" Adds the celaction render instances """
|
||||
|
||||
label = "Collect Celaction Instances"
|
||||
order = pyblish.api.CollectorOrder + 0.1
|
||||
|
||||
def process(self, context):
|
||||
task = api.Session["AVALON_TASK"]
|
||||
current_file = context.data["currentFile"]
|
||||
staging_dir = os.path.dirname(current_file)
|
||||
scene_file = os.path.basename(current_file)
|
||||
version = context.data["version"]
|
||||
asset_entity = context.data["assetEntity"]
|
||||
project_entity = context.data["projectEntity"]
|
||||
|
||||
shared_instance_data = {
|
||||
"asset": asset_entity["name"],
|
||||
"frameStart": asset_entity["data"]["frameStart"],
|
||||
"frameEnd": asset_entity["data"]["frameEnd"],
|
||||
"handleStart": asset_entity["data"]["handleStart"],
|
||||
"handleEnd": asset_entity["data"]["handleEnd"],
|
||||
"fps": asset_entity["data"]["fps"],
|
||||
"resolutionWidth": asset_entity["data"].get(
|
||||
"resolutionWidth",
|
||||
project_entity["data"]["resolutionWidth"]),
|
||||
"resolutionHeight": asset_entity["data"].get(
|
||||
"resolutionHeight",
|
||||
project_entity["data"]["resolutionHeight"]),
|
||||
"pixelAspect": 1,
|
||||
"step": 1,
|
||||
"version": version
|
||||
}
|
||||
|
||||
celaction_kwargs = context.data.get("kwargs", {})
|
||||
|
||||
if celaction_kwargs:
|
||||
shared_instance_data.update(celaction_kwargs)
|
||||
|
||||
# workfile instance
|
||||
family = "workfile"
|
||||
subset = family + task.capitalize()
|
||||
# Create instance
|
||||
instance = context.create_instance(subset)
|
||||
|
||||
# creating instance data
|
||||
instance.data.update({
|
||||
"subset": subset,
|
||||
"label": scene_file,
|
||||
"family": family,
|
||||
"families": [family, "ftrack"],
|
||||
"representations": list()
|
||||
})
|
||||
|
||||
# adding basic script data
|
||||
instance.data.update(shared_instance_data)
|
||||
|
||||
# creating representation
|
||||
representation = {
|
||||
'name': 'scn',
|
||||
'ext': 'scn',
|
||||
'files': scene_file,
|
||||
"stagingDir": staging_dir,
|
||||
}
|
||||
|
||||
instance.data["representations"].append(representation)
|
||||
|
||||
self.log.info('Publishing Celaction workfile')
|
||||
|
||||
# render instance
|
||||
family = "render.farm"
|
||||
subset = f"render{task}Main"
|
||||
instance = context.create_instance(name=subset)
|
||||
# getting instance state
|
||||
instance.data["publish"] = True
|
||||
|
||||
# add assetEntity data into instance
|
||||
instance.data.update({
|
||||
"label": "{} - farm".format(subset),
|
||||
"family": family,
|
||||
"families": [family],
|
||||
"subset": subset
|
||||
})
|
||||
|
||||
# adding basic script data
|
||||
instance.data.update(shared_instance_data)
|
||||
|
||||
self.log.info('Publishing Celaction render instance')
|
||||
self.log.debug(f"Instance data: `{instance.data}`")
|
||||
|
||||
for i in context:
|
||||
self.log.debug(f"{i.data['families']}")
@@ -1,20 +1,20 @@
import shutil
import pype
import pyblish.api


class VersionUpScene(pyblish.api.ContextPlugin):
    order = pyblish.api.IntegratorOrder + 0.5
    label = 'Version Up Scene'
    families = ['workfile']
    optional = True
    active = True

    def process(self, context):
        current_file = context.data.get('currentFile')
        v_up = pype.lib.version_up(current_file)
        self.log.debug('Current file is: {}'.format(current_file))
        self.log.debug('Version up: {}'.format(v_up))

        shutil.copy2(current_file, v_up)
        self.log.info('Scene saved into new version: {}'.format(v_up))
|
|
@ -1,217 +0,0 @@
|
|||
import os
|
||||
import logging
|
||||
import weakref
|
||||
|
||||
from maya import utils, cmds
|
||||
|
||||
from avalon import api as avalon, pipeline, maya
|
||||
from avalon.maya.pipeline import IS_HEADLESS
|
||||
from avalon.tools import workfiles
|
||||
from pyblish import api as pyblish
|
||||
|
||||
from ...lib import any_outdated
|
||||
from pype import PLUGINS_DIR
|
||||
|
||||
from . import menu
|
||||
from . import lib
|
||||
|
||||
log = logging.getLogger("pype.hosts.maya")
|
||||
|
||||
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "maya", "publish")
|
||||
LOAD_PATH = os.path.join(PLUGINS_DIR, "maya", "load")
|
||||
CREATE_PATH = os.path.join(PLUGINS_DIR, "maya", "create")
|
||||
|
||||
|
||||
def install():
|
||||
pyblish.register_plugin_path(PUBLISH_PATH)
|
||||
avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
|
||||
avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
|
||||
log.info(PUBLISH_PATH)
|
||||
menu.install()
|
||||
|
||||
log.info("Installing callbacks ... ")
|
||||
avalon.on("init", on_init)
|
||||
|
||||
# Callbacks below are not required for headless mode, the `init` however
|
||||
# is important to load referenced Alembics correctly at rendertime.
|
||||
if IS_HEADLESS:
|
||||
log.info("Running in headless mode, skipping Colorbleed Maya "
|
||||
"save/open/new callback installation..")
|
||||
return
|
||||
|
||||
avalon.on("save", on_save)
|
||||
avalon.on("open", on_open)
|
||||
avalon.on("new", on_new)
|
||||
avalon.before("save", on_before_save)
|
||||
|
||||
log.info("Overriding existing event 'taskChanged'")
|
||||
override_event("taskChanged", on_task_changed)
|
||||
|
||||
log.info("Setting default family states for loader..")
|
||||
avalon.data["familiesStateToggled"] = ["imagesequence"]
|
||||
|
||||
|
||||
def uninstall():
|
||||
pyblish.deregister_plugin_path(PUBLISH_PATH)
|
||||
avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
|
||||
avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
|
||||
|
||||
menu.uninstall()
|
||||
|
||||
|
||||
def override_event(event, callback):
|
||||
"""
|
||||
Override existing event callback
|
||||
Args:
|
||||
event (str): name of the event
|
||||
callback (function): callback to be triggered
|
||||
|
||||
Returns:
|
||||
None
|
||||
|
||||
"""
|
||||
|
||||
ref = weakref.WeakSet()
|
||||
ref.add(callback)
|
||||
|
||||
pipeline._registered_event_handlers[event] = ref
|
||||
|
||||
|
||||
def on_init(_):
|
||||
avalon.logger.info("Running callback on init..")
|
||||
|
||||
def safe_deferred(fn):
|
||||
"""Execute deferred the function in a try-except"""
|
||||
|
||||
def _fn():
|
||||
"""safely call in deferred callback"""
|
||||
try:
|
||||
fn()
|
||||
except Exception as exc:
|
||||
print(exc)
|
||||
|
||||
try:
|
||||
utils.executeDeferred(_fn)
|
||||
except Exception as exc:
|
||||
print(exc)
|
||||
|
||||
# Force load Alembic so referenced alembics
|
||||
# work correctly on scene open
|
||||
cmds.loadPlugin("AbcImport", quiet=True)
|
||||
cmds.loadPlugin("AbcExport", quiet=True)
|
||||
|
||||
# Force load objExport plug-in (requested by artists)
|
||||
cmds.loadPlugin("objExport", quiet=True)
|
||||
|
||||
from .customize import (
|
||||
override_component_mask_commands,
|
||||
override_toolbox_ui
|
||||
)
|
||||
safe_deferred(override_component_mask_commands)
|
||||
|
||||
launch_workfiles = os.environ.get("WORKFILES_STARTUP")
|
||||
|
||||
if launch_workfiles:
|
||||
safe_deferred(launch_workfiles_app)
|
||||
|
||||
if not IS_HEADLESS:
|
||||
safe_deferred(override_toolbox_ui)
|
||||
|
||||
|
||||
def launch_workfiles_app(*args):
|
||||
workfiles.show(os.environ["AVALON_WORKDIR"])
|
||||
|
||||
|
||||
def on_before_save(return_code, _):
|
||||
"""Run validation for scene's FPS prior to saving"""
|
||||
return lib.validate_fps()
|
||||
|
||||
|
||||
def on_save(_):
|
||||
"""Automatically add IDs to new nodes
|
||||
|
||||
Any transform of a mesh, without an existing ID, is given one
|
||||
automatically on file save.
|
||||
"""
|
||||
|
||||
avalon.logger.info("Running callback on save..")
|
||||
|
||||
# # Update current task for the current scene
|
||||
# update_task_from_path(cmds.file(query=True, sceneName=True))
|
||||
|
||||
# Generate ids of the current context on nodes in the scene
|
||||
nodes = lib.get_id_required_nodes(referenced_nodes=False)
|
||||
for node, new_id in lib.generate_ids(nodes):
|
||||
lib.set_id(node, new_id, overwrite=False)
|
||||
|
||||
|
||||
def on_open(_):
|
||||
"""On scene open let's assume the containers have changed."""
|
||||
|
||||
from avalon.vendor.Qt import QtWidgets
|
||||
from ...widgets import popup
|
||||
|
||||
cmds.evalDeferred(
|
||||
"from pype.hosts.maya import lib;lib.remove_render_layer_observer()")
|
||||
cmds.evalDeferred(
|
||||
"from pype.hosts.maya import lib;lib.add_render_layer_observer()")
|
||||
cmds.evalDeferred(
|
||||
"from pype.hosts.maya import lib;lib.add_render_layer_change_observer()")
|
||||
# # Update current task for the current scene
|
||||
# update_task_from_path(cmds.file(query=True, sceneName=True))
|
||||
|
||||
# Validate FPS after update_task_from_path to
|
||||
# ensure it is using correct FPS for the asset
|
||||
lib.validate_fps()
|
||||
lib.fix_incompatible_containers()
|
||||
|
||||
if any_outdated():
|
||||
log.warning("Scene has outdated content.")
|
||||
|
||||
# Find maya main window
|
||||
top_level_widgets = {w.objectName(): w for w in
|
||||
QtWidgets.QApplication.topLevelWidgets()}
|
||||
parent = top_level_widgets.get("MayaWindow", None)
|
||||
|
||||
if parent is None:
|
||||
log.info("Skipping outdated content pop-up "
|
||||
"because Maya window can't be found.")
|
||||
else:
|
||||
|
||||
# Show outdated pop-up
|
||||
def _on_show_inventory():
|
||||
import avalon.tools.sceneinventory as tool
|
||||
tool.show(parent=parent)
|
||||
|
||||
dialog = popup.Popup(parent=parent)
|
||||
dialog.setWindowTitle("Maya scene has outdated content")
|
||||
dialog.setMessage("There are outdated containers in "
|
||||
"your Maya scene.")
|
||||
dialog.on_show.connect(_on_show_inventory)
|
||||
dialog.show()
|
||||
|
||||
|
||||
def on_new(_):
|
||||
"""Set project resolution and fps when create a new file"""
|
||||
avalon.logger.info("Running callback on new..")
|
||||
with maya.suspended_refresh():
|
||||
cmds.evalDeferred(
|
||||
"from pype.hosts.maya import lib;lib.remove_render_layer_observer()")
|
||||
cmds.evalDeferred(
|
||||
"from pype.hosts.maya import lib;lib.add_render_layer_observer()")
|
||||
cmds.evalDeferred(
|
||||
"from pype.hosts.maya import lib;lib.add_render_layer_change_observer()")
|
||||
lib.set_context_settings()
|
||||
|
||||
|
||||
def on_task_changed(*args):
|
||||
"""Wrapped function of app initialize and maya's on task changed"""
|
||||
# Run
|
||||
maya.pipeline._on_task_changed()
|
||||
with maya.suspended_refresh():
|
||||
lib.set_context_settings()
|
||||
lib.update_content_on_context_change()
|
||||
|
||||
lib.show_message("Context was changed",
|
||||
("Context was changed to {}".format(
|
||||
avalon.Session["AVALON_ASSET"])))
|
||||
226
pype/hosts/maya/api/__init__.py
Normal file
|
|
@ -0,0 +1,226 @@
|
|||
import os
|
||||
import logging
|
||||
import weakref
|
||||
|
||||
from maya import utils, cmds
|
||||
|
||||
from avalon import api as avalon
|
||||
from avalon import pipeline
|
||||
from avalon.maya import suspended_refresh
|
||||
from avalon.maya.pipeline import IS_HEADLESS, _on_task_changed
|
||||
from avalon.tools import workfiles
|
||||
from pyblish import api as pyblish
|
||||
from pype.lib import any_outdated
|
||||
import pype.hosts.maya
|
||||
from . import menu, lib
|
||||
|
||||
log = logging.getLogger("pype.hosts.maya")
|
||||
|
||||
HOST_DIR = os.path.dirname(os.path.abspath(pype.hosts.maya.__file__))
|
||||
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
|
||||
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
|
||||
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
|
||||
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
|
||||
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
|
||||
|
||||
|
||||
def install():
|
||||
pyblish.register_plugin_path(PUBLISH_PATH)
|
||||
avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
|
||||
avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
|
||||
log.info(PUBLISH_PATH)
|
||||
menu.install()
|
||||
|
||||
log.info("Installing callbacks ... ")
|
||||
avalon.on("init", on_init)
|
||||
|
||||
# Callbacks below are not required for headless mode, the `init` however
|
||||
# is important to load referenced Alembics correctly at rendertime.
|
||||
if IS_HEADLESS:
|
||||
log.info("Running in headless mode, skipping Maya "
|
||||
"save/open/new callback installation..")
|
||||
return
|
||||
|
||||
avalon.on("save", on_save)
|
||||
avalon.on("open", on_open)
|
||||
avalon.on("new", on_new)
|
||||
avalon.before("save", on_before_save)
|
||||
|
||||
log.info("Overriding existing event 'taskChanged'")
|
||||
override_event("taskChanged", on_task_changed)
|
||||
|
||||
log.info("Setting default family states for loader..")
|
||||
avalon.data["familiesStateToggled"] = ["imagesequence"]
|
||||
|
||||
|
||||
def uninstall():
|
||||
pyblish.deregister_plugin_path(PUBLISH_PATH)
|
||||
avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
|
||||
avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
|
||||
|
||||
menu.uninstall()
|
||||
|
||||
|
||||
def override_event(event, callback):
|
||||
"""
|
||||
Override existing event callback
|
||||
Args:
|
||||
event (str): name of the event
|
||||
callback (function): callback to be triggered
|
||||
|
||||
Returns:
|
||||
None
|
||||
|
||||
"""
|
||||
|
||||
ref = weakref.WeakSet()
|
||||
ref.add(callback)
|
||||
|
||||
pipeline._registered_event_handlers[event] = ref
|
||||
|
||||
|
||||
def on_init(_):
|
||||
avalon.logger.info("Running callback on init..")
|
||||
|
||||
def safe_deferred(fn):
|
||||
"""Execute deferred the function in a try-except"""
|
||||
|
||||
def _fn():
|
||||
"""safely call in deferred callback"""
|
||||
try:
|
||||
fn()
|
||||
except Exception as exc:
|
||||
print(exc)
|
||||
|
||||
try:
|
||||
utils.executeDeferred(_fn)
|
||||
except Exception as exc:
|
||||
print(exc)
|
||||
|
||||
# Force load Alembic so referenced alembics
|
||||
# work correctly on scene open
|
||||
cmds.loadPlugin("AbcImport", quiet=True)
|
||||
cmds.loadPlugin("AbcExport", quiet=True)
|
||||
|
||||
# Force load objExport plug-in (requested by artists)
|
||||
cmds.loadPlugin("objExport", quiet=True)
|
||||
|
||||
from .customize import (
|
||||
override_component_mask_commands,
|
||||
override_toolbox_ui
|
||||
)
|
||||
safe_deferred(override_component_mask_commands)
|
||||
|
||||
launch_workfiles = os.environ.get("WORKFILES_STARTUP")
|
||||
|
||||
if launch_workfiles:
|
||||
safe_deferred(launch_workfiles_app)
|
||||
|
||||
if not IS_HEADLESS:
|
||||
safe_deferred(override_toolbox_ui)
|
||||
|
||||
|
||||
def launch_workfiles_app():
|
||||
workfiles.show(os.environ["AVALON_WORKDIR"])
|
||||
|
||||
|
||||
def on_before_save(return_code, _):
|
||||
"""Run validation for scene's FPS prior to saving"""
|
||||
return lib.validate_fps()
|
||||
|
||||
|
||||
def on_save(_):
|
||||
"""Automatically add IDs to new nodes
|
||||
|
||||
Any transform of a mesh, without an existing ID, is given one
|
||||
automatically on file save.
|
||||
"""
|
||||
|
||||
avalon.logger.info("Running callback on save..")
|
||||
|
||||
# # Update current task for the current scene
|
||||
# update_task_from_path(cmds.file(query=True, sceneName=True))
|
||||
|
||||
# Generate ids of the current context on nodes in the scene
|
||||
nodes = lib.get_id_required_nodes(referenced_nodes=False)
|
||||
for node, new_id in lib.generate_ids(nodes):
|
||||
lib.set_id(node, new_id, overwrite=False)
|
||||
|
||||
|
||||
def on_open(_):
|
||||
"""On scene open let's assume the containers have changed."""
|
||||
|
||||
from avalon.vendor.Qt import QtWidgets
|
||||
from ...widgets import popup
|
||||
|
||||
cmds.evalDeferred(
|
||||
"from pype.hosts.maya.api import lib;"
|
||||
"lib.remove_render_layer_observer()")
|
||||
cmds.evalDeferred(
|
||||
"from pype.hosts.maya.api import lib;"
|
||||
"lib.add_render_layer_observer()")
|
||||
cmds.evalDeferred(
|
||||
"from pype.hosts.maya.api import lib;"
|
||||
"lib.add_render_layer_change_observer()")
|
||||
# # Update current task for the current scene
|
||||
# update_task_from_path(cmds.file(query=True, sceneName=True))
|
||||
|
||||
# Validate FPS after update_task_from_path to
|
||||
# ensure it is using correct FPS for the asset
|
||||
lib.validate_fps()
|
||||
lib.fix_incompatible_containers()
|
||||
|
||||
if any_outdated():
|
||||
log.warning("Scene has outdated content.")
|
||||
|
||||
# Find maya main window
|
||||
top_level_widgets = {w.objectName(): w for w in
|
||||
QtWidgets.QApplication.topLevelWidgets()}
|
||||
parent = top_level_widgets.get("MayaWindow", None)
|
||||
|
||||
if parent is None:
|
||||
log.info("Skipping outdated content pop-up "
|
||||
"because Maya window can't be found.")
|
||||
else:
|
||||
|
||||
# Show outdated pop-up
|
||||
def _on_show_inventory():
|
||||
import avalon.tools.sceneinventory as tool
|
||||
tool.show(parent=parent)
|
||||
|
||||
dialog = popup.Popup(parent=parent)
|
||||
dialog.setWindowTitle("Maya scene has outdated content")
|
||||
dialog.setMessage("There are outdated containers in "
|
||||
"your Maya scene.")
|
||||
dialog.on_show.connect(_on_show_inventory)
|
||||
dialog.show()
|
||||
|
||||
|
||||
def on_new(_):
|
||||
"""Set project resolution and fps when create a new file"""
|
||||
avalon.logger.info("Running callback on new..")
|
||||
with suspended_refresh():
|
||||
cmds.evalDeferred(
|
||||
"from pype.hosts.maya.api import lib;"
|
||||
"lib.remove_render_layer_observer()")
|
||||
cmds.evalDeferred(
|
||||
"from pype.hosts.maya.api import lib;"
|
||||
"lib.add_render_layer_observer()")
|
||||
cmds.evalDeferred(
|
||||
"from pype.hosts.maya.api import lib;"
|
||||
"lib.add_render_layer_change_observer()")
|
||||
lib.set_context_settings()
|
||||
|
||||
|
||||
def on_task_changed(*args):
|
||||
"""Wrapped function of app initialize and maya's on task changed"""
|
||||
# Run
|
||||
_on_task_changed()
|
||||
with suspended_refresh():
|
||||
lib.set_context_settings()
|
||||
lib.update_content_on_context_change()
|
||||
|
||||
lib.show_message(
|
||||
"Context was changed",
|
||||
("Context was changed to {}".format(avalon.Session["AVALON_ASSET"])),
|
||||
)
|
||||
|
@@ -4,7 +4,7 @@ from __future__ import absolute_import
 import pyblish.api
 
 
-from ...action import get_errored_instances_from_context
+from pype.api import get_errored_instances_from_context
 
 
 class GenerateUUIDsOnInvalidAction(pyblish.api.Action):
@@ -44,7 +44,7 @@ from abc import ABCMeta, abstractmethod
 
 import six
 
-import pype.hosts.maya.lib as lib
+import pype.hosts.maya.api.lib as lib
 
 from maya import cmds
 import maya.app.renderSetup.model.renderSetup as renderSetup
@@ -114,9 +114,7 @@ def matrix_equals(a, b, tolerance=1e-10):
         bool : True or False
 
     """
-    if not all(abs(x - y) < tolerance for x, y in zip(a, b)):
-        return False
-    return True
+    return all(abs(x - y) < tolerance for x, y in zip(a, b))
 
 
 def float_round(num, places=0, direction=ceil):
@@ -1084,7 +1082,7 @@ def get_id_required_nodes(referenced_nodes=False, nodes=None):
 
    # Check if plugin nodes are available for Maya by checking if the plugin
    # is loaded
-    if cmds.pluginInfo("pgYetiMaya", query=True, loaded=True):
+    if cmds.pluginInfo("pgYetiMaya", query=True, loaded=True):
        types.append("pgYetiMaya")
 
    # We *always* ignore intermediate shapes, so we filter them out directly
@@ -4,7 +4,7 @@ import logging
 
 from avalon.vendor.Qt import QtWidgets, QtGui
 from avalon.maya import pipeline
-from ...lib import BuildWorkfile
+from pype.api import BuildWorkfile
 import maya.cmds as cmds
 
 self = sys.modules[__name__]
@@ -9,7 +9,7 @@ from maya import cmds
 
 from avalon import api, io
 from avalon.maya.lib import unique_namespace
-from pype.hosts.maya.lib import matrix_equals
+from pype.hosts.maya.api.lib import matrix_equals
 
 log = logging.getLogger("PackageLoader")
 
0
pype/hosts/maya/plugins/__init__.py
Normal file
@@ -1,5 +1,5 @@
 import avalon.maya
-from pype.hosts.maya import lib
+from pype.hosts.maya.api import lib
 
 
 class CreateAnimation(avalon.maya.Creator):
@@ -1,7 +1,7 @@
 from collections import OrderedDict
 
 import avalon.maya
-from pype.hosts.maya import lib
+from pype.hosts.maya.api import lib
 
 from maya import cmds
 
@@ -1,5 +1,5 @@
 import avalon.maya
-from pype.hosts.maya import lib
+from pype.hosts.maya.api import lib
 
 
 class CreateCamera(avalon.maya.Creator):
@@ -1,5 +1,5 @@
 import avalon.maya
-from pype.hosts.maya import lib
+from pype.hosts.maya.api import lib
 
 
 class CreateLook(avalon.maya.Creator):
@@ -1,5 +1,5 @@
 import avalon.maya
-from pype.hosts.maya import lib
+from pype.hosts.maya.api import lib
 
 
 class CreatePointCache(avalon.maya.Creator):
@@ -8,7 +8,7 @@ import requests
 from maya import cmds
 import maya.app.renderSetup.model.renderSetup as renderSetup
 
-from pype.hosts.maya import lib
+from pype.hosts.maya.api import lib
 from pype.api import get_system_settings
 import avalon.maya
 
@@ -1,5 +1,5 @@
 import avalon.maya
-from pype.hosts.maya import lib
+from pype.hosts.maya.api import lib
 from maya import cmds
 
 
@@ -1,6 +1,6 @@
 from collections import OrderedDict
 import avalon.maya
-from pype.hosts.maya import lib
+from pype.hosts.maya.api import lib
 
 
 class CreateReview(avalon.maya.Creator):
@@ -1,6 +1,6 @@
 from maya import cmds
 
-from pype.hosts.maya import lib
+from pype.hosts.maya.api import lib
 import avalon.maya
 
 
@@ -1,7 +1,7 @@
 from collections import OrderedDict
 
 import avalon.maya
-from pype.hosts.maya import lib
+from pype.hosts.maya.api import lib
 
 
 class CreateYetiCache(avalon.maya.Creator):
@@ -1,6 +1,6 @@
 from maya import cmds
 
-from pype.hosts.maya import lib
+from pype.hosts.maya.api import lib
 import avalon.maya
 
 
@@ -1,7 +1,7 @@
-import pype.hosts.maya.plugin
+import pype.hosts.maya.api.plugin
 
 
-class AbcLoader(pype.hosts.maya.plugin.ReferenceLoader):
+class AbcLoader(pype.hosts.maya.api.plugin.ReferenceLoader):
     """Specific loader of Alembic for the avalon.animation family"""
 
     families = ["animation",
@@ -1,11 +1,11 @@
 from avalon import api
-import pype.hosts.maya.plugin
+import pype.hosts.maya.api.plugin
 import os
 from pype.api import get_project_settings
 import clique
 
 
-class AssProxyLoader(pype.hosts.maya.plugin.ReferenceLoader):
+class AssProxyLoader(pype.hosts.maya.api.plugin.ReferenceLoader):
     """Load the Proxy"""
 
     families = ["ass"]
@ -23,9 +23,9 @@ class AssemblyLoader(api.Loader):
|
|||
suffix="_",
|
||||
)
|
||||
|
||||
from pype import setdress_api
|
||||
from pype.hosts.maya.api import setdress
|
||||
|
||||
containers = setdress_api.load_package(filepath=self.fname,
|
||||
containers = setdress.load_package(filepath=self.fname,
|
||||
name=name,
|
||||
namespace=namespace)
|
||||
|
||||
|
|
@ -45,19 +45,19 @@ class AssemblyLoader(api.Loader):
|
|||
|
||||
def update(self, container, representation):
|
||||
|
||||
from pype import setdress_api
|
||||
return setdress_api.update_package(container,
|
||||
from pype import setdress
|
||||
return setdress.update_package(container,
|
||||
representation)
|
||||
|
||||
def remove(self, container):
|
||||
"""Remove all sub containers"""
|
||||
|
||||
from avalon import api
|
||||
from pype import setdress_api
|
||||
from pype import setdress
|
||||
import maya.cmds as cmds
|
||||
|
||||
# Remove all members
|
||||
member_containers = setdress_api.get_contained_containers(container)
|
||||
member_containers = setdress.get_contained_containers(container)
|
||||
for member_container in member_containers:
|
||||
self.log.info("Removing container %s",
|
||||
member_container['objectName'])
|
||||
|
|
@ -1,10 +1,7 @@
|
|||
from maya import cmds, mel
|
||||
import pymel.core as pc
|
||||
|
||||
from avalon import api, io
|
||||
from avalon.maya.pipeline import containerise
|
||||
from avalon.maya import lib
|
||||
|
||||
from maya import cmds, mel
|
||||
|
||||
class AudioLoader(api.Loader):
|
||||
"""Specific loader of audio."""
|
||||
|
|
@ -15,7 +12,9 @@ class AudioLoader(api.Loader):
|
|||
icon = "volume-up"
|
||||
color = "orange"
|
||||
|
||||
|
||||
def load(self, context, name, namespace, data):
|
||||
|
||||
start_frame = cmds.playbackOptions(query=True, min=True)
|
||||
sound_node = cmds.sound(
|
||||
file=context["representation"]["data"]["path"], offset=start_frame
|
||||
|
|
@ -43,8 +42,10 @@ class AudioLoader(api.Loader):
|
|||
)
|
||||
|
||||
def update(self, container, representation):
|
||||
import pymel.core as pm
|
||||
|
||||
audio_node = None
|
||||
for node in pc.PyNode(container["objectName"]).members():
|
||||
for node in pm.PyNode(container["objectName"]).members():
|
||||
if node.nodeType() == "audio":
|
||||
audio_node = node
|
||||
|
||||
|
|
@ -1,9 +1,6 @@
|
|||
from avalon import api
|
||||
import pype.hosts.maya.plugin
|
||||
import os
|
||||
from avalon import api
|
||||
from pype.api import get_project_settings
|
||||
reload(config)
|
||||
|
||||
|
||||
class GpuCacheLoader(api.Loader):
|
||||
"""Load model Alembic as gpuCache"""
|
||||
|
|
@ -1,11 +1,10 @@
|
|||
import pymel.core as pc
|
||||
import maya.cmds as cmds
|
||||
|
||||
from avalon import api, io
|
||||
from avalon.maya.pipeline import containerise
|
||||
from avalon.maya import lib
|
||||
from Qt import QtWidgets, QtCore
|
||||
|
||||
from maya import cmds
|
||||
|
||||
|
||||
class CameraWindow(QtWidgets.QDialog):
|
||||
|
||||
|
|
@ -73,6 +72,8 @@ class ImagePlaneLoader(api.Loader):
|
|||
color = "orange"
|
||||
|
||||
def load(self, context, name, namespace, data):
|
||||
import pymel.core as pm
|
||||
|
||||
new_nodes = []
|
||||
image_plane_depth = 1000
|
||||
asset = context['asset']['name']
|
||||
|
|
@ -88,7 +89,7 @@ class ImagePlaneLoader(api.Loader):
|
|||
"frontShape", "perspShape", "sideShape", "topShape"
|
||||
]
|
||||
cameras = [
|
||||
x for x in pc.ls(type="camera") if x.name() not in default_cameras
|
||||
x for x in pm.ls(type="camera") if x.name() not in default_cameras
|
||||
]
|
||||
camera_names = {x.getParent().name(): x for x in cameras}
|
||||
camera_names["Create new camera."] = "create_camera"
|
||||
|
|
@ -97,7 +98,7 @@ class ImagePlaneLoader(api.Loader):
|
|||
camera = camera_names[window.camera]
|
||||
|
||||
if camera == "create_camera":
|
||||
camera = pc.createNode("camera")
|
||||
camera = pm.createNode("camera")
|
||||
|
||||
if camera is None:
|
||||
return
|
||||
|
|
@ -109,7 +110,7 @@ class ImagePlaneLoader(api.Loader):
|
|||
pass
|
||||
|
||||
# Create image plane
|
||||
image_plane_transform, image_plane_shape = pc.imagePlane(
|
||||
image_plane_transform, image_plane_shape = pm.imagePlane(
|
||||
camera=camera, showInAllViews=False
|
||||
)
|
||||
image_plane_shape.depth.set(image_plane_depth)
|
||||
|
|
@ -118,8 +119,8 @@ class ImagePlaneLoader(api.Loader):
|
|||
context["representation"]["data"]["path"]
|
||||
)
|
||||
|
||||
start_frame = pc.playbackOptions(q=True, min=True)
|
||||
end_frame = pc.playbackOptions(q=True, max=True)
|
||||
start_frame = pm.playbackOptions(q=True, min=True)
|
||||
end_frame = pm.playbackOptions(q=True, max=True)
|
||||
|
||||
image_plane_shape.frameOffset.set(1 - start_frame)
|
||||
image_plane_shape.frameIn.set(start_frame)
|
||||
|
|
@ -130,12 +131,12 @@ class ImagePlaneLoader(api.Loader):
|
|||
movie_representations = ["mov", "preview"]
|
||||
if context["representation"]["name"] in movie_representations:
|
||||
# Need to get "type" by string, because its a method as well.
|
||||
pc.Attribute(image_plane_shape + ".type").set(2)
|
||||
pm.Attribute(image_plane_shape + ".type").set(2)
|
||||
|
||||
# Ask user whether to use sequence or still image.
|
||||
if context["representation"]["name"] == "exr":
|
||||
# Ensure OpenEXRLoader plugin is loaded.
|
||||
pc.loadPlugin("OpenEXRLoader.mll", quiet=True)
|
||||
pm.loadPlugin("OpenEXRLoader.mll", quiet=True)
|
||||
|
||||
message = (
|
||||
"Hold image sequence on first frame?"
|
||||
|
|
@ -151,7 +152,7 @@ class ImagePlaneLoader(api.Loader):
|
|||
QtWidgets.QMessageBox.Cancel
|
||||
)
|
||||
if reply == QtWidgets.QMessageBox.Ok:
|
||||
pc.delete(
|
||||
pm.delete(
|
||||
image_plane_shape.listConnections(type="expression")[0]
|
||||
)
|
||||
image_plane_shape.frameExtension.set(start_frame)
|
||||
|
|
@ -164,7 +165,7 @@ class ImagePlaneLoader(api.Loader):
|
|||
)
|
||||
|
||||
for node in new_nodes:
|
||||
pc.rename(node, "{}:{}".format(namespace, node))
|
||||
pm.rename(node, "{}:{}".format(namespace, node))
|
||||
|
||||
return containerise(
|
||||
name=name,
|
||||
|
|
@ -175,8 +176,9 @@ class ImagePlaneLoader(api.Loader):
|
|||
)
|
||||
|
||||
def update(self, container, representation):
|
||||
import pymel.core as pm
|
||||
image_plane_shape = None
|
||||
for node in pc.PyNode(container["objectName"]).members():
|
||||
for node in pm.PyNode(container["objectName"]).members():
|
||||
if node.nodeType() == "imagePlane":
|
||||
image_plane_shape = node
|
||||
|
||||
|
|
@ -1,13 +1,13 @@
|
|||
import pype.hosts.maya.plugin
|
||||
import pype.hosts.maya.api.plugin
|
||||
from avalon import api, io
|
||||
import json
|
||||
import pype.hosts.maya.lib
|
||||
import pype.hosts.maya.api.lib
|
||||
from collections import defaultdict
|
||||
from pype.widgets.message_window import ScrollMessageBox
|
||||
from Qt import QtWidgets
|
||||
|
||||
|
||||
class LookLoader(pype.hosts.maya.plugin.ReferenceLoader):
|
||||
class LookLoader(pype.hosts.maya.api.plugin.ReferenceLoader):
|
||||
"""Specific loader for lookdev"""
|
||||
|
||||
families = ["look"]
|
||||
|
|
@ -120,7 +120,7 @@ class LookLoader(pype.hosts.maya.plugin.ReferenceLoader):
|
|||
cmds.file(cr=reference_node) # cleanReference
|
||||
|
||||
# reapply shading groups from json representation on orig nodes
|
||||
pype.hosts.maya.lib.apply_shaders(relationships,
|
||||
pype.hosts.maya.api.lib.apply_shaders(relationships,
|
||||
shader_nodes,
|
||||
orig_nodes)
|
||||
|
||||
|
|
@ -138,8 +138,8 @@ class LookLoader(pype.hosts.maya.plugin.ReferenceLoader):
|
|||
# region compute lookup
|
||||
nodes_by_id = defaultdict(list)
|
||||
for n in nodes:
|
||||
nodes_by_id[pype.hosts.maya.lib.get_id(n)].append(n)
|
||||
pype.hosts.maya.lib.apply_attributes(attributes, nodes_by_id)
|
||||
nodes_by_id[pype.hosts.maya.api.lib.get_id(n)].append(n)
|
||||
pype.hosts.maya.api.lib.apply_attributes(attributes, nodes_by_id)
|
||||
|
||||
# Update metadata
|
||||
cmds.setAttr("{}.representation".format(node),
|
||||
|
|
@ -1,11 +1,11 @@
|
|||
import pype.hosts.maya.plugin
|
||||
import pype.hosts.maya.api.plugin
|
||||
from avalon import api, maya
|
||||
from maya import cmds
|
||||
import os
|
||||
from pype.api import get_project_settings
|
||||
|
||||
|
||||
class ReferenceLoader(pype.hosts.maya.plugin.ReferenceLoader):
|
||||
class ReferenceLoader(pype.hosts.maya.api.plugin.ReferenceLoader):
|
||||
"""Load the model"""
|
||||
|
||||
families = ["model",
|
||||
|
|
@ -12,7 +12,7 @@ import sys
|
|||
|
||||
from avalon import api
|
||||
from avalon.maya import lib
|
||||
from pype.hosts.maya import lib as pypelib
|
||||
from pype.hosts.maya.api import lib as pypelib
|
||||
|
||||
from maya import cmds
|
||||
import maya.app.renderSetup.model.renderSetup as renderSetup
|
||||
|
|
@ -32,7 +32,7 @@ class RenderSetupLoader(api.Loader):
|
|||
def load(self, context, name, namespace, data):
|
||||
"""Load RenderSetup settings."""
|
||||
from avalon.maya.pipeline import containerise
|
||||
# from pype.hosts.maya.lib import namespaced
|
||||
# from pype.hosts.maya.api.lib import namespaced
|
||||
|
||||
asset = context['asset']['name']
|
||||
namespace = namespace or lib.unique_namespace(
|
||||
|
|
@ -19,7 +19,7 @@ class VRayProxyLoader(api.Loader):
|
|||
def load(self, context, name, namespace, data):
|
||||
|
||||
from avalon.maya.pipeline import containerise
|
||||
from pype.hosts.maya.lib import namespaced
|
||||
from pype.hosts.maya.api.lib import namespaced
|
||||
|
||||
try:
|
||||
family = context["representation"]["context"]["family"]
|
||||
|
|
@ -8,7 +8,7 @@ from maya import cmds
|
|||
|
||||
from avalon import api, io
|
||||
from avalon.maya import lib as avalon_lib, pipeline
|
||||
from pype.hosts.maya import lib
|
||||
from pype.hosts.maya.api import lib
|
||||
from pype.api import get_project_settings
|
||||
from pprint import pprint
|
||||
|
||||
|
|
@ -2,11 +2,11 @@ import os
|
|||
from collections import defaultdict
|
||||
|
||||
from pype.api import get_project_settings
|
||||
import pype.hosts.maya.plugin
|
||||
from pype.hosts.maya import lib
|
||||
import pype.hosts.maya.api.plugin
|
||||
from pype.hosts.maya.api import lib
|
||||
|
||||
|
||||
class YetiRigLoader(pype.hosts.maya.plugin.ReferenceLoader):
|
||||
class YetiRigLoader(pype.hosts.maya.api.plugin.ReferenceLoader):
|
||||
"""
|
||||
This loader will load Yeti rig. You can select something in scene and if it
|
||||
has same ID as mesh published with rig, their shapes will be linked
|
||||
0
pype/hosts/maya/plugins/publish/__init__.py
Normal file
|
|
@ -3,7 +3,7 @@ import pyblish.api
|
|||
|
||||
from maya import cmds, mel
|
||||
from avalon import maya as avalon
|
||||
from pype.hosts.maya import lib
|
||||
from pype.hosts.maya.api import lib
|
||||
|
||||
# TODO : Publish of assembly: -unique namespace for all assets, VALIDATOR!
|
||||
|
||||
|
|
@ -4,7 +4,7 @@ import glob
|
|||
|
||||
from maya import cmds
|
||||
import pyblish.api
|
||||
from pype.hosts.maya import lib
|
||||
from pype.hosts.maya.api import lib
|
||||
|
||||
SHAPE_ATTRS = ["castsShadows",
|
||||
"receiveShadows",
|
||||
|
|
@ -49,8 +49,8 @@ import maya.app.renderSetup.model.renderSetup as renderSetup
|
|||
import pyblish.api
|
||||
|
||||
from avalon import maya, api
|
||||
from pype.hosts.maya.expected_files import ExpectedFiles
|
||||
from pype.hosts.maya import lib
|
||||
from pype.hosts.maya.api.expected_files import ExpectedFiles
|
||||
from pype.hosts.maya.api import lib
|
||||
|
||||
|
||||
class CollectMayaRender(pyblish.api.ContextPlugin):
|
||||
|
|
@ -2,7 +2,7 @@ from maya import cmds
|
|||
|
||||
import pyblish.api
|
||||
|
||||
from pype.hosts.maya import lib
|
||||
from pype.hosts.maya.api import lib
|
||||
|
||||
|
||||
class CollectRenderLayerAOVS(pyblish.api.InstancePlugin):
|
||||
|
|
@ -2,7 +2,7 @@ import pyblish.api
|
|||
|
||||
from maya import cmds
|
||||
|
||||
from pype.hosts.maya import lib
|
||||
from pype.hosts.maya.api import lib
|
||||
|
||||
|
||||
class CollectRenderableCamera(pyblish.api.InstancePlugin):
|
||||
|
|
@ -2,7 +2,7 @@ from maya import cmds
|
|||
|
||||
import pyblish.api
|
||||
|
||||
from pype.hosts.maya import lib
|
||||
from pype.hosts.maya.api import lib
|
||||
|
||||
SETTINGS = {"renderDensity",
|
||||
"renderWidth",
|
||||
|
|
@ -5,7 +5,7 @@ from maya import cmds
|
|||
|
||||
import pyblish.api
|
||||
|
||||
from pype.hosts.maya import lib
|
||||
from pype.hosts.maya.api import lib
|
||||
|
||||
|
||||
SETTINGS = {"renderDensity",
|
||||
|
|
@ -4,7 +4,7 @@ from maya import cmds
|
|||
|
||||
import avalon.maya
|
||||
import pype.api
|
||||
from pype.hosts.maya.lib import extract_alembic
|
||||
from pype.hosts.maya.api.lib import extract_alembic
|
||||
|
||||
|
||||
class ExtractAnimation(pype.api.Extractor):
|
||||
|
|
@ -3,7 +3,7 @@ import json
|
|||
import os
|
||||
|
||||
import pype.api
|
||||
from pype.hosts.maya.lib import extract_alembic
|
||||
from pype.hosts.maya.api.lib import extract_alembic
|
||||
|
||||
from maya import cmds
|
||||
|
||||
|
|
@ -5,7 +5,6 @@ import contextlib
|
|||
|
||||
import avalon.maya
|
||||
import pype.api
|
||||
from pype.hosts.maya import lib
|
||||
|
||||
|
||||
class ExtractAssProxy(pype.api.Extractor):
|
||||
|
|
@ -5,7 +5,7 @@ from maya import cmds
|
|||
import avalon.maya
|
||||
import pype.api
|
||||
|
||||
from pype.hosts.maya import lib
|
||||
from pype.hosts.maya.api import lib
|
||||
|
||||
|
||||
class ExtractCameraAlembic(pype.api.Extractor):
|
||||
|
|
@ -7,7 +7,7 @@ from maya import cmds
|
|||
|
||||
import avalon.maya
|
||||
import pype.api
|
||||
from pype.hosts.maya import lib
|
||||
from pype.hosts.maya.api import lib
|
||||
|
||||
|
||||
def massage_ma_file(path):
|
||||
|
|
@ -14,7 +14,7 @@ import avalon.maya
|
|||
from avalon import io, api
|
||||
|
||||
import pype.api
|
||||
from pype.hosts.maya import lib
|
||||
from pype.hosts.maya.api import lib
|
||||
|
||||
# Modes for transfer
|
||||
COPY = 1
|
||||
|
|
@ -6,7 +6,7 @@ from maya import cmds
|
|||
|
||||
import avalon.maya
|
||||
import pype.api
|
||||
from pype.hosts.maya import lib
|
||||
from pype.hosts.maya.api import lib
|
||||
|
||||
|
||||
class ExtractModel(pype.api.Extractor):
|
||||
Some files were not shown because too many files have changed in this diff.