mirror of https://github.com/ynput/ayon-core.git
synced 2025-12-24 21:04:40 +01:00
Merged in bugfix/PYPE-711-nk-nks-eallin-fixes (pull request #477)
little changes, good to release
Approved-by: Milan Kolar <milan@orbi.tools>
This commit is contained in: commit 2e1db915aa
11 changed files with 185 additions and 62 deletions
pype/nuke/lib.py (103 changed lines)
@@ -374,7 +374,7 @@ def create_write_node(name, data, input=None, prenodes=None):
    now_node.setInput(0, prev_node)

    # imprinting group node
-   avalon.nuke.imprint(GN, data["avalon"], tab="Pype")
+   avalon.nuke.imprint(GN, data["avalon"])

    divider = nuke.Text_Knob('')
    GN.addKnob(divider)
@@ -645,15 +645,105 @@ class WorkfileSettings(object):
            write_dict (dict): nuke write node as dictionary

        '''
        # TODO: complete this function so any write node in
        # scene will have fixed colorspace following presets for the project
        if not isinstance(write_dict, dict):
            msg = "set_root_colorspace(): argument should be dictionary"
            nuke.message(msg)
            log.error(msg)
            return

        log.debug("__ set_writes_colorspace(): {}".format(write_dict))
        from avalon.nuke import get_avalon_knob_data

        for node in nuke.allNodes():

            if node.Class() in ["Viewer", "Dot"]:
                continue

            # get data from avalon knob
            avalon_knob_data = get_avalon_knob_data(node, ["avalon:", "ak:"])

            if not avalon_knob_data:
                continue

            if avalon_knob_data["id"] != "pyblish.avalon.instance":
                continue

            # establish families
            families = [avalon_knob_data["family"]]
            if avalon_knob_data.get("families"):
                families.append(avalon_knob_data.get("families"))

            # except disabled nodes but exclude backdrops in test
            for fmly, knob in write_dict.items():
                write = None
                if (fmly in families):
                    # Add all nodes in group instances.
                    if node.Class() == "Group":
                        node.begin()
                        for x in nuke.allNodes():
                            if x.Class() == "Write":
                                write = x
                        node.end()
                    elif node.Class() == "Write":
                        write = node
                    else:
                        log.warning("Wrong write node Class")

                    write["colorspace"].setValue(str(knob["colorspace"]))
                    log.info(
                        "Setting `{0}` to `{1}`".format(
                            write.name(),
                            knob["colorspace"]))

    def set_reads_colorspace(self, reads):
        """ Setting colorspace to Read nodes

        Looping trought all read nodes and tries to set colorspace based on regex rules in presets
        """
        changes = dict()
        for n in nuke.allNodes():
            file = nuke.filename(n)
            if not n.Class() == "Read":
                continue

            # load nuke presets for Read's colorspace
            read_clrs_presets = get_colorspace_preset().get(
                "nuke", {}).get("read", {})

            # check if any colorspace presets for read is mathing
            preset_clrsp = next((read_clrs_presets[k]
                                 for k in read_clrs_presets
                                 if bool(re.search(k, file))),
                                None)
            log.debug(preset_clrsp)
            if preset_clrsp is not None:
                current = n["colorspace"].value()
                future = str(preset_clrsp)
                if current != future:
                    changes.update({
                        n.name(): {
                            "from": current,
                            "to": future
                        }
                    })
        log.debug(changes)
        if changes:
            msg = "Read nodes are not set to correct colospace:\n\n"
            for nname, knobs in changes.items():
                msg += str(" - node: '{0}' is now '{1}' "
                           "but should be '{2}'\n").format(
                    nname, knobs["from"], knobs["to"]
                )

            msg += "\nWould you like to change it?"

            if nuke.ask(msg):
                for nname, knobs in changes.items():
                    n = nuke.toNode(nname)
                    n["colorspace"].setValue(knobs["to"])
                    log.info(
                        "Setting `{0}` to `{1}`".format(
                            nname,
                            knobs["to"]))

    def set_colorspace(self):
        ''' Setting colorpace following presets
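The new set_reads_colorspace above picks the first preset whose regex key matches the Read node's file path. A minimal, Nuke-free sketch of that lookup; the patterns and colorspace names below are invented examples, not project presets:

    import re

    # Hypothetical read-colorspace presets: regex pattern -> colorspace name.
    # The real values come from get_colorspace_preset()["nuke"]["read"].
    read_clrs_presets = {
        r"_[hH]264\.": "sRGB",
        r"\.exr$": "linear",
    }

    def match_read_colorspace(file_path, presets):
        # Return the colorspace of the first preset whose regex matches
        # the file path, or None when nothing matches.
        return next(
            (colorspace
             for pattern, colorspace in presets.items()
             if re.search(pattern, file_path)),
            None)

    print(match_read_colorspace("/shots/sh010_plate_h264.mov", read_clrs_presets))  # -> "sRGB"
    print(match_read_colorspace("/shots/sh010_plate.0001.exr", read_clrs_presets))  # -> "linear"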
@@ -671,6 +761,7 @@ class WorkfileSettings(object):
            msg = "set_colorspace(): missing `viewer` settings in template"
            nuke.message(msg)
            log.error(msg)

        try:
            self.set_writes_colorspace(nuke_colorspace["write"])
        except AttributeError:
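set_colorspace() hands nuke_colorspace["write"] to set_writes_colorspace(), which expects a mapping of family name to knob values and reads knob["colorspace"] per family. A small sketch of that assumed shape and the family match; the family and colorspace names are invented:

    # Assumed shape of the "write" colorspace presets: family -> knob values.
    write_dict = {
        "render": {"colorspace": "linear"},
        "prerender": {"colorspace": "linear"},
        "still": {"colorspace": "sRGB"},
    }

    # Families collected from a node's avalon knob data.
    families = ["render", "ftrack"]

    for fmly, knob in write_dict.items():
        if fmly in families:
            # the real code sets this on the Write node's colorspace knob
            print("would set Write colorspace to", knob["colorspace"])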
@@ -678,6 +769,10 @@ class WorkfileSettings(object):
            nuke.message(msg)
            log.error(msg)

+       reads = nuke_colorspace.get("read")
+       if reads:
+           self.set_reads_colorspace(reads)
+
        try:
            for key in nuke_colorspace:
                log.debug("Preset's colorspace key: {}".format(key))

@@ -4,9 +4,7 @@ import contextlib

from avalon import api, io
from pype.nuke import presets

from pype.api import Logger
log = Logger().get_logger(__name__, "nuke")
from pypeapp import config


@contextlib.contextmanager
@@ -34,14 +32,14 @@ def preserve_trim(node):
    if start_at_frame:
        node['frame_mode'].setValue("start at")
        node['frame'].setValue(str(script_start))
-       log.info("start frame of Read was set to"
-                "{}".format(script_start))
+       print("start frame of Read was set to"
+             "{}".format(script_start))

    if offset_frame:
        node['frame_mode'].setValue("offset")
        node['frame'].setValue(str((script_start + offset_frame)))
-       log.info("start frame of Read was set to"
-                "{}".format(script_start))
+       print("start frame of Read was set to"
+             "{}".format(script_start))


def loader_shift(node, frame, relative=True):
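preserve_trim wraps a file swap so the Read node's trim settings (frame_mode, frame) survive the update. A generic sketch of that save-and-restore contextmanager pattern, using a plain dict in place of a Nuke node; names here are invented:

    import contextlib

    @contextlib.contextmanager
    def preserve_knobs(node, knob_names):
        # Remember selected knob values, let the caller mutate the node,
        # then restore the remembered values afterwards.
        saved = {name: node[name] for name in knob_names}
        try:
            yield node
        finally:
            node.update(saved)

    # A plain dict stands in for a Nuke node here.
    read = {"file": "old.mov", "frame_mode": "start at", "frame": "1001"}
    with preserve_knobs(read, ["frame_mode", "frame"]):
        read["file"] = "new.mov"
        read["frame_mode"] = "expression"  # reverted on exit
    print(read)  # file is updated, frame_mode/frame are restored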
@@ -70,11 +68,37 @@ def loader_shift(node, frame, relative=True):
    return int(script_start)


+def add_review_presets_config():
+    returning = {
+        "families": list(),
+        "representations": list()
+    }
+    review_presets = config.get_presets()["plugins"]["global"]["publish"].get(
+        "ExtractReview", {})
+
+    outputs = review_presets.get("outputs", {})
+    #
+    for output, properities in outputs.items():
+        returning["representations"].append(output)
+        returning["families"] += properities.get("families", [])
+
+    return returning
+
+
class LoadMov(api.Loader):
    """Load mov file into Nuke"""
-   families = ["write", "source", "plate", "render", "review"]
-   representations = ["wipmov", "h264", "mov", "preview", "review", "mp4"]
+   presets = add_review_presets_config()
+   families = [
+       "source",
+       "plate",
+       "render",
+       "review"] + presets["families"]
+
+   representations = [
+       "mov",
+       "preview",
+       "review",
+       "mp4"] + presets["representations"]

    label = "Load mov"
    order = -10
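add_review_presets_config() folds the ExtractReview output presets into the loader's families and representations lists. A stand-alone sketch of that merge; the presets dict below is invented, while the real one is read from config.get_presets():

    # Invented stand-in for the ExtractReview presets normally found under
    # config.get_presets()["plugins"]["global"]["publish"]["ExtractReview"].
    review_presets = {
        "outputs": {
            "h264": {"families": ["review"]},
            "prores": {"families": ["delivery"]},
        }
    }

    returning = {"families": [], "representations": []}
    for output, properties in review_presets.get("outputs", {}).items():
        # every output name becomes a loadable representation,
        # and its families extend the loader's family filter
        returning["representations"].append(output)
        returning["families"] += properties.get("families", [])

    families = ["source", "plate", "render", "review"] + returning["families"]
    representations = ["mov", "preview", "review", "mp4"] + returning["representations"]
    print(families)
    print(representations)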
@@ -115,7 +139,7 @@ class LoadMov(api.Loader):

        if not file:
            repr_id = context["representation"]["_id"]
-           log.warning(
+           self.log.warning(
                "Representation id `{}` is failing to load".format(repr_id))
            return

@@ -211,7 +235,7 @@ class LoadMov(api.Loader):

        if not file:
            repr_id = representation["_id"]
-           log.warning(
+           self.log.warning(
                "Representation id `{}` is failing to load".format(repr_id))
            return

@@ -246,9 +270,10 @@ class LoadMov(api.Loader):
        colorspace = version_data.get("colorspace")

        if first is None:
-           log.warning("Missing start frame for updated version"
-                       "assuming starts at frame 0 for: "
-                       "{} ({})".format(node['name'].value(), representation))
+           self.log.warning("Missing start frame for updated version"
+                            "assuming starts at frame 0 for: "
+                            "{} ({})".format(
+                                node['name'].value(), representation))
            first = 0

        # fix handle start and end if none are available
@@ -264,7 +289,7 @@ class LoadMov(api.Loader):
        # Update the loader's path whilst preserving some values
        with preserve_trim(node):
            node["file"].setValue(file)
-           log.info("__ node['file']: {}".format(node["file"].value()))
+           self.log.info("__ node['file']: {}".format(node["file"].value()))

        # Set the global in to the start frame of the sequence
        loader_shift(node, first, relative=True)
@@ -290,7 +315,6 @@ class LoadMov(api.Loader):
        if preset_clrsp is not None:
            node["colorspace"].setValue(str(preset_clrsp))


        updated_dict = {}
        updated_dict.update({
            "representation": str(representation["_id"]),
@@ -316,7 +340,7 @@ class LoadMov(api.Loader):
        update_container(
            node, updated_dict
        )
-       log.info("udated to version: {}".format(version.get("name")))
+       self.log.info("udated to version: {}".format(version.get("name")))

    def remove(self, container):

@@ -5,10 +5,6 @@ import contextlib
from avalon import api, io
from pype.nuke import presets

from pype.api import Logger

log = Logger().get_logger(__name__, "nuke")


@contextlib.contextmanager
def preserve_trim(node):
@@ -35,14 +31,14 @@ def preserve_trim(node):
    if start_at_frame:
        node['frame_mode'].setValue("start at")
        node['frame'].setValue(str(script_start))
-       log.info("start frame of Read was set to"
-                "{}".format(script_start))
+       print("start frame of Read was set to"
+             "{}".format(script_start))

    if offset_frame:
        node['frame_mode'].setValue("offset")
        node['frame'].setValue(str((script_start + offset_frame)))
-       log.info("start frame of Read was set to"
-                "{}".format(script_start))
+       print("start frame of Read was set to"
+             "{}".format(script_start))


def loader_shift(node, frame, relative=True):
@@ -74,7 +70,7 @@ def loader_shift(node, frame, relative=True):
class LoadSequence(api.Loader):
    """Load image sequence into Nuke"""

-   families = ["write", "source", "plate", "render"]
+   families = ["render2d", "source", "plate", "render"]
    representations = ["exr", "dpx", "jpg", "jpeg", "png"]

    label = "Load sequence"
@@ -91,7 +87,7 @@ class LoadSequence(api.Loader):
        version = context['version']
        version_data = version.get("data", {})

-       log.info("version_data: {}\n".format(version_data))
+       self.log.info("version_data: {}\n".format(version_data))

        self.first_frame = int(nuke.root()["first_frame"].getValue())
        self.handle_start = version_data.get("handleStart", 0)
@@ -111,7 +107,7 @@ class LoadSequence(api.Loader):

        if not file:
            repr_id = context["representation"]["_id"]
-           log.warning(
+           self.log.warning(
                "Representation id `{}` is failing to load".format(repr_id))
            return

@@ -242,7 +238,7 @@ class LoadSequence(api.Loader):

        if not file:
            repr_id = representation["_id"]
-           log.warning(
+           self.log.warning(
                "Representation id `{}` is failing to load".format(repr_id))
            return

@@ -277,9 +273,10 @@ class LoadSequence(api.Loader):
        last = version_data.get("frameEnd")

        if first is None:
-           log.warning("Missing start frame for updated version"
-                       "assuming starts at frame 0 for: "
-                       "{} ({})".format(node['name'].value(), representation))
+           self.log.warning("Missing start frame for updated version"
+                            "assuming starts at frame 0 for: "
+                            "{} ({})".format(
+                                node['name'].value(), representation))
            first = 0

        first -= self.handle_start
@@ -288,7 +285,7 @@ class LoadSequence(api.Loader):
        # Update the loader's path whilst preserving some values
        with preserve_trim(node):
            node["file"].setValue(file)
-           log.info("__ node['file']: {}".format(node["file"].value()))
+           self.log.info("__ node['file']: {}".format(node["file"].value()))

        # Set the global in to the start frame of the sequence
        loader_shift(node, first, relative=True)
@@ -328,7 +325,7 @@ class LoadSequence(api.Loader):
            node,
            updated_dict
        )
-       log.info("udated to version: {}".format(version.get("name")))
+       self.log.info("udated to version: {}".format(version.get("name")))

    def remove(self, container):

@@ -105,7 +105,6 @@ class CollectClips(api.ContextPlugin):
                "asset": asset,
                "family": "clip",
                "families": [],
                "handles": 0,
                "handleStart": projectdata.get("handleStart", 0),
                "handleEnd": projectdata.get("handleEnd", 0),
                "version": int(version)})

@@ -11,7 +11,9 @@ class CollectVideoTracksLuts(pyblish.api.InstancePlugin):

    def process(self, instance):

-       self.log.debug("Finding soft effect for subset: `{}`".format(instance.data.get("subset")))
+       self.log.debug(
+           "Finding soft effect for subset: `{}`".format(
+               instance.data.get("subset")))

        # taking active sequence
        subset = instance.data.get("subset")
@@ -41,8 +43,12 @@ class CollectVideoTracksLuts(pyblish.api.InstancePlugin):

        if len(instance.data.get("effectTrackItems", {}).keys()) > 0:
            instance.data["families"] += ["lut"]
-           self.log.debug("effects.keys: {}".format(instance.data.get("effectTrackItems", {}).keys()))
-           self.log.debug("effects: {}".format(instance.data.get("effectTrackItems", {})))
+           self.log.debug(
+               "effects.keys: {}".format(
+                   instance.data.get("effectTrackItems", {}).keys()))
+           self.log.debug(
+               "effects: {}".format(
+                   instance.data.get("effectTrackItems", {})))

    def add_effect(self, instance, track_index, item):
        track = item.parentTrack().name()

@@ -24,7 +24,6 @@ class CollectClipHandles(api.ContextPlugin):
                continue

            # get handles
            handles = int(instance.data["handles"])
            handle_start = int(instance.data["handleStart"])
            handle_end = int(instance.data["handleEnd"])

@@ -38,19 +37,16 @@ class CollectClipHandles(api.ContextPlugin):
                self.log.debug("Adding to shared assets: `{}`".format(
                    instance.data["name"]))
                asset_shared.update({
                    "handles": handles,
                    "handleStart": handle_start,
                    "handleEnd": handle_end
                })

        for instance in filtered_instances:
            if not instance.data.get("main") and not instance.data.get("handleTag"):
                self.log.debug("Synchronize handles on: `{}`".format(
                    instance.data["name"]))
                name = instance.data["asset"]
                s_asset_data = assets_shared.get(name)
                instance.data["handles"] = s_asset_data.get("handles", 0)
                instance.data["handleStart"] = s_asset_data.get(
                    "handleStart", 0
                )
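The collector above first stores the handle values of the "main" clip instances into a shared per-asset dict, then copies them onto the remaining instances of the same asset. A compact sketch of that two-pass synchronization with plain dicts; the instance and asset names are invented:

    # Invented instances; the real ones are pyblish instances with .data dicts.
    instances = [
        {"name": "sh010_main", "asset": "sh010", "main": True,
         "handleStart": 10, "handleEnd": 10},
        {"name": "sh010_review", "asset": "sh010", "main": False},
    ]

    # First pass: main instances publish their handles per asset.
    assets_shared = {}
    for inst in instances:
        if inst.get("main"):
            assets_shared[inst["asset"]] = {
                "handleStart": inst.get("handleStart", 0),
                "handleEnd": inst.get("handleEnd", 0),
            }

    # Second pass: the other instances inherit the shared handles.
    for inst in instances:
        if not inst.get("main"):
            shared = assets_shared.get(inst["asset"], {})
            inst["handleStart"] = shared.get("handleStart", 0)
            inst["handleEnd"] = shared.get("handleEnd", 0)

    print(instances[1])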
@@ -263,7 +263,6 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
                # get custom attributes of the shot
                if instance.data.get("main"):
                    in_info['custom_attributes'] = {
                        'handles': int(instance.data.get('handles', 0)),
                        "handleStart": handle_start,
                        "handleEnd": handle_end,
                        "frameStart": instance.data["frameStart"],

@@ -134,7 +134,6 @@ class CollectPlatesData(api.InstancePlugin):

        # add to data of representation
        version_data.update({
            "handles": version_data['handleStart'],
            "colorspace": item.sourceMediaColourTransform(),
            "colorspaceScript": instance.context.data["colorspace"],
            "families": [f for f in families if 'ftrack' not in f],
@@ -156,8 +155,9 @@ class CollectPlatesData(api.InstancePlugin):
            ext=ext
        )

-       start_frame = source_first_frame + instance.data["sourceInH"]
-       duration = instance.data["sourceOutH"] - instance.data["sourceInH"]
+       start_frame = int(source_first_frame + instance.data["sourceInH"])
+       duration = int(
+           instance.data["sourceOutH"] - instance.data["sourceInH"])
        end_frame = start_frame + duration
        self.log.debug("start_frame: `{}`".format(start_frame))
        self.log.debug("end_frame: `{}`".format(end_frame))

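The int() casts above matter because the handle-adjusted source in/out points can arrive as floats from the editorial data. A tiny worked example of the start/duration/end arithmetic with made-up values:

    # Made-up values standing in for the Hiero clip data.
    source_first_frame = 1001
    source_in_h = 12.0    # sourceInH, handle-adjusted, may be a float
    source_out_h = 111.0  # sourceOutH

    start_frame = int(source_first_frame + source_in_h)  # 1013
    duration = int(source_out_h - source_in_h)           # 99
    end_frame = start_frame + duration                   # 1112

    print(start_frame, duration, end_frame)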
@@ -125,7 +125,7 @@ class CollectReviews(api.InstancePlugin):
            thumb_path,
            format='png'
        )


        self.log.debug("__ sourceIn: `{}`".format(instance.data["sourceIn"]))
        self.log.debug("__ thumbnail: `{}`, frame: `{}`".format(thumbnail, thumb_frame))

@@ -145,7 +145,10 @@ class CollectReviews(api.InstancePlugin):
        item = instance.data["item"]

        transfer_data = [
-           "handleStart", "handleEnd", "sourceIn", "sourceOut", "frameStart", "frameEnd", "sourceInH", "sourceOutH", "clipIn", "clipOut", "clipInH", "clipOutH", "asset", "track", "version"
+           "handleStart", "handleEnd", "sourceIn", "sourceOut",
+           "frameStart", "frameEnd", "sourceInH", "sourceOutH",
+           "clipIn", "clipOut", "clipInH", "clipOutH", "asset",
+           "track", "version"
        ]

        version_data = dict()
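transfer_data above lists the instance keys that get copied into version_data before publishing. A minimal sketch of that selective copy, using a few of the listed keys and a made-up instance data dict:

    transfer_data = [
        "handleStart", "handleEnd", "sourceIn", "sourceOut",
        "frameStart", "frameEnd", "asset", "track", "version"
    ]

    # Made-up instance data; the real dict lives on the pyblish instance.
    instance_data = {
        "handleStart": 10, "handleEnd": 10,
        "sourceIn": 86400, "sourceOut": 86499,
        "frameStart": 1001, "frameEnd": 1100,
        "asset": "sh010", "track": "plates", "version": 3,
        "item": "<hiero track item>",  # not transferred
    }

    # Copy only the listed keys into the version data.
    version_data = {key: instance_data[key] for key in transfer_data}
    print(version_data)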
@@ -154,7 +157,6 @@ class CollectReviews(api.InstancePlugin):

        # add to data of representation
        version_data.update({
            "handles": version_data['handleStart'],
            "colorspace": item.sourceMediaColourTransform(),
            "families": instance.data["families"],
            "subset": instance.data["subset"],

@@ -38,7 +38,9 @@ class CollectClipTagHandles(api.ContextPlugin):

                # gets arguments if there are any
                t_args = t_metadata.get("tag.args", "")
-               assert t_args, self.log.error("Tag with Handles is missing Args. Use only handle start/end")
+               assert t_args, self.log.error(
+                   "Tag with Handles is missing Args. "
+                   "Use only handle start/end")

                t_args = json.loads(t_args.replace("'", "\""))
                # add in start
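The json.loads(t_args.replace(...)) line above turns the tag's args string, which tends to use single quotes, into a dict. A small stand-alone sketch with an invented args string:

    import json

    # Invented tag metadata; the real value comes from the tag's "tag.args".
    t_args = "{'handleStart': 10, 'handleEnd': 12}"

    # The args string is single-quoted, so swap quotes before parsing as JSON.
    handles = json.loads(t_args.replace("'", "\""))

    handle_start = int(handles.get("handleStart", 0))
    handle_end = int(handles.get("handleEnd", 0))
    print(handle_start, handle_end)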
@@ -55,8 +57,8 @@ class CollectClipTagHandles(api.ContextPlugin):

                # adding handles to asset_shared on context
                if instance.data.get("handleEnd"):
-                   assets_shared_a["handleEnd"] = instance.data["handleEnd"]
+                   assets_shared_a[
+                       "handleEnd"] = instance.data["handleEnd"]
                if instance.data.get("handleStart"):
-                   assets_shared_a["handleStart"] = instance.data["handleStart"]
-               if instance.data.get("handles"):
-                   assets_shared_a["handles"] = instance.data["handles"]
+                   assets_shared_a[
+                       "handleStart"] = instance.data["handleStart"]

@@ -6,6 +6,7 @@ import pyblish.api
import tempfile
from avalon import io, api


class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
    """Collect video tracks effects into context."""

@@ -17,9 +18,12 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
        item = instance.data["item"]
        effects = instance.data.get("effectTrackItems")

-       instance.data["families"] = [f for f in instance.data.get("families", []) if f not in ["lut"]]
+       instance.data["families"] = [f for f in instance.data.get(
+           "families", []) if f not in ["lut"]]

-       self.log.debug("___ instance.data[families]: `{}`".format(instance.data["families"]))
+       self.log.debug(
+           "__ instance.data[families]: `{}`".format(
+               instance.data["families"]))

        # skip any without effects
        if not effects:
@@ -102,7 +106,6 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):

        # add to data of representation
        version_data.update({
            "handles": version_data['handleStart'],
            "colorspace": item.sourceMediaColourTransform(),
            "colorspaceScript": instance.context.data["colorspace"],
            "families": ["plate", "lut"],
@@ -132,7 +135,7 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):

    def copy_linked_files(self, effect, dst_dir):
        for k, v in effect["node"].items():
-           if k in "file" and v is not '':
+           if k in "file" and v != '':
                base_name = os.path.basename(v)
                dst = os.path.join(dst_dir, base_name).replace("\\", "/")