+
+
+
+
+
+
+This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome!
\ No newline at end of file
diff --git a/openpype/cli.py b/openpype/cli.py
index cbeb7fef9b..2aa4a46929 100644
--- a/openpype/cli.py
+++ b/openpype/cli.py
@@ -20,6 +20,10 @@ from .pype_commands import PypeCommands
"to list staging versions."))
@click.option("--validate-version", expose_value=False,
help="validate given version integrity")
+@click.option("--debug", is_flag=True, expose_value=False,
+ help=("Enable debug"))
+@click.option("--verbose", expose_value=False,
+ help=("Change OpenPype log level (debug - critical or 0-50)"))
def main(ctx):
"""Pype is main command serving as entry point to pipeline system.
@@ -49,18 +53,13 @@ def traypublisher():
@main.command()
-@click.option("-d", "--debug",
- is_flag=True, help=("Run pype tray in debug mode"))
-def tray(debug=False):
+def tray():
"""Launch pype tray.
Default action of pype command is to launch tray widget to control basic
aspects of pype. See documentation for more information.
-
- Running pype with `--debug` will result in lot of information useful for
- debugging to be shown in console.
"""
- PypeCommands().launch_tray(debug)
+ PypeCommands().launch_tray()
@PypeCommands.add_modules
@@ -75,7 +74,6 @@ def module(ctx):
@main.command()
-@click.option("-d", "--debug", is_flag=True, help="Print debug messages")
@click.option("--ftrack-url", envvar="FTRACK_SERVER",
help="Ftrack server url")
@click.option("--ftrack-user", envvar="FTRACK_API_USER",
@@ -88,8 +86,7 @@ def module(ctx):
help="Clockify API key.")
@click.option("--clockify-workspace", envvar="CLOCKIFY_WORKSPACE",
help="Clockify workspace")
-def eventserver(debug,
- ftrack_url,
+def eventserver(ftrack_url,
ftrack_user,
ftrack_api_key,
legacy,
@@ -100,8 +97,6 @@ def eventserver(debug,
This should be ideally used by system service (such us systemd or upstart
on linux and window service).
"""
- if debug:
- os.environ["OPENPYPE_DEBUG"] = "1"
PypeCommands().launch_eventservercli(
ftrack_url,
@@ -114,12 +109,11 @@ def eventserver(debug,
@main.command()
-@click.option("-d", "--debug", is_flag=True, help="Print debug messages")
@click.option("-h", "--host", help="Host", default=None)
@click.option("-p", "--port", help="Port", default=None)
@click.option("-e", "--executable", help="Executable")
@click.option("-u", "--upload_dir", help="Upload dir")
-def webpublisherwebserver(debug, executable, upload_dir, host=None, port=None):
+def webpublisherwebserver(executable, upload_dir, host=None, port=None):
"""Starts webserver for communication with Webpublish FR via command line
OP must be congigured on a machine, eg. OPENPYPE_MONGO filled AND
@@ -127,8 +121,6 @@ def webpublisherwebserver(debug, executable, upload_dir, host=None, port=None):
Expect "pype.club" user created on Ftrack.
"""
- if debug:
- os.environ["OPENPYPE_DEBUG"] = "1"
PypeCommands().launch_webpublisher_webservercli(
upload_dir=upload_dir,
@@ -164,38 +156,34 @@ def extractenvironments(output_json_path, project, asset, task, app, envgroup):
@main.command()
@click.argument("paths", nargs=-1)
-@click.option("-d", "--debug", is_flag=True, help="Print debug messages")
@click.option("-t", "--targets", help="Targets module", default=None,
multiple=True)
@click.option("-g", "--gui", is_flag=True,
help="Show Publish UI", default=False)
-def publish(debug, paths, targets, gui):
+def publish(paths, targets, gui):
"""Start CLI publishing.
Publish collects json from paths provided as an argument.
More than one path is allowed.
"""
- if debug:
- os.environ["OPENPYPE_DEBUG"] = "1"
+
PypeCommands.publish(list(paths), targets, gui)
@main.command()
@click.argument("path")
-@click.option("-d", "--debug", is_flag=True, help="Print debug messages")
@click.option("-h", "--host", help="Host")
@click.option("-u", "--user", help="User email address")
@click.option("-p", "--project", help="Project")
@click.option("-t", "--targets", help="Targets", default=None,
multiple=True)
-def remotepublishfromapp(debug, project, path, host, user=None, targets=None):
+def remotepublishfromapp(project, path, host, user=None, targets=None):
"""Start CLI publishing.
Publish collects json from paths provided as an argument.
More than one path is allowed.
"""
- if debug:
- os.environ["OPENPYPE_DEBUG"] = "1"
+
PypeCommands.remotepublishfromapp(
project, path, host, user, targets=targets
)
@@ -203,24 +191,21 @@ def remotepublishfromapp(debug, project, path, host, user=None, targets=None):
@main.command()
@click.argument("path")
-@click.option("-d", "--debug", is_flag=True, help="Print debug messages")
@click.option("-u", "--user", help="User email address")
@click.option("-p", "--project", help="Project")
@click.option("-t", "--targets", help="Targets", default=None,
multiple=True)
-def remotepublish(debug, project, path, user=None, targets=None):
+def remotepublish(project, path, user=None, targets=None):
"""Start CLI publishing.
Publish collects json from paths provided as an argument.
More than one path is allowed.
"""
- if debug:
- os.environ["OPENPYPE_DEBUG"] = "1"
+
PypeCommands.remotepublish(project, path, user, targets=targets)
@main.command()
-@click.option("-d", "--debug", is_flag=True, help="Print debug messages")
@click.option("-p", "--project", required=True,
help="name of project asset is under")
@click.option("-a", "--asset", required=True,
@@ -228,7 +213,7 @@ def remotepublish(debug, project, path, user=None, targets=None):
@click.option("--path", required=True,
help="path where textures are found",
type=click.Path(exists=True))
-def texturecopy(debug, project, asset, path):
+def texturecopy(project, asset, path):
"""Copy specified textures to provided asset path.
It validates if project and asset exists. Then it will use speedcopy to
@@ -239,8 +224,7 @@ def texturecopy(debug, project, asset, path):
Result will be copied without directory structure so it will be flat then.
Nothing is written to database.
"""
- if debug:
- os.environ["OPENPYPE_DEBUG"] = "1"
+
PypeCommands().texture_copy(project, asset, path)
@@ -389,11 +373,9 @@ def runtests(folder, mark, pyargs, test_data_folder, persist, app_variant,
@main.command()
-@click.option("-d", "--debug",
- is_flag=True, help=("Run process in debug mode"))
@click.option("-a", "--active_site", required=True,
help="Name of active stie")
-def syncserver(debug, active_site):
+def syncserver(active_site):
"""Run sync site server in background.
Some Site Sync use cases need to expose site to another one.
@@ -408,8 +390,7 @@ def syncserver(debug, active_site):
Settings (configured by starting OP Tray with env
var OPENPYPE_LOCAL_ID set to 'active_site'.
"""
- if debug:
- os.environ["OPENPYPE_DEBUG"] = "1"
+
PypeCommands().syncserver(active_site)
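The per-command `-d/--debug` flags removed above are superseded by the global `--debug`/`--verbose` options declared with `expose_value=False`, meaning they are consumed before command dispatch instead of being passed into each command. The diff does not show where they are handled; a minimal sketch of that Click pattern, with the callback and env-var handling as assumptions:

```python
import os
import click


def _set_debug(ctx, param, value):
    # Assumption: the real handler likely lives in OpenPype's bootstrap;
    # shown here only to illustrate the eager/expose_value=False pattern.
    if value:
        os.environ["OPENPYPE_DEBUG"] = "1"


@click.group()
@click.option("--debug", is_flag=True, expose_value=False, is_eager=True,
              callback=_set_debug, help="Enable debug")
def main():
    """Entry point; subcommands no longer need their own -d/--debug."""


@main.command()
def tray():
    click.echo("OPENPYPE_DEBUG={}".format(os.environ.get("OPENPYPE_DEBUG")))


if __name__ == "__main__":
    main()
```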
diff --git a/openpype/hosts/aftereffects/api/__init__.py b/openpype/hosts/aftereffects/api/__init__.py
index cea1bdc023..2ad1255d27 100644
--- a/openpype/hosts/aftereffects/api/__init__.py
+++ b/openpype/hosts/aftereffects/api/__init__.py
@@ -16,7 +16,10 @@ from .pipeline import (
uninstall,
list_instances,
remove_instance,
- containerise
+ containerise,
+ get_context_data,
+ update_context_data,
+ get_context_title
)
from .workio import (
@@ -51,6 +54,9 @@ __all__ = [
"list_instances",
"remove_instance",
"containerise",
+ "get_context_data",
+ "update_context_data",
+ "get_context_title",
"file_extensions",
"has_unsaved_changes",
diff --git a/openpype/hosts/aftereffects/api/extension.zxp b/openpype/hosts/aftereffects/api/extension.zxp
index 389d74505d..0ed799991e 100644
Binary files a/openpype/hosts/aftereffects/api/extension.zxp and b/openpype/hosts/aftereffects/api/extension.zxp differ
diff --git a/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml b/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml
index 668cb3fc24..a39f5781bb 100644
--- a/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml
+++ b/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml
@@ -1,5 +1,5 @@
-
diff --git a/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx b/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx
index 8f82c9709d..91df433908 100644
--- a/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx
+++ b/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx
@@ -417,7 +417,9 @@ function getRenderInfo(){
var file_url = item.file.toString();
return JSON.stringify({
- "file_name": file_url
+ "file_name": file_url,
+ "width": render_item.comp.width,
+ "height": render_item.comp.height
})
}
diff --git a/openpype/hosts/aftereffects/api/pipeline.py b/openpype/hosts/aftereffects/api/pipeline.py
index 73aea2da11..3a41b4f26d 100644
--- a/openpype/hosts/aftereffects/api/pipeline.py
+++ b/openpype/hosts/aftereffects/api/pipeline.py
@@ -31,24 +31,6 @@ LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
-def check_inventory():
- if not lib.any_outdated():
- return
-
- # Warn about outdated containers.
- print("Starting new QApplication..")
- app = QtWidgets.QApplication(sys.argv)
- message_box = QtWidgets.QMessageBox()
- message_box.setIcon(QtWidgets.QMessageBox.Warning)
- msg = "There are outdated containers in the scene."
- message_box.setText(msg)
- message_box.exec_()
-
-
-def application_launch():
- check_inventory()
-
-
def install():
print("Installing Pype config...")
@@ -72,6 +54,11 @@ def uninstall():
deregister_creator_plugin_path(CREATE_PATH)
+def application_launch():
+ """Triggered after start of app"""
+ check_inventory()
+
+
def on_pyblish_instance_toggled(instance, old_value, new_value):
"""Toggle layer visibility on instance toggles."""
instance[0].Visible = new_value
@@ -106,65 +93,6 @@ def get_asset_settings():
}
-def containerise(name,
- namespace,
- comp,
- context,
- loader=None,
- suffix="_CON"):
- """
- Containerisation enables a tracking of version, author and origin
- for loaded assets.
-
- Creates dictionary payloads that gets saved into file metadata. Each
- container contains of who loaded (loader) and members (single or multiple
- in case of background).
-
- Arguments:
- name (str): Name of resulting assembly
- namespace (str): Namespace under which to host container
- comp (Comp): Composition to containerise
- context (dict): Asset information
- loader (str, optional): Name of loader used to produce this container.
- suffix (str, optional): Suffix of container, defaults to `_CON`.
-
- Returns:
- container (str): Name of container assembly
- """
- data = {
- "schema": "openpype:container-2.0",
- "id": AVALON_CONTAINER_ID,
- "name": name,
- "namespace": namespace,
- "loader": str(loader),
- "representation": str(context["representation"]["_id"]),
- "members": comp.members or [comp.id]
- }
-
- stub = get_stub()
- stub.imprint(comp, data)
-
- return comp
-
-
-def _get_stub():
- """
- Handle pulling stub from PS to run operations on host
- Returns:
- (AEServerStub) or None
- """
- try:
- stub = get_stub() # only after Photoshop is up
- except lib.ConnectionNotEstablishedYet:
- print("Not connected yet, ignoring")
- return
-
- if not stub.get_active_document_name():
- return
-
- return stub
-
-
def ls():
"""Yields containers from active AfterEffects document.
@@ -205,6 +133,78 @@ def ls():
yield data
+def check_inventory():
+ """Checks loaded containers if they are of highest version"""
+ if not lib.any_outdated():
+ return
+
+ host = pyblish.api.registered_host()
+ outdated_containers = []
+ for container in host.ls():
+ representation = container['representation']
+ representation_doc = io.find_one(
+ {
+ "_id": io.ObjectId(representation),
+ "type": "representation"
+ },
+ projection={"parent": True}
+ )
+ if representation_doc and not lib.is_latest(representation_doc):
+ outdated_containers.append(container)
+
+ # Warn about outdated containers.
+ print("Starting new QApplication..")
+ _app = QtWidgets.QApplication(sys.argv)
+
+ message_box = QtWidgets.QMessageBox()
+ message_box.setIcon(QtWidgets.QMessageBox.Warning)
+ msg = "There are outdated containers in the scene."
+ message_box.setText(msg)
+ message_box.exec_()
+
+
+def containerise(name,
+ namespace,
+ comp,
+ context,
+ loader=None,
+ suffix="_CON"):
+ """
+ Containerisation enables tracking of version, author and origin
+ for loaded assets.
+
+ Creates dictionary payloads that get saved into file metadata. Each
+ container records who loaded it (loader) and its members (single or
+ multiple in the case of backgrounds).
+
+ Arguments:
+ name (str): Name of resulting assembly
+ namespace (str): Namespace under which to host container
+ comp (AEItem): Composition to containerise
+ context (dict): Asset information
+ loader (str, optional): Name of loader used to produce this container.
+ suffix (str, optional): Suffix of container, defaults to `_CON`.
+
+ Returns:
+ container (str): Name of container assembly
+ """
+ data = {
+ "schema": "openpype:container-2.0",
+ "id": AVALON_CONTAINER_ID,
+ "name": name,
+ "namespace": namespace,
+ "loader": str(loader),
+ "representation": str(context["representation"]["_id"]),
+ "members": comp.members or [comp.id]
+ }
+
+ stub = get_stub()
+ stub.imprint(comp.id, data)
+
+ return comp
+
+
+# created instances section
def list_instances():
"""
List all created instances from current workfile which
@@ -225,16 +225,8 @@ def list_instances():
layers_meta = stub.get_metadata()
for instance in layers_meta:
- if instance.get("schema") and \
- "container" in instance.get("schema"):
- continue
-
- uuid_val = instance.get("uuid")
- if uuid_val:
- instance['uuid'] = uuid_val
- else:
- instance['uuid'] = instance.get("members")[0] # legacy
- instances.append(instance)
+ if instance.get("id") == "pyblish.avalon.instance":
+ instances.append(instance)
return instances
@@ -255,8 +247,60 @@ def remove_instance(instance):
if not stub:
return
- stub.remove_instance(instance.get("uuid"))
- item = stub.get_item(instance.get("uuid"))
- if item:
- stub.rename_item(item.id,
- item.name.replace(stub.PUBLISH_ICON, ''))
+ inst_id = instance.get("instance_id") or instance.get("uuid") # legacy
+ if not inst_id:
+ log.warning("No instance identifier for {}".format(instance))
+ return
+
+ stub.remove_instance(inst_id)
+
+ if instance.get("members"):
+ item = stub.get_item(instance["members"][0])
+ if item:
+ stub.rename_item(item.id,
+ item.name.replace(stub.PUBLISH_ICON, ''))
+
+
+# new publisher section
+def get_context_data():
+ meta = _get_stub().get_metadata()
+ for item in meta:
+ if item.get("id") == "publish_context":
+ item.pop("id")
+ return item
+
+ return {}
+
+
+def update_context_data(data, changes):
+ item = data
+ item["id"] = "publish_context"
+ _get_stub().imprint(item["id"], item)
+
+
+def get_context_title():
+ """Returns title for Creator window"""
+ import avalon.api
+
+ project_name = avalon.api.Session["AVALON_PROJECT"]
+ asset_name = avalon.api.Session["AVALON_ASSET"]
+ task_name = avalon.api.Session["AVALON_TASK"]
+ return "{}/{}/{}".format(project_name, asset_name, task_name)
+
+
+def _get_stub():
+ """
+ Handle pulling stub from AfterEffects to run operations on host
+ Returns:
+ (AEServerStub) or None
+ """
+ try:
+ stub = get_stub() # only after AfterEffects is up
+ except lib.ConnectionNotEstablishedYet:
+ print("Not connected yet, ignoring")
+ return
+
+ if not stub.get_active_document_name():
+ return
+
+ return stub
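For orientation, a hedged sketch of how the new-publisher context helpers above round-trip data through document metadata. It assumes a running AfterEffects session with the stub connected; the attribute payload is illustrative:

```python
from openpype.hosts.aftereffects.api import pipeline

# get_context_data() strips the "id" key and returns {} when nothing
# was stored yet; update_context_data() re-imprints under "publish_context".
data = pipeline.get_context_data()
data["publish_attributes"] = {"ValidateSceneSettings": {"active": True}}
pipeline.update_context_data(data, changes=None)

# Title shown in the Creator window, e.g. "myProject/sh010/compositing".
print(pipeline.get_context_title())
```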
diff --git a/openpype/hosts/aftereffects/api/workio.py b/openpype/hosts/aftereffects/api/workio.py
index 70815bda6b..d6c732285a 100644
--- a/openpype/hosts/aftereffects/api/workio.py
+++ b/openpype/hosts/aftereffects/api/workio.py
@@ -51,4 +51,4 @@ def _active_document():
print("Nothing opened")
pass
- return document_name
\ No newline at end of file
+ return document_name
diff --git a/openpype/hosts/aftereffects/api/ws_stub.py b/openpype/hosts/aftereffects/api/ws_stub.py
index b0893310c1..8719a8f46e 100644
--- a/openpype/hosts/aftereffects/api/ws_stub.py
+++ b/openpype/hosts/aftereffects/api/ws_stub.py
@@ -28,6 +28,9 @@ class AEItem(object):
workAreaDuration = attr.ib(default=None)
frameRate = attr.ib(default=None)
file_name = attr.ib(default=None)
+ instance_id = attr.ib(default=None) # New Publisher
+ width = attr.ib(default=None)
+ height = attr.ib(default=None)
class AfterEffectsServerStub():
@@ -110,11 +113,11 @@ class AfterEffectsServerStub():
self.log.debug("Couldn't find layer metadata")
- def imprint(self, item, data, all_items=None, items_meta=None):
+ def imprint(self, item_id, data, all_items=None, items_meta=None):
"""
Save item metadata to Label field of metadata of active document
Args:
- item (AEItem):
+ item_id (int|str): id of FootageItem or instance_id for workfiles
data(string): json representation for single layer
all_items (list of item): for performance, could be
injected for usage in loop, if not, single call will be
@@ -132,8 +135,9 @@ class AfterEffectsServerStub():
is_new = True
for item_meta in items_meta:
- if item_meta.get('members') \
- and str(item.id) == str(item_meta.get('members')[0]):
+ if ((item_meta.get('members') and
+ str(item_id) == str(item_meta.get('members')[0])) or
+ item_meta.get("instance_id") == item_id):
is_new = False
if data:
item_meta.update(data)
@@ -153,10 +157,12 @@ class AfterEffectsServerStub():
item_ids = [int(item.id) for item in all_items]
cleaned_data = []
for meta in result_meta:
- # for creation of instance OR loaded container
- if 'instance' in meta.get('id') or \
- int(meta.get('members')[0]) in item_ids:
- cleaned_data.append(meta)
+ # do not add instances with a nonexistent item id
+ if meta.get("members"):
+ if int(meta["members"][0]) not in item_ids:
+ continue
+
+ cleaned_data.append(meta)
payload = json.dumps(cleaned_data, indent=4)
@@ -167,7 +173,7 @@ class AfterEffectsServerStub():
def get_active_document_full_name(self):
"""
- Returns just a name of active document via ws call
+ Returns absolute path of active document via ws call
Returns(string): file name
"""
res = self.websocketserver.call(self.client.call(
@@ -314,15 +320,13 @@ class AfterEffectsServerStub():
Keep matching item in file though.
Args:
- instance_id(string): instance uuid
+ instance_id(string): instance id
"""
cleaned_data = []
for instance in self.get_metadata():
- uuid_val = instance.get("uuid")
- if not uuid_val:
- uuid_val = instance.get("members")[0] # legacy
- if uuid_val != instance_id:
+ inst_id = instance.get("instance_id") or instance.get("uuid")
+ if inst_id != instance_id:
cleaned_data.append(instance)
payload = json.dumps(cleaned_data, indent=4)
@@ -357,7 +361,7 @@ class AfterEffectsServerStub():
item_id (int):
Returns:
- (namedtuple)
+ (AEItem)
"""
res = self.websocketserver.call(self.client.call
@@ -418,7 +422,7 @@ class AfterEffectsServerStub():
""" Get render queue info for render purposes
Returns:
- (namedtuple): with 'file_name' field
+ (AEItem): with 'file_name' field
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.get_render_info'))
@@ -606,7 +610,10 @@ class AfterEffectsServerStub():
d.get('workAreaStart'),
d.get('workAreaDuration'),
d.get('frameRate'),
- d.get('file_name'))
+ d.get('file_name'),
+ d.get("instance_id"),
+ d.get("width"),
+ d.get("height"))
ret.append(item)
return ret
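Callers of `imprint()` now pass an id rather than the `AEItem` itself: either a footage/composition item id (matched through `members[0]`) or, for metadata not tied to an item such as workfile instances and the publish context, an `instance_id`. A small hedged usage sketch with illustrative ids:

```python
from openpype.hosts.aftereffects.api import get_stub

stub = get_stub()

# Item-backed metadata: matched via members[0] == item id.
item = stub.get_item(123)  # hypothetical composition id
stub.imprint(item.id, {"subset": "renderCompositingMain"})

# Item-less metadata: matched via instance_id / stored id key.
stub.imprint("publish_context", {"id": "publish_context"})
```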
diff --git a/openpype/hosts/aftereffects/plugins/create/create_local_render.py b/openpype/hosts/aftereffects/plugins/create/create_legacy_local_render.py
similarity index 64%
rename from openpype/hosts/aftereffects/plugins/create/create_local_render.py
rename to openpype/hosts/aftereffects/plugins/create/create_legacy_local_render.py
index 9d2cdcd7be..04413acbcf 100644
--- a/openpype/hosts/aftereffects/plugins/create/create_local_render.py
+++ b/openpype/hosts/aftereffects/plugins/create/create_legacy_local_render.py
@@ -1,7 +1,7 @@
-from openpype.hosts.aftereffects.plugins.create import create_render
+from openpype.hosts.aftereffects.plugins.create import create_legacy_render
-class CreateLocalRender(create_render.CreateRender):
+class CreateLocalRender(create_legacy_render.CreateRender):
""" Creator to render locally.
Created only after default render on farm. So family 'render.local' is
diff --git a/openpype/hosts/aftereffects/plugins/create/create_legacy_render.py b/openpype/hosts/aftereffects/plugins/create/create_legacy_render.py
new file mode 100644
index 0000000000..e4fbb47a33
--- /dev/null
+++ b/openpype/hosts/aftereffects/plugins/create/create_legacy_render.py
@@ -0,0 +1,62 @@
+from openpype.pipeline import create
+from openpype.pipeline import CreatorError
+from openpype.hosts.aftereffects.api import (
+ get_stub,
+ list_instances
+)
+
+
+class CreateRender(create.LegacyCreator):
+ """Render folder for publish.
+
+ Creates subsets in format 'familyTaskSubsetname',
+ eg 'renderCompositingMain'.
+
+ Create only single instance from composition at a time.
+ """
+
+ name = "renderDefault"
+ label = "Render on Farm"
+ family = "render"
+ defaults = ["Main"]
+
+ def process(self):
+ stub = get_stub() # only after After Effects is up
+ items = []
+ if (self.options or {}).get("useSelection"):
+ items = stub.get_selected_items(
+ comps=True, folders=False, footages=False
+ )
+ if len(items) > 1:
+ raise CreatorError(
+ "Please select only single composition at time."
+ )
+
+ if not items:
+ raise CreatorError((
+ "Nothing to create. Select composition "
+ "if 'useSelection' or create at least "
+ "one composition."
+ ))
+
+ existing_subsets = [
+ instance['subset'].lower()
+ for instance in list_instances()
+ ]
+
+ item = items.pop()
+ if self.name.lower() in existing_subsets:
+ txt = "Instance with name \"{}\" already exists.".format(self.name)
+ raise CreatorError(txt)
+
+ self.data["members"] = [item.id]
+ self.data["uuid"] = item.id # for SubsetManager
+ self.data["subset"] = (
+ self.data["subset"]
+ .replace(stub.PUBLISH_ICON, '')
+ .replace(stub.LOADED_ICON, '')
+ )
+
+ stub.imprint(item, self.data)
+ stub.set_label_color(item.id, 14) # Cyan options 0 - 16
+ stub.rename_item(item.id, stub.PUBLISH_ICON + self.data["subset"])
diff --git a/openpype/hosts/aftereffects/plugins/create/create_render.py b/openpype/hosts/aftereffects/plugins/create/create_render.py
index 831085a5f1..78d43d259a 100644
--- a/openpype/hosts/aftereffects/plugins/create/create_render.py
+++ b/openpype/hosts/aftereffects/plugins/create/create_render.py
@@ -1,38 +1,71 @@
+from avalon import api as avalon_api
+
+from openpype import resources
+from openpype.lib import BoolDef, UISeparatorDef
+from openpype.hosts.aftereffects import api
from openpype.pipeline import (
- CreatorError,
- LegacyCreator
-)
-from openpype.hosts.aftereffects.api import (
- get_stub,
- list_instances
+ Creator,
+ CreatedInstance,
+ CreatorError
)
-class CreateRender(LegacyCreator):
- """Render folder for publish.
-
- Creates subsets in format 'familyTaskSubsetname',
- eg 'renderCompositingMain'.
-
- Create only single instance from composition at a time.
- """
-
- name = "renderDefault"
- label = "Render on Farm"
+class RenderCreator(Creator):
+ identifier = "render"
+ label = "Render"
family = "render"
- defaults = ["Main"]
+ description = "Render creator"
- def process(self):
- stub = get_stub() # only after After Effects is up
- if (self.options or {}).get("useSelection"):
+ create_allow_context_change = True
+
+ def __init__(
+ self, create_context, system_settings, project_settings, headless=False
+ ):
+ super(RenderCreator, self).__init__(create_context, system_settings,
+ project_settings, headless)
+ self._default_variants = (project_settings["aftereffects"]
+ ["create"]
+ ["RenderCreator"]
+ ["defaults"])
+
+ def get_icon(self):
+ return resources.get_openpype_splash_filepath()
+
+ def collect_instances(self):
+ for instance_data in api.list_instances():
+ # legacy instances have family=='render' or 'renderLocal', use them
+ creator_id = (instance_data.get("creator_identifier") or
+ instance_data.get("family", '').replace("Local", ''))
+ if creator_id == self.identifier:
+ instance_data = self._handle_legacy(instance_data)
+ instance = CreatedInstance.from_existing(
+ instance_data, self
+ )
+ self._add_instance_to_context(instance)
+
+ def update_instances(self, update_list):
+ for created_inst, _changes in update_list:
+ api.get_stub().imprint(created_inst.get("instance_id"),
+ created_inst.data_to_store())
+
+ def remove_instances(self, instances):
+ for instance in instances:
+ api.remove_instance(instance)
+ self._remove_instance_from_context(instance)
+
+ def create(self, subset_name, data, pre_create_data):
+ stub = api.get_stub() # only after After Effects is up
+ if pre_create_data.get("use_selection"):
items = stub.get_selected_items(
comps=True, folders=False, footages=False
)
+ else:
+ items = stub.get_items(comps=True, folders=False, footages=False)
+
if len(items) > 1:
raise CreatorError(
"Please select only single composition at time."
)
-
if not items:
raise CreatorError((
"Nothing to create. Select composition "
@@ -40,24 +73,54 @@ class CreateRender(LegacyCreator):
"one composition."
))
- existing_subsets = [
- instance['subset'].lower()
- for instance in list_instances()
+ for inst in self.create_context.instances:
+ if subset_name == inst.subset_name:
+ raise CreatorError("{} already exists".format(
+ inst.subset_name))
+
+ data["members"] = [items[0].id]
+ new_instance = CreatedInstance(self.family, subset_name, data, self)
+ if "farm" in pre_create_data:
+ use_farm = pre_create_data["farm"]
+ new_instance.creator_attributes["farm"] = use_farm
+
+ api.get_stub().imprint(new_instance.id,
+ new_instance.data_to_store())
+ self._add_instance_to_context(new_instance)
+
+ def get_default_variants(self):
+ return self._default_variants
+
+ def get_instance_attr_defs(self):
+ return [BoolDef("farm", label="Render on farm")]
+
+ def get_pre_create_attr_defs(self):
+ output = [
+ BoolDef("use_selection", default=True, label="Use selection"),
+ UISeparatorDef(),
+ BoolDef("farm", label="Render on farm")
]
+ return output
- item = items.pop()
- if self.name.lower() in existing_subsets:
- txt = "Instance with name \"{}\" already exists.".format(self.name)
- raise CreatorError(txt)
+ def get_detail_description(self):
+ return """Creator for Render instances"""
- self.data["members"] = [item.id]
- self.data["uuid"] = item.id # for SubsetManager
- self.data["subset"] = (
- self.data["subset"]
- .replace(stub.PUBLISH_ICON, '')
- .replace(stub.LOADED_ICON, '')
- )
+ def _handle_legacy(self, instance_data):
+ """Converts old instances to new format."""
+ if not instance_data.get("members"):
+ instance_data["members"] = [instance_data.get("uuid")]
- stub.imprint(item, self.data)
- stub.set_label_color(item.id, 14) # Cyan options 0 - 16
- stub.rename_item(item.id, stub.PUBLISH_ICON + self.data["subset"])
+ if instance_data.get("uuid"):
+ # uuid not needed, replaced with unique instance_id
+ api.get_stub().remove_instance(instance_data.get("uuid"))
+ instance_data.pop("uuid")
+
+ if not instance_data.get("task"):
+ instance_data["task"] = avalon_api.Session.get("AVALON_TASK")
+
+ if not instance_data.get("creator_attributes"):
+ is_old_farm = instance_data["family"] != "renderLocal"
+ instance_data["creator_attributes"] = {"farm": is_old_farm}
+ instance_data["family"] = self.family
+
+ return instance_data
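To make `_handle_legacy()` concrete, a before/after illustration of the stored instance data; field values are assumed examples, not taken from a real scene:

```python
legacy = {
    "family": "renderLocal",           # old local-render family
    "subset": "renderCompositingMain",
    "uuid": 101,                       # old SubsetManager identifier
}

converted = {
    "family": "render",                # unified family
    "subset": "renderCompositingMain",
    "members": [101],                  # uuid moved into members, then removed
    "task": "compositing",             # filled from AVALON_TASK if missing
    "creator_attributes": {"farm": False},  # renderLocal => local render
}
```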
diff --git a/openpype/hosts/aftereffects/plugins/create/workfile_creator.py b/openpype/hosts/aftereffects/plugins/create/workfile_creator.py
new file mode 100644
index 0000000000..2d9d42ee8c
--- /dev/null
+++ b/openpype/hosts/aftereffects/plugins/create/workfile_creator.py
@@ -0,0 +1,75 @@
+from avalon import io
+
+import openpype.hosts.aftereffects.api as api
+from openpype.pipeline import (
+ AutoCreator,
+ CreatedInstance
+)
+
+
+class AEWorkfileCreator(AutoCreator):
+ identifier = "workfile"
+ family = "workfile"
+
+ def get_instance_attr_defs(self):
+ return []
+
+ def collect_instances(self):
+ for instance_data in api.list_instances():
+ creator_id = instance_data.get("creator_identifier")
+ if creator_id == self.identifier:
+ subset_name = instance_data["subset"]
+ instance = CreatedInstance(
+ self.family, subset_name, instance_data, self
+ )
+ self._add_instance_to_context(instance)
+
+ def update_instances(self, update_list):
+ # nothing to change on workfiles
+ pass
+
+ def create(self, options=None):
+ existing_instance = None
+ for instance in self.create_context.instances:
+ if instance.family == self.family:
+ existing_instance = instance
+ break
+
+ variant = ''
+ project_name = io.Session["AVALON_PROJECT"]
+ asset_name = io.Session["AVALON_ASSET"]
+ task_name = io.Session["AVALON_TASK"]
+ host_name = io.Session["AVALON_APP"]
+
+ if existing_instance is None:
+ asset_doc = io.find_one({"type": "asset", "name": asset_name})
+ subset_name = self.get_subset_name(
+ variant, task_name, asset_doc, project_name, host_name
+ )
+ data = {
+ "asset": asset_name,
+ "task": task_name,
+ "variant": variant
+ }
+ data.update(self.get_dynamic_data(
+ variant, task_name, asset_doc, project_name, host_name
+ ))
+
+ new_instance = CreatedInstance(
+ self.family, subset_name, data, self
+ )
+ self._add_instance_to_context(new_instance)
+
+ api.get_stub().imprint(new_instance.get("instance_id"),
+ new_instance.data_to_store())
+
+ elif (
+ existing_instance["asset"] != asset_name
+ or existing_instance["task"] != task_name
+ ):
+ asset_doc = io.find_one({"type": "asset", "name": asset_name})
+ subset_name = self.get_subset_name(
+ variant, task_name, asset_doc, project_name, host_name
+ )
+ existing_instance["asset"] = asset_name
+ existing_instance["task"] = task_name
diff --git a/openpype/hosts/aftereffects/plugins/load/load_background.py b/openpype/hosts/aftereffects/plugins/load/load_background.py
index be43cae44e..d346df504a 100644
--- a/openpype/hosts/aftereffects/plugins/load/load_background.py
+++ b/openpype/hosts/aftereffects/plugins/load/load_background.py
@@ -90,7 +90,7 @@ class BackgroundLoader(AfterEffectsLoader):
container["namespace"] = comp_name
container["members"] = comp.members
- stub.imprint(comp, container)
+ stub.imprint(comp.id, container)
def remove(self, container):
"""
@@ -99,10 +99,9 @@ class BackgroundLoader(AfterEffectsLoader):
Args:
container (dict): container to be removed - used to get layer_id
"""
- print("!!!! container:: {}".format(container))
stub = self.get_stub()
layer = container.pop("layer")
- stub.imprint(layer, {})
+ stub.imprint(layer.id, {})
stub.delete_item(layer.id)
def switch(self, container, representation):
diff --git a/openpype/hosts/aftereffects/plugins/load/load_file.py b/openpype/hosts/aftereffects/plugins/load/load_file.py
index 9eb9e80a2c..6ab69c6bfa 100644
--- a/openpype/hosts/aftereffects/plugins/load/load_file.py
+++ b/openpype/hosts/aftereffects/plugins/load/load_file.py
@@ -96,9 +96,9 @@ class FileLoader(AfterEffectsLoader):
# with aftereffects.maintained_selection(): # TODO
stub.replace_item(layer.id, path, stub.LOADED_ICON + layer_name)
stub.imprint(
- layer, {"representation": str(representation["_id"]),
- "name": context["subset"],
- "namespace": layer_name}
+ layer.id, {"representation": str(representation["_id"]),
+ "name": context["subset"],
+ "namespace": layer_name}
)
def remove(self, container):
@@ -109,7 +109,7 @@ class FileLoader(AfterEffectsLoader):
"""
stub = self.get_stub()
layer = container.pop("layer")
- stub.imprint(layer, {})
+ stub.imprint(layer.id, {})
stub.delete_item(layer.id)
def switch(self, container, representation):
diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_audio.py b/openpype/hosts/aftereffects/plugins/publish/collect_audio.py
index 80679725e6..8647ba498b 100644
--- a/openpype/hosts/aftereffects/plugins/publish/collect_audio.py
+++ b/openpype/hosts/aftereffects/plugins/publish/collect_audio.py
@@ -17,12 +17,11 @@ class CollectAudio(pyblish.api.ContextPlugin):
def process(self, context):
for instance in context:
- if instance.data["family"] == 'render.farm':
+ if 'render.farm' in instance.data.get("families", []):
comp_id = instance.data["comp_id"]
if not comp_id:
self.log.debug("No comp_id filled in instance")
- # @iLLiCiTiT QUESTION Should return or continue?
- return
+ continue
context.data["audioFile"] = os.path.normpath(
get_stub().get_audio_url(comp_id)
).replace("\\", "/")
diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_render.py b/openpype/hosts/aftereffects/plugins/publish/collect_render.py
index 3e44acd7e9..fa23bf92b0 100644
--- a/openpype/hosts/aftereffects/plugins/publish/collect_render.py
+++ b/openpype/hosts/aftereffects/plugins/publish/collect_render.py
@@ -21,135 +21,129 @@ class AERenderInstance(RenderInstance):
projectEntity = attr.ib(default=None)
stagingDir = attr.ib(default=None)
app_version = attr.ib(default=None)
+ publish_attributes = attr.ib(default=None)
+ file_name = attr.ib(default=None)
class CollectAERender(abstract_collect_render.AbstractCollectRender):
- order = pyblish.api.CollectorOrder + 0.400
+ order = pyblish.api.CollectorOrder + 0.405
label = "Collect After Effects Render Layers"
hosts = ["aftereffects"]
- # internal
- family_remapping = {
- "render": ("render.farm", "farm"), # (family, label)
- "renderLocal": ("render", "local")
- }
padding_width = 6
rendered_extension = 'png'
- stub = get_stub()
+ _stub = None
+
+ @classmethod
+ def get_stub(cls):
+ if not cls._stub:
+ cls._stub = get_stub()
+ return cls._stub
def get_instances(self, context):
instances = []
+ instances_to_remove = []
- app_version = self.stub.get_app_version()
+ app_version = CollectAERender.get_stub().get_app_version()
app_version = app_version[0:4]
current_file = context.data["currentFile"]
version = context.data["version"]
- asset_entity = context.data["assetEntity"]
+
project_entity = context.data["projectEntity"]
- compositions = self.stub.get_items(True)
+ compositions = CollectAERender.get_stub().get_items(True)
compositions_by_id = {item.id: item for item in compositions}
- for inst in self.stub.get_metadata():
- schema = inst.get('schema')
- # loaded asset container skip it
- if schema and 'container' in schema:
+ for inst in context:
+ if not inst.data.get("active", True):
continue
- if not inst["members"]:
- raise ValueError("Couldn't find id, unable to publish. " +
- "Please recreate instance.")
- item_id = inst["members"][0]
+ family = inst.data["family"]
+ if family not in ["render", "renderLocal"]: # legacy
+ continue
- work_area_info = self.stub.get_work_area(int(item_id))
+ item_id = inst.data["members"][0]
+
+ work_area_info = CollectAERender.get_stub().get_work_area(
+ int(item_id))
if not work_area_info:
self.log.warning("Orphaned instance, deleting metadata")
- self.stub.remove_instance(int(item_id))
+ inst_id = inst.data.get("instance_id") or item_id
+ CollectAERender.get_stub().remove_instance(inst_id)
continue
- frameStart = work_area_info.workAreaStart
-
- frameEnd = round(work_area_info.workAreaStart +
- float(work_area_info.workAreaDuration) *
- float(work_area_info.frameRate)) - 1
+ frame_start = work_area_info.workAreaStart
+ frame_end = round(work_area_info.workAreaStart +
+ float(work_area_info.workAreaDuration) *
+ float(work_area_info.frameRate)) - 1
fps = work_area_info.frameRate
# TODO add resolution when supported by extension
- if inst["family"] in self.family_remapping.keys() \
- and inst["active"]:
- remapped_family = self.family_remapping[inst["family"]]
- instance = AERenderInstance(
- family=remapped_family[0],
- families=[remapped_family[0]],
- version=version,
- time="",
- source=current_file,
- label="{} - {}".format(inst["subset"], remapped_family[1]),
- subset=inst["subset"],
- asset=context.data["assetEntity"]["name"],
- attachTo=False,
- setMembers='',
- publish=True,
- renderer='aerender',
- name=inst["subset"],
- resolutionWidth=asset_entity["data"].get(
- "resolutionWidth",
- project_entity["data"]["resolutionWidth"]),
- resolutionHeight=asset_entity["data"].get(
- "resolutionHeight",
- project_entity["data"]["resolutionHeight"]),
- pixelAspect=1,
- tileRendering=False,
- tilesX=0,
- tilesY=0,
- frameStart=frameStart,
- frameEnd=frameEnd,
- frameStep=1,
- toBeRenderedOn='deadline',
- fps=fps,
- app_version=app_version
- )
+ task_name = inst.data.get("task") # legacy
- comp = compositions_by_id.get(int(item_id))
- if not comp:
- raise ValueError("There is no composition for item {}".
- format(item_id))
- instance.comp_name = comp.name
- instance.comp_id = item_id
- instance._anatomy = context.data["anatomy"]
- instance.anatomyData = context.data["anatomyData"]
+ render_q = CollectAERender.get_stub().get_render_info()
+ if not render_q:
+ raise ValueError("No file extension set in Render Queue")
- instance.outputDir = self._get_output_dir(instance)
- instance.context = context
+ subset_name = inst.data["subset"]
+ instance = AERenderInstance(
+ family=family,
+ families=inst.data.get("families", []),
+ version=version,
+ time="",
+ source=current_file,
+ label="{} - {}".format(subset_name, family),
+ subset=subset_name,
+ asset=inst.data["asset"],
+ task=task_name,
+ attachTo=False,
+ setMembers='',
+ publish=True,
+ renderer='aerender',
+ name=subset_name,
+ resolutionWidth=render_q.width,
+ resolutionHeight=render_q.height,
+ pixelAspect=1,
+ tileRendering=False,
+ tilesX=0,
+ tilesY=0,
+ frameStart=frame_start,
+ frameEnd=frame_end,
+ frameStep=1,
+ toBeRenderedOn='deadline',
+ fps=fps,
+ app_version=app_version,
+ publish_attributes=inst.data.get("publish_attributes"),
+ file_name=render_q.file_name
+ )
- settings = get_project_settings(os.getenv("AVALON_PROJECT"))
- reviewable_subset_filter = \
- (settings["deadline"]
- ["publish"]
- ["ProcessSubmittedJobOnFarm"]
- ["aov_filter"])
+ comp = compositions_by_id.get(int(item_id))
+ if not comp:
+ raise ValueError("There is no composition for item {}".
+ format(item_id))
+ instance.outputDir = self._get_output_dir(instance)
+ instance.comp_name = comp.name
+ instance.comp_id = item_id
- if inst["family"] == "renderLocal":
- # for local renders
- instance.anatomyData["version"] = instance.version
- instance.anatomyData["subset"] = instance.subset
- instance.stagingDir = tempfile.mkdtemp()
- instance.projectEntity = project_entity
+ is_local = "renderLocal" in inst.data["family"] # legacy
+ if inst.data.get("creator_attributes"):
+ is_local = not inst.data["creator_attributes"].get("farm")
+ if is_local:
+ # for local renders
+ instance = self._update_for_local(instance, project_entity)
+ else:
+ fam = "render.farm"
+ if fam not in instance.families:
+ instance.families.append(fam)
- if self.hosts[0] in reviewable_subset_filter.keys():
- for aov_pattern in \
- reviewable_subset_filter[self.hosts[0]]:
- if re.match(aov_pattern, instance.subset):
- instance.families.append("review")
- instance.review = True
- break
-
- self.log.info("New instance:: {}".format(instance))
- instances.append(instance)
+ instances.append(instance)
+ instances_to_remove.append(inst)
+ for instance in instances_to_remove:
+ context.remove(instance)
return instances
def get_expected_files(self, render_instance):
@@ -168,15 +162,11 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
start = render_instance.frameStart
end = render_instance.frameEnd
- # pull file name from Render Queue Output module
- render_q = self.stub.get_render_info()
- if not render_q:
- raise ValueError("No file extension set in Render Queue")
- _, ext = os.path.splitext(os.path.basename(render_q.file_name))
+ _, ext = os.path.splitext(os.path.basename(render_instance.file_name))
base_dir = self._get_output_dir(render_instance)
expected_files = []
- if "#" not in render_q.file_name: # single frame (mov)W
+ if "#" not in render_instance.file_name: # single frame (mov)W
path = os.path.join(base_dir, "{}_{}_{}.{}".format(
render_instance.asset,
render_instance.subset,
@@ -216,3 +206,24 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
# for submit_publish_job
return base_dir
+
+ def _update_for_local(self, instance, project_entity):
+ """Update old saved instances to current publishing format"""
+ instance.stagingDir = tempfile.mkdtemp()
+ instance.projectEntity = project_entity
+ fam = "render.local"
+ if fam not in instance.families:
+ instance.families.append(fam)
+
+ settings = get_project_settings(os.getenv("AVALON_PROJECT"))
+ reviewable_subset_filter = (settings["deadline"]
+ ["publish"]
+ ["ProcessSubmittedJobOnFarm"]
+ ["aov_filter"].get(self.hosts[0]))
+ for aov_pattern in reviewable_subset_filter:
+ if re.match(aov_pattern, instance.subset):
+ instance.families.append("review")
+ instance.review = True
+ break
+
+ return instance
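The `get_expected_files()` hunk above keys off the Render Queue file name: a run of `#` characters marks frame-number padding (`padding_width` digits), while its absence means a single-frame/movie output. A rough, self-contained illustration of that rule; the naming pattern is simplified, not the exact OpenPype template:

```python
def expected_frames(file_name, start, end, padding=6):
    # "#" in the Render Queue output name => one file per frame.
    if "#" not in file_name:
        return [file_name]  # single frame (mov)
    stem, _, ext = file_name.rpartition(".")
    base = stem.split("#")[0].rstrip("._")
    return ["{}.{}.{}".format(base, str(f).zfill(padding), ext)
            for f in range(start, end + 1)]

print(expected_frames("comp_####.png", 1, 3))
# ['comp.000001.png', 'comp.000002.png', 'comp.000003.png']
```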
diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py b/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py
index 21a0cd7a1b..06b73f4b5d 100644
--- a/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py
+++ b/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py
@@ -11,15 +11,45 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
order = pyblish.api.CollectorOrder + 0.1
def process(self, context):
+ existing_instance = None
+ for instance in context:
+ if instance.data["family"] == "workfile":
+ self.log.debug("Workfile instance found, won't create new")
+ existing_instance = instance
+ break
+
current_file = context.data["currentFile"]
staging_dir = os.path.dirname(current_file)
scene_file = os.path.basename(current_file)
+ if existing_instance is None: # old publish
+ instance = self._get_new_instance(context, scene_file)
+ else:
+ instance = existing_instance
+
+ # creating representation
+ representation = {
+ 'name': 'aep',
+ 'ext': 'aep',
+ 'files': scene_file,
+ "stagingDir": staging_dir,
+ }
+
+ if not instance.data.get("representations"):
+ instance.data["representations"] = []
+ instance.data["representations"].append(representation)
+
+ instance.data["publish"] = instance.data["active"] # for DL
+
+ def _get_new_instance(self, context, scene_file):
+ task = api.Session["AVALON_TASK"]
version = context.data["version"]
asset_entity = context.data["assetEntity"]
project_entity = context.data["projectEntity"]
- shared_instance_data = {
+ instance_data = {
+ "active": True,
"asset": asset_entity["name"],
+ "task": task,
"frameStart": asset_entity["data"]["frameStart"],
"frameEnd": asset_entity["data"]["frameEnd"],
"handleStart": asset_entity["data"]["handleStart"],
@@ -58,20 +88,6 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
"representations": list()
})
- # adding basic script data
- instance.data.update(shared_instance_data)
+ instance.data.update(instance_data)
- # creating representation
- representation = {
- 'name': 'aep',
- 'ext': 'aep',
- 'files': scene_file,
- "stagingDir": staging_dir,
- }
-
- instance.data["representations"].append(representation)
-
- self.log.info('Publishing After Effects workfile')
-
- for i in context:
- self.log.debug(f"{i.data['families']}")
+ return instance
diff --git a/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py b/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py
index b738068a7b..7323a0b125 100644
--- a/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py
+++ b/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py
@@ -12,7 +12,7 @@ class ExtractLocalRender(openpype.api.Extractor):
order = openpype.api.Extractor.order - 0.47
label = "Extract Local Render"
hosts = ["aftereffects"]
- families = ["render"]
+ families = ["renderLocal", "render.local"]
def process(self, instance):
stub = get_stub()
diff --git a/openpype/hosts/aftereffects/plugins/publish/extract_save_scene.py b/openpype/hosts/aftereffects/plugins/publish/extract_save_scene.py
index e20598b311..eb2977309f 100644
--- a/openpype/hosts/aftereffects/plugins/publish/extract_save_scene.py
+++ b/openpype/hosts/aftereffects/plugins/publish/extract_save_scene.py
@@ -1,15 +1,16 @@
+import pyblish.api
+
import openpype.api
from openpype.hosts.aftereffects.api import get_stub
-class ExtractSaveScene(openpype.api.Extractor):
+class ExtractSaveScene(pyblish.api.ContextPlugin):
"""Save scene before extraction."""
order = openpype.api.Extractor.order - 0.48
label = "Extract Save Scene"
hosts = ["aftereffects"]
- families = ["workfile"]
- def process(self, instance):
+ def process(self, context):
stub = get_stub()
stub.save()
diff --git a/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml b/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml
index 36fa90456e..0591020ed3 100644
--- a/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml
+++ b/openpype/hosts/aftereffects/plugins/publish/help/validate_scene_settings.xml
@@ -12,6 +12,8 @@ One of the settings in a scene doesn't match to asset settings in database.
### How to repair?
Change values for {invalid_keys_str} in the scene OR change them in the asset database if they are wrong there.
+
+ In the scene, right-click the published composition and open `Composition Settings`.
### __Detailed Info__ (optional)
diff --git a/openpype/hosts/aftereffects/plugins/publish/pre_collect_render.py b/openpype/hosts/aftereffects/plugins/publish/pre_collect_render.py
new file mode 100644
index 0000000000..03ec184524
--- /dev/null
+++ b/openpype/hosts/aftereffects/plugins/publish/pre_collect_render.py
@@ -0,0 +1,54 @@
+import json
+import pyblish.api
+from openpype.hosts.aftereffects.api import list_instances
+
+
+class PreCollectRender(pyblish.api.ContextPlugin):
+ """
+ Checks if a render instance is of the old type and adds families so that
+ both existing collectors work the same way.
+
+ Could be removed in the future once no one uses the old publish.
+ """
+
+ label = "PreCollect Render"
+ order = pyblish.api.CollectorOrder + 0.400
+ hosts = ["aftereffects"]
+
+ family_remapping = {
+ "render": ("render.farm", "farm"), # (family, label)
+ "renderLocal": ("render.local", "local")
+ }
+
+ def process(self, context):
+ if context.data.get("newPublishing"):
+ self.log.debug("Not applicable for New Publisher, skip")
+ return
+
+ for inst in list_instances():
+ if inst.get("creator_attributes"):
+ raise ValueError("Instance created in New publisher, "
+ "cannot be published in Pyblish.\n"
+ "Please publish in New Publisher "
+ "or recreate instances with legacy Creators")
+
+ if inst["family"] not in self.family_remapping.keys():
+ continue
+
+ if not inst["members"]:
+ raise ValueError("Couldn't find id, unable to publish. " +
+ "Please recreate instance.")
+
+ instance = context.create_instance(inst["subset"])
+ inst["families"] = [self.family_remapping[inst["family"]][0]]
+ instance.data.update(inst)
+
+ self._debug_log(instance)
+
+ def _debug_log(self, instance):
+ def _default_json(value):
+ return str(value)
+
+ self.log.info(
+ json.dumps(instance.data, indent=4, default=_default_json)
+ )
diff --git a/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py b/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py
index 1a303f5da4..7a9356f020 100644
--- a/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py
+++ b/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py
@@ -31,7 +31,7 @@ class ValidateInstanceAssetRepair(pyblish.api.Action):
data = stub.read(instance[0])
data["asset"] = legacy_io.Session["AVALON_ASSET"]
- stub.imprint(instance[0], data)
+ stub.imprint(instance[0].instance_id, data)
class ValidateInstanceAsset(pyblish.api.InstancePlugin):
diff --git a/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py b/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py
index 273ccd295e..14e224fdc2 100644
--- a/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py
+++ b/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py
@@ -5,11 +5,15 @@ import re
import pyblish.api
-from openpype.pipeline import PublishXmlValidationError
+from openpype.pipeline import (
+ PublishXmlValidationError,
+ OptionalPyblishPluginMixin
+)
from openpype.hosts.aftereffects.api import get_asset_settings
-class ValidateSceneSettings(pyblish.api.InstancePlugin):
+class ValidateSceneSettings(OptionalPyblishPluginMixin,
+ pyblish.api.InstancePlugin):
"""
Ensures that Composition Settings (right mouse on comp) are same as
in FTrack on task.
@@ -59,15 +63,20 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
def process(self, instance):
"""Plugin entry point."""
+ # Skip the instance if is not active by data on the instance
+ if not self.is_active(instance.data):
+ return
+
expected_settings = get_asset_settings()
self.log.info("config from DB::{}".format(expected_settings))
- if any(re.search(pattern, os.getenv('AVALON_TASK'))
+ task_name = instance.data["anatomyData"]["task"]["name"]
+ if any(re.search(pattern, task_name)
for pattern in self.skip_resolution_check):
expected_settings.pop("resolutionWidth")
expected_settings.pop("resolutionHeight")
- if any(re.search(pattern, os.getenv('AVALON_TASK'))
+ if any(re.search(pattern, task_name)
for pattern in self.skip_timelines_check):
expected_settings.pop('fps', None)
expected_settings.pop('frameStart', None)
@@ -87,10 +96,14 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
duration = instance.data.get("frameEndHandle") - \
instance.data.get("frameStartHandle") + 1
- self.log.debug("filtered config::{}".format(expected_settings))
+ self.log.debug("validated items::{}".format(expected_settings))
current_settings = {
"fps": fps,
+ "frameStart": instance.data.get("frameStart"),
+ "frameEnd": instance.data.get("frameEnd"),
+ "handleStart": instance.data.get("handleStart"),
+ "handleEnd": instance.data.get("handleEnd"),
"frameStartHandle": instance.data.get("frameStartHandle"),
"frameEndHandle": instance.data.get("frameEndHandle"),
"resolutionWidth": instance.data.get("resolutionWidth"),
@@ -103,24 +116,22 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
invalid_keys = set()
for key, value in expected_settings.items():
if value != current_settings[key]:
- invalid_settings.append(
- "{} expected: {} found: {}".format(key, value,
- current_settings[key])
- )
+ msg = "'{}' expected: '{}' found: '{}'".format(
+ key, value, current_settings[key])
+
+ if key == "duration" and expected_settings.get("handleStart"):
+ msg += "Handles included in calculation. Remove " \
+ "handles in DB or extend frame range in " \
+ "Composition Setting."
+
+ invalid_settings.append(msg)
invalid_keys.add(key)
- if ((expected_settings.get("handleStart")
- or expected_settings.get("handleEnd"))
- and invalid_settings):
- msg = "Handles included in calculation. Remove handles in DB " +\
- "or extend frame range in Composition Setting."
- invalid_settings[-1]["reason"] = msg
-
- msg = "Found invalid settings:\n{}".format(
- "\n".join(invalid_settings)
- )
-
if invalid_settings:
+ msg = "Found invalid settings:\n{}".format(
+ "\n".join(invalid_settings)
+ )
+
invalid_keys_str = ",".join(invalid_keys)
break_str = " "
invalid_setting_str = "Found invalid settings: {}".\
diff --git a/openpype/hosts/houdini/api/lib.py b/openpype/hosts/houdini/api/lib.py
index 7b8a3dc46c..603519069a 100644
--- a/openpype/hosts/houdini/api/lib.py
+++ b/openpype/hosts/houdini/api/lib.py
@@ -159,7 +159,7 @@ def validate_fps():
if parent is None:
pass
else:
- dialog = popup.Popup(parent=parent)
+ dialog = popup.PopupUpdateKeys(parent=parent)
dialog.setModal(True)
dialog.setWindowTitle("Houdini scene does not match project FPS")
dialog.setMessage("Scene %i FPS does not match project %i FPS" %
@@ -167,7 +167,7 @@ def validate_fps():
dialog.setButtonText("Fix")
# on_show is the Fix button clicked callback
- dialog.on_clicked.connect(lambda: set_scene_fps(fps))
+ dialog.on_clicked_state.connect(lambda: set_scene_fps(fps))
dialog.show()
diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py
index cf09c39b21..088304ab05 100644
--- a/openpype/hosts/maya/api/lib.py
+++ b/openpype/hosts/maya/api/lib.py
@@ -2226,15 +2226,17 @@ def validate_fps():
parent = get_main_window()
- dialog = popup.Popup2(parent=parent)
+ dialog = popup.PopupUpdateKeys(parent=parent)
dialog.setModal(True)
- dialog.setWindowTitle("Maya scene not in line with project")
- dialog.setMessage("The FPS is out of sync, please fix")
+ dialog.setWindowTitle("Maya scene does not match project FPS")
+ dialog.setMessage("Scene %i FPS does not match project %i FPS" %
+ (current_fps, fps))
+ dialog.setButtonText("Fix")
# Set new text for button (add optional argument for the popup?)
toggle = dialog.widgets["toggle"]
update = toggle.isChecked()
- dialog.on_show.connect(lambda: set_scene_fps(fps, update))
+ dialog.on_clicked_state.connect(lambda: set_scene_fps(fps, update))
dialog.show()
diff --git a/openpype/hosts/maya/api/pipeline.py b/openpype/hosts/maya/api/pipeline.py
index dd05bfbb21..b0e8fac635 100644
--- a/openpype/hosts/maya/api/pipeline.py
+++ b/openpype/hosts/maya/api/pipeline.py
@@ -448,7 +448,7 @@ def on_open():
dialog.setWindowTitle("Maya scene has outdated content")
dialog.setMessage("There are outdated containers in "
"your Maya scene.")
- dialog.on_show.connect(_on_show_inventory)
+ dialog.on_clicked.connect(_on_show_inventory)
dialog.show()
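The Houdini and Maya fixes above both move to the renamed popup signals: plain `Popup` exposes `on_clicked`, while `PopupUpdateKeys` exposes `on_clicked_state`. A minimal sketch of the assumed widget shapes — the real classes live in OpenPype's tools, and the signal signatures are an inference from the connected lambdas, which ignore arguments:

```python
from Qt import QtCore, QtWidgets


class Popup(QtWidgets.QDialog):
    # Emitted when the action button is pressed.
    on_clicked = QtCore.Signal()


class PopupUpdateKeys(Popup):
    # Variant with an extra "update keys" toggle; assumed to emit its state.
    on_clicked_state = QtCore.Signal(bool)
```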
diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py
index eafb707249..4e38f811c9 100644
--- a/openpype/hosts/nuke/api/lib.py
+++ b/openpype/hosts/nuke/api/lib.py
@@ -1063,6 +1063,14 @@ def add_deadline_tab(node):
knob.setValue(0)
node.addKnob(knob)
+ knob = nuke.Text_Knob("divd", '')
+ knob.setValue('')
+ node.addKnob(knob)
+
+ knob = nuke.Boolean_Knob("suspend_publish", "Suspend publish")
+ knob.setValue(False)
+ node.addKnob(knob)
+
def get_deadline_knob_names():
return [
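A hedged sketch of how the new `suspend_publish` knob added above might be read back at publish time; the node name and the consuming logic are illustrative, only the `nuke.toNode`/knob access is real API:

```python
import nuke

node = nuke.toNode("WriteRenderMain")  # hypothetical write group name
if node is not None and node.knob("suspend_publish"):
    if node["suspend_publish"].value():
        print("Publishing suspended for {}".format(node.name()))
```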
diff --git a/openpype/hosts/nuke/api/plugin.py b/openpype/hosts/nuke/api/plugin.py
index 3ac750a48f..eaf0ab6911 100644
--- a/openpype/hosts/nuke/api/plugin.py
+++ b/openpype/hosts/nuke/api/plugin.py
@@ -1,6 +1,8 @@
import os
import random
import string
+from collections import OrderedDict
+from abc import abstractmethod
import nuke
@@ -594,3 +596,139 @@ class ExporterReviewMov(ExporterReview):
nuke.scriptSave()
return self.data
+
+
+class AbstractWriteRender(OpenPypeCreator):
+ """Abstract creator to gather similar implementation for Write creators"""
+ name = ""
+ label = ""
+ hosts = ["nuke"]
+ n_class = "Write"
+ family = "render"
+ icon = "sign-out"
+ defaults = ["Main", "Mask"]
+
+ def __init__(self, *args, **kwargs):
+ super(AbstractWriteRender, self).__init__(*args, **kwargs)
+
+ data = OrderedDict()
+
+ data["family"] = self.family
+ data["families"] = self.n_class
+
+ for k, v in self.data.items():
+ if k not in data.keys():
+ data.update({k: v})
+
+ self.data = data
+ self.nodes = nuke.selectedNodes()
+ self.log.debug("_ self.data: '{}'".format(self.data))
+
+ def process(self):
+
+ inputs = []
+ outputs = []
+ instance = nuke.toNode(self.data["subset"])
+ selected_node = None
+
+ # use selection
+ if (self.options or {}).get("useSelection"):
+ nodes = self.nodes
+
+ if len(nodes) > 1:
+ msg = ("Select only one node. "
+ "The node you want to connect to, "
+ "or tick off `Use selection`")
+ self.log.error(msg)
+ nuke.message(msg)
+ return
+
+ if len(nodes) == 0:
+ msg = (
+ "No nodes selected. Please select a single node to connect"
+ " to or tick off `Use selection`"
+ )
+ self.log.error(msg)
+ nuke.message(msg)
+ return
+
+ selected_node = nodes[0]
+ inputs = [selected_node]
+ outputs = selected_node.dependent()
+
+ if instance:
+ if (instance.name() in selected_node.name()):
+ selected_node = instance.dependencies()[0]
+
+ # if node already exist
+ if instance:
+ # collect input / outputs
+ inputs = instance.dependencies()
+ outputs = instance.dependent()
+ selected_node = inputs[0]
+ # remove old one
+ nuke.delete(instance)
+
+ # recreate new
+ write_data = {
+ "nodeclass": self.n_class,
+ "families": [self.family],
+ "avalon": self.data
+ }
+
+ # add creator data
+ creator_data = {"creator": self.__class__.__name__}
+ self.data.update(creator_data)
+ write_data.update(creator_data)
+
+ if self.presets.get('fpath_template'):
+ self.log.info("Adding template path from preset")
+ write_data.update(
+ {"fpath_template": self.presets["fpath_template"]}
+ )
+ else:
+ self.log.info("Adding template path from plugin")
+ write_data.update({
+ "fpath_template":
+ ("{work}/" + self.family + "s/nuke/{subset}"
+ "/{subset}.{frame}.{ext}")})
+
+ write_node = self._create_write_node(selected_node,
+ inputs, outputs,
+ write_data)
+
+ # relinking to collected connections
+ for i, input in enumerate(inputs):
+ write_node.setInput(i, input)
+
+ write_node.autoplace()
+
+ for output in outputs:
+ output.setInput(0, write_node)
+
+ write_node = self._modify_write_node(write_node)
+
+ return write_node
+
+ @abstractmethod
+ def _create_write_node(self, selected_node, inputs, outputs, write_data):
+ """Family dependent implementation of Write node creation
+
+ Args:
+ selected_node (nuke.Node)
+ inputs (list of nuke.Node) - input dependencies (what is connected)
+ outputs (list of nuke.Node) - output dependencies
+ write_data (dict) - values used to fill Knobs
+ Returns:
+ node (nuke.Node): group node with data as Knobs
+ """
+ pass
+
+ @abstractmethod
+ def _modify_write_node(self, write_node):
+ """Family dependent modification of created 'write_node'
+
+ Returns:
+ node (nuke.Node): group node with data as Knobs
+ """
+ pass
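
For orientation, a minimal sketch of a concrete creator built on the new base class; the class name and family here are hypothetical, while `create_write_node` and the hook signatures follow the diff above and the creators below:

import nuke
from openpype.hosts.nuke.api import plugin
from openpype.hosts.nuke.api.lib import create_write_node


class CreateWriteExample(plugin.AbstractWriteRender):
    # hypothetical creator, for illustration only
    name = "WriteExample"
    label = "Create Write Example"
    family = "render"

    def _create_write_node(self, selected_node, inputs, outputs, write_data):
        # family-specific node creation; input/output relinking is
        # handled afterwards by AbstractWriteRender.process()
        return create_write_node(self.data["subset"], write_data,
                                 input=selected_node)

    def _modify_write_node(self, write_node):
        # no family-specific tweaks in this sketch
        return write_node
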
diff --git a/openpype/hosts/nuke/plugins/__init__.py b/openpype/hosts/nuke/plugins/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/openpype/hosts/nuke/plugins/create/__init__.py b/openpype/hosts/nuke/plugins/create/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/openpype/hosts/nuke/plugins/create/create_write_prerender.py b/openpype/hosts/nuke/plugins/create/create_write_prerender.py
index 761439fdb2..7297f74c13 100644
--- a/openpype/hosts/nuke/plugins/create/create_write_prerender.py
+++ b/openpype/hosts/nuke/plugins/create/create_write_prerender.py
@@ -1,12 +1,10 @@
-from collections import OrderedDict
-
import nuke
from openpype.hosts.nuke.api import plugin
from openpype.hosts.nuke.api.lib import create_write_node
-class CreateWritePrerender(plugin.OpenPypeCreator):
+class CreateWritePrerender(plugin.AbstractWriteRender):
# change this to template preset
name = "WritePrerender"
label = "Create Write Prerender"
@@ -19,85 +17,7 @@ class CreateWritePrerender(plugin.OpenPypeCreator):
def __init__(self, *args, **kwargs):
super(CreateWritePrerender, self).__init__(*args, **kwargs)
- data = OrderedDict()
-
- data["family"] = self.family
- data["families"] = self.n_class
-
- for k, v in self.data.items():
- if k not in data.keys():
- data.update({k: v})
-
- self.data = data
- self.nodes = nuke.selectedNodes()
- self.log.debug("_ self.data: '{}'".format(self.data))
-
- def process(self):
- inputs = []
- outputs = []
- instance = nuke.toNode(self.data["subset"])
- selected_node = None
-
- # use selection
- if (self.options or {}).get("useSelection"):
- nodes = self.nodes
-
- if not (len(nodes) < 2):
- msg = ("Select only one node. The node "
- "you want to connect to, "
- "or tick off `Use selection`")
- self.log.error(msg)
- nuke.message(msg)
-
- if len(nodes) == 0:
- msg = (
- "No nodes selected. Please select a single node to connect"
- " to or tick off `Use selection`"
- )
- self.log.error(msg)
- nuke.message(msg)
-
- selected_node = nodes[0]
- inputs = [selected_node]
- outputs = selected_node.dependent()
-
- if instance:
- if (instance.name() in selected_node.name()):
- selected_node = instance.dependencies()[0]
-
- # if node already exist
- if instance:
- # collect input / outputs
- inputs = instance.dependencies()
- outputs = instance.dependent()
- selected_node = inputs[0]
- # remove old one
- nuke.delete(instance)
-
- # recreate new
- write_data = {
- "nodeclass": self.n_class,
- "families": [self.family],
- "avalon": self.data
- }
-
- # add creator data
- creator_data = {"creator": self.__class__.__name__}
- self.data.update(creator_data)
- write_data.update(creator_data)
-
- if self.presets.get('fpath_template'):
- self.log.info("Adding template path from preset")
- write_data.update(
- {"fpath_template": self.presets["fpath_template"]}
- )
- else:
- self.log.info("Adding template path from plugin")
- write_data.update({
- "fpath_template": ("{work}/prerenders/nuke/{subset}"
- "/{subset}.{frame}.{ext}")})
-
- self.log.info("write_data: {}".format(write_data))
+ def _create_write_node(self, selected_node, inputs, outputs, write_data):
reviewable = self.presets.get("reviewable")
write_node = create_write_node(
self.data["subset"],
@@ -107,15 +27,9 @@ class CreateWritePrerender(plugin.OpenPypeCreator):
review=reviewable,
linked_knobs=["channels", "___", "first", "last", "use_limit"])
- # relinking to collected connections
- for i, input in enumerate(inputs):
- write_node.setInput(i, input)
-
- write_node.autoplace()
-
- for output in outputs:
- output.setInput(0, write_node)
+ return write_node
+ def _modify_write_node(self, write_node):
# open group node
write_node.begin()
for n in nuke.allNodes():
diff --git a/openpype/hosts/nuke/plugins/create/create_write_render.py b/openpype/hosts/nuke/plugins/create/create_write_render.py
index a9c4b5341e..18a101546f 100644
--- a/openpype/hosts/nuke/plugins/create/create_write_render.py
+++ b/openpype/hosts/nuke/plugins/create/create_write_render.py
@@ -1,12 +1,10 @@
-from collections import OrderedDict
-
import nuke
from openpype.hosts.nuke.api import plugin
from openpype.hosts.nuke.api.lib import create_write_node
-class CreateWriteRender(plugin.OpenPypeCreator):
+class CreateWriteRender(plugin.AbstractWriteRender):
# change this to template preset
name = "WriteRender"
label = "Create Write Render"
@@ -19,87 +17,7 @@ class CreateWriteRender(plugin.OpenPypeCreator):
def __init__(self, *args, **kwargs):
super(CreateWriteRender, self).__init__(*args, **kwargs)
- data = OrderedDict()
-
- data["family"] = self.family
- data["families"] = self.n_class
-
- for k, v in self.data.items():
- if k not in data.keys():
- data.update({k: v})
-
- self.data = data
- self.nodes = nuke.selectedNodes()
- self.log.debug("_ self.data: '{}'".format(self.data))
-
- def process(self):
-
- inputs = []
- outputs = []
- instance = nuke.toNode(self.data["subset"])
- selected_node = None
-
- # use selection
- if (self.options or {}).get("useSelection"):
- nodes = self.nodes
-
- if not (len(nodes) < 2):
- msg = ("Select only one node. "
- "The node you want to connect to, "
- "or tick off `Use selection`")
- self.log.error(msg)
- nuke.message(msg)
- return
-
- if len(nodes) == 0:
- msg = (
- "No nodes selected. Please select a single node to connect"
- " to or tick off `Use selection`"
- )
- self.log.error(msg)
- nuke.message(msg)
- return
-
- selected_node = nodes[0]
- inputs = [selected_node]
- outputs = selected_node.dependent()
-
- if instance:
- if (instance.name() in selected_node.name()):
- selected_node = instance.dependencies()[0]
-
- # if node already exist
- if instance:
- # collect input / outputs
- inputs = instance.dependencies()
- outputs = instance.dependent()
- selected_node = inputs[0]
- # remove old one
- nuke.delete(instance)
-
- # recreate new
- write_data = {
- "nodeclass": self.n_class,
- "families": [self.family],
- "avalon": self.data
- }
-
- # add creator data
- creator_data = {"creator": self.__class__.__name__}
- self.data.update(creator_data)
- write_data.update(creator_data)
-
- if self.presets.get('fpath_template'):
- self.log.info("Adding template path from preset")
- write_data.update(
- {"fpath_template": self.presets["fpath_template"]}
- )
- else:
- self.log.info("Adding template path from plugin")
- write_data.update({
- "fpath_template": ("{work}/renders/nuke/{subset}"
- "/{subset}.{frame}.{ext}")})
-
+ def _create_write_node(self, selected_node, inputs, outputs, write_data):
# add reformat node to cut off all outside of format bounding box
# get width and height
try:
@@ -126,13 +44,7 @@ class CreateWriteRender(plugin.OpenPypeCreator):
input=selected_node,
prenodes=_prenodes)
- # relinking to collected connections
- for i, input in enumerate(inputs):
- write_node.setInput(i, input)
-
- write_node.autoplace()
-
- for output in outputs:
- output.setInput(0, write_node)
-
+ return write_node
+
+ def _modify_write_node(self, write_node):
return write_node
diff --git a/openpype/hosts/nuke/plugins/create/create_write_still.py b/openpype/hosts/nuke/plugins/create/create_write_still.py
index 0037b64ce3..d22b5eab3f 100644
--- a/openpype/hosts/nuke/plugins/create/create_write_still.py
+++ b/openpype/hosts/nuke/plugins/create/create_write_still.py
@@ -1,12 +1,10 @@
-from collections import OrderedDict
-
import nuke
from openpype.hosts.nuke.api import plugin
from openpype.hosts.nuke.api.lib import create_write_node
-class CreateWriteStill(plugin.OpenPypeCreator):
+class CreateWriteStill(plugin.AbstractWriteRender):
# change this to template preset
name = "WriteStillFrame"
label = "Create Write Still Image"
@@ -23,77 +21,8 @@ class CreateWriteStill(plugin.OpenPypeCreator):
def __init__(self, *args, **kwargs):
super(CreateWriteStill, self).__init__(*args, **kwargs)
- data = OrderedDict()
-
- data["family"] = self.family
- data["families"] = self.n_class
-
- for k, v in self.data.items():
- if k not in data.keys():
- data.update({k: v})
-
- self.data = data
- self.nodes = nuke.selectedNodes()
- self.log.debug("_ self.data: '{}'".format(self.data))
-
- def process(self):
-
- inputs = []
- outputs = []
- instance = nuke.toNode(self.data["subset"])
- selected_node = None
-
- # use selection
- if (self.options or {}).get("useSelection"):
- nodes = self.nodes
-
- if not (len(nodes) < 2):
- msg = ("Select only one node. "
- "The node you want to connect to, "
- "or tick off `Use selection`")
- self.log.error(msg)
- nuke.message(msg)
- return
-
- if len(nodes) == 0:
- msg = (
- "No nodes selected. Please select a single node to connect"
- " to or tick off `Use selection`"
- )
- self.log.error(msg)
- nuke.message(msg)
- return
-
- selected_node = nodes[0]
- inputs = [selected_node]
- outputs = selected_node.dependent()
-
- if instance:
- if (instance.name() in selected_node.name()):
- selected_node = instance.dependencies()[0]
-
- # if node already exist
- if instance:
- # collect input / outputs
- inputs = instance.dependencies()
- outputs = instance.dependent()
- selected_node = inputs[0]
- # remove old one
- nuke.delete(instance)
-
- # recreate new
- write_data = {
- "nodeclass": self.n_class,
- "families": [self.family],
- "avalon": self.data
- }
-
- # add creator data
- creator_data = {"creator": self.__class__.__name__}
- self.data.update(creator_data)
- write_data.update(creator_data)
-
- self.log.info("Adding template path from plugin")
+ def _create_write_node(self, selected_node, inputs, outputs, write_data):
+ # explicitly reset template to 'renders'; differs from the other two Write creators
write_data.update({
"fpath_template": (
"{work}/renders/nuke/{subset}/{subset}.{ext}")})
@@ -118,16 +47,9 @@ class CreateWriteStill(plugin.OpenPypeCreator):
farm=False,
linked_knobs=["channels", "___", "first", "last", "use_limit"])
- # relinking to collected connections
- for i, input in enumerate(inputs):
- write_node.setInput(i, input)
+ return write_node
- write_node.autoplace()
-
- for output in outputs:
- output.setInput(0, write_node)
-
- # link frame hold to group node
+ def _modify_write_node(self, write_node):
write_node.begin()
for n in nuke.allNodes():
# get write node
diff --git a/openpype/hosts/nuke/plugins/publish/precollect_instances.py b/openpype/hosts/nuke/plugins/publish/precollect_instances.py
index d778421bde..1a8fa3e6ad 100644
--- a/openpype/hosts/nuke/plugins/publish/precollect_instances.py
+++ b/openpype/hosts/nuke/plugins/publish/precollect_instances.py
@@ -70,6 +70,11 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin):
instance = context.create_instance(subset)
instance.append(node)
+ suspend_publish = False
+ if "suspend_publish" in node.knobs():
+ suspend_publish = node["suspend_publish"].value()
+ instance.data["suspend_publish"] = suspend_publish
+
# get review knob value
review = False
if "review" in node.knobs():
diff --git a/openpype/hosts/photoshop/api/__init__.py b/openpype/hosts/photoshop/api/__init__.py
index 17ea957066..94152b5706 100644
--- a/openpype/hosts/photoshop/api/__init__.py
+++ b/openpype/hosts/photoshop/api/__init__.py
@@ -12,7 +12,10 @@ from .pipeline import (
remove_instance,
install,
uninstall,
- containerise
+ containerise,
+ get_context_data,
+ update_context_data,
+ get_context_title
)
from .plugin import (
PhotoshopLoader,
@@ -43,6 +46,9 @@ __all__ = [
"install",
"uninstall",
"containerise",
+ "get_context_data",
+ "update_context_data",
+ "get_context_title",
# Plugin
"PhotoshopLoader",
diff --git a/openpype/hosts/photoshop/api/pipeline.py b/openpype/hosts/photoshop/api/pipeline.py
index 906418aced..fc90be8716 100644
--- a/openpype/hosts/photoshop/api/pipeline.py
+++ b/openpype/hosts/photoshop/api/pipeline.py
@@ -149,13 +149,9 @@ def list_instances():
instances = []
layers_meta = stub.get_layers_metadata()
if layers_meta:
- for key, instance in layers_meta.items():
- schema = instance.get("schema")
- if schema and "container" in schema:
- continue
-
- instance['uuid'] = key
- instances.append(instance)
+ for instance in layers_meta:
+ if instance.get("id") == "pyblish.avalon.instance":
+ instances.append(instance)
return instances
@@ -176,11 +172,18 @@ def remove_instance(instance):
if not stub:
return
- stub.remove_instance(instance.get("uuid"))
- layer = stub.get_layer(instance.get("uuid"))
- if layer:
- stub.rename_layer(instance.get("uuid"),
- layer.name.replace(stub.PUBLISH_ICON, ''))
+ inst_id = instance.get("instance_id") or instance.get("uuid") # legacy
+ if not inst_id:
+ log.warning("No instance identifier for {}".format(instance))
+ return
+
+ stub.remove_instance(inst_id)
+
+ if instance.get("members"):
+ item = stub.get_layer(instance["members"][0])
+ if item:
+ stub.rename_layer(item.id,
+ item.name.replace(stub.PUBLISH_ICON, ''))
def _get_stub():
@@ -232,6 +235,34 @@ def containerise(
"members": [str(layer.id)]
}
stub = lib.stub()
- stub.imprint(layer, data)
+ stub.imprint(layer.id, data)
return layer
+
+
+def get_context_data():
+ """Get stored values for context (validation enable/disable etc)"""
+ meta = _get_stub().get_layers_metadata()
+ for item in meta:
+ if item.get("id") == "publish_context":
+ item.pop("id")
+ return item
+
+ return {}
+
+
+def update_context_data(data, changes):
+ """Store value needed for context"""
+ item = data
+ item["id"] = "publish_context"
+ _get_stub().imprint(item["id"], item)
+
+
+def get_context_title():
+ """Returns title for Creator window"""
+ import avalon.api
+
+ project_name = avalon.api.Session["AVALON_PROJECT"]
+ asset_name = avalon.api.Session["AVALON_ASSET"]
+ task_name = avalon.api.Session["AVALON_TASK"]
+ return "{}/{}/{}".format(project_name, asset_name, task_name)
diff --git a/openpype/hosts/photoshop/api/ws_stub.py b/openpype/hosts/photoshop/api/ws_stub.py
index 64d89f5420..fa076ecc7e 100644
--- a/openpype/hosts/photoshop/api/ws_stub.py
+++ b/openpype/hosts/photoshop/api/ws_stub.py
@@ -27,6 +27,7 @@ class PSItem(object):
members = attr.ib(factory=list)
long_name = attr.ib(default=None)
color_code = attr.ib(default=None) # color code of layer
+ instance_id = attr.ib(default=None)
class PhotoshopServerStub:
@@ -76,13 +77,31 @@ class PhotoshopServerStub:
layer: (PSItem)
layers_meta: full list from Headline (for performance in loops)
Returns:
+ (dict) of layer metadata stored in PS file
+
+ Example:
+ {
+ 'id': 'pyblish.avalon.container',
+ 'loader': 'ImageLoader',
+ 'members': ['64'],
+ 'name': 'imageMainMiddle',
+ 'namespace': 'Hero_imageMainMiddle_001',
+ 'representation': '6203dc91e80934d9f6ee7d96',
+ 'schema': 'openpype:container-2.0'
+ }
"""
if layers_meta is None:
layers_meta = self.get_layers_metadata()
- return layers_meta.get(str(layer.id))
+ for layer_meta in layers_meta:
+ layer_id = layer_meta.get("uuid") # legacy
+ if layer_meta.get("members"):
+ layer_id = layer_meta["members"][0]
+ if str(layer.id) == str(layer_id):
+ return layer_meta
+ print("Unable to find layer metadata for {}".format(layer.id))
- def imprint(self, layer, data, all_layers=None, layers_meta=None):
+ def imprint(self, item_id, data, all_layers=None, items_meta=None):
"""Save layer metadata to Headline field of active document
Stores metadata in format:
@@ -108,28 +127,37 @@ class PhotoshopServerStub:
}] - for loaded instances
Args:
- layer (PSItem):
+ item_id (str):
data(string): json representation for single layer
all_layers (list of PSItem): for performance, could be
injected for usage in loop, if not, single call will be
triggered
- layers_meta(string): json representation from Headline
+ items_meta(string): json representation from Headline
(for performance - provide only if imprint is in
loop - value should be same)
Returns: None
"""
- if not layers_meta:
- layers_meta = self.get_layers_metadata()
+ if not items_meta:
+ items_meta = self.get_layers_metadata()
# json.dumps writes integer values in a dictionary to string, so
# anticipating it here.
- if str(layer.id) in layers_meta and layers_meta[str(layer.id)]:
- if data:
- layers_meta[str(layer.id)].update(data)
+ item_id = str(item_id)
+ is_new = True
+ result_meta = []
+ for item_meta in items_meta:
+ if ((item_meta.get('members') and
+ item_id == str(item_meta.get('members')[0])) or
+ item_meta.get("instance_id") == item_id):
+ is_new = False
+ if data:
+ item_meta.update(data)
+ result_meta.append(item_meta)
else:
- layers_meta.pop(str(layer.id))
- else:
- layers_meta[str(layer.id)] = data
+ result_meta.append(item_meta)
+
+ if is_new:
+ result_meta.append(data)
# Ensure only valid ids are stored.
if not all_layers:
@@ -137,12 +165,14 @@ class PhotoshopServerStub:
layer_ids = [layer.id for layer in all_layers]
cleaned_data = []
- for layer_id in layers_meta:
- if int(layer_id) in layer_ids:
- cleaned_data.append(layers_meta[layer_id])
+ for item in result_meta:
+ if item.get("members"):
+ if int(item["members"][0]) not in layer_ids:
+ continue
+
+ cleaned_data.append(item)
payload = json.dumps(cleaned_data, indent=4)
-
self.websocketserver.call(
self.client.call('Photoshop.imprint', payload=payload)
)
@@ -370,38 +400,27 @@ class PhotoshopServerStub:
(Headline accessible by File > File Info)
Returns:
- (string): - json documents
+ (list)
example:
{"8":{"active":true,"subset":"imageBG",
"family":"image","id":"pyblish.avalon.instance",
"asset":"Town"}}
8 is layer(group) id - used for deletion, update etc.
"""
- layers_data = {}
res = self.websocketserver.call(self.client.call('Photoshop.read'))
+ layers_data = []
try:
- layers_data = json.loads(res)
+ if res:
+ layers_data = json.loads(res)
except json.decoder.JSONDecodeError:
- pass
+ raise ValueError("{} cannot be parsed, recreate meta".format(res))
# format of metadata changed from {} to [] because of standardization
# keep current implementation logic as it's working
- if not isinstance(layers_data, dict):
- temp_layers_meta = {}
- for layer_meta in layers_data:
- layer_id = layer_meta.get("uuid")
- if not layer_id:
- layer_id = layer_meta.get("members")[0]
-
- temp_layers_meta[layer_id] = layer_meta
- layers_data = temp_layers_meta
- else:
- # legacy version of metadata
+ if isinstance(layers_data, dict):
for layer_id, layer_meta in layers_data.items():
if layer_meta.get("schema") != "openpype:container-2.0":
- layer_meta["uuid"] = str(layer_id)
- else:
layer_meta["members"] = [str(layer_id)]
-
+ layers_data = list(layers_data.values())
return layers_data
def import_smart_object(self, path, layer_name, as_reference=False):
@@ -472,11 +491,12 @@ class PhotoshopServerStub:
)
def remove_instance(self, instance_id):
- cleaned_data = {}
+ cleaned_data = []
- for key, instance in self.get_layers_metadata().items():
- if key != instance_id:
- cleaned_data[key] = instance
+ for item in self.get_layers_metadata():
+ inst_id = item.get("instance_id") or item.get("uuid")
+ if inst_id != instance_id:
+ cleaned_data.append(item)
payload = json.dumps(cleaned_data, indent=4)
@@ -528,6 +548,7 @@ class PhotoshopServerStub:
d.get('type'),
d.get('members'),
d.get('long_name'),
- d.get("color_code")
+ d.get("color_code"),
+ d.get("instance_id")
))
return ret
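
The matching rule that `imprint()` now applies can be summarized in isolation; this is a restatement of the logic above, not an additional API:

def matches(item_meta, item_id):
    """True when a stored metadata item belongs to 'item_id'.

    Layer-bound items match on their first member (the layer id),
    layer-less instances match on their 'instance_id'.
    """
    item_id = str(item_id)
    members = item_meta.get("members")
    if members and item_id == str(members[0]):
        return True
    return item_meta.get("instance_id") == item_id
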
diff --git a/openpype/hosts/photoshop/plugins/create/create_image.py b/openpype/hosts/photoshop/plugins/create/create_image.py
index 5078cbb587..c2fe8b6c78 100644
--- a/openpype/hosts/photoshop/plugins/create/create_image.py
+++ b/openpype/hosts/photoshop/plugins/create/create_image.py
@@ -1,99 +1,145 @@
-from Qt import QtWidgets
-from openpype.pipeline import LegacyCreator
-from openpype.hosts.photoshop import api as photoshop
+from avalon import api as avalon_api
+from openpype.hosts.photoshop import api
+from openpype.lib import BoolDef
+from openpype.pipeline import (
+ Creator,
+ CreatedInstance
+)
-class CreateImage(LegacyCreator):
- """Image folder for publish."""
-
- name = "imageDefault"
+class ImageCreator(Creator):
+ """Creates image instance for publishing."""
+ identifier = "image"
label = "Image"
family = "image"
- defaults = ["Main"]
+ description = "Image creator"
- def process(self):
- groups = []
- layers = []
- create_group = False
+ def collect_instances(self):
+ for instance_data in api.list_instances():
+ # legacy instances have family=='image'
+ creator_id = (instance_data.get("creator_identifier") or
+ instance_data.get("family"))
- stub = photoshop.stub()
- if (self.options or {}).get("useSelection"):
- multiple_instances = False
- selection = stub.get_selected_layers()
- self.log.info("selection {}".format(selection))
- if len(selection) > 1:
- # Ask user whether to create one image or image per selected
- # item.
- msg_box = QtWidgets.QMessageBox()
- msg_box.setIcon(QtWidgets.QMessageBox.Warning)
- msg_box.setText(
- "Multiple layers selected."
- "\nDo you want to make one image per layer?"
+ if creator_id == self.identifier:
+ instance_data = self._handle_legacy(instance_data)
+ layer = api.stub().get_layer(instance_data["members"][0])
+ instance_data["layer"] = layer
+ instance = CreatedInstance.from_existing(
+ instance_data, self
)
- msg_box.setStandardButtons(
- QtWidgets.QMessageBox.Yes |
- QtWidgets.QMessageBox.No |
- QtWidgets.QMessageBox.Cancel
- )
- ret = msg_box.exec_()
- if ret == QtWidgets.QMessageBox.Yes:
- multiple_instances = True
- elif ret == QtWidgets.QMessageBox.Cancel:
- return
+ self._add_instance_to_context(instance)
- if multiple_instances:
- for item in selection:
- if item.group:
- groups.append(item)
- else:
- layers.append(item)
+ def create(self, subset_name_from_ui, data, pre_create_data):
+ groups_to_create = []
+ top_layers_to_wrap = []
+ create_empty_group = False
+
+ stub = api.stub() # only after PS is up
+ top_level_selected_items = stub.get_selected_layers()
+ if pre_create_data.get("use_selection"):
+ only_single_item_selected = len(top_level_selected_items) == 1
+ for selected_item in top_level_selected_items:
+ if (
+ only_single_item_selected or
+ pre_create_data.get("create_multiple")):
+ if selected_item.group:
+ groups_to_create.append(selected_item)
+ else:
+ top_layers_to_wrap.append(selected_item)
else:
- group = stub.group_selected_layers(self.name)
- groups.append(group)
+ group = stub.group_selected_layers(subset_name_from_ui)
+ groups_to_create.append(group)
- elif len(selection) == 1:
- # One selected item. Use group if its a LayerSet (group), else
- # create a new group.
- if selection[0].group:
- groups.append(selection[0])
- else:
- layers.append(selection[0])
- elif len(selection) == 0:
- # No selection creates an empty group.
- create_group = True
- else:
- group = stub.create_group(self.name)
- groups.append(group)
+ if not groups_to_create and not top_layers_to_wrap:
+ group = stub.create_group(subset_name_from_ui)
+ groups_to_create.append(group)
- if create_group:
- group = stub.create_group(self.name)
- groups.append(group)
-
- for layer in layers:
+ # wrap each top level layer into separate new group
+ for layer in top_layers_to_wrap:
stub.select_layers([layer])
group = stub.group_selected_layers(layer.name)
- groups.append(group)
+ groups_to_create.append(group)
- creator_subset_name = self.data["subset"]
- for group in groups:
- long_names = []
- group.name = group.name.replace(stub.PUBLISH_ICON, ''). \
- replace(stub.LOADED_ICON, '')
+ creating_multiple_groups = len(groups_to_create) > 1
+ for group in groups_to_create:
+ subset_name = subset_name_from_ui # reset to name from creator UI
+ layer_names_in_hierarchy = []
+ created_group_name = self._clean_highlights(stub, group.name)
- subset_name = creator_subset_name
- if len(groups) > 1:
+ if creating_multiple_groups:
+ # concatenate with layer name to differentiate subsets
subset_name += group.name.title().replace(" ", "")
if group.long_name:
for directory in group.long_name[::-1]:
- name = directory.replace(stub.PUBLISH_ICON, '').\
- replace(stub.LOADED_ICON, '')
- long_names.append(name)
+ name = self._clean_highlights(stub, directory)
+ layer_names_in_hierarchy.append(name)
- self.data.update({"subset": subset_name})
- self.data.update({"uuid": str(group.id)})
- self.data.update({"long_name": "_".join(long_names)})
- stub.imprint(group, self.data)
+ data.update({"subset": subset_name})
+ data.update({"members": [str(group.id)]})
+ data.update({"long_name": "_".join(layer_names_in_hierarchy)})
+
+ new_instance = CreatedInstance(self.family, subset_name, data,
+ self)
+
+ stub.imprint(new_instance.get("instance_id"),
+ new_instance.data_to_store())
+ self._add_instance_to_context(new_instance)
# reusing existing group, need to rename afterwards
- if not create_group:
- stub.rename_layer(group.id, stub.PUBLISH_ICON + group.name)
+ if not create_empty_group:
+ stub.rename_layer(group.id,
+ stub.PUBLISH_ICON + created_group_name)
+
+ def update_instances(self, update_list):
+ self.log.debug("update_list:: {}".format(update_list))
+ for created_inst, _changes in update_list:
+ if created_inst.get("layer"):
+ # not storing PSItem layer to metadata
+ created_inst.pop("layer")
+ api.stub().imprint(created_inst.get("instance_id"),
+ created_inst.data_to_store())
+
+ def remove_instances(self, instances):
+ for instance in instances:
+ api.remove_instance(instance)
+ self._remove_instance_from_context(instance)
+
+ def get_default_variants(self):
+ return [
+ "Main"
+ ]
+
+ def get_pre_create_attr_defs(self):
+ output = [
+ BoolDef("use_selection", default=True,
+ label="Create only for selected"),
+ BoolDef("create_multiple",
+ default=True,
+ label="Create separate instance for each selected")
+ ]
+ return output
+
+ def get_detail_description(self):
+ return """Creator for Image instances"""
+
+ def _handle_legacy(self, instance_data):
+ """Converts old instances to new format."""
+ if not instance_data.get("members"):
+ instance_data["members"] = [instance_data.get("uuid")]
+
+ if instance_data.get("uuid"):
+ # uuid not needed, replaced with unique instance_id
+ api.stub().remove_instance(instance_data.get("uuid"))
+ instance_data.pop("uuid")
+
+ if not instance_data.get("task"):
+ instance_data["task"] = avalon_api.Session.get("AVALON_TASK")
+
+ if not instance_data.get("variant"):
+ instance_data["variant"] = ''
+
+ return instance_data
+
+ def _clean_highlights(self, stub, item):
+ return item.replace(stub.PUBLISH_ICON, '').replace(
+ stub.LOADED_ICON, '')
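
A hypothetical invocation of the new entry point, for illustration only; in production the new publisher UI constructs the creator and supplies these arguments:

# 'creator' is an ImageCreator instance managed by the create context
creator.create(
    "imageMain",                                       # subset name from UI
    {"asset": "Town", "task": "art", "variant": "Main"},
    {"use_selection": True, "create_multiple": True},  # pre-create attrs
)
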
diff --git a/openpype/hosts/photoshop/plugins/create/create_legacy_image.py b/openpype/hosts/photoshop/plugins/create/create_legacy_image.py
new file mode 100644
index 0000000000..9736471a26
--- /dev/null
+++ b/openpype/hosts/photoshop/plugins/create/create_legacy_image.py
@@ -0,0 +1,100 @@
+from Qt import QtWidgets
+from openpype.pipeline import create
+from openpype.hosts.photoshop import api as photoshop
+
+
+class CreateImage(create.LegacyCreator):
+ """Image folder for publish."""
+
+ name = "imageDefault"
+ label = "Image"
+ family = "image"
+ defaults = ["Main"]
+
+ def process(self):
+ groups = []
+ layers = []
+ create_group = False
+
+ stub = photoshop.stub()
+ if (self.options or {}).get("useSelection"):
+ multiple_instances = False
+ selection = stub.get_selected_layers()
+ self.log.info("selection {}".format(selection))
+ if len(selection) > 1:
+ # Ask user whether to create one image or image per selected
+ # item.
+ msg_box = QtWidgets.QMessageBox()
+ msg_box.setIcon(QtWidgets.QMessageBox.Warning)
+ msg_box.setText(
+ "Multiple layers selected."
+ "\nDo you want to make one image per layer?"
+ )
+ msg_box.setStandardButtons(
+ QtWidgets.QMessageBox.Yes |
+ QtWidgets.QMessageBox.No |
+ QtWidgets.QMessageBox.Cancel
+ )
+ ret = msg_box.exec_()
+ if ret == QtWidgets.QMessageBox.Yes:
+ multiple_instances = True
+ elif ret == QtWidgets.QMessageBox.Cancel:
+ return
+
+ if multiple_instances:
+ for item in selection:
+ if item.group:
+ groups.append(item)
+ else:
+ layers.append(item)
+ else:
+ group = stub.group_selected_layers(self.name)
+ groups.append(group)
+
+ elif len(selection) == 1:
+ # One selected item. Use group if it's a LayerSet (group), else
+ # create a new group.
+ if selection[0].group:
+ groups.append(selection[0])
+ else:
+ layers.append(selection[0])
+ elif len(selection) == 0:
+ # No selection creates an empty group.
+ create_group = True
+ else:
+ group = stub.create_group(self.name)
+ groups.append(group)
+
+ if create_group:
+ group = stub.create_group(self.name)
+ groups.append(group)
+
+ for layer in layers:
+ stub.select_layers([layer])
+ group = stub.group_selected_layers(layer.name)
+ groups.append(group)
+
+ creator_subset_name = self.data["subset"]
+ for group in groups:
+ long_names = []
+ group.name = group.name.replace(stub.PUBLISH_ICON, ''). \
+ replace(stub.LOADED_ICON, '')
+
+ subset_name = creator_subset_name
+ if len(groups) > 1:
+ subset_name += group.name.title().replace(" ", "")
+
+ if group.long_name:
+ for directory in group.long_name[::-1]:
+ name = directory.replace(stub.PUBLISH_ICON, '').\
+ replace(stub.LOADED_ICON, '')
+ long_names.append(name)
+
+ self.data.update({"subset": subset_name})
+ self.data.update({"uuid": str(group.id)})
+ self.data.update({"members": [str(group.id)]})
+ self.data.update({"long_name": "_".join(long_names)})
+ stub.imprint(group, self.data)
+ # reusing existing group, need to rename afterwards
+ if not create_group:
+ stub.rename_layer(group.id, stub.PUBLISH_ICON + group.name)
diff --git a/openpype/hosts/photoshop/plugins/create/workfile_creator.py b/openpype/hosts/photoshop/plugins/create/workfile_creator.py
new file mode 100644
index 0000000000..d66a05cad7
--- /dev/null
+++ b/openpype/hosts/photoshop/plugins/create/workfile_creator.py
@@ -0,0 +1,73 @@
+from avalon import io
+
+import openpype.hosts.photoshop.api as api
+from openpype.pipeline import (
+ AutoCreator,
+ CreatedInstance
+)
+
+
+class PSWorkfileCreator(AutoCreator):
+ identifier = "workfile"
+ family = "workfile"
+
+ def get_instance_attr_defs(self):
+ return []
+
+ def collect_instances(self):
+ for instance_data in api.list_instances():
+ creator_id = instance_data.get("creator_identifier")
+ if creator_id == self.identifier:
+ subset_name = instance_data["subset"]
+ instance = CreatedInstance(
+ self.family, subset_name, instance_data, self
+ )
+ self._add_instance_to_context(instance)
+
+ def update_instances(self, update_list):
+ # nothing to change on workfiles
+ pass
+
+ def create(self, options=None):
+ existing_instance = None
+ for instance in self.create_context.instances:
+ if instance.family == self.family:
+ existing_instance = instance
+ break
+
+ variant = ''
+ project_name = io.Session["AVALON_PROJECT"]
+ asset_name = io.Session["AVALON_ASSET"]
+ task_name = io.Session["AVALON_TASK"]
+ host_name = io.Session["AVALON_APP"]
+ if existing_instance is None:
+ asset_doc = io.find_one({"type": "asset", "name": asset_name})
+ subset_name = self.get_subset_name(
+ variant, task_name, asset_doc, project_name, host_name
+ )
+ data = {
+ "asset": asset_name,
+ "task": task_name,
+ "variant": variant
+ }
+ data.update(self.get_dynamic_data(
+ variant, task_name, asset_doc, project_name, host_name
+ ))
+
+ new_instance = CreatedInstance(
+ self.family, subset_name, data, self
+ )
+ self._add_instance_to_context(new_instance)
+ api.stub().imprint(new_instance.get("instance_id"),
+ new_instance.data_to_store())
+
+ elif (
+ existing_instance["asset"] != asset_name
+ or existing_instance["task"] != task_name
+ ):
+ asset_doc = io.find_one({"type": "asset", "name": asset_name})
+ subset_name = self.get_subset_name(
+ variant, task_name, asset_doc, project_name, host_name
+ )
+ existing_instance["asset"] = asset_name
+ existing_instance["task"] = task_name
diff --git a/openpype/hosts/photoshop/plugins/load/load_image.py b/openpype/hosts/photoshop/plugins/load/load_image.py
index 0a9421b8f2..91a9787781 100644
--- a/openpype/hosts/photoshop/plugins/load/load_image.py
+++ b/openpype/hosts/photoshop/plugins/load/load_image.py
@@ -61,7 +61,7 @@ class ImageLoader(photoshop.PhotoshopLoader):
)
stub.imprint(
- layer, {"representation": str(representation["_id"])}
+ layer.id, {"representation": str(representation["_id"])}
)
def remove(self, container):
@@ -73,7 +73,7 @@ class ImageLoader(photoshop.PhotoshopLoader):
stub = self.get_stub()
layer = container.pop("layer")
- stub.imprint(layer, {})
+ stub.imprint(layer.id, {})
stub.delete_layer(layer.id)
def switch(self, container, representation):
diff --git a/openpype/hosts/photoshop/plugins/load/load_reference.py b/openpype/hosts/photoshop/plugins/load/load_reference.py
index f5f0545d39..1f32a5d23c 100644
--- a/openpype/hosts/photoshop/plugins/load/load_reference.py
+++ b/openpype/hosts/photoshop/plugins/load/load_reference.py
@@ -61,7 +61,7 @@ class ReferenceLoader(photoshop.PhotoshopLoader):
)
stub.imprint(
- layer, {"representation": str(representation["_id"])}
+ layer.id, {"representation": str(representation["_id"])}
)
def remove(self, container):
@@ -72,7 +72,7 @@ class ReferenceLoader(photoshop.PhotoshopLoader):
"""
stub = self.get_stub()
layer = container.pop("layer")
- stub.imprint(layer, {})
+ stub.imprint(layer.id, {})
stub.delete_layer(layer.id)
def switch(self, container, representation):
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py b/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py
new file mode 100644
index 0000000000..5e6e916611
--- /dev/null
+++ b/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py
@@ -0,0 +1,73 @@
+"""Parses batch context from json and continues in publish process.
+
+Provides:
+ context -> Loaded batch file.
+ - asset
+ - task (task name)
+ - taskType
+ - project_name
+ - variant
+
+Code is practically a copy of
+`openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py` as
+webpublisher should eventually be ejected as an addon, i.e. the mentioned
+plugin shouldn't be pushed into the general publish plugins.
+"""
+
+import os
+
+import pyblish.api
+from avalon import io
+from openpype.lib.plugin_tools import (
+ parse_json,
+ get_batch_asset_task_info
+)
+
+
+class CollectBatchData(pyblish.api.ContextPlugin):
+ """Collect batch data from json stored in 'OPENPYPE_PUBLISH_DATA' env dir.
+
+ The directory must contain 'manifest.json' file where batch data should be
+ stored.
+ """
+ # must be really early, context values are only in json file
+ order = pyblish.api.CollectorOrder - 0.495
+ label = "Collect batch data"
+ hosts = ["photoshop"]
+ targets = ["remotepublish"]
+
+ def process(self, context):
+ self.log.info("CollectBatchData")
+ batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA")
+
+ assert batch_dir, (
+ "Missing `OPENPYPE_PUBLISH_DATA`")
+
+ assert os.path.exists(batch_dir), \
+ "Folder {} doesn't exist".format(batch_dir)
+
+ project_name = os.environ.get("AVALON_PROJECT")
+ if project_name is None:
+ raise AssertionError(
+ "Environment `AVALON_PROJECT` was not found."
+ "Could not set project `root` which may cause issues."
+ )
+
+ batch_data = parse_json(os.path.join(batch_dir, "manifest.json"))
+
+ context.data["batchDir"] = batch_dir
+ context.data["batchData"] = batch_data
+
+ asset_name, task_name, task_type = get_batch_asset_task_info(
+ batch_data["context"]
+ )
+
+ os.environ["AVALON_ASSET"] = asset_name
+ io.Session["AVALON_ASSET"] = asset_name
+ os.environ["AVALON_TASK"] = task_name
+ io.Session["AVALON_TASK"] = task_name
+
+ context.data["asset"] = asset_name
+ context.data["task"] = task_name
+ context.data["taskType"] = task_type
+ context.data["project_name"] = project_name
+ context.data["variant"] = batch_data["variant"]
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py b/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py
index 7d44d55a80..122428eea0 100644
--- a/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py
+++ b/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py
@@ -4,7 +4,6 @@ import re
import pyblish.api
from openpype.lib import prepare_template_data
-from openpype.lib.plugin_tools import parse_json, get_batch_asset_task_info
from openpype.hosts.photoshop import api as photoshop
@@ -46,7 +45,10 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin):
existing_subset_names = self._get_existing_subset_names(context)
- asset_name, task_name, variant = self._parse_batch(batch_dir)
+ # from CollectBatchData
+ asset_name = context.data["asset"]
+ task_name = context.data["task"]
+ variant = context.data["variant"]
stub = photoshop.stub()
layers = stub.get_layers()
@@ -130,25 +132,6 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin):
return existing_subset_names
- def _parse_batch(self, batch_dir):
- """Parses asset_name, task_name, variant from batch manifest."""
- task_data = None
- if batch_dir and os.path.exists(batch_dir):
- task_data = parse_json(os.path.join(batch_dir,
- "manifest.json"))
- if not task_data:
- raise ValueError(
- "Cannot parse batch meta in {} folder".format(batch_dir))
- variant = task_data["variant"]
-
- asset, task_name, task_type = get_batch_asset_task_info(
- task_data["context"])
-
- if not task_name:
- task_name = task_type
-
- return asset, task_name, variant
-
def _create_instance(self, context, layer, family,
asset, subset, task_name):
instance = context.create_instance(layer.name)
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_instances.py b/openpype/hosts/photoshop/plugins/publish/collect_instances.py
index 50b50f86d9..b466ec8687 100644
--- a/openpype/hosts/photoshop/plugins/publish/collect_instances.py
+++ b/openpype/hosts/photoshop/plugins/publish/collect_instances.py
@@ -1,3 +1,5 @@
+import pprint
+
import pyblish.api
from openpype.settings import get_project_settings
@@ -9,8 +11,8 @@ from openpype.pipeline import legacy_io
class CollectInstances(pyblish.api.ContextPlugin):
"""Gather instances by LayerSet and file metadata
- This collector takes into account assets that are associated with
- an LayerSet and marked with a unique identifier;
+ Collects publishable instances from file metadata or enhances those
+ already collected by the creator (family == "image").
If no image instances are explicitly created, it looks if there is value
in `flatten_subset_template` (configurable in Settings), in that case it
@@ -20,7 +22,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
id (str): "pyblish.avalon.instance"
"""
- label = "Instances"
+ label = "Collect Instances"
order = pyblish.api.CollectorOrder
hosts = ["photoshop"]
families_mapping = {
@@ -30,42 +32,53 @@ class CollectInstances(pyblish.api.ContextPlugin):
flatten_subset_template = ""
def process(self, context):
+ instance_by_layer_id = {}
+ for instance in context:
+ if (
+ instance.data["family"] == "image" and
+ instance.data.get("members")):
+ layer_id = str(instance.data["members"][0])
+ instance_by_layer_id[layer_id] = instance
+
stub = photoshop.stub()
- layers = stub.get_layers()
+ layer_items = stub.get_layers()
layers_meta = stub.get_layers_metadata()
instance_names = []
+
all_layer_ids = []
- for layer in layers:
- all_layer_ids.append(layer.id)
- layer_data = stub.read(layer, layers_meta)
+ for layer_item in layer_items:
+ layer_meta_data = stub.read(layer_item, layers_meta)
+ all_layer_ids.append(layer_item.id)
# Skip layers without metadata.
- if layer_data is None:
+ if layer_meta_data is None:
continue
# Skip containers.
- if "container" in layer_data["id"]:
+ if "container" in layer_meta_data["id"]:
continue
- # child_layers = [*layer.Layers]
- # self.log.debug("child_layers {}".format(child_layers))
- # if not child_layers:
- # self.log.info("%s skipped, it was empty." % layer.Name)
- # continue
+ # active might not be in legacy meta
+ if not layer_meta_data.get("active", True):
+ continue
- instance = context.create_instance(layer_data["subset"])
- instance.data["layer"] = layer
- instance.data.update(layer_data)
+ instance = instance_by_layer_id.get(str(layer_item.id))
+ if instance is None:
+ instance = context.create_instance(layer_meta_data["subset"])
+
+ instance.data["layer"] = layer_item
+ instance.data.update(layer_meta_data)
instance.data["families"] = self.families_mapping[
- layer_data["family"]
+ layer_meta_data["family"]
]
- instance.data["publish"] = layer.visible
- instance_names.append(layer_data["subset"])
+ instance.data["publish"] = layer_item.visible
+ instance_names.append(layer_meta_data["subset"])
# Produce diagnostic message for any graphical
# user interface interested in visualising it.
self.log.info("Found: \"%s\" " % instance.data["name"])
- self.log.info("instance: {} ".format(instance.data))
+ self.log.info("instance: {} ".format(
+ pprint.pformat(instance.data, indent=4)))
if len(instance_names) != len(set(instance_names)):
self.log.warning("Duplicate instances found. " +
@@ -82,8 +95,9 @@ class CollectInstances(pyblish.api.ContextPlugin):
task_name = legacy_io.Session["AVALON_TASK"]
asset_name = context.data["assetEntity"]["name"]
+ variant = context.data.get("variant") or variants[0]
fill_pairs = {
- "variant": variants[0],
+ "variant": variant,
"family": family,
"task": task_name
}
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_review.py b/openpype/hosts/photoshop/plugins/publish/collect_review.py
index f3842b9ee5..2ea5503f3f 100644
--- a/openpype/hosts/photoshop/plugins/publish/collect_review.py
+++ b/openpype/hosts/photoshop/plugins/publish/collect_review.py
@@ -1,3 +1,11 @@
+"""
+Requires:
+ None
+
+Provides:
+ instance -> family ("review")
+"""
+
import os
import pyblish.api
@@ -6,33 +14,35 @@ from openpype.lib import get_subset_name_with_asset_doc
class CollectReview(pyblish.api.ContextPlugin):
- """Gather the active document as review instance."""
+ """Gather the active document as review instance.
+ Triggers once even if no 'image' instance is published, since by default
+ it creates a flattened image from the workfile.
+ """
+
+ label = "Collect Review"
label = "Review"
- order = pyblish.api.CollectorOrder + 0.1
hosts = ["photoshop"]
+ order = pyblish.api.CollectorOrder + 0.1
def process(self, context):
family = "review"
subset = get_subset_name_with_asset_doc(
family,
- "",
+ context.data.get("variant", ''),
context.data["anatomyData"]["task"]["name"],
context.data["assetEntity"],
context.data["anatomyData"]["project"]["name"],
host_name=context.data["hostName"]
)
- file_path = context.data["currentFile"]
- base_name = os.path.basename(file_path)
-
instance = context.create_instance(subset)
instance.data.update({
"subset": subset,
- "label": base_name,
- "name": base_name,
+ "label": subset,
+ "name": subset,
"family": family,
- "families": ["ftrack"],
+ "families": [],
"representations": [],
"asset": os.environ["AVALON_ASSET"]
})
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_workfile.py b/openpype/hosts/photoshop/plugins/publish/collect_workfile.py
index 0dbe2c6609..e4f0a07b34 100644
--- a/openpype/hosts/photoshop/plugins/publish/collect_workfile.py
+++ b/openpype/hosts/photoshop/plugins/publish/collect_workfile.py
@@ -12,6 +12,13 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
hosts = ["photoshop"]
def process(self, context):
+ existing_instance = None
+ for instance in context:
+ if instance.data["family"] == "workfile":
+ self.log.debug("Workfile instance found, won't create new")
+ existing_instance = instance
+ break
+
family = "workfile"
subset = get_subset_name_with_asset_doc(
family,
@@ -27,16 +34,19 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
base_name = os.path.basename(file_path)
# Create instance
- instance = context.create_instance(subset)
- instance.data.update({
- "subset": subset,
- "label": base_name,
- "name": base_name,
- "family": family,
- "families": [],
- "representations": [],
- "asset": os.environ["AVALON_ASSET"]
- })
+ if existing_instance is None:
+ instance = context.create_instance(subset)
+ instance.data.update({
+ "subset": subset,
+ "label": base_name,
+ "name": base_name,
+ "family": family,
+ "families": [],
+ "representations": [],
+ "asset": os.environ["AVALON_ASSET"]
+ })
+ else:
+ instance = existing_instance
# creating representation
_, ext = os.path.splitext(file_path)
diff --git a/openpype/hosts/photoshop/plugins/publish/extract_image.py b/openpype/hosts/photoshop/plugins/publish/extract_image.py
index b07d0740c1..a133e33409 100644
--- a/openpype/hosts/photoshop/plugins/publish/extract_image.py
+++ b/openpype/hosts/photoshop/plugins/publish/extract_image.py
@@ -16,7 +16,6 @@ class ExtractImage(openpype.api.Extractor):
formats = ["png", "jpg"]
def process(self, instance):
-
staging_dir = self.staging_dir(instance)
self.log.info("Outputting image to {}".format(staging_dir))
diff --git a/openpype/hosts/photoshop/plugins/publish/help/validate_naming.xml b/openpype/hosts/photoshop/plugins/publish/help/validate_naming.xml
new file mode 100644
index 0000000000..5a1e266748
--- /dev/null
+++ b/openpype/hosts/photoshop/plugins/publish/help/validate_naming.xml
@@ -0,0 +1,21 @@
+
+
+
+Subset name
+
+## Invalid subset or layer name
+
Subset or layer name cannot contain certain characters (spaces etc.) which could cause issues when the subset name is used in a published file name.
+ {msg}
+
+### How to repair?
+
+You can fix this with "repair" button on the right.
+
+
+### __Detailed Info__ (optional)
+
Not all characters are allowed in file names on every OS. The set of invalid characters can be configured in Settings.
+
+
+
\ No newline at end of file
diff --git a/openpype/hosts/photoshop/plugins/publish/help/validate_unique_subsets.xml b/openpype/hosts/photoshop/plugins/publish/help/validate_unique_subsets.xml
new file mode 100644
index 0000000000..4b47973193
--- /dev/null
+++ b/openpype/hosts/photoshop/plugins/publish/help/validate_unique_subsets.xml
@@ -0,0 +1,14 @@
+
+
+
+Subset not unique
+
+## Non-unique subset name found
+
+ Non-unique subset names: '{non_unique}'
+
+### How to repair?
+
+Remove the offending instance or rename it to a unique name. Check whether the same layer name was used for multiple instances.
+
+
+
\ No newline at end of file
diff --git a/openpype/hosts/photoshop/plugins/publish/validate_naming.py b/openpype/hosts/photoshop/plugins/publish/validate_naming.py
index 583e9c7a4e..bcae24108c 100644
--- a/openpype/hosts/photoshop/plugins/publish/validate_naming.py
+++ b/openpype/hosts/photoshop/plugins/publish/validate_naming.py
@@ -2,6 +2,7 @@ import re
import pyblish.api
import openpype.api
+from openpype.pipeline import PublishXmlValidationError
from openpype.hosts.photoshop import api as photoshop
@@ -22,33 +23,34 @@ class ValidateNamingRepair(pyblish.api.Action):
failed.append(result["instance"])
invalid_chars, replace_char = plugin.get_replace_chars()
- self.log.info("{} --- {}".format(invalid_chars, replace_char))
+ self.log.debug("{} --- {}".format(invalid_chars, replace_char))
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(failed, plugin)
stub = photoshop.stub()
for instance in instances:
- self.log.info("validate_naming instance {}".format(instance))
- layer_item = instance.data["layer"]
- metadata = stub.read(layer_item)
- self.log.info("metadata instance {}".format(metadata))
- layer_name = None
- if metadata.get("uuid"):
- layer_data = stub.get_layer(metadata["uuid"])
- self.log.info("layer_data {}".format(layer_data))
- if layer_data:
- layer_name = re.sub(invalid_chars,
- replace_char,
- layer_data.name)
+ self.log.debug("validate_naming instance {}".format(instance))
+ current_layer_state = stub.get_layer(instance.data["layer"].id)
+ self.log.debug("current_layer{}".format(current_layer_state))
- stub.rename_layer(instance.data["uuid"], layer_name)
+ layer_meta = stub.read(current_layer_state)
+ instance_id = (layer_meta.get("instance_id") or
+ layer_meta.get("uuid"))
+ if not instance_id:
+ self.log.warning("Unable to repair, cannot find layer")
+ continue
+
+ layer_name = re.sub(invalid_chars,
+ replace_char,
+ current_layer_state.name)
+
+ stub.rename_layer(current_layer_state.id, layer_name)
subset_name = re.sub(invalid_chars, replace_char,
instance.data["subset"])
- layer_item.name = layer_name or subset_name
- metadata["subset"] = subset_name
- stub.imprint(layer_item, metadata)
+ layer_meta["subset"] = subset_name
+ stub.imprint(instance_id, layer_meta)
return True
@@ -73,11 +75,18 @@ class ValidateNaming(pyblish.api.InstancePlugin):
help_msg = ' Use Repair action (A) in Pyblish to fix it.'
msg = "Name \"{}\" is not allowed.{}".format(instance.data["name"],
help_msg)
- assert not re.search(self.invalid_chars, instance.data["name"]), msg
+
+ formatting_data = {"msg": msg}
+ if re.search(self.invalid_chars, instance.data["name"]):
+ raise PublishXmlValidationError(self, msg,
+ formatting_data=formatting_data)
msg = "Subset \"{}\" is not allowed.{}".format(instance.data["subset"],
help_msg)
- assert not re.search(self.invalid_chars, instance.data["subset"]), msg
+ formatting_data = {"msg": msg}
+ if re.search(self.invalid_chars, instance.data["subset"]):
+ raise PublishXmlValidationError(self, msg,
+ formatting_data=formatting_data)
@classmethod
def get_replace_chars(cls):
diff --git a/openpype/hosts/photoshop/plugins/publish/validate_unique_subsets.py b/openpype/hosts/photoshop/plugins/publish/validate_unique_subsets.py
index 40abfb1bbd..01f2323157 100644
--- a/openpype/hosts/photoshop/plugins/publish/validate_unique_subsets.py
+++ b/openpype/hosts/photoshop/plugins/publish/validate_unique_subsets.py
@@ -1,6 +1,7 @@
import collections
import pyblish.api
import openpype.api
+from openpype.pipeline import PublishXmlValidationError
class ValidateSubsetUniqueness(pyblish.api.ContextPlugin):
@@ -27,4 +28,10 @@ class ValidateSubsetUniqueness(pyblish.api.ContextPlugin):
if count > 1]
msg = ("Instance subset names {} are not unique. ".format(non_unique) +
"Remove duplicates via SubsetManager.")
- assert not non_unique, msg
+ formatting_data = {
+ "non_unique": ",".join(non_unique)
+ }
+
+ if non_unique:
+ raise PublishXmlValidationError(self, msg,
+ formatting_data=formatting_data)
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py
index bfa9dcf73a..2bf3917e2f 100644
--- a/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py
+++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py
@@ -248,7 +248,8 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
self.log.debug("collecting sequence: {}".format(collections))
instance.data["frameStart"] = int(component["frameStart"])
instance.data["frameEnd"] = int(component["frameEnd"])
- instance.data["fps"] = int(component["fps"])
+ if component.get("fps"):
+ instance.data["fps"] = int(component["fps"])
ext = component["ext"]
if ext.startswith("."):
diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py b/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py
index d954c04c60..9ff779636a 100644
--- a/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py
+++ b/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py
@@ -1,7 +1,12 @@
-"""Loads batch context from json and continues in publish process.
+"""Parses batch context from json and continues in publish process.
Provides:
context -> Loaded batch file.
+ - asset
+ - task (task name)
+ - taskType
+ - project_name
+ - variant
"""
import os
@@ -25,7 +30,7 @@ class CollectBatchData(pyblish.api.ContextPlugin):
# must be really early, context values are only in json file
order = pyblish.api.CollectorOrder - 0.495
label = "Collect batch data"
- host = ["webpublisher"]
+ hosts = ["webpublisher"]
def process(self, context):
batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA")
@@ -61,6 +66,7 @@ class CollectBatchData(pyblish.api.ContextPlugin):
context.data["task"] = task_name
context.data["taskType"] = task_type
context.data["project_name"] = project_name
+ context.data["variant"] = batch_data["variant"]
self._set_ctx_path(batch_data)
diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py
index 84a1f63418..bdd3caccfd 100644
--- a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py
+++ b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py
@@ -40,7 +40,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin):
# must be really early, context values are only in json file
order = pyblish.api.CollectorOrder - 0.490
label = "Collect rendered frames"
- host = ["webpublisher"]
+ hosts = ["webpublisher"]
targets = ["filespublish"]
# from Settings
@@ -61,6 +61,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin):
task_name = context.data["task"]
task_type = context.data["taskType"]
project_name = context.data["project_name"]
+ variant = context.data["variant"]
for task_dir in task_subfolders:
task_data = parse_json(os.path.join(task_dir,
"manifest.json"))
@@ -76,7 +77,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin):
extension.replace(".", ''))
subset_name = get_subset_name_with_asset_doc(
- family, task_data["variant"], task_name, asset_doc,
+ family, variant, task_name, asset_doc,
project_name=project_name, host_name="webpublisher"
)
version = self._get_last_version(asset_name, subset_name) + 1
diff --git a/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py b/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py
index cb6ed8481c..a56521891b 100644
--- a/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py
+++ b/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py
@@ -8,7 +8,7 @@ from openpype.lib import (
run_subprocess,
get_transcode_temp_directory,
- convert_for_ffmpeg,
+ convert_input_paths_for_ffmpeg,
should_convert_for_ffmpeg
)
@@ -59,11 +59,9 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
if do_convert:
convert_dir = get_transcode_temp_directory()
filename = os.path.basename(full_input_path)
- convert_for_ffmpeg(
- full_input_path,
+ convert_input_paths_for_ffmpeg(
+ [full_input_path],
convert_dir,
- None,
- None,
self.log
)
full_input_path = os.path.join(convert_dir, filename)
diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py
index b57e469f5b..29719b63bd 100644
--- a/openpype/lib/__init__.py
+++ b/openpype/lib/__init__.py
@@ -105,6 +105,7 @@ from .transcoding import (
get_transcode_temp_directory,
should_convert_for_ffmpeg,
convert_for_ffmpeg,
+ convert_input_paths_for_ffmpeg,
get_ffprobe_data,
get_ffprobe_streams,
get_ffmpeg_codec_args,
@@ -276,6 +277,7 @@ __all__ = [
"get_transcode_temp_directory",
"should_convert_for_ffmpeg",
"convert_for_ffmpeg",
+ "convert_input_paths_for_ffmpeg",
"get_ffprobe_data",
"get_ffprobe_streams",
"get_ffmpeg_codec_args",
diff --git a/openpype/lib/abstract_collect_render.py b/openpype/lib/abstract_collect_render.py
index fe202824a7..3d81f6d794 100644
--- a/openpype/lib/abstract_collect_render.py
+++ b/openpype/lib/abstract_collect_render.py
@@ -31,6 +31,7 @@ class RenderInstance(object):
source = attr.ib() # path to source scene file
label = attr.ib() # label to show in GUI
subset = attr.ib() # subset name
+ task = attr.ib() # task name
asset = attr.ib() # asset name (AVALON_ASSET)
attachTo = attr.ib() # subset name to attach render to
setMembers = attr.ib() # list of nodes/members producing render output
@@ -139,7 +140,9 @@ class AbstractCollectRender(pyblish.api.ContextPlugin):
try:
if "workfile" in instance.data["families"]:
instance.data["publish"] = True
- if "renderFarm" in instance.data["families"]:
+ # TODO merge renderFarm and render.farm
+ if ("renderFarm" in instance.data["families"] or
+ "render.farm" in instance.data["families"]):
instance.data["remove"] = True
except KeyError:
# be tolerant if 'families' is missing.
diff --git a/openpype/lib/avalon_context.py b/openpype/lib/avalon_context.py
index 139fb7edde..3d57ee4b91 100644
--- a/openpype/lib/avalon_context.py
+++ b/openpype/lib/avalon_context.py
@@ -1967,3 +1967,119 @@ def get_last_workfile(
return os.path.normpath(os.path.join(workdir, filename))
return filename
+
+
+@with_avalon
+def get_linked_ids_for_representations(project_name, repre_ids, dbcon=None,
+ link_type=None, max_depth=0):
+ """Returns list of linked ids of particular type (if provided).
+
+ Goes from representations to versions, and back to representations.
+ Args:
+ project_name (str)
+ repre_ids (list or ObjectId)
+ dbcon (avalon.mongodb.AvalonMongoDB, optional): Avalon Mongo connection
+ with Session.
+ link_type (str, optional): limit to a single link type (eg. 'reference')
+ max_depth (int): limit how many levels of recursion to traverse
+ Returns:
+ (list) of ObjectId - linked representations
+ """
+ # Create new dbcon if not passed and use passed project name
+ if not dbcon:
+ from avalon.api import AvalonMongoDB
+ dbcon = AvalonMongoDB()
+ dbcon.Session["AVALON_PROJECT"] = project_name
+ # Validate that passed dbcon has same project
+ elif dbcon.Session["AVALON_PROJECT"] != project_name:
+ raise ValueError("Passed connection does not have right project")
+
+ if not isinstance(repre_ids, list):
+ repre_ids = [repre_ids]
+
+ version_ids = dbcon.distinct("parent", {
+ "_id": {"$in": repre_ids},
+ "type": "representation"
+ })
+
+ match = {
+ "_id": {"$in": version_ids},
+ "type": "version"
+ }
+
+ graph_lookup = {
+ "from": project_name,
+ "startWith": "$data.inputLinks.id",
+ "connectFromField": "data.inputLinks.id",
+ "connectToField": "_id",
+ "as": "outputs_recursive",
+ "depthField": "depth"
+ }
+ if max_depth != 0:
+ # We offset by -1 since 0 basically means no recursion
+ # but the recursion only happens after the initial lookup
+ # for outputs.
+ graph_lookup["maxDepth"] = max_depth - 1
+
+ pipeline_ = [
+ # Match
+ {"$match": match},
+ # Recursive graph lookup for inputs
+ {"$graphLookup": graph_lookup}
+ ]
+
+ result = dbcon.aggregate(pipeline_)
+ referenced_version_ids = _process_referenced_pipeline_result(result,
+ link_type)
+
+ ref_ids = dbcon.distinct(
+ "_id",
+ filter={
+ "parent": {"$in": list(referenced_version_ids)},
+ "type": "representation"
+ }
+ )
+
+ return list(ref_ids)
+
+
+def _process_referenced_pipeline_result(result, link_type):
+ """Filters result from pipeline for particular link_type.
+
+ Pipeline cannot use link_type directly in a query.
+ Returns:
+ (list)
+ """
+ referenced_version_ids = set()
+ correctly_linked_ids = set()
+ for item in result:
+ input_links = item["data"].get("inputLinks", [])
+ correctly_linked_ids = _filter_input_links(input_links,
+ link_type,
+ correctly_linked_ids)
+
+ # outputs_recursive in random order, sort by depth
+ outputs_recursive = sorted(item.get("outputs_recursive", []),
+ key=lambda d: d["depth"])
+
+ for output in outputs_recursive:
+ if output["_id"] not in correctly_linked_ids: # leaf
+ continue
+
+ correctly_linked_ids = _filter_input_links(
+ output["data"].get("inputLinks", []),
+ link_type,
+ correctly_linked_ids)
+
+ referenced_version_ids.add(output["_id"])
+
+ return referenced_version_ids
+
+
+def _filter_input_links(input_links, link_type, correctly_linked_ids):
+ for input_link in input_links:
+ if not link_type or input_link["type"] == link_type:
+ correctly_linked_ids.add(input_link.get("id") or
+ input_link.get("_id")) # legacy
+
+ return correctly_linked_ids
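
A minimal usage sketch of `get_linked_ids_for_representations`, assuming the default connection path (a new `AvalonMongoDB` is created when `dbcon` is omitted); the project name and representation id are hypothetical:

```python
from bson.objectid import ObjectId

from openpype.lib.avalon_context import get_linked_ids_for_representations

# Hypothetical project and representation id, for illustration only
project_name = "demo_project"
workfile_repre_id = ObjectId("611111111111111111111111")

# Collect representations referenced from the workfile, one level deep
linked_ids = get_linked_ids_for_representations(
    project_name,
    [workfile_repre_id],
    link_type="reference",
    max_depth=1
)
print(linked_ids)  # -> list of ObjectId of linked representations
```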
diff --git a/openpype/lib/log.py b/openpype/lib/log.py
index f33385e0ba..2cdb7ec8e4 100644
--- a/openpype/lib/log.py
+++ b/openpype/lib/log.py
@@ -216,8 +216,8 @@ class PypeLogger:
# Collection name under database in Mongo
log_collection_name = "logs"
- # OPENPYPE_DEBUG
- pype_debug = 0
+ # Logging level - OPENPYPE_LOG_LEVEL
+ log_level = None
# Data same for all record documents
process_data = None
@@ -231,10 +231,7 @@ class PypeLogger:
logger = logging.getLogger(name or "__main__")
- if cls.pype_debug > 0:
- logger.setLevel(logging.DEBUG)
- else:
- logger.setLevel(logging.INFO)
+ logger.setLevel(cls.log_level)
add_mongo_handler = cls.use_mongo_logging
add_console_handler = True
@@ -333,6 +330,9 @@ class PypeLogger:
# Define if should logging to mongo be used
use_mongo_logging = bool(log4mongo is not None)
+ if use_mongo_logging:
+ use_mongo_logging = os.environ.get("OPENPYPE_LOG_TO_SERVER") == "1"
+
# Set mongo id for process (ONLY ONCE)
if use_mongo_logging and cls.mongo_process_id is None:
try:
@@ -357,8 +357,16 @@ class PypeLogger:
# Store result to class definition
cls.use_mongo_logging = use_mongo_logging
- # Define if is in OPENPYPE_DEBUG mode
- cls.pype_debug = int(os.getenv("OPENPYPE_DEBUG") or "0")
+ # Define what is logging level
+ log_level = os.getenv("OPENPYPE_LOG_LEVEL")
+ if not log_level:
+ # Check OPENPYPE_DEBUG for backwards compatibility
+ op_debug = os.getenv("OPENPYPE_DEBUG")
+ if op_debug and int(op_debug) > 0:
+ log_level = 10
+ else:
+ log_level = 20
+ cls.log_level = int(log_level)
if not os.environ.get("OPENPYPE_MONGO"):
cls.use_mongo_logging = False
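
The net effect of the log changes: `OPENPYPE_LOG_LEVEL` now drives the level directly, with `OPENPYPE_DEBUG` kept only as a backwards-compatibility fallback. A sketch mirroring the resolution order above:

```python
import logging
import os

# OPENPYPE_LOG_LEVEL wins when set; accepts numeric levels (0-50)
os.environ["OPENPYPE_LOG_LEVEL"] = "10"  # same as logging.DEBUG

# Without it, OPENPYPE_DEBUG=1 still maps to DEBUG, otherwise INFO (20)
log_level = os.getenv("OPENPYPE_LOG_LEVEL")
if not log_level:
    op_debug = os.getenv("OPENPYPE_DEBUG")
    log_level = 10 if op_debug and int(op_debug) > 0 else 20

assert int(log_level) == logging.DEBUG
```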
diff --git a/openpype/lib/transcoding.py b/openpype/lib/transcoding.py
index c2fecf6628..fcec5d4216 100644
--- a/openpype/lib/transcoding.py
+++ b/openpype/lib/transcoding.py
@@ -382,6 +382,11 @@ def should_convert_for_ffmpeg(src_filepath):
return False
+# Deprecated since 2022-04-20
+# - Reason: Doesn't convert sequences the right way: can't handle gaps,
+#       reuses the first frame for all frames and changes filenames when
+#       the input is a sequence.
+# - Use 'convert_input_paths_for_ffmpeg' instead.
def convert_for_ffmpeg(
first_input_path,
output_dir,
@@ -409,6 +414,12 @@ def convert_for_ffmpeg(
if logger is None:
logger = logging.getLogger(__name__)
+ logger.warning((
+ "DEPRECATED: 'openpype.lib.transcoding.convert_for_ffmpeg' is"
+ " deprecated function of conversion for FFMpeg. Please replace usage"
+ " with 'openpype.lib.transcoding.convert_input_paths_for_ffmpeg'"
+ ))
+
ext = os.path.splitext(first_input_path)[1].lower()
if ext != ".exr":
raise ValueError((
@@ -516,6 +527,130 @@ def convert_for_ffmpeg(
run_subprocess(oiio_cmd, logger=logger)
+def convert_input_paths_for_ffmpeg(
+ input_paths,
+ output_dir,
+ logger=None
+):
+ """Contert source file to format supported in ffmpeg.
+
+ Currently can convert only exrs. The input filepaths should be files
+ with same type. Information about input is loaded only from first found
+ file.
+
+ Filenames of input files are kept so make sure that output directory
+ is not the same directory as input files have.
+ - This way it can handle gaps and can keep input filenames without handling
+ frame template
+
+ Args:
+ input_paths (str): Paths that should be converted. It is expected that
+ contains single file or image sequence of samy type.
+ output_dir (str): Path to directory where output will be rendered.
+ Must not be same as input's directory.
+ logger (logging.Logger): Logger used for logging.
+
+ Raises:
+ ValueError: If input filepath has extension not supported by function.
+ Currently is supported only ".exr" extension.
+ """
+ if logger is None:
+ logger = logging.getLogger(__name__)
+
+ first_input_path = input_paths[0]
+ ext = os.path.splitext(first_input_path)[1].lower()
+ if ext != ".exr":
+ raise ValueError((
+ "Function 'convert_for_ffmpeg' currently support only"
+ " \".exr\" extension. Got \"{}\"."
+ ).format(ext))
+
+ input_info = get_oiio_info_for_input(first_input_path)
+
+ # Change compression only if source compression is "dwaa" or "dwab"
+ # - they're not supported in ffmpeg
+ compression = input_info["attribs"].get("compression")
+ if compression in ("dwaa", "dwab"):
+ compression = "none"
+
+ # Collect channels to export
+ channel_names = input_info["channelnames"]
+ review_channels = get_convert_rgb_channels(channel_names)
+ if review_channels is None:
+ raise ValueError(
+ "Couldn't find channels that can be used for conversion."
+ )
+
+ red, green, blue, alpha = review_channels
+ input_channels = [red, green, blue]
+ channels_arg = "R={},G={},B={}".format(red, green, blue)
+ if alpha is not None:
+ channels_arg += ",A={}".format(alpha)
+ input_channels.append(alpha)
+ input_channels_str = ",".join(input_channels)
+
+ for input_path in input_paths:
+ # Prepare subprocess arguments
+ oiio_cmd = [
+ get_oiio_tools_path(),
+
+ # Don't add any additional attributes
+ "--nosoftwareattrib",
+ ]
+ # Add input compression if available
+ if compression:
+ oiio_cmd.extend(["--compression", compression])
+
+ oiio_cmd.extend([
+ # Tell oiiotool which channels should be loaded
+ # - other channels are not loaded to memory so helps to
+ # avoid memory leak issues
+ "-i:ch={}".format(input_channels_str), input_path,
+ # Tell oiiotool which channels should be put to top stack
+ # (and output)
+ "--ch", channels_arg
+ ])
+
+ for attr_name, attr_value in input_info["attribs"].items():
+ if not isinstance(attr_value, str):
+ continue
+
+            # Remove attributes that have string value longer than allowed
+            #   length for ffmpeg or that contain disallowed symbols
+            erase_reason = "Missing reason"
+            erase_attribute = False
+            if len(attr_value) > MAX_FFMPEG_STRING_LEN:
+                erase_reason = "has too long value ({} chars).".format(
+                    len(attr_value)
+                )
+                erase_attribute = True
+
+            if not erase_attribute:
+                for char in NOT_ALLOWED_FFMPEG_CHARS:
+                    if char in attr_value:
+                        erase_attribute = True
+                        erase_reason = (
+                            "contains unsupported character \"{}\"."
+                        ).format(char)
+                        break
+
+ if erase_attribute:
+ # Set attribute to empty string
+ logger.info((
+ "Removed attribute \"{}\" from metadata because {}."
+ ).format(attr_name, erase_reason))
+ oiio_cmd.extend(["--eraseattrib", attr_name])
+
+ # Add last argument - path to output
+ base_filename = os.path.basename(input_path)
+ output_path = os.path.join(output_dir, base_filename)
+ oiio_cmd.extend([
+ "-o", output_path
+ ])
+
+ logger.debug("Conversion command: {}".format(" ".join(oiio_cmd)))
+ run_subprocess(oiio_cmd, logger=logger)
+
+
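
A hedged usage sketch of `convert_input_paths_for_ffmpeg` on an exr sequence with a frame gap; the paths are hypothetical and the helpers are the ones exported from `openpype.lib` in this diff:

```python
import os

from openpype.lib import (
    convert_input_paths_for_ffmpeg,
    get_transcode_temp_directory,
    should_convert_for_ffmpeg,
)

# Hypothetical exr sequence with a gap (frame 1003 missing)
input_paths = [
    "/renders/shot010/beauty.1001.exr",
    "/renders/shot010/beauty.1002.exr",
    "/renders/shot010/beauty.1004.exr",
]

if should_convert_for_ffmpeg(input_paths[0]):
    convert_dir = get_transcode_temp_directory()
    # Filenames are kept, so the converted frames can be addressed
    # by joining the original basenames with the new directory.
    convert_input_paths_for_ffmpeg(input_paths, convert_dir)
    input_paths = [
        os.path.join(convert_dir, os.path.basename(path))
        for path in input_paths
    ]
```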
# FFMPEG functions
def get_ffprobe_data(path_to_file, logger=None):
"""Load data about entered filepath via ffprobe.
diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py
index 9e0b0be15d..306237c78c 100644
--- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py
+++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py
@@ -285,6 +285,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
else:
payload["JobInfo"]["JobDependency0"] = job["_id"]
+ if instance.data.get("suspend_publish"):
+ payload["JobInfo"]["InitialStatus"] = "Suspended"
+
index = 0
for key in environment:
if key.upper() in self.enviro_filter:
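
The `suspend_publish` branch simply pre-suspends the dependent Deadline job via the standard `InitialStatus` JobInfo key; a sketch with placeholder values:

```python
# Sketch of the payload produced when "suspend_publish" is set on the
# instance; the dependency job id is a placeholder.
payload = {"JobInfo": {"JobDependency0": "62a000000000000000000000"}}

instance_data = {"suspend_publish": True}
if instance_data.get("suspend_publish"):
    # Deadline keeps the job queued-but-inactive until resumed manually
    payload["JobInfo"]["InitialStatus"] = "Suspended"
```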
diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_folders.py b/openpype/modules/ftrack/event_handlers_user/action_create_folders.py
index 0ed12bd03e..81f38e0c39 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_create_folders.py
+++ b/openpype/modules/ftrack/event_handlers_user/action_create_folders.py
@@ -1,6 +1,8 @@
import os
-from openpype_modules.ftrack.lib import BaseAction, statics_icon
+import collections
+import copy
from openpype.api import Anatomy
+from openpype_modules.ftrack.lib import BaseAction, statics_icon
class CreateFolders(BaseAction):
@@ -9,55 +11,59 @@ class CreateFolders(BaseAction):
icon = statics_icon("ftrack", "action_icons", "CreateFolders.svg")
def discover(self, session, entities, event):
- if len(entities) != 1:
- return False
-
- not_allowed = ["assetversion", "project"]
- if entities[0].entity_type.lower() in not_allowed:
- return False
-
- return True
+ for entity_item in event["data"]["selection"]:
+ if entity_item.get("entityType").lower() in ("task", "show"):
+ return True
+ return False
def interface(self, session, entities, event):
if event["data"].get("values", {}):
return
- entity = entities[0]
- without_interface = True
- for child in entity["children"]:
- if child["object_type"]["name"].lower() != "task":
- without_interface = False
+
+ with_interface = False
+ for entity in entities:
+ if entity.entity_type.lower() != "task":
+ with_interface = True
break
- self.without_interface = without_interface
- if without_interface:
+
+ if "values" not in event["data"]:
+ event["data"]["values"] = {}
+
+ event["data"]["values"]["with_interface"] = with_interface
+ if not with_interface:
return
+
title = "Create folders"
entity_name = entity["name"]
msg = (
"
Do you want create folders also"
- " for all children of \"{}\"?
"
+ " for all children of your selection?"
)
if entity.entity_type.lower() == "project":
entity_name = entity["full_name"]
msg = msg.replace(" also", "")
msg += "
(Project root won't be created if not checked)
"
- items = []
- item_msg = {
- "type": "label",
- "value": msg.format(entity_name)
- }
- item_label = {
- "type": "label",
- "value": "With all chilren entities"
- }
- item = {
- "name": "children_included",
- "type": "boolean",
- "value": False
- }
- items.append(item_msg)
- items.append(item_label)
- items.append(item)
+ items = [
+ {
+ "type": "label",
+ "value": msg.format(entity_name)
+ },
+ {
+ "type": "label",
+ "value": "With all chilren entities"
+ },
+ {
+ "name": "children_included",
+ "type": "boolean",
+ "value": False
+ },
+ {
+ "type": "hidden",
+ "name": "with_interface",
+ "value": with_interface
+ }
+ ]
return {
"items": items,
@@ -66,30 +72,47 @@ class CreateFolders(BaseAction):
def launch(self, session, entities, event):
'''Callback method for custom action.'''
+
+ if "values" not in event["data"]:
+ return
+
+ with_interface = event["data"]["values"]["with_interface"]
with_childrens = True
- if self.without_interface is False:
- if "values" not in event["data"]:
- return
+ if with_interface:
with_childrens = event["data"]["values"]["children_included"]
- entity = entities[0]
- if entity.entity_type.lower() == "project":
- proj = entity
- else:
- proj = entity["project"]
- project_name = proj["full_name"]
- project_code = proj["name"]
+ filtered_entities = []
+ for entity in entities:
+ low_context_type = entity["context_type"].lower()
+ if low_context_type in ("task", "show"):
+ if not with_childrens and low_context_type == "show":
+ continue
+ filtered_entities.append(entity)
- if entity.entity_type.lower() == 'project' and with_childrens is False:
+ if not filtered_entities:
return {
- 'success': True,
- 'message': 'Nothing was created'
+ "success": True,
+ "message": 'Nothing was created'
}
- all_entities = []
- all_entities.append(entity)
- if with_childrens:
- all_entities = self.get_notask_children(entity)
+ project_entity = self.get_project_from_entity(filtered_entities[0])
+
+ project_name = project_entity["full_name"]
+ project_code = project_entity["name"]
+
+ task_entities = []
+ other_entities = []
+ self.get_all_entities(
+ session, entities, task_entities, other_entities
+ )
+ hierarchy = self.get_entities_hierarchy(
+ session, task_entities, other_entities
+ )
+ task_types = session.query("select id, name from Type").all()
+ task_type_names_by_id = {
+ task_type["id"]: task_type["name"]
+ for task_type in task_types
+ }
anatomy = Anatomy(project_name)
@@ -97,77 +120,67 @@ class CreateFolders(BaseAction):
work_template = anatomy.templates
for key in work_keys:
work_template = work_template[key]
- work_has_apps = "{app" in work_template
publish_keys = ["publish", "folder"]
publish_template = anatomy.templates
for key in publish_keys:
publish_template = publish_template[key]
- publish_has_apps = "{app" in publish_template
+
+ project_data = {
+ "project": {
+ "name": project_name,
+ "code": project_code
+ }
+ }
collected_paths = []
- for entity in all_entities:
- if entity.entity_type.lower() == "project":
- continue
- ent_data = {
- "project": {
- "name": project_name,
- "code": project_code
- }
- }
+ for item in hierarchy:
+ parent_entity, task_entities = item
- ent_data["asset"] = entity["name"]
+ parent_data = copy.deepcopy(project_data)
- parents = entity["link"][1:-1]
+ parents = parent_entity["link"][1:-1]
hierarchy_names = [p["name"] for p in parents]
- hierarchy = ""
+ hierarchy = "/".join(hierarchy_names)
+
if hierarchy_names:
- hierarchy = os.path.sep.join(hierarchy_names)
- ent_data["hierarchy"] = hierarchy
+ parent_name = hierarchy_names[-1]
+ else:
+ parent_name = project_name
- tasks_created = False
- for child in entity["children"]:
- if child["object_type"]["name"].lower() != "task":
- continue
- tasks_created = True
- task_data = ent_data.copy()
- task_data["task"] = child["name"]
+ parent_data.update({
+ "asset": parent_entity["name"],
+ "hierarchy": hierarchy,
+ "parent": parent_name
+ })
- apps = []
-
- # Template wok
- if work_has_apps:
- app_data = task_data.copy()
- for app in apps:
- app_data["app"] = app
- collected_paths.append(self.compute_template(
- anatomy, app_data, work_keys
- ))
- else:
- collected_paths.append(self.compute_template(
- anatomy, task_data, work_keys
- ))
-
- # Template publish
- if publish_has_apps:
- app_data = task_data.copy()
- for app in apps:
- app_data["app"] = app
- collected_paths.append(self.compute_template(
- anatomy, app_data, publish_keys
- ))
- else:
- collected_paths.append(self.compute_template(
- anatomy, task_data, publish_keys
- ))
-
- if not tasks_created:
+ if not task_entities:
# create path for entity
collected_paths.append(self.compute_template(
- anatomy, ent_data, work_keys
+ anatomy, parent_data, work_keys
))
collected_paths.append(self.compute_template(
- anatomy, ent_data, publish_keys
+ anatomy, parent_data, publish_keys
+ ))
+ continue
+
+ for task_entity in task_entities:
+ task_type_id = task_entity["type_id"]
+ task_type_name = task_type_names_by_id[task_type_id]
+ task_data = copy.deepcopy(parent_data)
+ task_data["task"] = {
+ "name": task_entity["name"],
+ "type": task_type_name
+ }
+
+            # Template work
+ collected_paths.append(self.compute_template(
+ anatomy, task_data, work_keys
+ ))
+
+ # Template publish
+ collected_paths.append(self.compute_template(
+ anatomy, task_data, publish_keys
))
if len(collected_paths) == 0:
@@ -188,14 +201,65 @@ class CreateFolders(BaseAction):
"message": "Successfully created project folders."
}
- def get_notask_children(self, entity):
+ def get_all_entities(
+ self, session, entities, task_entities, other_entities
+ ):
+ if not entities:
+ return
+
+ no_task_entities = []
+ for entity in entities:
+ if entity.entity_type.lower() == "task":
+ task_entities.append(entity)
+ else:
+ no_task_entities.append(entity)
+
+ if not no_task_entities:
+ return task_entities
+
+ other_entities.extend(no_task_entities)
+
+ no_task_entity_ids = [entity["id"] for entity in no_task_entities]
+ next_entities = session.query((
+ "select id, parent_id"
+ " from TypedContext where parent_id in ({})"
+ ).format(self.join_query_keys(no_task_entity_ids))).all()
+
+ self.get_all_entities(
+ session, next_entities, task_entities, other_entities
+ )
+
+ def get_entities_hierarchy(self, session, task_entities, other_entities):
+ task_entity_ids = [entity["id"] for entity in task_entities]
+ full_task_entities = session.query((
+ "select id, name, type_id, parent_id"
+ " from TypedContext where id in ({})"
+ ).format(self.join_query_keys(task_entity_ids)))
+ task_entities_by_parent_id = collections.defaultdict(list)
+ for entity in full_task_entities:
+ parent_id = entity["parent_id"]
+ task_entities_by_parent_id[parent_id].append(entity)
+
output = []
- if entity.entity_type.lower() == "task":
+ if not task_entities_by_parent_id:
return output
- output.append(entity)
- for child in entity["children"]:
- output.extend(self.get_notask_children(child))
+ other_ids = set()
+ for entity in other_entities:
+ other_ids.add(entity["id"])
+ other_ids |= set(task_entities_by_parent_id.keys())
+
+ parent_entities = session.query((
+ "select id, name from TypedContext where id in ({})"
+ ).format(self.join_query_keys(other_ids))).all()
+
+ for parent_entity in parent_entities:
+ parent_id = parent_entity["id"]
+ output.append((
+ parent_entity,
+ task_entities_by_parent_id[parent_id]
+ ))
+
return output
def compute_template(self, anatomy, data, anatomy_keys):
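
The notable data change above is that `task` is now a mapping (`name`, `type`) rather than a plain string, so anatomy templates can resolve both `{task[name]}` and `{task[type]}`. A sketch with made-up values:

```python
import copy

# Sketch of the fill data now handed to anatomy templates; values are
# illustrative.
parent_data = {
    "project": {"name": "demo_project", "code": "demo"},
    "asset": "sh010",
    "hierarchy": "shots/sq01",
    "parent": "sq01",
}

task_data = copy.deepcopy(parent_data)
task_data["task"] = {"name": "compositing", "type": "Compositing"}

# A work template such as this can now also resolve "{task[type]}"
template = "{hierarchy}/{asset}/{task[name]}"
print(template.format(**task_data))  # shots/sq01/sh010/compositing
```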
diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py b/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py
index 94f359c317..ebea8872f9 100644
--- a/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py
+++ b/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py
@@ -1,6 +1,4 @@
-import os
import re
-import json
from openpype_modules.ftrack.lib import BaseAction, statics_icon
from openpype.api import get_project_basic_paths, create_project_folders
diff --git a/openpype/modules/ftrack/ftrack_server/lib.py b/openpype/modules/ftrack/ftrack_server/lib.py
index e89113a86c..5c6d6352d2 100644
--- a/openpype/modules/ftrack/ftrack_server/lib.py
+++ b/openpype/modules/ftrack/ftrack_server/lib.py
@@ -31,10 +31,13 @@ TOPIC_STATUS_SERVER = "openpype.event.server.status"
TOPIC_STATUS_SERVER_RESULT = "openpype.event.server.status.result"
-def check_ftrack_url(url, log_errors=True):
+def check_ftrack_url(url, log_errors=True, logger=None):
"""Checks if Ftrack server is responding"""
+ if logger is None:
+ logger = Logger.get_logger(__name__)
+
if not url:
- print('ERROR: Ftrack URL is not set!')
+ logger.error("Ftrack URL is not set!")
return None
url = url.strip('/ ')
@@ -48,15 +51,15 @@ def check_ftrack_url(url, log_errors=True):
result = requests.get(url, allow_redirects=False)
except requests.exceptions.RequestException:
if log_errors:
- print('ERROR: Entered Ftrack URL is not accesible!')
+ logger.error("Entered Ftrack URL is not accesible!")
return False
if (result.status_code != 200 or 'FTRACK_VERSION' not in result.headers):
if log_errors:
- print('ERROR: Entered Ftrack URL is not accesible!')
+ logger.error("Entered Ftrack URL is not accesible!")
return False
- print('DEBUG: Ftrack server {} is accessible.'.format(url))
+ logger.debug("Ftrack server {} is accessible.".format(url))
return url
@@ -133,7 +136,7 @@ class ProcessEventHub(SocketBaseEventHub):
hearbeat_msg = b"processor"
is_collection_created = False
- pypelog = Logger().get_logger("Session Processor")
+ pypelog = Logger.get_logger("Session Processor")
def __init__(self, *args, **kwargs):
self.mongo_url = None
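
A small sketch of the new `logger` parameter of `check_ftrack_url`; the URL is a placeholder and the import path follows the `openpype_modules` alias used elsewhere in this diff:

```python
from openpype.api import Logger
from openpype_modules.ftrack.ftrack_server.lib import check_ftrack_url

# Callers can now route messages into their own logger instead of bare
# print() output; when omitted, a module logger is created internally.
log = Logger.get_logger("ftrack_healthcheck")
url = check_ftrack_url("https://studio.ftrackapp.com", logger=log)
if not url:
    log.warning("Ftrack server is not reachable")
```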
diff --git a/openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py b/openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py
index 82a79daf3b..cdc37588cd 100644
--- a/openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py
+++ b/openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py
@@ -119,7 +119,7 @@ class OpenPypeContextSelector:
# app names and versions, but since app_name is not used
# currently down the line (but it is required by OP publish command
# right now).
- self.context["app_name"] = "maya/2020"
+ # self.context["app_name"] = "maya/2022"
return True
@staticmethod
@@ -139,7 +139,8 @@ class OpenPypeContextSelector:
env = {"AVALON_PROJECT": str(self.context.get("project")),
"AVALON_ASSET": str(self.context.get("asset")),
"AVALON_TASK": str(self.context.get("task")),
- "AVALON_APP_NAME": str(self.context.get("app_name"))}
+ # "AVALON_APP_NAME": str(self.context.get("app_name"))
+ }
print(">>> setting environment:")
for k, v in env.items():
@@ -184,7 +185,7 @@ selector = OpenPypeContextSelector()
selector.context["project"] = os.getenv("AVALON_PROJECT")
selector.context["asset"] = os.getenv("AVALON_ASSET")
selector.context["task"] = os.getenv("AVALON_TASK")
-selector.context["app_name"] = os.getenv("AVALON_APP_NAME")
+# selector.context["app_name"] = os.getenv("AVALON_APP_NAME")
# if anything inside is None, scratch the whole thing and
# ask user for context.
diff --git a/openpype/modules/sync_server/sync_server_module.py b/openpype/modules/sync_server/sync_server_module.py
index 7d4e3ccc96..45ff8bc4d1 100644
--- a/openpype/modules/sync_server/sync_server_module.py
+++ b/openpype/modules/sync_server/sync_server_module.py
@@ -4,7 +4,7 @@ from datetime import datetime
import threading
import platform
import copy
-from collections import deque
+from collections import deque, defaultdict
from openpype.modules import OpenPypeModule
@@ -25,7 +25,7 @@ from openpype.settings.lib import (
from .providers.local_drive import LocalDriveHandler
from .providers import lib
-from .utils import time_function, SyncStatus
+from .utils import time_function, SyncStatus, SiteAlreadyPresentError
log = PypeLogger().get_logger("SyncServer")
@@ -133,21 +133,25 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
def add_site(self, collection, representation_id, site_name=None,
force=False):
"""
- Adds new site to representation to be synced.
+ Adds new site to representation to be synced.
- 'collection' must have synchronization enabled (globally or
- project only)
+ 'collection' must have synchronization enabled (globally or
+ project only)
- Used as a API endpoint from outside applications (Loader etc)
+        Used as an API endpoint from outside applications (Loader etc).
- Args:
- collection (string): project name (must match DB)
- representation_id (string): MongoDB _id value
- site_name (string): name of configured and active site
- force (bool): reset site if exists
+ Use 'force' to reset existing site.
- Returns:
- throws ValueError if any issue
+ Args:
+ collection (string): project name (must match DB)
+ representation_id (string): MongoDB _id value
+ site_name (string): name of configured and active site
+ force (bool): reset site if exists
+
+ Throws:
+ SiteAlreadyPresentError - if adding already existing site and
+ not 'force'
+ ValueError - other errors (repre not found, misconfiguration)
"""
if not self.get_sync_project_setting(collection):
raise ValueError("Project not configured")
@@ -157,9 +161,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
self.reset_site_on_representation(collection,
representation_id,
- site_name=site_name, force=force)
+ site_name=site_name,
+ force=force)
- # public facing API
def remove_site(self, collection, representation_id, site_name,
remove_local_files=False):
"""
@@ -186,6 +190,151 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
if remove_local_files:
self._remove_local_file(collection, representation_id, site_name)
+ def compute_resource_sync_sites(self, project_name):
+ """Get available resource sync sites state for publish process.
+
+ Returns dict with prepared state of sync sites for 'project_name'.
+ It checks if Site Sync is enabled, handles alternative sites.
+ Publish process stores this dictionary as a part of representation
+ document in DB.
+
+ Example:
+ [
+ {
+ 'name': '42abbc09-d62a-44a4-815c-a12cd679d2d7',
+ 'created_dt': datetime.datetime(2022, 3, 30, 12, 16, 9, 778637)
+ },
+ {'name': 'studio'},
+ {'name': 'SFTP'}
+        ] -- the representation is published locally; the artist or Settings
+        set the remote site to 'studio'. 'SFTP' is an alternate site of
+        'studio', i.e. whenever a file is on 'studio', it is also on 'SFTP'.
+ """
+
+ def create_metadata(name, created=True):
+ """Create sync site metadata for site with `name`"""
+ metadata = {"name": name}
+ if created:
+ metadata["created_dt"] = datetime.now()
+ return metadata
+
+ if (
+ not self.sync_system_settings["enabled"] or
+ not self.sync_project_settings[project_name]["enabled"]):
+ return [create_metadata(self.DEFAULT_SITE)]
+
+ local_site = self.get_active_site(project_name)
+ remote_site = self.get_remote_site(project_name)
+
+ # Attached sites metadata by site name
+        # That is the local site, remote site, the always accessible sites
+ # and their alternate sites (alias of sites with different protocol)
+ attached_sites = dict()
+ attached_sites[local_site] = create_metadata(local_site)
+
+ if remote_site and remote_site not in attached_sites:
+ attached_sites[remote_site] = create_metadata(remote_site,
+ created=False)
+
+ attached_sites = self._add_alternative_sites(attached_sites)
+ # add skeleton for sites where it should be always synced to
+ # usually it would be a backup site which is handled by separate
+ # background process
+ for site in self._get_always_accessible_sites(project_name):
+ if site not in attached_sites:
+ attached_sites[site] = create_metadata(site, created=False)
+
+ return list(attached_sites.values())
+
+ def _get_always_accessible_sites(self, project_name):
+ """Sites that synced to as a part of background process.
+
+ Artist machine doesn't handle those, explicit Tray with that site name
+ as a local id must be running.
+ Example is dropbox site serving as a backup solution
+ """
+ always_accessible_sites = (
+ self.get_sync_project_setting(project_name)["config"].
+ get("always_accessible_on", [])
+ )
+ return [site.strip() for site in always_accessible_sites]
+
+ def _add_alternative_sites(self, attached_sites):
+ """Add skeleton document for alternative sites
+
+ Each new configured site in System Setting could serve as a alternative
+ site, it's a kind of alias. It means that files on 'a site' are
+ physically accessible also on 'a alternative' site.
+ Example is sftp site serving studio files via sftp protocol, physically
+ file is only in studio, sftp server has this location mounted.
+ """
+ additional_sites = self.sync_system_settings.get("sites", {})
+
+ alt_site_pairs = self._get_alt_site_pairs(additional_sites)
+
+ for site_name in additional_sites.keys():
+ # Get alternate sites (stripped names) for this site name
+ alt_sites = alt_site_pairs.get(site_name)
+ alt_sites = [site.strip() for site in alt_sites]
+ alt_sites = set(alt_sites)
+
+ # If no alternative sites we don't need to add
+ if not alt_sites:
+ continue
+
+ # Take a copy of data of the first alternate site that is already
+ # defined as an attached site to match the same state.
+ match_meta = next((attached_sites[site] for site in alt_sites
+ if site in attached_sites), None)
+ if not match_meta:
+ continue
+
+ alt_site_meta = copy.deepcopy(match_meta)
+ alt_site_meta["name"] = site_name
+
+ # Note: We change mutable `attached_site` dict in-place
+ attached_sites[site_name] = alt_site_meta
+
+ return attached_sites
+
+ def _get_alt_site_pairs(self, conf_sites):
+ """Returns dict of site and its alternative sites.
+
+        If 'site' has an alternative site, the alternative site also has
+        'site' as its alternative; the relation is symmetric.
+
+        Args:
+            conf_sites (dict)
+        Returns:
+            (dict): {'site': [alternative sites]...}
+ """
+ alt_site_pairs = defaultdict(set)
+ for site_name, site_info in conf_sites.items():
+ alt_sites = set(site_info.get("alternative_sites", []))
+ alt_site_pairs[site_name].update(alt_sites)
+
+ for alt_site in alt_sites:
+ alt_site_pairs[alt_site].add(site_name)
+
+ for site_name, alt_sites in alt_site_pairs.items():
+ sites_queue = deque(alt_sites)
+ while sites_queue:
+ alt_site = sites_queue.popleft()
+
+ # safety against wrong config
+ # {"SFTP": {"alternative_site": "SFTP"}
+ if alt_site == site_name or alt_site not in alt_site_pairs:
+ continue
+
+ for alt_alt_site in alt_site_pairs[alt_site]:
+ if (
+ alt_alt_site != site_name
+ and alt_alt_site not in alt_sites
+ ):
+ alt_sites.add(alt_alt_site)
+ sites_queue.append(alt_alt_site)
+
+ return alt_site_pairs
+
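
A worked example of the alt-site pair expansion: the loop closes the relation symmetrically and transitively, so chained aliases all see each other. Site names are made up:

```python
# Made-up System Settings sites: SFTP is an alternative of studio,
# and SFTP_backup is an alternative of SFTP.
conf_sites = {
    "studio": {"alternative_sites": ["SFTP"]},
    "SFTP": {"alternative_sites": ["SFTP_backup"]},
    "SFTP_backup": {},
}

# _get_alt_site_pairs(conf_sites) resolves the symmetric, transitive
# closure of the relation:
# {
#     "studio": {"SFTP", "SFTP_backup"},
#     "SFTP": {"studio", "SFTP_backup"},
#     "SFTP_backup": {"SFTP", "studio"},
# }
```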
def clear_project(self, collection, site_name):
"""
Clear 'collection' of 'site_name' and its local files
@@ -209,36 +358,38 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
def create_validate_project_task(self, collection, site_name):
"""Adds metadata about project files validation on a queue.
- This process will loop through all representation and check if
- their files actually exist on an active site.
+        This process will loop through all representations and check if
+        their files actually exist on an active site.
- This might be useful for edge cases when artists is switching
- between sites, remote site is actually physically mounted and
- active site has same file urls etc.
+        It also checks if a site is set in DB but the file is physically
+        not present.
- Task will run on a asyncio loop, shouldn't be blocking.
+        This might be useful for edge cases when an artist is switching
+        between sites, the remote site is actually physically mounted and
+        the active site has the same file urls etc.
+
+        Task will run on an asyncio loop, shouldn't be blocking.
"""
task = {
"type": "validate",
"project_name": collection,
- "func": lambda: self.validate_project(collection, site_name)
+ "func": lambda: self.validate_project(collection, site_name,
+ reset_missing=True)
}
self.projects_processed.add(collection)
self.long_running_tasks.append(task)
- def validate_project(self, collection, site_name, remove_missing=False):
- """
- Validate 'collection' of 'site_name' and its local files
+ def validate_project(self, collection, site_name, reset_missing=False):
+ """Validate 'collection' of 'site_name' and its local files
- If file present and not marked with a 'site_name' in DB, DB is
- updated with site name and file modified date.
+        If a file is present and not marked with 'site_name' in DB, the DB
+        is updated with the site name and file modified date.
- Args:
- module (SyncServerModule)
- collection (string): project name
- site_name (string): active site name
- remove_missing (bool): if True remove sites in DB if missing
- physically
+ Args:
+ collection (string): project name
+ site_name (string): active site name
+ reset_missing (bool): if True reset site in DB if missing
+ physically
"""
self.log.debug("Validation of {} for {} started".format(collection,
site_name))
@@ -253,29 +404,32 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
return
sites_added = 0
- sites_removed = 0
+ sites_reset = 0
for repre in representations:
repre_id = repre["_id"]
for repre_file in repre.get("files", []):
try:
- has_site = site_name in [site["name"]
- for site in repre_file["sites"]]
- except TypeError:
+ is_on_site = site_name in [site["name"]
+ for site in repre_file["sites"]
+ if (site.get("created_dt") and
+ not site.get("error"))]
+ except (TypeError, AttributeError):
self.log.debug("Structure error in {}".format(repre_id))
continue
- if has_site and not remove_missing:
- continue
-
file_path = repre_file.get("path", "")
local_file_path = self.get_local_file_path(collection,
site_name,
file_path)
- if local_file_path and os.path.exists(local_file_path):
- self.log.debug("Adding site {} for {}".format(site_name,
- repre_id))
- if not has_site:
+ file_exists = (local_file_path and
+ os.path.exists(local_file_path))
+ if not is_on_site:
+ if file_exists:
+ self.log.debug(
+ "Adding site {} for {}".format(site_name,
+ repre_id))
+
query = {
"_id": repre_id
}
@@ -283,27 +437,27 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
os.path.getmtime(local_file_path))
elem = {"name": site_name,
"created_dt": created_dt}
- self._add_site(collection, query, [repre], elem,
+ self._add_site(collection, query, repre, elem,
site_name=site_name,
- file_id=repre_file["_id"])
+ file_id=repre_file["_id"],
+ force=True)
sites_added += 1
else:
- if has_site and remove_missing:
- self.log.debug("Removing site {} for {}".
+ if not file_exists and reset_missing:
+ self.log.debug("Resetting site {} for {}".
format(site_name, repre_id))
- self.reset_provider_for_file(collection,
- repre_id,
- file_id=repre_file["_id"],
- remove=True)
- sites_removed += 1
+ self.reset_site_on_representation(
+ collection, repre_id, site_name=site_name,
+ file_id=repre_file["_id"])
+ sites_reset += 1
if sites_added % 100 == 0:
self.log.debug("Sites added {}".format(sites_added))
self.log.debug("Validation of {} for {} ended".format(collection,
site_name))
- self.log.info("Sites added {}, sites removed {}".format(sites_added,
- sites_removed))
+ self.log.info("Sites added {}, sites reset {}".format(sites_added,
+ reset_missing))
def pause_representation(self, collection, representation_id, site_name):
"""
@@ -821,7 +975,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
self.log.debug("Adding alternate {} to {}".format(
alt_site, representation["_id"]))
self._add_site(collection, query,
- [representation], elem,
+ representation, elem,
alt_site, file_id=file_id, force=True)
""" End of Public API """
@@ -1425,14 +1579,16 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
pause (bool or None): if True - pause, False - unpause
force (bool): hard reset - currently only for add_site
- Returns:
- throws ValueError
+ Raises:
+ SiteAlreadyPresentError - if adding already existing site and
+ not 'force'
+ ValueError - other errors (repre not found, misconfiguration)
"""
query = {
"_id": ObjectId(representation_id)
}
- representation = list(self.connection.database[collection].find(query))
+ representation = self.connection.database[collection].find_one(query)
if not representation:
raise ValueError("Representation {} not found in {}".
format(representation_id, collection))
@@ -1463,7 +1619,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
representation, site_name, pause)
else: # add new site to all files for representation
self._add_site(collection, query, representation, elem, site_name,
- force)
+ force=force)
def _update_site(self, collection, query, update, arr_filter):
"""
@@ -1518,7 +1674,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
Throws ValueError if 'site_name' not found on 'representation'
"""
found = False
- for repre_file in representation.pop().get("files"):
+ for repre_file in representation.get("files"):
for site in repre_file.get("sites"):
if site.get("name") == site_name:
found = True
@@ -1544,7 +1700,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
"""
found = False
site = None
- for repre_file in representation.pop().get("files"):
+ for repre_file in representation.get("files"):
for site in repre_file.get("sites"):
if site["name"] == site_name:
found = True
@@ -1576,29 +1732,34 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
Adds 'site_name' to 'representation' on 'collection'
Args:
- representation (list of 1 dict)
+ representation (dict)
file_id (ObjectId)
Use 'force' to remove existing or raises ValueError
"""
- reseted_existing = False
- for repre_file in representation.pop().get("files"):
+ reset_existing = False
+ files = representation.get("files", [])
+ if not files:
+ log.debug("No files for {}".format(representation["_id"]))
+ return
+
+ for repre_file in files:
if file_id and file_id != repre_file["_id"]:
continue
for site in repre_file.get("sites"):
if site["name"] == site_name:
- if force:
+ if force or site.get("error"):
self._reset_site_for_file(collection, query,
elem, repre_file["_id"],
site_name)
- reseted_existing = True
+ reset_existing = True
else:
msg = "Site {} already present".format(site_name)
log.info(msg)
- raise ValueError(msg)
+ raise SiteAlreadyPresentError(msg)
- if reseted_existing:
+ if reset_existing:
return
if not file_id:
@@ -1762,7 +1923,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
(int) - number of failed attempts
"""
_, rec = self._get_site_rec(file.get("sites", []), provider)
- return rec.get("tries", 0)
+ return self._get_tries_count_from_rec(rec)
def _get_progress_dict(self, progress):
"""
diff --git a/openpype/modules/sync_server/utils.py b/openpype/modules/sync_server/utils.py
index 85e4e03f77..03f362202f 100644
--- a/openpype/modules/sync_server/utils.py
+++ b/openpype/modules/sync_server/utils.py
@@ -8,6 +8,11 @@ class ResumableError(Exception):
pass
+class SiteAlreadyPresentError(Exception):
+ """Representation has already site skeleton present."""
+ pass
+
+
class SyncStatus:
DO_NOTHING = 0
DO_UPLOAD = 1
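
`SiteAlreadyPresentError` lets callers distinguish "skeleton already there" from real failures; a sketch matching how `AddSyncSite` consumes it later in this diff:

```python
from openpype.modules.sync_server.utils import SiteAlreadyPresentError

def add_site_once(sync_server, project_name, repre_id, site_name):
    """Add a site skeleton but tolerate it already being there."""
    try:
        sync_server.add_site(project_name, repre_id, site_name, force=False)
    except SiteAlreadyPresentError:
        # Not an error for linked representations - keep the existing state
        pass
    except ValueError:
        # Representation missing or project misconfigured - re-raise
        raise
```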
diff --git a/openpype/pipeline/__init__.py b/openpype/pipeline/__init__.py
index e67b21105c..2e441fbf27 100644
--- a/openpype/pipeline/__init__.py
+++ b/openpype/pipeline/__init__.py
@@ -12,7 +12,6 @@ from .create import (
Creator,
AutoCreator,
CreatedInstance,
-
CreatorError,
LegacyCreator,
@@ -102,6 +101,7 @@ __all__ = (
"Creator",
"AutoCreator",
"CreatedInstance",
+ "CreatorError",
"CreatorError",
diff --git a/openpype/pipeline/create/creator_plugins.py b/openpype/pipeline/create/creator_plugins.py
index 36bccd427e..cbe19da064 100644
--- a/openpype/pipeline/create/creator_plugins.py
+++ b/openpype/pipeline/create/creator_plugins.py
@@ -89,7 +89,9 @@ class BaseCreator:
@property
def log(self):
if self._log is None:
- self._log = logging.getLogger(self.__class__.__name__)
+ from openpype.api import Logger
+
+ self._log = Logger.get_logger(self.__class__.__name__)
return self._log
def _add_instance_to_context(self, instance):
diff --git a/openpype/plugins/load/add_site.py b/openpype/plugins/load/add_site.py
index 95001691e2..55fda55d17 100644
--- a/openpype/plugins/load/add_site.py
+++ b/openpype/plugins/load/add_site.py
@@ -1,9 +1,19 @@
from openpype.modules import ModulesManager
from openpype.pipeline import load
+from openpype.lib.avalon_context import get_linked_ids_for_representations
+from openpype.modules.sync_server.utils import SiteAlreadyPresentError
class AddSyncSite(load.LoaderPlugin):
- """Add sync site to representation"""
+ """Add sync site to representation
+
+    If the family of the synced representation is 'workfile', it looks for
+    all representations which are referenced (loaded) in the workfile via
+    the content of 'inputLinks'.
+    It doesn't do any checks for the site. The most common use case is an
+    artist downloading a workfile to their local site, but it might also be
+    helpful when an artist is re-uploading a broken representation on the
+    remote site.
+ """
representations = ["*"]
families = ["*"]
@@ -12,21 +22,42 @@ class AddSyncSite(load.LoaderPlugin):
icon = "download"
color = "#999999"
+ _sync_server = None
+ is_add_site_loader = True
+
+ @property
+ def sync_server(self):
+ if not self._sync_server:
+ manager = ModulesManager()
+ self._sync_server = manager.modules_by_name["sync_server"]
+
+ return self._sync_server
+
def load(self, context, name=None, namespace=None, data=None):
self.log.info("Adding {} to representation: {}".format(
data["site_name"], data["_id"]))
- self.add_site_to_representation(data["project_name"],
- data["_id"],
- data["site_name"])
- self.log.debug("Site added.")
+ family = context["representation"]["context"]["family"]
+ project_name = data["project_name"]
+ repre_id = data["_id"]
+ site_name = data["site_name"]
- @staticmethod
- def add_site_to_representation(project_name, representation_id, site_name):
- """Adds new site to representation_id, resets if exists"""
- manager = ModulesManager()
- sync_server = manager.modules_by_name["sync_server"]
- sync_server.add_site(project_name, representation_id, site_name,
- force=True)
+ self.sync_server.add_site(project_name, repre_id, site_name,
+ force=True)
+
+ if family == "workfile":
+ links = get_linked_ids_for_representations(project_name,
+ [repre_id],
+ link_type="reference")
+ for link_repre_id in links:
+ try:
+ self.sync_server.add_site(project_name, link_repre_id,
+ site_name,
+ force=False)
+ except SiteAlreadyPresentError:
+ # do not add/reset working site for references
+ self.log.debug("Site present", exc_info=True)
+
+ self.log.debug("Site added.")
def filepath_from_context(self, context):
"""No real file loading"""
diff --git a/openpype/plugins/load/remove_site.py b/openpype/plugins/load/remove_site.py
index adffec9986..c5f442b2f5 100644
--- a/openpype/plugins/load/remove_site.py
+++ b/openpype/plugins/load/remove_site.py
@@ -12,22 +12,26 @@ class RemoveSyncSite(load.LoaderPlugin):
icon = "download"
color = "#999999"
+ _sync_server = None
+ is_remove_site_loader = True
+
+ @property
+ def sync_server(self):
+ if not self._sync_server:
+ manager = ModulesManager()
+ self._sync_server = manager.modules_by_name["sync_server"]
+
+ return self._sync_server
+
def load(self, context, name=None, namespace=None, data=None):
self.log.info("Removing {} on representation: {}".format(
data["site_name"], data["_id"]))
- self.remove_site_on_representation(data["project_name"],
- data["_id"],
- data["site_name"])
+ self.sync_server.remove_site(data["project_name"],
+ data["_id"],
+ data["site_name"],
+ True)
self.log.debug("Site added.")
- @staticmethod
- def remove_site_on_representation(project_name, representation_id,
- site_name):
- manager = ModulesManager()
- sync_server = manager.modules_by_name["sync_server"]
- sync_server.remove_site(project_name, representation_id,
- site_name, True)
-
def filepath_from_context(self, context):
"""No real file loading"""
return ""
diff --git a/openpype/plugins/publish/collect_from_create_context.py b/openpype/plugins/publish/collect_from_create_context.py
index b2f757f108..f6ead98809 100644
--- a/openpype/plugins/publish/collect_from_create_context.py
+++ b/openpype/plugins/publish/collect_from_create_context.py
@@ -26,7 +26,7 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin):
# Update global data to context
context.data.update(create_context.context_data_to_store())
-
+ context.data["newPublishing"] = True
# Update context data
for key in ("AVALON_PROJECT", "AVALON_ASSET", "AVALON_TASK"):
value = create_context.dbcon.Session.get(key)
diff --git a/openpype/plugins/publish/extract_burnin.py b/openpype/plugins/publish/extract_burnin.py
index 41c84103a6..544c763b52 100644
--- a/openpype/plugins/publish/extract_burnin.py
+++ b/openpype/plugins/publish/extract_burnin.py
@@ -16,7 +16,7 @@ from openpype.lib import (
run_openpype_process,
get_transcode_temp_directory,
- convert_for_ffmpeg,
+ convert_input_paths_for_ffmpeg,
should_convert_for_ffmpeg,
CREATE_NO_WINDOW
@@ -187,8 +187,13 @@ class ExtractBurnin(openpype.api.Extractor):
repre_files = repre["files"]
if isinstance(repre_files, (tuple, list)):
filename = repre_files[0]
+ src_filepaths = [
+ os.path.join(src_repre_staging_dir, filename)
+ for filename in repre_files
+ ]
else:
filename = repre_files
+ src_filepaths = [os.path.join(src_repre_staging_dir, filename)]
first_input_path = os.path.join(src_repre_staging_dir, filename)
# Determine if representation requires pre conversion for ffmpeg
@@ -209,11 +214,9 @@ class ExtractBurnin(openpype.api.Extractor):
new_staging_dir = get_transcode_temp_directory()
repre["stagingDir"] = new_staging_dir
- convert_for_ffmpeg(
- first_input_path,
+ convert_input_paths_for_ffmpeg(
+ src_filepaths,
new_staging_dir,
- _temp_data["frameStart"],
- _temp_data["frameEnd"],
self.log
)
diff --git a/openpype/plugins/publish/extract_jpeg_exr.py b/openpype/plugins/publish/extract_jpeg_exr.py
index 468ed96199..d6d6854092 100644
--- a/openpype/plugins/publish/extract_jpeg_exr.py
+++ b/openpype/plugins/publish/extract_jpeg_exr.py
@@ -8,7 +8,7 @@ from openpype.lib import (
path_to_subprocess_arg,
get_transcode_temp_directory,
- convert_for_ffmpeg,
+ convert_input_paths_for_ffmpeg,
should_convert_for_ffmpeg
)
@@ -79,11 +79,9 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
if do_convert:
convert_dir = get_transcode_temp_directory()
filename = os.path.basename(full_input_path)
- convert_for_ffmpeg(
- full_input_path,
+ convert_input_paths_for_ffmpeg(
+ [full_input_path],
convert_dir,
- None,
- None,
self.log
)
full_input_path = os.path.join(convert_dir, filename)
diff --git a/openpype/plugins/publish/extract_review.py b/openpype/plugins/publish/extract_review.py
index d569d82762..f2473839d9 100644
--- a/openpype/plugins/publish/extract_review.py
+++ b/openpype/plugins/publish/extract_review.py
@@ -18,7 +18,7 @@ from openpype.lib import (
path_to_subprocess_arg,
should_convert_for_ffmpeg,
- convert_for_ffmpeg,
+ convert_input_paths_for_ffmpeg,
get_transcode_temp_directory
)
import speedcopy
@@ -194,16 +194,20 @@ class ExtractReview(pyblish.api.InstancePlugin):
src_repre_staging_dir = repre["stagingDir"]
# Receive filepath to first file in representation
first_input_path = None
+ input_filepaths = []
if not self.input_is_sequence(repre):
first_input_path = os.path.join(
src_repre_staging_dir, repre["files"]
)
+ input_filepaths.append(first_input_path)
else:
for filename in repre["files"]:
- first_input_path = os.path.join(
+ filepath = os.path.join(
src_repre_staging_dir, filename
)
- break
+ input_filepaths.append(filepath)
+ if first_input_path is None:
+ first_input_path = filepath
# Skip if file is not set
if first_input_path is None:
@@ -230,13 +234,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
new_staging_dir = get_transcode_temp_directory()
repre["stagingDir"] = new_staging_dir
- frame_start = instance.data["frameStart"]
- frame_end = instance.data["frameEnd"]
- convert_for_ffmpeg(
- first_input_path,
+ convert_input_paths_for_ffmpeg(
+ input_filepaths,
new_staging_dir,
- frame_start,
- frame_end,
self.log
)
diff --git a/openpype/plugins/publish/integrate_new.py b/openpype/plugins/publish/integrate_new.py
index 891d47f471..bf13a4050e 100644
--- a/openpype/plugins/publish/integrate_new.py
+++ b/openpype/plugins/publish/integrate_new.py
@@ -113,7 +113,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"usdOverride",
"simpleUnrealTexture"
]
- exclude_families = ["clip"]
+ exclude_families = ["clip", "render.farm"]
db_representation_context_keys = [
"project", "asset", "task", "subset", "version", "representation",
"family", "hierarchy", "task", "username"
@@ -131,11 +131,15 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
subset_grouping_profiles = None
def process(self, instance):
- self.integrated_file_sizes = {}
- if [ef for ef in self.exclude_families
- if instance.data["family"] in ef]:
- return
+ for ef in self.exclude_families:
+ if (
+ instance.data["family"] == ef or
+ ef in instance.data["families"]):
+ self.log.debug("Excluded family '{}' in '{}' or {}".format(
+ ef, instance.data["family"], instance.data["families"]))
+ return
+ self.integrated_file_sizes = {}
try:
self.register(instance)
self.log.info("Integrated Asset in to the database ...")
@@ -228,7 +232,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
# Ensure at least one file is set up for transfer in staging dir.
repres = instance.data.get("representations")
- assert repres, "Instance has no files to transfer"
+ msg = "Instance {} has no files to transfer".format(
+ instance.data["family"])
+ assert repres, msg
assert isinstance(repres, (list, tuple)), (
"Instance 'files' must be a list, got: {0} {1}".format(
str(type(repres)), str(repres)
diff --git a/openpype/pype_commands.py b/openpype/pype_commands.py
index e0c8847040..d945a1f697 100644
--- a/openpype/pype_commands.py
+++ b/openpype/pype_commands.py
@@ -25,7 +25,7 @@ class PypeCommands:
Most of its methods are called by :mod:`cli` module.
"""
@staticmethod
- def launch_tray(debug=False):
+ def launch_tray():
PypeLogger.set_process_name("Tray")
from openpype.tools import tray
@@ -125,13 +125,14 @@ class PypeCommands:
if not any(paths):
raise RuntimeError("No publish paths specified")
- env = get_app_environments_for_context(
- os.environ["AVALON_PROJECT"],
- os.environ["AVALON_ASSET"],
- os.environ["AVALON_TASK"],
- os.environ["AVALON_APP_NAME"]
- )
- os.environ.update(env)
+ if os.getenv("AVALON_APP_NAME"):
+ env = get_app_environments_for_context(
+ os.environ["AVALON_PROJECT"],
+ os.environ["AVALON_ASSET"],
+ os.environ["AVALON_TASK"],
+ os.environ["AVALON_APP_NAME"]
+ )
+ os.environ.update(env)
pyblish.api.register_host("shell")
diff --git a/openpype/settings/defaults/project_anatomy/imageio.json b/openpype/settings/defaults/project_anatomy/imageio.json
index 1c86509155..7a3f49452e 100644
--- a/openpype/settings/defaults/project_anatomy/imageio.json
+++ b/openpype/settings/defaults/project_anatomy/imageio.json
@@ -185,8 +185,8 @@
"linux": []
},
"renderSpace": "ACEScg",
- "viewName": "ACES 1.0 SDR-video",
- "displayName": "sRGB"
+ "displayName": "sRGB",
+ "viewName": "ACES 1.0 SDR-video"
},
"colorManagementPreference": {
"configFilePath": {
diff --git a/openpype/settings/defaults/project_settings/aftereffects.json b/openpype/settings/defaults/project_settings/aftereffects.json
index 6a9a399069..8083aa0972 100644
--- a/openpype/settings/defaults/project_settings/aftereffects.json
+++ b/openpype/settings/defaults/project_settings/aftereffects.json
@@ -1,4 +1,11 @@
{
+ "create": {
+ "RenderCreator": {
+ "defaults": [
+ "Main"
+ ]
+ }
+ },
"publish": {
"ValidateSceneSettings": {
"enabled": true,
diff --git a/openpype/settings/defaults/project_settings/ftrack.json b/openpype/settings/defaults/project_settings/ftrack.json
index deade08c0b..a846a596c2 100644
--- a/openpype/settings/defaults/project_settings/ftrack.json
+++ b/openpype/settings/defaults/project_settings/ftrack.json
@@ -349,6 +349,18 @@
"tasks": [],
"add_ftrack_family": true,
"advanced_filtering": []
+ },
+ {
+ "hosts": [
+ "photoshop"
+ ],
+ "families": [
+ "review"
+ ],
+ "task_types": [],
+ "tasks": [],
+ "add_ftrack_family": true,
+ "advanced_filtering": []
}
]
},
diff --git a/openpype/settings/defaults/project_settings/global.json b/openpype/settings/defaults/project_settings/global.json
index 58659d5d41..7317a3da1c 100644
--- a/openpype/settings/defaults/project_settings/global.json
+++ b/openpype/settings/defaults/project_settings/global.json
@@ -315,6 +315,7 @@
"workfile"
],
"hosts": [
+ "aftereffects",
"tvpaint"
],
"task_types": [],
diff --git a/openpype/settings/defaults/project_settings/standalonepublisher.json b/openpype/settings/defaults/project_settings/standalonepublisher.json
index bc91a5ea8a..e36232d3f7 100644
--- a/openpype/settings/defaults/project_settings/standalonepublisher.json
+++ b/openpype/settings/defaults/project_settings/standalonepublisher.json
@@ -141,6 +141,14 @@
"defaults": [],
"help": "Texture files with Unreal naming convention"
},
+ "create_vdb": {
+ "name": "vdb",
+ "label": "VDB Volumetric Data",
+ "family": "vdbcache",
+ "icon": "cloud",
+ "defaults": [],
+ "help": "Hierarchical data structure for the efficient storage and manipulation of sparse volumetric data discretized on three-dimensional grids"
+ },
"__dynamic_keys_labels__": {
"create_workfile": "Workfile",
"create_model": "Model",
@@ -154,7 +162,8 @@
"create_render": "Render",
"create_mov_batch": "Batch Mov",
"create_texture_batch": "Batch Texture",
- "create_simple_unreal_texture": "Simple Unreal Texture"
+ "create_simple_unreal_texture": "Simple Unreal Texture",
+ "create_vdb": "VDB Cache"
}
},
"publish": {
diff --git a/openpype/settings/defaults/system_settings/general.json b/openpype/settings/defaults/system_settings/general.json
index e1785f8709..a06947ba77 100644
--- a/openpype/settings/defaults/system_settings/general.json
+++ b/openpype/settings/defaults/system_settings/general.json
@@ -7,6 +7,7 @@
"global": []
}
},
+ "log_to_server": true,
"disk_mapping": {
"windows": [],
"linux": [],
diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_aftereffects.json b/openpype/settings/entities/schemas/projects_schema/schema_project_aftereffects.json
index 4c4cd225ab..1a3eaef540 100644
--- a/openpype/settings/entities/schemas/projects_schema/schema_project_aftereffects.json
+++ b/openpype/settings/entities/schemas/projects_schema/schema_project_aftereffects.json
@@ -5,6 +5,29 @@
"label": "AfterEffects",
"is_file": true,
"children": [
+ {
+ "type": "dict",
+ "collapsible": true,
+ "key": "create",
+ "label": "Creator plugins",
+ "children": [
+ {
+ "type": "dict",
+ "collapsible": true,
+ "key": "RenderCreator",
+ "label": "Create render",
+ "children": [
+ {
+ "type": "list",
+ "key": "defaults",
+ "label": "Default Variants",
+ "object_type": "text",
+ "docstring": "Fill default variant(s) (like 'Main' or 'Default') used in subset name creation."
+ }
+ ]
+ }
+ ]
+ },
{
"type": "dict",
"collapsible": true,
diff --git a/openpype/settings/entities/schemas/system_schema/schema_general.json b/openpype/settings/entities/schemas/system_schema/schema_general.json
index fcab4cd5d8..0090c54386 100644
--- a/openpype/settings/entities/schemas/system_schema/schema_general.json
+++ b/openpype/settings/entities/schemas/system_schema/schema_general.json
@@ -40,6 +40,11 @@
{
"type": "splitter"
},
+ {
+ "type": "boolean",
+ "key": "log_to_server",
+ "label": "Log to mongo"
+ },
{
"type": "dict",
"key": "disk_mapping",
diff --git a/openpype/settings/handlers.py b/openpype/settings/handlers.py
index 0c94623a64..c99fc6080b 100644
--- a/openpype/settings/handlers.py
+++ b/openpype/settings/handlers.py
@@ -324,6 +324,7 @@ class MongoSettingsHandler(SettingsHandler):
global_general_keys = (
"openpype_path",
"admin_password",
+ "log_to_server",
"disk_mapping",
"production_version",
"staging_version"
diff --git a/openpype/tools/project_manager/project_manager/model.py b/openpype/tools/project_manager/project_manager/model.py
index 1c3ec089f6..871704e13c 100644
--- a/openpype/tools/project_manager/project_manager/model.py
+++ b/openpype/tools/project_manager/project_manager/model.py
@@ -7,6 +7,11 @@ from pymongo import UpdateOne, DeleteOne
from Qt import QtCore, QtGui
+from openpype.lib import (
+ CURRENT_DOC_SCHEMAS,
+ PypeLogger,
+)
+
from .constants import (
IDENTIFIER_ROLE,
ITEM_TYPE_ROLE,
@@ -18,8 +23,6 @@ from .constants import (
)
from .style import ResourceCache
-from openpype.lib import CURRENT_DOC_SCHEMAS
-
class ProjectModel(QtGui.QStandardItemModel):
"""Load possible projects to modify from MongoDB.
@@ -185,6 +188,7 @@ class HierarchyModel(QtCore.QAbstractItemModel):
for key in self.multiselection_columns
}
+ self._log = None
# TODO Reset them on project change
self._current_project = None
self._root_item = None
@@ -194,6 +198,12 @@ class HierarchyModel(QtCore.QAbstractItemModel):
self._reset_root_item()
+ @property
+ def log(self):
+ if self._log is None:
+ self._log = PypeLogger.get_logger("ProjectManagerModel")
+ return self._log
+
@property
def items_by_id(self):
return self._items_by_id
@@ -1367,6 +1377,9 @@ class HierarchyModel(QtCore.QAbstractItemModel):
to_process = collections.deque()
to_process.append(project_item)
+ created_count = 0
+ updated_count = 0
+ removed_count = 0
bulk_writes = []
while to_process:
parent = to_process.popleft()
@@ -1381,6 +1394,7 @@ class HierarchyModel(QtCore.QAbstractItemModel):
insert_list.append(item)
elif item.data(REMOVED_ROLE):
+ removed_count += 1
if item.data(HIERARCHY_CHANGE_ABLE_ROLE):
bulk_writes.append(DeleteOne(
{"_id": item.asset_id}
@@ -1394,6 +1408,7 @@ class HierarchyModel(QtCore.QAbstractItemModel):
else:
update_data = item.update_data()
if update_data:
+ updated_count += 1
bulk_writes.append(UpdateOne(
{"_id": item.asset_id},
update_data
@@ -1406,11 +1421,21 @@ class HierarchyModel(QtCore.QAbstractItemModel):
result = project_col.insert_many(new_docs)
for idx, mongo_id in enumerate(result.inserted_ids):
+ created_count += 1
insert_list[idx].mongo_id = mongo_id
+ if sum([created_count, updated_count, removed_count]) == 0:
+ self.log.info("Nothing has changed")
+ return
+
if bulk_writes:
project_col.bulk_write(bulk_writes)
+ self.log.info((
+ "Save finished."
+ " Created {} | Updated {} | Removed {} asset documents"
+ ).format(created_count, updated_count, removed_count))
+
self.refresh_project()
def copy_mime_data(self, indexes):
@@ -1819,12 +1844,16 @@ class AssetItem(BaseItem):
}
query_projection = {
"_id": 1,
- "data.tasks": 1,
- "data.visualParent": 1,
- "schema": 1,
-
"name": 1,
+ "schema": 1,
"type": 1,
+ "parent": 1,
+
+ "data.visualParent": 1,
+ "data.parents": 1,
+
+ "data.tasks": 1,
+
"data.frameStart": 1,
"data.frameEnd": 1,
"data.fps": 1,
@@ -1835,7 +1864,7 @@ class AssetItem(BaseItem):
"data.clipIn": 1,
"data.clipOut": 1,
"data.pixelAspect": 1,
- "data.tools_env": 1
+ "data.tools_env": 1,
}
def __init__(self, asset_doc):
diff --git a/openpype/tools/standalonepublish/publish.py b/openpype/tools/standalonepublish/publish.py
index 582e7eccf8..e1e9edebb9 100644
--- a/openpype/tools/standalonepublish/publish.py
+++ b/openpype/tools/standalonepublish/publish.py
@@ -1,14 +1,14 @@
import os
import sys
-import openpype
import pyblish.api
+from openpype.pipeline import install_openpype_plugins
from openpype.tools.utils.host_tools import show_publish
def main(env):
# Registers pype's Global pyblish plugins
- openpype.install()
+ install_openpype_plugins()
# Register additional paths
addition_paths_str = env.get("PUBLISH_PATHS") or ""
diff --git a/openpype/tools/standalonepublish/widgets/widget_drop_frame.py b/openpype/tools/standalonepublish/widgets/widget_drop_frame.py
index c1c59d65b6..e6c7328e88 100644
--- a/openpype/tools/standalonepublish/widgets/widget_drop_frame.py
+++ b/openpype/tools/standalonepublish/widgets/widget_drop_frame.py
@@ -37,6 +37,10 @@ class DropDataFrame(QtWidgets.QFrame):
"video_file": video_extensions
}
+ sequence_types = [
+ ".bgeo", ".vdb"
+ ]
+
def __init__(self, parent):
super().__init__()
self.parent_widget = parent
@@ -176,7 +180,7 @@ class DropDataFrame(QtWidgets.QFrame):
non_collectionable_paths = []
for path in in_paths:
ext = os.path.splitext(path)[1]
- if ext in self.image_extensions:
+ if ext in self.image_extensions or ext in self.sequence_types:
collectionable_paths.append(path)
else:
non_collectionable_paths.append(path)
@@ -289,7 +293,7 @@ class DropDataFrame(QtWidgets.QFrame):
def get_file_data(self, data):
filepath = data['files'][0]
ext = data['ext'].lower()
- output = {}
+ output = {"fps": None}
file_info = None
if 'file_info' in data:
diff --git a/openpype/tools/standalonepublish/widgets/widget_family_desc.py b/openpype/tools/standalonepublish/widgets/widget_family_desc.py
index 79681615b9..2095b332bd 100644
--- a/openpype/tools/standalonepublish/widgets/widget_family_desc.py
+++ b/openpype/tools/standalonepublish/widgets/widget_family_desc.py
@@ -52,6 +52,7 @@ class FamilyDescriptionWidget(QtWidgets.QWidget):
family.setAlignment(QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft)
help = QtWidgets.QLabel("help")
+ help.setWordWrap(True)
help.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
label_layout.addWidget(family)
diff --git a/openpype/tools/utils/lib.py b/openpype/tools/utils/lib.py
index 8e2044482a..20fea6600b 100644
--- a/openpype/tools/utils/lib.py
+++ b/openpype/tools/utils/lib.py
@@ -727,11 +727,11 @@ def is_sync_loader(loader):
def is_remove_site_loader(loader):
- return hasattr(loader, "remove_site_on_representation")
+ return hasattr(loader, "is_remove_site_loader")
def is_add_site_loader(loader):
- return hasattr(loader, "add_site_to_representation")
+ return hasattr(loader, "is_add_site_loader")
class WrappedCallbackItem:
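Note the semantics change here: the helpers now look for marker attributes instead of the old method names, so a loader opts in by declaring them. A minimal sketch (hypothetical loader class, not part of this patch):

```python
# Hypothetical loader: declaring these marker attributes is all that is
# needed for is_add_site_loader()/is_remove_site_loader() to return True.
class SyncSiteLoader:
    is_add_site_loader = True
    is_remove_site_loader = True
```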
diff --git a/openpype/version.py b/openpype/version.py
index a2916925b4..662adf28ca 100644
--- a/openpype/version.py
+++ b/openpype/version.py
@@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring Pype version."""
-__version__ = "3.9.5-nightly.1"
+__version__ = "3.10.0-nightly.2"
diff --git a/openpype/widgets/popup.py b/openpype/widgets/popup.py
index e661d3d293..9fc33ccbb8 100644
--- a/openpype/widgets/popup.py
+++ b/openpype/widgets/popup.py
@@ -1,16 +1,19 @@
import sys
-import logging
import contextlib
from Qt import QtCore, QtWidgets
-log = logging.getLogger(__name__)
-
class Popup(QtWidgets.QDialog):
+ """A Popup that moves itself to bottom right of screen on show event.
- on_show = QtCore.Signal()
+ The UI contains a message label and a red highlighted button to "show"
+ or perform another custom action from this pop-up.
+
+ """
+
+ on_clicked = QtCore.Signal()
def __init__(self, parent=None, *args, **kwargs):
super(Popup, self).__init__(parent=parent, *args, **kwargs)
@@ -19,32 +22,34 @@ class Popup(QtWidgets.QDialog):
# Layout
layout = QtWidgets.QHBoxLayout(self)
layout.setContentsMargins(10, 5, 10, 10)
+
+ # Increase spacing slightly for readability
+ layout.setSpacing(10)
+
message = QtWidgets.QLabel("")
message.setStyleSheet("""
QLabel {
font-size: 12px;
}
""")
- show = QtWidgets.QPushButton("Show")
- show.setSizePolicy(QtWidgets.QSizePolicy.Maximum,
+ button = QtWidgets.QPushButton("Show")
+ button.setSizePolicy(QtWidgets.QSizePolicy.Maximum,
QtWidgets.QSizePolicy.Maximum)
- show.setStyleSheet("""QPushButton { background-color: #BB0000 }""")
+ button.setStyleSheet("""QPushButton { background-color: #BB0000 }""")
layout.addWidget(message)
- layout.addWidget(show)
+ layout.addWidget(button)
- # Size
+ # Default size
self.resize(400, 40)
- geometry = self.calculate_window_geometry()
- self.setGeometry(geometry)
self.widgets = {
"message": message,
- "show": show,
+ "button": button,
}
# Signals
- show.clicked.connect(self._on_show_clicked)
+ button.clicked.connect(self._on_clicked)
# Set default title
self.setWindowTitle("Popup")
@@ -52,7 +57,10 @@ class Popup(QtWidgets.QDialog):
def setMessage(self, message):
self.widgets['message'].setText(message)
- def _on_show_clicked(self):
+ def setButtonText(self, text):
+ self.widgets["button"].setText(text)
+
+ def _on_clicked(self):
"""Callback for when the 'show' button is clicked.
Raises the parent (if any)
@@ -63,11 +71,19 @@ class Popup(QtWidgets.QDialog):
self.close()
# Trigger the signal
- self.on_show.emit()
+ self.on_clicked.emit()
if parent:
parent.raise_()
+ def showEvent(self, event):
+
+ # Position popup based on contents on show event
+ geo = self.calculate_window_geometry()
+ self.setGeometry(geo)
+
+ return super(Popup, self).showEvent(event)
+
def calculate_window_geometry(self):
"""Respond to status changes
@@ -104,45 +120,29 @@ class Popup(QtWidgets.QDialog):
return QtCore.QRect(x, y, width, height)
-class Popup2(Popup):
+class PopupUpdateKeys(Popup):
+ """Popup with Update Keys checkbox (intended for Maya)"""
- on_show = QtCore.Signal()
+ on_clicked_state = QtCore.Signal(bool)
def __init__(self, parent=None, *args, **kwargs):
Popup.__init__(self, parent=parent, *args, **kwargs)
layout = self.layout()
- # Add toggle
+ # Insert toggle for Update keys
toggle = QtWidgets.QCheckBox("Update Keys")
layout.insertWidget(1, toggle)
self.widgets["toggle"] = toggle
+ self.on_clicked.connect(self.emit_click_with_state)
+
layout.insertStretch(1, 1)
- # Update button text
- fix = self.widgets["show"]
- fix.setText("Fix")
-
- def calculate_window_geometry(self):
- """Respond to status changes
-
- On creation, align window with screen bottom right.
-
- """
- parent_widget = self.parent()
-
- desktop = QtWidgets.QApplication.desktop()
- if parent_widget:
- screen = desktop.screenNumber(parent_widget)
- else:
- screen = desktop.screenNumber(desktop.cursor().pos())
- center_point = desktop.screenGeometry(screen).center()
-
- frame_geo = self.frameGeometry()
- frame_geo.moveCenter(center_point)
-
- return frame_geo
+ def emit_click_with_state(self):
+ """Emit the on_clicked_state signal with the toggled state"""
+ checked = self.widgets["toggle"].isChecked()
+ self.on_clicked_state.emit(checked)
@contextlib.contextmanager
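A minimal usage sketch of the reworked popup API, assuming a running `QtWidgets.QApplication`; the handler name and message texts are illustrative only:

```python
from Qt import QtWidgets

from openpype.widgets.popup import PopupUpdateKeys

app = QtWidgets.QApplication.instance() or QtWidgets.QApplication([])

popup = PopupUpdateKeys()
popup.setMessage("Scene content is out of date.")
popup.setButtonText("Fix")

def _on_fix(update_keys):
    # "update_keys" carries the checkbox state via on_clicked_state
    print("Fix requested, update keys:", update_keys)

popup.on_clicked_state.connect(_on_fix)
popup.show()
```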
diff --git a/pyproject.toml b/pyproject.toml
index 0d1d5dc9a3..f32e385e80 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "OpenPype"
-version = "3.9.5-nightly.1" # OpenPype
+version = "3.10.0-nightly.2" # OpenPype
description = "Open VFX and Animation pipeline with support."
authors = ["OpenPype Team "]
license = "MIT License"
diff --git a/start.py b/start.py
index 8944da4ba0..38eb9e9bf4 100644
--- a/start.py
+++ b/start.py
@@ -191,6 +191,51 @@ else:
if os.getenv("OPENPYPE_HEADLESS_MODE") != "1":
os.environ.pop("OPENPYPE_HEADLESS_MODE", None)
+# Change log level of OpenPype loggers when "--verbose" is passed
+if "--verbose" in sys.argv:
+ expected_values = (
+ "Expected: notset, debug, info, warning, error, critical"
+ " or integer [0-50]."
+ )
+ idx = sys.argv.index("--verbose")
+ sys.argv.pop(idx)
+ if idx < len(sys.argv):
+ value = sys.argv.pop(idx)
+ else:
+ raise RuntimeError((
+ "Expected value after \"--verbose\" argument. {}"
+ ).format(expected_values))
+
+ log_level = None
+ low_value = value.lower()
+ if low_value.isdigit():
+ log_level = int(low_value)
+ elif low_value == "notset":
+ log_level = 0
+ elif low_value == "debug":
+ log_level = 10
+ elif low_value == "info":
+ log_level = 20
+ elif low_value == "warning":
+ log_level = 30
+ elif low_value == "error":
+ log_level = 40
+ elif low_value == "critical":
+ log_level = 50
+
+ if log_level is None:
+ raise RuntimeError((
+ "Unexpected value after \"--verbose\" argument \"{}\". {}"
+ ).format(value, expected_values))
+
+ os.environ["OPENPYPE_LOG_LEVEL"] = str(log_level)
+
+# Enable debug mode, may affect log level if log level is not defined
+if "--debug" in sys.argv:
+ sys.argv.remove("--debug")
+ os.environ["OPENPYPE_DEBUG"] = "1"
+
+
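For reference, the name-to-level mapping above mirrors the standard `logging` constants, so the same parsing could be sketched with a lookup (hypothetical helper, not what the patch uses):

```python
import logging

def parse_verbose_value(value):
    """Convert a --verbose value ("notset".."critical" or "0"-"50") to int."""
    if value.isdigit():
        return int(value)
    level = logging.getLevelName(value.upper())
    # getLevelName returns an int for known level names, a string otherwise
    if isinstance(level, int):
        return level
    raise RuntimeError(
        "Unexpected value after \"--verbose\" argument \"{}\"".format(value)
    )
```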
import igniter # noqa: E402
from igniter import BootstrapRepos # noqa: E402
from igniter.tools import (
@@ -910,6 +955,16 @@ def boot():
_print(">>> run disk mapping command ...")
run_disk_mapping_commands(global_settings)
+ # Logging to server enabled/disabled
+ log_to_server = global_settings.get("log_to_server", True)
+ if log_to_server:
+ os.environ["OPENPYPE_LOG_TO_SERVER"] = "1"
+ log_to_server_msg = "ON"
+ else:
+ os.environ.pop("OPENPYPE_LOG_TO_SERVER", None)
+ log_to_server_msg = "OFF"
+ _print(f">>> Logging to server is turned {log_to_server_msg}")
+
# Get openpype path from database and set it to environment so openpype can
# find its versions there and bootstrap them.
openpype_path = get_openpype_path_from_settings(global_settings)
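Downstream code can then read the convention established here; a minimal sketch (assuming only that "1" means enabled, as set above):

```python
import os

def log_to_server_enabled():
    # start.py exports OPENPYPE_LOG_TO_SERVER="1" when the setting is on
    # and pops the variable entirely when it is off.
    return os.environ.get("OPENPYPE_LOG_TO_SERVER") == "1"
```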
diff --git a/tests/integration/conftest.py b/tests/conftest.py
similarity index 100%
rename from tests/integration/conftest.py
rename to tests/conftest.py
diff --git a/tests/lib/assert_classes.py b/tests/lib/assert_classes.py
index 7f4d8efc10..9a94f89fd0 100644
--- a/tests/lib/assert_classes.py
+++ b/tests/lib/assert_classes.py
@@ -24,13 +24,14 @@ class DBAssert:
else:
args[key] = val
+ no_of_docs = dbcon.count_documents(args)
+
+ msg = None
args.pop("type")
detail_str = " "
if args:
detail_str = " with '{}'".format(args)
- msg = None
- no_of_docs = dbcon.count_documents(args)
if expected != no_of_docs:
msg = "Not expected no of '{}'{}."\
"Expected {}, found {}".format(queried_type,
diff --git a/tests/lib/testing_classes.py b/tests/lib/testing_classes.py
index 0a9da1aca8..7dfbf6fd0d 100644
--- a/tests/lib/testing_classes.py
+++ b/tests/lib/testing_classes.py
@@ -273,8 +273,6 @@ class PublishTest(ModuleUnitTest):
)
os.environ["AVALON_SCHEMA"] = schema_path
- import openpype
- openpype.install()
os.environ["OPENPYPE_EXECUTABLE"] = sys.executable
from openpype.lib import ApplicationManager
diff --git a/tests/unit/openpype/modules/sync_server/test_module_api.py b/tests/unit/openpype/modules/sync_server/test_module_api.py
new file mode 100644
index 0000000000..a484977758
--- /dev/null
+++ b/tests/unit/openpype/modules/sync_server/test_module_api.py
@@ -0,0 +1,64 @@
+"""Test file for Sync Server, tests API methods (e.g. '_get_alt_site_pairs')
+
+ File:
+ creates temporary directory and downloads .zip file from GDrive
+ unzips .zip file
+ uses content of .zip file (MongoDB's dumps) to import to new databases
+ with use of 'monkeypatch_session' modifies required env vars
+ temporarily
+ runs a battery of tests checking that site operations for the Sync
+ Server module are working
+ removes temporary folder
+ removes temporary databases (?)
+"""
+import pytest
+
+from tests.lib.testing_classes import ModuleUnitTest
+
+
+class TestModuleApi(ModuleUnitTest):
+
+ REPRESENTATION_ID = "60e578d0c987036c6a7b741d"
+
+ TEST_FILES = [("1eCwPljuJeOI8A3aisfOIBKKjcmIycTEt",
+ "test_site_operations.zip", '')]
+
+ @pytest.fixture(scope="module")
+ def setup_sync_server_module(self, dbcon):
+ """Get sync_server_module from ModulesManager"""
+ from openpype.modules import ModulesManager
+
+ manager = ModulesManager()
+ sync_server = manager.modules_by_name["sync_server"]
+ yield sync_server
+
+ def test_get_alt_site_pairs(self, setup_sync_server_module):
+ conf_sites = {"SFTP": {"alternative_sites": ["studio"]},
+ "studio2": {"alternative_sites": ["studio"]}}
+
+ ret = setup_sync_server_module._get_alt_site_pairs(conf_sites)
+ expected = {"SFTP": {"studio", "studio2"},
+ "studio": {"SFTP", "studio2"},
+ "studio2": {"studio", "SFTP"}}
+ assert ret == expected, "Not matching result"
+
+ def test_get_alt_site_pairs_deep(self, setup_sync_server_module):
+ conf_sites = {"A": {"alternative_sites": ["C"]},
+ "B": {"alternative_sites": ["C"]},
+ "C": {"alternative_sites": ["D"]},
+ "D": {"alternative_sites": ["A"]},
+ "F": {"alternative_sites": ["G"]},
+ "G": {"alternative_sites": ["F"]},
+ }
+
+ ret = setup_sync_server_module._get_alt_site_pairs(conf_sites)
+ expected = {"A": {"B", "C", "D"},
+ "B": {"A", "C", "D"},
+ "C": {"A", "B", "D"},
+ "D": {"A", "B", "C"},
+ "F": {"G"},
+ "G": {"F"}}
+ assert ret == expected, "Not matching result"
+
+
+test_case = TestModuleApi()
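The expected values above describe a transitive closure over the configured `alternative_sites` links. A re-implementation sketch that reproduces them (hypothetical, not the module's actual code):

```python
import collections

def get_alt_site_pairs(conf_sites):
    """Breadth-first walk of 'alternative_sites' links, both directions."""
    adjacency = collections.defaultdict(set)
    for site, info in conf_sites.items():
        for alt in info.get("alternative_sites", []):
            adjacency[site].add(alt)
            adjacency[alt].add(site)

    pairs = {}
    for site in adjacency:
        seen = {site}
        queue = collections.deque([site])
        while queue:
            for neighbor in adjacency[queue.popleft()]:
                if neighbor not in seen:
                    seen.add(neighbor)
                    queue.append(neighbor)
        pairs[site] = seen - {site}
    return pairs
```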
diff --git a/website/docs/admin_openpype_commands.md b/website/docs/admin_openpype_commands.md
index 74cb895ac9..53b4799d6e 100644
--- a/website/docs/admin_openpype_commands.md
+++ b/website/docs/admin_openpype_commands.md
@@ -24,7 +24,11 @@ openpype_console --use-version=3.0.0-foo+bar
`--list-versions [--use-staging]` - to list available versions.
-`--validate-version` to validate integrity of given version
+`--validate-version` - to validate integrity of given version
+
+`--verbose` `<level>` - change log verbosity level of OpenPype loggers
+
+`--debug` - set debug flag (affects logging)
For more information [see here](admin_use.md#run-openpype).
@@ -47,13 +51,9 @@ For more information [see here](admin_use.md#run-openpype).
---
### `tray` arguments {#tray-arguments}
-| Argument | Description |
-| --- | --- |
-| `--debug` | print verbose information useful for debugging (works with `openpype_console`) |
-To launch Tray with debugging information:
```shell
-openpype_console tray --debug
+openpype_console tray
```
---
### `launch` arguments {#eventserver-arguments}
@@ -62,7 +62,6 @@ option to specify them.
| Argument | Description |
| --- | --- |
-| `--debug` | print debug info |
| `--ftrack-url` | URL to ftrack server (can be set with `FTRACK_SERVER`) |
| `--ftrack-user` |user name to log in to ftrack (can be set with `FTRACK_API_USER`) |
| `--ftrack-api-key` | ftrack api key (can be set with `FTRACK_API_KEY`) |
@@ -98,12 +97,16 @@ pype launch --app python --project my_project --asset my_asset --task my_task
---
### `publish` arguments {#publish-arguments}
+Run publishing based on metadata passed in a json file, e.g. on a farm.
+
| Argument | Description |
| --- | --- |
-| `--debug` | print more verbose information |
+| `--targets` | define publishing targets (e.g. "farm") |
+| `--gui` (`-g`) | Show publish UI |
+| Positional argument | Path to metadata json file |
```shell
-pype publish
+openpype publish --targets farm
```
---
diff --git a/website/docs/admin_use.md b/website/docs/admin_use.md
index 178241ad19..f84905c486 100644
--- a/website/docs/admin_use.md
+++ b/website/docs/admin_use.md
@@ -69,6 +69,22 @@ stored in `checksums` file.
Add `--headless` to run OpenPype without graphical UI (useful on server or on automated tasks, etc.)
:::
+`--verbose` `<level>` - change log verbosity level of OpenPype loggers.
+
+The level value can be an integer in range `0-50` or one of the enum strings `"notset" (0)`, `"debug" (10)`, `"info" (20)`, `"warning" (30)`, `"error" (40)`, `"critical" (50)`. The value is stored in the `OPENPYPE_LOG_LEVEL` environment variable for subsequent processes.
+
+```shell
+openpype_console --verbose debug
+```
+
+`--debug` - set debug flag (affects logging)
+
+Enables the debug flag for the OpenPype process by changing the environment variable `OPENPYPE_DEBUG` to `"1"`. At this moment it affects only OpenPype loggers. The `--verbose` argument or the `OPENPYPE_LOG_LEVEL` environment variable take precedence when determining the log level.
+
+```shell
+openpype_console --debug
+```
+
### Details
When you run OpenPype from the executable, a few checks are made:
diff --git a/website/docs/artist_hosts_hiero.md b/website/docs/artist_hosts_hiero.md
index f516c3a6e0..dc6f1696e7 100644
--- a/website/docs/artist_hosts_hiero.md
+++ b/website/docs/artist_hosts_hiero.md
@@ -94,6 +94,8 @@ This tool will set any defined colorspace definition from OpenPype `Settings / P
With OpenPype, you can use Hiero/NKS as a starting point for creating a project's **shots** as *assets* from timeline clips with their *hierarchical parents* like **episodes**, **sequences**, **folders**, and child **tasks**. Most importantly it will create **versions** of plate *subsets*, with or without **reference video**. Publishing naturally creates clip **thumbnails** and assigns them to the shot *asset*. Hiero also publishes an **audio** *subset* and various **soft-effects**, either as a retiming component of published plates or as **color-transformations** that will be available later for compositing artists to use either as a *viewport input-process* or as *loaded nodes* in the graph editor.
+
+
### Preparing timeline for conversion to instances
Because we don't support on-the-fly data conversion, raw camera sources or other formats that need to be converted for 2D/3D work should be converted beforehand and the timeline reconformed. Before any clips in the timeline can be converted to publishable instances, we recommend the following.
1. Merge all tracks which supposed to be one and they are multiply only because of editor's style
@@ -191,3 +193,12 @@ If you wish to change any individual properties of the shot then you are able to
+
+### Publishing Effects from Hiero to Nuke
+This video shows a way to publish a shot look as an effect from Hiero to Nuke.
+
+
+
+### Assembling edit from published shot versions
+
+
diff --git a/website/docs/artist_hosts_nuke_tut.md b/website/docs/artist_hosts_nuke_tut.md
index eefb213dd2..296fdf44d5 100644
--- a/website/docs/artist_hosts_nuke_tut.md
+++ b/website/docs/artist_hosts_nuke_tut.md
@@ -89,6 +89,8 @@ This menu item will set correct Colorspace definitions for you. All has to be co
- set preview LUT to your viewers
- set correct colorspace to all discovered Read nodes (following expression set in settings)
+See [Nuke Color Management](artist_hosts_nuke_tut.md#nuke-color-management)
+
@@ -144,6 +146,8 @@ This tool will append all available subsets into an actual node graph. It will l
This QuickStart is a short introduction to what OpenPype can do for you. It provides an overview for compositing artists and simplifies processes that are better described in specific parts of the documentation.
+
+
### Launch Nuke - Shot and Task Context
OpenPype has to know what shot and task you are working on. You need to run Nuke in context of the task, using Ftrack Action or OpenPype Launcher to select the task and run Nuke.
@@ -226,6 +230,11 @@ This will create a Group with a Write node inside.
You can configure write node parameters in **Studio Settings → Project → Anatomy → Color Management and Output Formats → Nuke → Nodes**
:::
+### Create Prerender Node
+Creating a Prerender is very similar to creating an OpenPype managed Write node.
+
+
+
#### What Nuke Publish Does
From the Artist perspective, Nuke publish gathers everything found in the Nuke script with the Publish checkbox set to on, exports it, and raises the Nuke script (workfile) version.
@@ -315,6 +324,8 @@ Main disadvantage of this approach is that you can render only one version of yo
When making quick farm publishes, like making two versions with different color correction, care must be taken to let the first job (first version) completely finish before the second version starts rendering.
+
+
### Managing Versions

@@ -323,15 +334,30 @@ OpenPype checks all the assets loaded to Nuke on script open. All out of date as
Use Manage to switch versions for loaded assets.
+### Loading Effects
+This video shows how to publish an effect from Hiero / Nuke Studio and use it in Nuke.
+
+
+
+
+
+### Nuke Color Management
+
+
+
## Troubleshooting
### Fixing Validate Containers
+If your Pyblish dialog fails on Validate Containers, you might have an old asset loaded. Use OpenPype - Manage... to switch the asset(s) to the latest version.
+

-If your Pyblish dialog fails on Validate Containers, you might have an old asset loaded. Use OpenPype - Manage... to switch the asset(s) to the latest version.
+
### Fixing Validate Version
If your Pyblish dialog fails on Validate Version, you might be trying to publish an already published version. Raise your version in the OpenPype WorkFiles SaveAs.
-Or maybe you accidentally copied write node from different shot to your current one. Check the write publishes on the left side of the Pyblish dialog. Typically you publish only one write. Locate and delete the stray write from other shot.
\ No newline at end of file
+Or maybe you accidentally copied a write node from a different shot to your current one. Check the write publishes on the left side of the Pyblish dialog. Typically you publish only one write. Locate and delete the stray write from the other shot.
+
+
diff --git a/website/docs/artist_hosts_photoshop.md b/website/docs/artist_hosts_photoshop.md
index a140170c49..36670054ee 100644
--- a/website/docs/artist_hosts_photoshop.md
+++ b/website/docs/artist_hosts_photoshop.md
@@ -111,3 +111,67 @@ You can switch to a previous version of the image or update to the latest.


+
+
+### New Publisher
+
+All previous screenshots came from the regular [pyblish](https://pyblish.com/) process; there is also a different UI available. This process extends the existing implementation and adds new functionalities.
+
+To test this in Photoshop, the artist first needs to enable the experimental `New publisher` in Settings. (Tray > Settings > Experimental tools)
+
+
+A new dialog opens after clicking the `Experimental tools` button in the OpenPype extension menu.
+
+
+After you click on this button, this dialog will show up.
+
+
+
+You can see the first instance, called `workfileYourTaskName`. (The name depends on the studio naming convention for Photoshop workfiles.) This instance is so-called "automatic":
+it was created without any action by the artist. You shouldn't delete this instance, as it might hold values necessary for future publishing, but you can choose to exclude it
+from publishing (by toggling the pill button inside the rectangle denoting the instance).
+
+The new publisher allows publishing into a different context: just click on a workfile instance, update `Variant`, `Asset` or `Task` in the form in the middle, and don't forget to click the 'Confirm' button.
+
+Similarly to the old publishing approach, you need to create instances for everything you want to publish. Start by clicking the '+' sign in the bottom left corner.
+
+
+
+In this dialog you can select the family for the published layer or group. Currently only 'image' is implemented.
+
+On right hand side you can see creator attributes:
+- `Create only for selected` - mimics the `Use selected` option of the regular publish
+- `Create separate instance for each selected` - whether a separate instance should be created for each layer when multiple layers are selected
+
+
+
+Here you can see a newly created instance of the image family. (The name depends on the studio naming convention for the image family.) You can exclude an instance from publishing in the same fashion as a workfile instance.
+You can also delete an instance by selecting it and clicking the trashcan icon (next to the plus button in the bottom left).
+
+Buttons on the bottom right are for:
+- `Refresh publishing` - resets the publishing process to its starting position; useful if a previous publish failed or you changed the publish configuration
+- `Stop/pause publishing` - pauses the publishing process at any time
+- `Validate` - runs only the collecting and validating phases (nothing will be published yet)
+- `Publish` - the standard way to kick off the full publishing process
+
+In the unfortunate case of an error during publishing, you will receive this kind of error dialog.
+
+
+
+In this case the issue is that you are publishing two or more instances with the same subset name ('imageMain'). If the error is recoverable by the artist, you should
+see helpful information in the `How to repair?` section, or fix it automatically by clicking the 'Wrench' button on the right if present.
+
+If you would like to ask an admin or support for help, you can use any of the three buttons on the bottom left:
+- `Copy report` - copies the full publishing log to the clipboard
+- `Export and save report` - saves the log into a file for sending via mail or any communication tool
+- `Show details` - switches to a more detailed list of published instances and plugins, similar to the old pyblish list
+
+If you are able to fix the workfile yourself, use the first button on the right to reset the UI to its initial state before the publish. (Click the `Publish` button to start again.)
+
+The new publishing process should be backward compatible, e.g. if you have a workfile with instances created in the previous publishing approach, they will be translated automatically and
+can be used right away.
+
+Note that instances created in the new publisher cannot be used in the old approach though!
+
+If you hit unexpected behaviour with old instances, contact support first; then you can try some steps to recover your publish: delete the instances in the New publisher UI, or try the `Subset manager` in the extension menu.
+The nuclear option is to purge the workfile metadata in `File > File Info > Origin > Headline`. This is only for the most determined daredevils though!
diff --git a/website/docs/assets/artist_photoshop_new_publisher_instance.png b/website/docs/assets/artist_photoshop_new_publisher_instance.png
new file mode 100644
index 0000000000..723a032c94
Binary files /dev/null and b/website/docs/assets/artist_photoshop_new_publisher_instance.png differ
diff --git a/website/docs/assets/artist_photoshop_new_publisher_instance_created.png b/website/docs/assets/artist_photoshop_new_publisher_instance_created.png
new file mode 100644
index 0000000000..0cf6d1d18c
Binary files /dev/null and b/website/docs/assets/artist_photoshop_new_publisher_instance_created.png differ
diff --git a/website/docs/assets/artist_photoshop_new_publisher_publish_failed.png b/website/docs/assets/artist_photoshop_new_publisher_publish_failed.png
new file mode 100644
index 0000000000..e34497b77d
Binary files /dev/null and b/website/docs/assets/artist_photoshop_new_publisher_publish_failed.png differ
diff --git a/website/docs/assets/artist_photoshop_new_publisher_workfile.png b/website/docs/assets/artist_photoshop_new_publisher_workfile.png
new file mode 100644
index 0000000000..006206519f
Binary files /dev/null and b/website/docs/assets/artist_photoshop_new_publisher_workfile.png differ
diff --git a/website/docs/assets/experimental_tools_menu.png b/website/docs/assets/experimental_tools_menu.png
new file mode 100644
index 0000000000..79fa8d3655
Binary files /dev/null and b/website/docs/assets/experimental_tools_menu.png differ
diff --git a/website/docs/assets/experimental_tools_settings.png b/website/docs/assets/experimental_tools_settings.png
new file mode 100644
index 0000000000..4d514e8a8f
Binary files /dev/null and b/website/docs/assets/experimental_tools_settings.png differ
diff --git a/website/yarn.lock b/website/yarn.lock
index e01f0c4ef2..04b9dd658b 100644
--- a/website/yarn.lock
+++ b/website/yarn.lock
@@ -2311,9 +2311,9 @@ asap@~2.0.3:
integrity sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY=
async@^2.6.2:
- version "2.6.3"
- resolved "https://registry.yarnpkg.com/async/-/async-2.6.3.tgz#d72625e2344a3656e3a3ad4fa749fa83299d82ff"
- integrity sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg==
+ version "2.6.4"
+ resolved "https://registry.yarnpkg.com/async/-/async-2.6.4.tgz#706b7ff6084664cd7eae713f6f965433b5504221"
+ integrity sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==
dependencies:
lodash "^4.17.14"