diff --git a/.github/workflows/automate-projects.yml b/.github/workflows/automate-projects.yml deleted file mode 100644 index b605071c2d..0000000000 --- a/.github/workflows/automate-projects.yml +++ /dev/null @@ -1,19 +0,0 @@ -name: Automate Projects - -on: - issues: - types: [opened, labeled] -env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - -jobs: - assign_one_project: - runs-on: ubuntu-latest - name: Assign to One Project - steps: - - name: Assign NEW bugs to triage - uses: srggrs/assign-one-project-github-action@1.2.0 - if: contains(github.event.issue.labels.*.name, 'bug') - with: - project: 'https://github.com/pypeclub/pype/projects/2' - column_name: 'Needs triage' diff --git a/.github/workflows/milestone_assign.yml b/.github/workflows/milestone_assign.yml index 4b52dfc30d..3cbee51472 100644 --- a/.github/workflows/milestone_assign.yml +++ b/.github/workflows/milestone_assign.yml @@ -13,7 +13,7 @@ jobs: if: github.event.pull_request.milestone == null uses: zoispag/action-assign-milestone@v1 with: - repo-token: "${{ secrets.GITHUB_TOKEN }}" + repo-token: "${{ secrets.YNPUT_BOT_TOKEN }}" milestone: 'next-minor' run_if_develop: @@ -24,5 +24,5 @@ jobs: if: github.event.pull_request.milestone == null uses: zoispag/action-assign-milestone@v1 with: - repo-token: "${{ secrets.GITHUB_TOKEN }}" - milestone: 'next-patch' \ No newline at end of file + repo-token: "${{ secrets.YNPUT_BOT_TOKEN }}" + milestone: 'next-patch' diff --git a/.github/workflows/milestone_create.yml b/.github/workflows/milestone_create.yml index b56ca81dc1..632704e64a 100644 --- a/.github/workflows/milestone_create.yml +++ b/.github/workflows/milestone_create.yml @@ -12,7 +12,7 @@ jobs: uses: "WyriHaximus/github-action-get-milestones@master" id: milestones env: - GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}" - run: printf "name=number::%s" $(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number') id: querymilestone @@ -31,7 +31,7 @@ jobs: with: title: 'next-patch' env: - GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}" generate-next-minor: runs-on: ubuntu-latest @@ -40,7 +40,7 @@ jobs: uses: "WyriHaximus/github-action-get-milestones@master" id: milestones env: - GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}" - run: printf "name=number::%s" $(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number') id: querymilestone @@ -59,4 +59,4 @@ jobs: with: title: 'next-minor' env: - GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" \ No newline at end of file + GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}" diff --git a/.github/workflows/nightly_merge.yml b/.github/workflows/nightly_merge.yml index 1d36c89cc7..1776d7a464 100644 --- a/.github/workflows/nightly_merge.yml +++ b/.github/workflows/nightly_merge.yml @@ -14,10 +14,10 @@ jobs: - name: 🚛 Checkout Code uses: actions/checkout@v2 - - name: 🔨 Merge develop to main + - name: 🔨 Merge develop to main uses: everlytic/branch-merge@1.1.0 with: - github_token: ${{ secrets.ADMIN_TOKEN }} + github_token: ${{ secrets.YNPUT_BOT_TOKEN }} source_ref: 'develop' target_branch: 'main' commit_message_template: '[Automated] Merged {source_ref} into {target_branch}' @@ -26,4 +26,4 @@ jobs: uses: benc-uk/workflow-dispatch@v1 with: workflow: Nightly Prerelease - token: ${{ secrets.ADMIN_TOKEN }} \ No newline at end of file + token: ${{ 
secrets.YNPUT_BOT_TOKEN }} diff --git a/.github/workflows/prerelease.yml b/.github/workflows/prerelease.yml index 94bbe48156..571b0339e1 100644 --- a/.github/workflows/prerelease.yml +++ b/.github/workflows/prerelease.yml @@ -25,43 +25,15 @@ jobs: - name: 🔎 Determine next version type id: version_type run: | - TYPE=$(python ./tools/ci_tools.py --bump --github_token ${{ secrets.GITHUB_TOKEN }}) - - echo ::set-output name=type::$TYPE + TYPE=$(python ./tools/ci_tools.py --bump --github_token ${{ secrets.YNPUT_BOT_TOKEN }}) + echo "type=${TYPE}" >> $GITHUB_OUTPUT - name: 💉 Inject new version into files id: version if: steps.version_type.outputs.type != 'skip' run: | - RESULT=$(python ./tools/ci_tools.py --nightly --github_token ${{ secrets.GITHUB_TOKEN }}) - - echo ::set-output name=next_tag::$RESULT - - # - name: "✍️ Generate full changelog" - # if: steps.version_type.outputs.type != 'skip' - # id: generate-full-changelog - # uses: heinrichreimer/github-changelog-generator-action@v2.3 - # with: - # token: ${{ secrets.ADMIN_TOKEN }} - # addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}' - # issues: false - # issuesWoLabels: false - # sinceTag: "3.12.0" - # maxIssues: 100 - # pullRequests: true - # prWoLabels: false - # author: false - # unreleased: true - # compareLink: true - # stripGeneratorNotice: true - # verbose: true - # unreleasedLabel: ${{ steps.version.outputs.next_tag }} - # excludeTagsRegex: "CI/.+" - # releaseBranch: "main" - - - name: "🖨️ Print changelog to console" - if: steps.version_type.outputs.type != 'skip' - run: cat CHANGELOG.md + NEW_VERSION_TAG=$(python ./tools/ci_tools.py --nightly --github_token ${{ secrets.YNPUT_BOT_TOKEN }}) + echo "next_tag=${NEW_VERSION_TAG}" >> $GITHUB_OUTPUT - name: 💾 Commit and Tag id: git_commit @@ -80,7 +52,7 @@ jobs: - name: Push to protected main branch uses: CasperWA/push-protected@v2.10.0 with: - token: ${{ secrets.ADMIN_TOKEN }} + token: ${{ secrets.YNPUT_BOT_TOKEN }} branch: main tags: true unprotect_reviews: true @@ -89,7 +61,7 @@ jobs: uses: everlytic/branch-merge@1.1.0 if: steps.version_type.outputs.type != 'skip' with: - github_token: ${{ secrets.ADMIN_TOKEN }} + github_token: ${{ secrets.YNPUT_BOT_TOKEN }} source_ref: 'main' target_branch: 'develop' commit_message_template: '[Automated] Merged {source_ref} into {target_branch}' diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 7e3b6eb05c..0b4c8af2c7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -26,34 +26,12 @@ jobs: - name: 💉 Inject new version into files id: version run: | - echo ::set-output name=current_version::${GITHUB_REF#refs/*/} - RESULT=$(python ./tools/ci_tools.py --finalize ${GITHUB_REF#refs/*/}) - LASTRELEASE=$(python ./tools/ci_tools.py --lastversion release) + NEW_VERSION=$(python ./tools/ci_tools.py --finalize ${GITHUB_REF#refs/*/}) + LAST_VERSION=$(python ./tools/ci_tools.py --lastversion release) - echo ::set-output name=last_release::$LASTRELEASE - 
echo ::set-output name=release_tag::$RESULT - - # - name: "✍️ Generate full changelog" - # if: steps.version.outputs.release_tag != 'skip' - # id: generate-full-changelog - # uses: heinrichreimer/github-changelog-generator-action@v2.3 - # with: - # token: ${{ secrets.ADMIN_TOKEN }} - # addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}' - # issues: false - # issuesWoLabels: false - # sinceTag: "3.12.0" - # maxIssues: 100 - # pullRequests: true - # prWoLabels: false - # author: false - # unreleased: true - # compareLink: true - # stripGeneratorNotice: true - # verbose: true - # futureRelease: ${{ steps.version.outputs.release_tag }} - # excludeTagsRegex: "CI/.+" - # releaseBranch: "main" + echo "current_version=${GITHUB_REF#refs/*/}" >> $GITHUB_OUTPUT + echo "last_release=${LAST_VERSION}" >> $GITHUB_OUTPUT + echo "release_tag=${NEW_VERSION}" >> $GITHUB_OUTPUT - name: 💾 Commit and Tag id: git_commit @@ -70,43 +48,17 @@ jobs: if: steps.version.outputs.release_tag != 'skip' uses: CasperWA/push-protected@v2.10.0 with: - token: ${{ secrets.ADMIN_TOKEN }} + token: ${{ secrets.YNPUT_BOT_TOKEN }} branch: main tags: true unprotect_reviews: true - - name: "✍️ Generate last changelog" - if: steps.version.outputs.release_tag != 'skip' - id: generate-last-changelog - uses: heinrichreimer/github-changelog-generator-action@v2.2 - with: - token: ${{ secrets.ADMIN_TOKEN }} - addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}' - issues: false - issuesWoLabels: false - sinceTag: ${{ steps.version.outputs.last_release }} - maxIssues: 100 - pullRequests: true - prWoLabels: false - author: false - unreleased: true - compareLink: true - stripGeneratorNotice: true - verbose: true - futureRelease: ${{ steps.version.outputs.release_tag }} - excludeTagsRegex: "CI/.+" - releaseBranch: "main" - stripHeaders: true - base: 'none' - - - name: 🚀 Github Release if: steps.version.outputs.release_tag != 'skip' uses: ncipollo/release-action@v1 with: - body: ${{ steps.generate-last-changelog.outputs.changelog }} tag: ${{ steps.version.outputs.release_tag }} - token: ${{ secrets.ADMIN_TOKEN }} + token: ${{ secrets.YNPUT_BOT_TOKEN }} - name: ☠ Delete Pre-release if: steps.version.outputs.release_tag != 'skip' @@ -118,7 +70,7 @@ jobs: if: steps.version.outputs.release_tag != 'skip' uses: everlytic/branch-merge@1.1.0 with: - github_token: ${{ secrets.ADMIN_TOKEN }} + github_token: ${{ secrets.YNPUT_BOT_TOKEN }} source_ref: 'main' target_branch: 'develop' 
commit_message_template: '[Automated] Merged release {source_ref} into {target_branch}' diff --git a/.github/workflows/test_build.yml b/.github/workflows/test_build.yml index 0e6c242bd6..064a4d47e0 100644 --- a/.github/workflows/test_build.yml +++ b/.github/workflows/test_build.yml @@ -28,7 +28,7 @@ jobs: uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - + - name: 🧵 Install Requirements shell: pwsh run: | @@ -64,27 +64,3 @@ jobs: run: | export SKIP_THIRD_PARTY_VALIDATION="1" ./tools/build.sh - - # MacOS-latest: - - # runs-on: macos-latest - # strategy: - # matrix: - # python-version: [3.9] - - # steps: - # - name: 🚛 Checkout Code - # uses: actions/checkout@v2 - - # - name: Set up Python - # uses: actions/setup-python@v2 - # with: - # python-version: ${{ matrix.python-version }} - - # - name: 🧵 Install Requirements - # run: | - # ./tools/create_env.sh - - # - name: 🔨 Build - # run: | - # ./tools/build.sh diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 890df4613e..eec388924e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -9,4 +9,4 @@ repos: - id: check-yaml - id: check-added-large-files - id: no-commit-to-branch - args: [ '--pattern', '^(?!((release|enhancement|feature|bugfix|documentation|tests|local|chore)\/[a-zA-Z0-9\-]+)$).*' ] + args: [ '--pattern', '^(?!((release|enhancement|feature|bugfix|documentation|tests|local|chore)\/[a-zA-Z0-9\-_]+)$).*' ] diff --git a/openpype/api.py b/openpype/api.py deleted file mode 100644 index b60cd21d2b..0000000000 --- a/openpype/api.py +++ /dev/null @@ -1,112 +0,0 @@ -from .settings import ( - get_system_settings, - get_project_settings, - get_current_project_settings, - get_anatomy_settings, - - SystemSettings, - ProjectSettings -) -from .lib import ( - PypeLogger, - Logger, - Anatomy, - execute, - run_subprocess, - version_up, - get_asset, - get_workdir_data, - get_version_from_path, - get_last_version_from_path, - get_app_environments_for_context, - source_hash, - get_latest_version, - get_local_site_id, - change_openpype_mongo_url, - create_project_folders, - get_project_basic_paths -) - -from .lib.mongo import ( - get_default_components -) - -from .lib.applications import ( - ApplicationManager -) - -from .lib.avalon_context import ( - BuildWorkfile -) - -from . 
import resources - -from .plugin import ( - Extractor, - - ValidatePipelineOrder, - ValidateContentsOrder, - ValidateSceneOrder, - ValidateMeshOrder, -) - -# temporary fix, might -from .action import ( - get_errored_instances_from_context, - RepairAction, - RepairContextAction -) - - -__all__ = [ - "get_system_settings", - "get_project_settings", - "get_current_project_settings", - "get_anatomy_settings", - "get_project_basic_paths", - - "SystemSettings", - "ProjectSettings", - - "PypeLogger", - "Logger", - "Anatomy", - "execute", - "get_default_components", - "ApplicationManager", - "BuildWorkfile", - - # Resources - "resources", - - # plugin classes - "Extractor", - # ordering - "ValidatePipelineOrder", - "ValidateContentsOrder", - "ValidateSceneOrder", - "ValidateMeshOrder", - # action - "get_errored_instances_from_context", - "RepairAction", - "RepairContextAction", - - # get contextual data - "version_up", - "get_asset", - "get_workdir_data", - "get_version_from_path", - "get_last_version_from_path", - "get_app_environments_for_context", - "source_hash", - - "run_subprocess", - "get_latest_version", - - "get_local_site_id", - "change_openpype_mongo_url", - - "get_project_basic_paths", - "create_project_folders" - -] diff --git a/openpype/client/entity_links.py b/openpype/client/entity_links.py index e42ac58aff..b74b4ce7f6 100644 --- a/openpype/client/entity_links.py +++ b/openpype/client/entity_links.py @@ -164,7 +164,6 @@ def get_linked_representation_id( # Recursive graph lookup for inputs {"$graphLookup": graph_lookup} ] - conn = get_project_connection(project_name) result = conn.aggregate(query_pipeline) referenced_version_ids = _process_referenced_pipeline_result( @@ -213,7 +212,7 @@ def _process_referenced_pipeline_result(result, link_type): for output in sorted(outputs_recursive, key=lambda o: o["depth"]): output_links = output.get("data", {}).get("inputLinks") - if not output_links: + if not output_links and output["type"] != "hero_version": continue # Leaf @@ -232,6 +231,9 @@ def _process_referenced_pipeline_result(result, link_type): def _filter_input_links(input_links, link_type, correctly_linked_ids): + if not input_links: # to handle hero versions + return + for input_link in input_links: if link_type and input_link["type"] != link_type: continue diff --git a/openpype/hooks/pre_add_last_workfile_arg.py b/openpype/hooks/pre_add_last_workfile_arg.py index 3609620917..1c8746c559 100644 --- a/openpype/hooks/pre_add_last_workfile_arg.py +++ b/openpype/hooks/pre_add_last_workfile_arg.py @@ -1,4 +1,5 @@ import os + from openpype.lib import PreLaunchHook @@ -40,5 +41,13 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook): self.log.info("Current context does not have any workfile yet.") return + # Determine whether to open workfile post initialization. + if self.host_name == "maya": + key = "open_workfile_post_initialization" + if self.data["project_settings"]["maya"][key]: + self.log.debug("Opening workfile post initialization.") + self.data["env"]["OPENPYPE_" + key.upper()] = "1" + return + # Add path to workfile to arguments self.launch_context.launch_args.append(last_workfile) diff --git a/openpype/host/dirmap.py b/openpype/host/dirmap.py index 88d68f27bf..347c5fbf85 100644 --- a/openpype/host/dirmap.py +++ b/openpype/host/dirmap.py @@ -8,6 +8,7 @@ exists is used. 
import os from abc import ABCMeta, abstractmethod +import platform import six @@ -187,11 +188,19 @@ class HostDirmap(object): self.log.debug("local overrides {}".format(active_overrides)) self.log.debug("remote overrides {}".format(remote_overrides)) + current_platform = platform.system().lower() for root_name, active_site_dir in active_overrides.items(): remote_site_dir = ( remote_overrides.get(root_name) or sync_settings["sites"][remote_site]["root"][root_name] ) + + if isinstance(remote_site_dir, dict): + remote_site_dir = remote_site_dir.get(current_platform) + + if not remote_site_dir: + continue + if os.path.isdir(active_site_dir): if "destination-path" not in mapping: mapping["destination-path"] = [] diff --git a/openpype/host/host.py b/openpype/host/host.py index 94416bb39a..d2335c0062 100644 --- a/openpype/host/host.py +++ b/openpype/host/host.py @@ -1,3 +1,4 @@ +import os import logging import contextlib from abc import ABCMeta, abstractproperty @@ -100,6 +101,30 @@ class HostBase(object): pass + def get_current_project_name(self): + """ + Returns: + Union[str, None]: Current project name. + """ + + return os.environ.get("AVALON_PROJECT") + + def get_current_asset_name(self): + """ + Returns: + Union[str, None]: Current asset name. + """ + + return os.environ.get("AVALON_ASSET") + + def get_current_task_name(self): + """ + Returns: + Union[str, None]: Current task name. + """ + + return os.environ.get("AVALON_TASK") + def get_current_context(self): """Get current context information. @@ -111,19 +136,14 @@ class HostBase(object): Default implementation returns values from 'legacy_io.Session'. Returns: - dict: Context with 3 keys 'project_name', 'asset_name' and - 'task_name'. All of them can be 'None'. + Dict[str, Union[str, None]]: Context with 3 keys 'project_name', + 'asset_name' and 'task_name'. All of them can be 'None'. """ - from openpype.pipeline import legacy_io - - if legacy_io.is_installed(): - legacy_io.install() - return { - "project_name": legacy_io.Session["AVALON_PROJECT"], - "asset_name": legacy_io.Session["AVALON_ASSET"], - "task_name": legacy_io.Session["AVALON_TASK"] + "project_name": self.get_current_project_name(), + "asset_name": self.get_current_asset_name(), + "task_name": self.get_current_task_name() } def get_context_title(self): diff --git a/openpype/hosts/aftereffects/plugins/create/create_render.py b/openpype/hosts/aftereffects/plugins/create/create_render.py index 8d38288257..c20b0ec51b 100644 --- a/openpype/hosts/aftereffects/plugins/create/create_render.py +++ b/openpype/hosts/aftereffects/plugins/create/create_render.py @@ -6,14 +6,19 @@ from openpype.hosts.aftereffects import api from openpype.pipeline import ( Creator, CreatedInstance, - CreatorError, - legacy_io, + CreatorError ) from openpype.hosts.aftereffects.api.pipeline import cache_and_get_instances from openpype.lib import prepare_template_data +from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS class RenderCreator(Creator): + """Creates 'render' instance for publishing. + + The result of a 'render' instance is a video or image sequence for a + particular composition, based on the configuration in its RenderQueue. 
+ """ identifier = "render" label = "Render" family = "render" @@ -28,45 +33,6 @@ class RenderCreator(Creator): ["RenderCreator"] ["defaults"]) - def get_icon(self): - return resources.get_openpype_splash_filepath() - - def collect_instances(self): - for instance_data in cache_and_get_instances(self): - # legacy instances have family=='render' or 'renderLocal', use them - creator_id = (instance_data.get("creator_identifier") or - instance_data.get("family", '').replace("Local", '')) - if creator_id == self.identifier: - instance_data = self._handle_legacy(instance_data) - instance = CreatedInstance.from_existing( - instance_data, self - ) - self._add_instance_to_context(instance) - - def update_instances(self, update_list): - for created_inst, _changes in update_list: - api.get_stub().imprint(created_inst.get("instance_id"), - created_inst.data_to_store()) - subset_change = _changes.get("subset") - if subset_change: - api.get_stub().rename_item(created_inst.data["members"][0], - subset_change[1]) - - def remove_instances(self, instances): - for instance in instances: - self._remove_instance_from_context(instance) - self.host.remove_instance(instance) - - subset = instance.data["subset"] - comp_id = instance.data["members"][0] - comp = api.get_stub().get_item(comp_id) - if comp: - new_comp_name = comp.name.replace(subset, '') - if not new_comp_name: - new_comp_name = "dummyCompName" - api.get_stub().rename_item(comp_id, - new_comp_name) - def create(self, subset_name_from_ui, data, pre_create_data): stub = api.get_stub() # only after After Effects is up if pre_create_data.get("use_selection"): @@ -82,10 +48,19 @@ class RenderCreator(Creator): "if 'useSelection' or create at least " "one composition." ) - + use_composition_name = (pre_create_data.get("use_composition_name") or + len(comps) > 1) for comp in comps: - if pre_create_data.get("use_composition_name"): - composition_name = comp.name + if use_composition_name: + if "{composition}" not in subset_name_from_ui.lower(): + subset_name_from_ui += "{Composition}" + + composition_name = re.sub( + "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), + "", + comp.name + ) + dynamic_fill = prepare_template_data({"composition": composition_name}) subset_name = subset_name_from_ui.format(**dynamic_fill) @@ -129,8 +104,72 @@ class RenderCreator(Creator): ] return output + def get_icon(self): + return resources.get_openpype_splash_filepath() + + def collect_instances(self): + for instance_data in cache_and_get_instances(self): + # legacy instances have family=='render' or 'renderLocal', use them + creator_id = (instance_data.get("creator_identifier") or + instance_data.get("family", '').replace("Local", '')) + if creator_id == self.identifier: + instance_data = self._handle_legacy(instance_data) + instance = CreatedInstance.from_existing( + instance_data, self + ) + self._add_instance_to_context(instance) + + def update_instances(self, update_list): + for created_inst, _changes in update_list: + api.get_stub().imprint(created_inst.get("instance_id"), + created_inst.data_to_store()) + subset_change = _changes.get("subset") + if subset_change: + api.get_stub().rename_item(created_inst.data["members"][0], + subset_change.new_value) + + def remove_instances(self, instances): + for instance in instances: + self._remove_instance_from_context(instance) + self.host.remove_instance(instance) + + subset = instance.data["subset"] + comp_id = instance.data["members"][0] + comp = api.get_stub().get_item(comp_id) + if comp: + new_comp_name = comp.name.replace(subset, 
'') + if not new_comp_name: + new_comp_name = "dummyCompName" + api.get_stub().rename_item(comp_id, + new_comp_name) + def get_detail_description(self): - return """Creator for Render instances""" + return """Creator for Render instances + + The main publishable item in AfterEffects will be of the `render` family. + The result of this item (instance) is an image sequence or video that can + be a final delivery product, or can be loaded and used in other DCCs. + + Select a single composition and create an instance of the 'render' family, + or turn off 'Use selection' to create instances for all compositions. + + 'Use composition name in subset' allows explicitly adding the composition + name to the created subset name. + + The position of the composition name can be set in + `project_settings/global/tools/creator/subset_name_profiles` with some + form of the '{composition}' placeholder. + + The composition name is used implicitly whenever multiple compositions + are handled at the same time. + + If the {composition} placeholder is not used in 'subset_name_profiles', + the composition name is capitalized and appended to the subset name + if necessary. + + If the composition name is used, it is cleaned up of characters + that would cause issues in published file names. + """ def get_dynamic_data(self, variant, task_name, asset_doc, project_name, host_name, instance): @@ -155,7 +194,7 @@ class RenderCreator(Creator): instance_data.pop("uuid") if not instance_data.get("task"): - instance_data["task"] = legacy_io.Session.get("AVALON_TASK") + instance_data["task"] = self.create_context.get_current_task_name() if not instance_data.get("creator_attributes"): is_old_farm = instance_data["family"] != "renderLocal" diff --git a/openpype/hosts/aftereffects/plugins/create/workfile_creator.py b/openpype/hosts/aftereffects/plugins/create/workfile_creator.py index c698af896b..2e7b9d4a7e 100644 --- a/openpype/hosts/aftereffects/plugins/create/workfile_creator.py +++ b/openpype/hosts/aftereffects/plugins/create/workfile_creator.py @@ -2,8 +2,7 @@ import openpype.hosts.aftereffects.api as api from openpype.client import get_asset_by_name from openpype.pipeline import ( AutoCreator, - CreatedInstance, - legacy_io, + CreatedInstance ) from openpype.hosts.aftereffects.api.pipeline import cache_and_get_instances @@ -38,10 +37,11 @@ class AEWorkfileCreator(AutoCreator): existing_instance = instance break - project_name = legacy_io.Session["AVALON_PROJECT"] - asset_name = legacy_io.Session["AVALON_ASSET"] - task_name = legacy_io.Session["AVALON_TASK"] - host_name = legacy_io.Session["AVALON_APP"] + context = self.create_context + project_name = context.get_current_project_name() + asset_name = context.get_current_asset_name() + task_name = context.get_current_task_name() + host_name = context.host_name if existing_instance is None: asset_doc = get_asset_by_name(project_name, asset_name) diff --git a/openpype/hosts/blender/plugins/load/import_workfile.py b/openpype/hosts/blender/plugins/load/import_workfile.py index 618fb83e31..bbdf1c7ea0 100644 --- a/openpype/hosts/blender/plugins/load/import_workfile.py +++ b/openpype/hosts/blender/plugins/load/import_workfile.py @@ -44,7 +44,7 @@ class AppendBlendLoader(plugin.AssetLoader): """ representations = ["blend"] - families = ["*"] + families = ["workfile"] label = "Append Workfile" order = 9 @@ -68,7 +68,7 @@ class ImportBlendLoader(plugin.AssetLoader): """ representations = ["blend"] - families = ["*"] + families = ["workfile"] label = "Import Workfile" order = 9 diff --git 
a/openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py b/openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py index 84b9dd1a6e..48c267fd18 100644 --- a/openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py +++ b/openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py @@ -19,7 +19,6 @@ class ValidateCameraZeroKeyframe(pyblish.api.InstancePlugin): order = ValidateContentsOrder hosts = ["blender"] families = ["camera"] - version = (0, 1, 0) label = "Zero Keyframe" actions = [openpype.hosts.blender.api.action.SelectInvalidAction] diff --git a/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py b/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py index cee855671d..edf47193be 100644 --- a/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py +++ b/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py @@ -14,7 +14,6 @@ class ValidateMeshHasUvs(pyblish.api.InstancePlugin): order = ValidateContentsOrder hosts = ["blender"] families = ["model"] - category = "geometry" label = "Mesh Has UV's" actions = [openpype.hosts.blender.api.action.SelectInvalidAction] optional = True diff --git a/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py b/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py index 45ac08811d..618feb95c1 100644 --- a/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py +++ b/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py @@ -14,7 +14,6 @@ class ValidateMeshNoNegativeScale(pyblish.api.Validator): order = ValidateContentsOrder hosts = ["blender"] families = ["model"] - category = "geometry" label = "Mesh No Negative Scale" actions = [openpype.hosts.blender.api.action.SelectInvalidAction] diff --git a/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py b/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py index f5dc9fdd5c..1a98ec4c1d 100644 --- a/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py +++ b/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py @@ -19,7 +19,6 @@ class ValidateNoColonsInName(pyblish.api.InstancePlugin): order = ValidateContentsOrder hosts = ["blender"] families = ["model", "rig"] - version = (0, 1, 0) label = "No Colons in names" actions = [openpype.hosts.blender.api.action.SelectInvalidAction] diff --git a/openpype/hosts/blender/plugins/publish/validate_transform_zero.py b/openpype/hosts/blender/plugins/publish/validate_transform_zero.py index 742826d3d9..66ef731e6e 100644 --- a/openpype/hosts/blender/plugins/publish/validate_transform_zero.py +++ b/openpype/hosts/blender/plugins/publish/validate_transform_zero.py @@ -21,7 +21,6 @@ class ValidateTransformZero(pyblish.api.InstancePlugin): order = ValidateContentsOrder hosts = ["blender"] families = ["model"] - version = (0, 1, 0) label = "Transform Zero" actions = [openpype.hosts.blender.api.action.SelectInvalidAction] diff --git a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py index d5294d61c2..5082217db0 100644 --- a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py +++ b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py @@ -143,6 +143,9 @@ class ExtractSubsetResources(publish.Extractor): # create staging dir path staging_dir = self.staging_dir(instance) + # append staging dir for later cleanup + 
instance.context.data["cleanupFullPaths"].append(staging_dir) + # add default preset type for thumbnail and reviewable video # update them with settings and override in case the same # are found in there @@ -548,30 +551,3 @@ class ExtractSubsetResources(publish.Extractor): "Path `{}` is containing more that one clip".format(path) ) return clips[0] - - def staging_dir(self, instance): - """Provide a temporary directory in which to store extracted files - - Upon calling this method the staging directory is stored inside - the instance.data['stagingDir'] - """ - staging_dir = instance.data.get('stagingDir', None) - openpype_temp_dir = os.getenv("OPENPYPE_TEMP_DIR") - - if not staging_dir: - if openpype_temp_dir and os.path.exists(openpype_temp_dir): - staging_dir = os.path.normpath( - tempfile.mkdtemp( - prefix="pyblish_tmp_", - dir=openpype_temp_dir - ) - ) - else: - staging_dir = os.path.normpath( - tempfile.mkdtemp(prefix="pyblish_tmp_") - ) - instance.data['stagingDir'] = staging_dir - - instance.context.data["cleanupFullPaths"].append(staging_dir) - - return staging_dir diff --git a/openpype/hosts/harmony/plugins/publish/extract_render.py b/openpype/hosts/harmony/plugins/publish/extract_render.py index 2f8169248e..c29864bb28 100644 --- a/openpype/hosts/harmony/plugins/publish/extract_render.py +++ b/openpype/hosts/harmony/plugins/publish/extract_render.py @@ -108,9 +108,9 @@ class ExtractRender(pyblish.api.InstancePlugin): output = process.communicate()[0] if process.returncode != 0: - raise ValueError(output.decode("utf-8")) + raise ValueError(output.decode("utf-8", errors="backslashreplace")) - self.log.debug(output.decode("utf-8")) + self.log.debug(output.decode("utf-8", errors="backslashreplace")) # Generate representations. extension = collection.tail[1:] diff --git a/openpype/hosts/houdini/api/plugin.py b/openpype/hosts/houdini/api/plugin.py index 4ca6b50702..f0985973a6 100644 --- a/openpype/hosts/houdini/api/plugin.py +++ b/openpype/hosts/houdini/api/plugin.py @@ -113,7 +113,7 @@ class HoudiniCreatorBase(object): Dict[str, Any]: Shared data dictionary. 
""" - if shared_data.get("houdini_cached_subsets") is not None: + if shared_data.get("houdini_cached_subsets") is None: cache = dict() cache_legacy = dict() @@ -225,12 +225,12 @@ class HoudiniCreator(NewCreator, HoudiniCreatorBase): self._add_instance_to_context(created_instance) def update_instances(self, update_list): - for created_inst, _changes in update_list: + for created_inst, changes in update_list: instance_node = hou.node(created_inst.get("instance_node")) new_values = { - key: new_value - for key, (_old_value, new_value) in _changes.items() + key: changes[key].new_value + for key in changes.changed_keys } imprint( instance_node, diff --git a/openpype/hosts/houdini/api/shelves.py b/openpype/hosts/houdini/api/shelves.py index 3ccab964cd..ebd668e9e4 100644 --- a/openpype/hosts/houdini/api/shelves.py +++ b/openpype/hosts/houdini/api/shelves.py @@ -1,4 +1,5 @@ import os +import re import logging import platform @@ -66,7 +67,7 @@ def generate_shelves(): ) continue - mandatory_attributes = {'name', 'script'} + mandatory_attributes = {'label', 'script'} for tool_definition in shelf_definition.get('tools_list'): # We verify that the name and script attibutes of the tool # are set @@ -152,31 +153,32 @@ def get_or_create_tool(tool_definition, shelf): Returns: hou.Tool: The tool updated or the new one """ - existing_tools = shelf.tools() - tool_label = tool_definition.get('label') + tool_label = tool_definition.get("label") + if not tool_label: + log.warning("Skipped shelf without label") + return + + script_path = tool_definition["script"] + if not script_path or not os.path.exists(script_path): + log.warning("This path doesn't exist - {}".format(script_path)) + return + + existing_tools = shelf.tools() existing_tool = next( (tool for tool in existing_tools if tool.label() == tool_label), None ) + + with open(script_path) as stream: + script = stream.read() + + tool_definition["script"] = script + if existing_tool: - tool_definition.pop('name', None) - tool_definition.pop('label', None) + tool_definition.pop("label", None) existing_tool.setData(**tool_definition) return existing_tool - tool_name = tool_label.replace(' ', '_').lower() - - if not os.path.exists(tool_definition['script']): - log.warning( - "This path doesn't exist - {}".format(tool_definition['script']) - ) - return - - with open(tool_definition['script']) as f: - script = f.read() - tool_definition.update({'script': script}) - - new_tool = hou.shelves.newTool(name=tool_name, **tool_definition) - - return new_tool + tool_name = re.sub(r"[^\w\d]+", "_", tool_label).lower() + return hou.shelves.newTool(name=tool_name, **tool_definition) diff --git a/openpype/hosts/max/addon.py b/openpype/hosts/max/addon.py index d3245bbc7e..9d6ab5a8b3 100644 --- a/openpype/hosts/max/addon.py +++ b/openpype/hosts/max/addon.py @@ -12,6 +12,11 @@ class MaxAddon(OpenPypeModule, IHostAddon): def initialize(self, module_settings): self.enabled = True + def add_implementation_envs(self, env, _app): + # Remove auto screen scale factor for Qt + # - let 3dsmax decide it's value + env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None) + def get_workfile_extensions(self): return [".max"] diff --git a/openpype/hosts/max/api/plugin.py b/openpype/hosts/max/api/plugin.py index 4788bfd383..c16d9e61ec 100644 --- a/openpype/hosts/max/api/plugin.py +++ b/openpype/hosts/max/api/plugin.py @@ -78,12 +78,12 @@ class MaxCreator(Creator, MaxCreatorBase): self._add_instance_to_context(created_instance) def update_instances(self, update_list): - for created_inst, _changes in 
update_list: + for created_inst, changes in update_list: instance_node = created_inst.get("instance_node") new_values = { - key: new_value - for key, (_old_value, new_value) in _changes.items() + key: changes[key].new_value + for key in changes.changed_keys } imprint( instance_node, diff --git a/openpype/hosts/max/startup/startup.py b/openpype/hosts/max/startup/startup.py index 37bcef5db1..0d3135a16f 100644 --- a/openpype/hosts/max/startup/startup.py +++ b/openpype/hosts/max/startup/startup.py @@ -1,4 +1,13 @@ # -*- coding: utf-8 -*- +import os +import sys + +# this might happen in some 3dsmax version where PYTHONPATH isn't added +# to sys.path automatically +for path in os.environ["PYTHONPATH"].split(os.pathsep): + if path and path not in sys.path: + sys.path.append(path) + from openpype.hosts.max.api import MaxHost from openpype.pipeline import install_host diff --git a/openpype/hosts/maya/api/commands.py b/openpype/hosts/maya/api/commands.py index 4a36406632..19ad18d824 100644 --- a/openpype/hosts/maya/api/commands.py +++ b/openpype/hosts/maya/api/commands.py @@ -4,6 +4,7 @@ from maya import cmds from openpype.client import get_asset_by_name, get_project from openpype.pipeline import legacy_io +from . import lib class ToolWindows: @@ -59,25 +60,11 @@ def edit_shader_definitions(): def reset_frame_range(): """Set frame range to current asset""" - # Set FPS first - fps = {15: 'game', - 24: 'film', - 25: 'pal', - 30: 'ntsc', - 48: 'show', - 50: 'palf', - 60: 'ntscf', - 23.98: '23.976fps', - 23.976: '23.976fps', - 29.97: '29.97fps', - 47.952: '47.952fps', - 47.95: '47.952fps', - 59.94: '59.94fps', - 44100: '44100fps', - 48000: '48000fps' - }.get(float(legacy_io.Session.get("AVALON_FPS", 25)), "pal") - cmds.currentUnit(time=fps) + fps = lib.convert_to_maya_fps( + float(legacy_io.Session.get("AVALON_FPS", 25)) + ) + lib.set_scene_fps(fps) # Set frame start/end project_name = legacy_io.active_project() diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py index 25842a4776..b920428b20 100644 --- a/openpype/hosts/maya/api/lib.py +++ b/openpype/hosts/maya/api/lib.py @@ -5,6 +5,7 @@ import sys import platform import uuid import math +import re import json import logging @@ -254,11 +255,6 @@ def read(node): return data -def _get_mel_global(name): - """Return the value of a mel global variable""" - return mel.eval("$%s = $%s;" % (name, name)) - - def matrix_equals(a, b, tolerance=1e-10): """ Compares two matrices with an imperfection tolerance @@ -624,15 +620,15 @@ class delete_after(object): cmds.delete(self._nodes) +def get_current_renderlayer(): + return cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True) + + def get_renderer(layer): with renderlayer(layer): return cmds.getAttr("defaultRenderGlobals.currentRenderer") -def get_current_renderlayer(): - return cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True) - - @contextlib.contextmanager def no_undo(flush=False): """Disable the undo queue during the context @@ -1373,27 +1369,6 @@ def set_id(node, unique_id, overwrite=False): cmds.setAttr(attr, unique_id, type="string") -# endregion ID -def get_reference_node(path): - """ - Get the reference node when the path is found being used in a reference - Args: - path (str): the file path to check - - Returns: - node (str): name of the reference node in question - """ - try: - node = cmds.file(path, query=True, referenceNode=True) - except RuntimeError: - log.debug('File is not referenced : "{}"'.format(path)) - return - - reference_path = 
cmds.referenceQuery(path, filename=True) - if os.path.normpath(path) == os.path.normpath(reference_path): - return node - - def set_attribute(attribute, value, node): """Adjust attributes based on the value from the attribute data @@ -1995,8 +1970,6 @@ def get_id_from_sibling(node, history_only=True): return first_id - -# Project settings def set_scene_fps(fps, update=True): """Set FPS from project configuration @@ -2009,28 +1982,21 @@ def set_scene_fps(fps, update=True): """ - fps_mapping = {'15': 'game', - '24': 'film', - '25': 'pal', - '30': 'ntsc', - '48': 'show', - '50': 'palf', - '60': 'ntscf', - '23.98': '23.976fps', - '23.976': '23.976fps', - '29.97': '29.97fps', - '47.952': '47.952fps', - '47.95': '47.952fps', - '59.94': '59.94fps', - '44100': '44100fps', - '48000': '48000fps'} - - # pull from mapping - # this should convert float string to float and int to int - # so 25.0 is converted to 25, but 23.98 will be still float. - dec, ipart = math.modf(fps) - if dec == 0.0: - fps = int(ipart) + fps_mapping = { + '15': 'game', + '24': 'film', + '25': 'pal', + '30': 'ntsc', + '48': 'show', + '50': 'palf', + '60': 'ntscf', + '23.976023976023978': '23.976fps', + '29.97002997002997': '29.97fps', + '47.952047952047955': '47.952fps', + '59.94005994005994': '59.94fps', + '44100': '44100fps', + '48000': '48000fps' + } unit = fps_mapping.get(str(fps), None) if unit is None: @@ -2150,7 +2116,9 @@ def set_context_settings(): asset_data = asset_doc.get("data", {}) # Set project fps - fps = asset_data.get("fps", project_data.get("fps", 25)) + fps = convert_to_maya_fps( + asset_data.get("fps", project_data.get("fps", 25)) + ) legacy_io.Session["AVALON_FPS"] = str(fps) set_scene_fps(fps) @@ -2172,15 +2140,12 @@ def validate_fps(): """ - fps = get_current_project_asset(fields=["data.fps"])["data"]["fps"] - # TODO(antirotor): This is hack as for framerates having multiple - # decimal places. FTrack is ceiling decimal values on - # fps to two decimal places but Maya 2019+ is reporting those fps - # with much higher resolution. As we currently cannot fix Ftrack - # rounding, we have to round those numbers coming from Maya. - current_fps = float_round(mel.eval('currentTimeUnitToFPS()'), 2) + expected_fps = convert_to_maya_fps( + get_current_project_asset(fields=["data.fps"])["data"]["fps"] + ) + current_fps = mel.eval('currentTimeUnitToFPS()') - fps_match = current_fps == fps + fps_match = current_fps == expected_fps if not fps_match and not IS_HEADLESS: from openpype.widgets import popup @@ -2189,14 +2154,19 @@ def validate_fps(): dialog = popup.PopupUpdateKeys(parent=parent) dialog.setModal(True) dialog.setWindowTitle("Maya scene does not match project FPS") - dialog.setMessage("Scene %i FPS does not match project %i FPS" % - (current_fps, fps)) + dialog.setMessage( + "Scene {} FPS does not match project {} FPS".format( + current_fps, expected_fps + ) + ) dialog.setButtonText("Fix") # Set new text for button (add optional argument for the popup?) 
toggle = dialog.widgets["toggle"] update = toggle.isChecked() - dialog.on_clicked_state.connect(lambda: set_scene_fps(fps, update)) + dialog.on_clicked_state.connect( + lambda: set_scene_fps(expected_fps, update) + ) dialog.show() @@ -3379,3 +3349,116 @@ def iter_visible_nodes_in_range(nodes, start, end): def get_attribute_input(attr): connections = cmds.listConnections(attr, plugs=True, destination=False) return connections[0] if connections else None + + +def convert_to_maya_fps(fps): + """Convert any fps to supported Maya framerates.""" + float_framerates = [ + 23.976023976023978, + # WTF is 29.97 df vs fps? + 29.97002997002997, + 47.952047952047955, + 59.94005994005994 + ] + # 44100 fps evaluates as 41000.0. Why? Omitting for now. + int_framerates = [ + 2, + 3, + 4, + 5, + 6, + 8, + 10, + 12, + 15, + 16, + 20, + 24, + 25, + 30, + 40, + 48, + 50, + 60, + 75, + 80, + 90, + 100, + 120, + 125, + 150, + 200, + 240, + 250, + 300, + 375, + 400, + 500, + 600, + 750, + 1200, + 1500, + 2000, + 3000, + 6000, + 48000 + ] + + # If input fps is a whole number we'll return. + if float(fps).is_integer(): + # Validate fps is part of Maya's fps selection. + if fps not in int_framerates: + raise ValueError( + "Framerate \"{}\" is not supported in Maya".format(fps) + ) + return fps + else: + # Differences to supported float frame rates. + differences = [] + for i in float_framerates: + differences.append(abs(i - fps)) + + # Validate difference does not stray too far from supported framerates. + min_difference = min(differences) + min_index = differences.index(min_difference) + supported_framerate = float_framerates[min_index] + if min_difference > 0.1: + raise ValueError( + "Framerate \"{}\" strays too far from any supported framerate" + " in Maya. Closest supported framerate is \"{}\"".format( + fps, supported_framerate + ) + ) + + return supported_framerate + + +def write_xgen_file(data, filepath): + """Overwrites data in .xgen files. + + Quite naive approach to mainly overwrite "xgDataPath" and "xgProjectPath". + + Args: + data (dict): Dictionary of key, value. Key matches with xgen file. + For example: + {"xgDataPath": "some/path"} + filepath (string): Absolute path of .xgen file. 
+ """ + # Generate regex lookup for line to key basically + # match any of the keys in `\t{key}\t\t` + keys = "|".join(re.escape(key) for key in data.keys()) + re_keys = re.compile("^\t({})\t\t".format(keys)) + + lines = [] + with open(filepath, "r") as f: + for line in f: + match = re_keys.match(line) + if match: + key = match.group(1) + value = data[key] + line = "\t{}\t\t{}\n".format(key, value) + + lines.append(line) + + with open(filepath, "w") as f: + f.writelines(lines) diff --git a/openpype/hosts/maya/api/menu.py b/openpype/hosts/maya/api/menu.py index 67109e9958..791475173f 100644 --- a/openpype/hosts/maya/api/menu.py +++ b/openpype/hosts/maya/api/menu.py @@ -50,7 +50,6 @@ def install(): parent="MayaWindow" ) - renderer = cmds.getAttr('defaultRenderGlobals.currentRenderer').lower() # Create context menu context_label = "{}, {}".format( legacy_io.Session["AVALON_ASSET"], diff --git a/openpype/hosts/maya/api/pipeline.py b/openpype/hosts/maya/api/pipeline.py index 3798170671..5323717fa7 100644 --- a/openpype/hosts/maya/api/pipeline.py +++ b/openpype/hosts/maya/api/pipeline.py @@ -514,6 +514,9 @@ def check_lock_on_current_file(): # add the lock file when opening the file filepath = current_file() + # Skip if current file is 'untitled' + if not filepath: + return if is_workfile_locked(filepath): # add lockfile dialog @@ -680,10 +683,12 @@ def before_workfile_save(event): def after_workfile_save(event): workfile_name = event["filename"] - if handle_workfile_locks(): - if workfile_name: - if not is_workfile_locked(workfile_name): - create_workfile_lock(workfile_name) + if ( + handle_workfile_locks() + and workfile_name + and not is_workfile_locked(workfile_name) + ): + create_workfile_lock(workfile_name) class MayaDirmap(HostDirmap): diff --git a/openpype/hosts/maya/api/plugin.py b/openpype/hosts/maya/api/plugin.py index 82df85a8be..916fddd923 100644 --- a/openpype/hosts/maya/api/plugin.py +++ b/openpype/hosts/maya/api/plugin.py @@ -300,6 +300,39 @@ class ReferenceLoader(Loader): str(representation["_id"]), type="string") + # When an animation or pointcache gets connected to an Xgen container, + # the compound attribute "xgenContainers" gets created. When animation + # containers gets updated we also need to update the cacheFileName on + # the Xgen collection. + compound_name = "xgenContainers" + if cmds.objExists("{}.{}".format(node, compound_name)): + import xgenm + container_amount = cmds.getAttr( + "{}.{}".format(node, compound_name), size=True + ) + # loop through all compound children + for i in range(container_amount): + attr = "{}.{}[{}].container".format(node, compound_name, i) + objectset = cmds.listConnections(attr)[0] + reference_node = cmds.sets(objectset, query=True)[0] + palettes = cmds.ls( + cmds.referenceQuery(reference_node, nodes=True), + type="xgmPalette" + ) + for palette in palettes: + for description in xgenm.descriptions(palette): + xgenm.setAttr( + "cacheFileName", + path.replace("\\", "/"), + palette, + description, + "SplinePrimitive" + ) + + # Refresh UI and viewport. 
+ de = xgenm.xgGlobal.DescriptionEditor + de.refresh("Full") + def remove(self, container): """Remove an existing `container` from Maya scene diff --git a/openpype/hosts/maya/api/workfile_template_builder.py b/openpype/hosts/maya/api/workfile_template_builder.py index 3416c98793..2f550e787a 100644 --- a/openpype/hosts/maya/api/workfile_template_builder.py +++ b/openpype/hosts/maya/api/workfile_template_builder.py @@ -2,7 +2,7 @@ import json from maya import cmds -from openpype.pipeline import registered_host +from openpype.pipeline import registered_host, get_current_asset_name from openpype.pipeline.workfile.workfile_template_builder import ( TemplateAlreadyImported, AbstractTemplateBuilder, @@ -41,10 +41,27 @@ class MayaTemplateBuilder(AbstractTemplateBuilder): )) cmds.sets(name=PLACEHOLDER_SET, empty=True) - cmds.file(path, i=True, returnNewNodes=True) + new_nodes = cmds.file(path, i=True, returnNewNodes=True) cmds.setAttr(PLACEHOLDER_SET + ".hiddenInOutliner", True) + imported_sets = cmds.ls(new_nodes, set=True) + if not imported_sets: + return True + + # update imported sets information + asset_name = get_current_asset_name() + for node in imported_sets: + if not cmds.attributeQuery("id", node=node, exists=True): + continue + if cmds.getAttr("{}.id".format(node)) != "pyblish.avalon.instance": + continue + if not cmds.attributeQuery("asset", node=node, exists=True): + continue + + cmds.setAttr( + "{}.asset".format(node), asset_name, type="string") + return True diff --git a/openpype/hosts/maya/plugins/create/create_ass.py b/openpype/hosts/maya/plugins/create/create_arnold_scene_source.py similarity index 84% rename from openpype/hosts/maya/plugins/create/create_ass.py rename to openpype/hosts/maya/plugins/create/create_arnold_scene_source.py index 935a068ca5..2afb897e94 100644 --- a/openpype/hosts/maya/plugins/create/create_ass.py +++ b/openpype/hosts/maya/plugins/create/create_arnold_scene_source.py @@ -6,7 +6,7 @@ from openpype.hosts.maya.api import ( from maya import cmds -class CreateAss(plugin.Creator): +class CreateArnoldSceneSource(plugin.Creator): """Arnold Scene Source""" name = "ass" @@ -29,7 +29,7 @@ class CreateAss(plugin.Creator): maskOperator = False def __init__(self, *args, **kwargs): - super(CreateAss, self).__init__(*args, **kwargs) + super(CreateArnoldSceneSource, self).__init__(*args, **kwargs) # Add animation data self.data.update(lib.collect_animation_data()) @@ -52,7 +52,7 @@ class CreateAss(plugin.Creator): self.data["maskOperator"] = self.maskOperator def process(self): - instance = super(CreateAss, self).process() + instance = super(CreateArnoldSceneSource, self).process() nodes = [] @@ -61,6 +61,6 @@ class CreateAss(plugin.Creator): cmds.sets(nodes, rm=instance) - assContent = cmds.sets(name="content_SET") - assProxy = cmds.sets(name="proxy_SET", empty=True) + assContent = cmds.sets(name=instance + "_content_SET") + assProxy = cmds.sets(name=instance + "_proxy_SET", empty=True) cmds.sets([assContent, assProxy], forceElement=instance) diff --git a/openpype/hosts/maya/plugins/create/create_pointcache.py b/openpype/hosts/maya/plugins/create/create_pointcache.py index cdec140ea8..63c0490dc7 100644 --- a/openpype/hosts/maya/plugins/create/create_pointcache.py +++ b/openpype/hosts/maya/plugins/create/create_pointcache.py @@ -1,3 +1,5 @@ +from maya import cmds + from openpype.hosts.maya.api import ( lib, plugin @@ -37,3 +39,9 @@ class CreatePointCache(plugin.Creator): # Default to not send to farm. 
self.data["farm"] = False self.data["priority"] = 50 + + def process(self): + instance = super(CreatePointCache, self).process() + + assProxy = cmds.sets(name=instance + "_proxy_SET", empty=True) + cmds.sets(assProxy, forceElement=instance) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 8375149442..387b7321b9 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -54,6 +54,7 @@ class CreateRender(plugin.Creator): tileRendering (bool): Instance is set to tile rendering mode. We won't submit actual render, but we'll make publish job to wait for Tile Assembly job done and then publish. + strict_error_checking (bool): Enable/disable error checking on DL See Also: https://pype.club/docs/artist_hosts_maya#creating-basic-render-setup @@ -271,6 +272,9 @@ class CreateRender(plugin.Creator): secondary_pool = pool_setting["secondary_pool"] self.data["secondaryPool"] = self._set_default_pool(pool_names, secondary_pool) + strict_error_checking = maya_submit_dl.get("strict_error_checking", + True) + self.data["strict_error_checking"] = strict_error_checking if muster_enabled: self.log.info(">>> Loading Muster credentials ...") diff --git a/openpype/hosts/maya/plugins/create/create_xgen.py b/openpype/hosts/maya/plugins/create/create_xgen.py index 8672c06a1e..70e23cf47b 100644 --- a/openpype/hosts/maya/plugins/create/create_xgen.py +++ b/openpype/hosts/maya/plugins/create/create_xgen.py @@ -2,9 +2,9 @@ from openpype.hosts.maya.api import plugin class CreateXgen(plugin.Creator): - """Xgen interactive export""" + """Xgen""" name = "xgen" - label = "Xgen Interactive" + label = "Xgen" family = "xgen" icon = "pagelines" diff --git a/openpype/hosts/maya/plugins/inventory/connect_geometry.py b/openpype/hosts/maya/plugins/inventory/connect_geometry.py new file mode 100644 index 0000000000..a12487cf7e --- /dev/null +++ b/openpype/hosts/maya/plugins/inventory/connect_geometry.py @@ -0,0 +1,153 @@ +from maya import cmds + +from openpype.pipeline import InventoryAction, get_representation_context +from openpype.hosts.maya.api.lib import get_id + + +class ConnectGeometry(InventoryAction): + """Connect geometries within containers. + + Source container will connect to the target containers, by searching for + matching geometry IDs (cbid). + Source containers are of family; "animation" and "pointcache". + The connection with be done with a live world space blendshape. + """ + + label = "Connect Geometry" + icon = "link" + color = "white" + + def process(self, containers): + # Validate selection is more than 1. + message = ( + "Only 1 container selected. 2+ containers needed for this action." + ) + if len(containers) == 1: + self.display_warning(message) + return + + # Categorize containers by family. + containers_by_family = {} + for container in containers: + family = get_representation_context( + container["representation"] + )["subset"]["data"]["family"] + try: + containers_by_family[family].append(container) + except KeyError: + containers_by_family[family] = [container] + + # Validate to only 1 source container. 
+ source_containers = containers_by_family.get("animation", []) + source_containers += containers_by_family.get("pointcache", []) + source_container_namespaces = [ + x["namespace"] for x in source_containers + ] + message = ( + "{} animation containers selected:\n\n{}\n\nOnly select 1 of type " + "\"animation\" or \"pointcache\".".format( + len(source_containers), source_container_namespaces + ) + ) + if len(source_containers) != 1: + self.display_warning(message) + return + + source_object = source_containers[0]["objectName"] + + # Collect matching geometry transforms based on the cbId attribute. + target_containers = [] + for family, containers in containers_by_family.items(): + if family in ["animation", "pointcache"]: + continue + + target_containers.extend(containers) + + source_data = self.get_container_data(source_object) + matches = [] + node_types = set() + for target_container in target_containers: + target_data = self.get_container_data( + target_container["objectName"] + ) + node_types.update(target_data["node_types"]) + for id, transform in target_data["ids"].items(): + source_match = source_data["ids"].get(id) + if source_match: + matches.append([source_match, transform]) + + # Message user about what is about to happen. + if not matches: + self.display_warning("No matching geometries found.") + return + + message = "Connecting geometries:\n\n" + for match in matches: + message += "{} > {}\n".format(match[0], match[1]) + + choice = self.display_warning(message, show_cancel=True) + if choice is False: + return + + # Setup live worldspace blendshape connection. + for source, target in matches: + blendshape = cmds.blendShape(source, target)[0] + cmds.setAttr(blendshape + ".origin", 0) + cmds.setAttr(blendshape + "." + target.split(":")[-1], 1) + + # Update Xgen if in any of the containers. + if "xgmPalette" in node_types: + cmds.xgmPreview() + + def get_container_data(self, container): + """Collects data about the container nodes. + + Args: + container (dict): Container instance. + + Returns: + data (dict): + "node_types": All node types in container nodes. + "ids": If the node is a mesh, we collect its parent transform + id. + """ + data = {"node_types": set(), "ids": {}} + ref_node = cmds.sets(container, query=True, nodesOnly=True)[0] + for node in cmds.referenceQuery(ref_node, nodes=True): + node_type = cmds.nodeType(node) + data["node_types"].add(node_type) + + # Only interested in mesh transforms for connecting geometry with + # blendshape. + if node_type != "mesh": + continue + + transform = cmds.listRelatives(node, parent=True)[0] + data["ids"][get_id(transform)] = transform + + return data + + def display_warning(self, message, show_cancel=False): + """Show feedback to user. 
+
+        Returns:
+            bool
+        """
+
+        from Qt import QtWidgets
+
+        accept = QtWidgets.QMessageBox.Ok
+        if show_cancel:
+            buttons = accept | QtWidgets.QMessageBox.Cancel
+        else:
+            buttons = accept
+
+        state = QtWidgets.QMessageBox.warning(
+            None,
+            "",
+            message,
+            buttons=buttons,
+            defaultButton=accept
+        )
+
+        return state == accept
diff --git a/openpype/hosts/maya/plugins/inventory/connect_xgen.py b/openpype/hosts/maya/plugins/inventory/connect_xgen.py
new file mode 100644
index 0000000000..933a1b4025
--- /dev/null
+++ b/openpype/hosts/maya/plugins/inventory/connect_xgen.py
@@ -0,0 +1,168 @@
+from maya import cmds
+import xgenm
+
+from openpype.pipeline import (
+    InventoryAction, get_representation_context, get_representation_path
+)
+
+
+class ConnectXgen(InventoryAction):
+    """Connect Xgen with an animation or pointcache.
+    """
+
+    label = "Connect Xgen"
+    icon = "link"
+    color = "white"
+
+    def process(self, containers):
+        # Validate selection is more than 1.
+        message = (
+            "Only 1 container selected. 2+ containers needed for this action."
+        )
+        if len(containers) == 1:
+            self.display_warning(message)
+            return
+
+        # Categorize containers by family.
+        containers_by_family = {}
+        for container in containers:
+            family = get_representation_context(
+                container["representation"]
+            )["subset"]["data"]["family"]
+            try:
+                containers_by_family[family].append(container)
+            except KeyError:
+                containers_by_family[family] = [container]
+
+        # Validate that only one source container is selected.
+        source_containers = containers_by_family.get("animation", [])
+        source_containers += containers_by_family.get("pointcache", [])
+        source_container_namespaces = [
+            x["namespace"] for x in source_containers
+        ]
+        message = (
+            "{} animation containers selected:\n\n{}\n\nOnly select 1 of type "
+            "\"animation\" or \"pointcache\".".format(
+                len(source_containers), source_container_namespaces
+            )
+        )
+        if len(source_containers) != 1:
+            self.display_warning(message)
+            return
+
+        source_container = source_containers[0]
+        source_object = source_container["objectName"]
+
+        # Validate source representation is an alembic.
+        source_path = get_representation_path(
+            get_representation_context(
+                source_container["representation"]
+            )["representation"]
+        ).replace("\\", "/")
+        message = "Animation container \"{}\" is not an alembic:\n{}".format(
+            source_container["namespace"], source_path
+        )
+        if not source_path.endswith(".abc"):
+            self.display_warning(message)
+            return
+
+        # Target containers.
+        target_containers = []
+        for family, containers in containers_by_family.items():
+            if family in ["animation", "pointcache"]:
+                continue
+
+            target_containers.extend(containers)
+
+        # Inform user of connections from source representation to target
+        # descriptions.
+        descriptions_data = []
+        connections_msg = ""
+        for target_container in target_containers:
+            reference_node = cmds.sets(
+                target_container["objectName"], query=True
+            )[0]
+            palettes = cmds.ls(
+                cmds.referenceQuery(reference_node, nodes=True),
+                type="xgmPalette"
+            )
+            for palette in palettes:
+                for description in xgenm.descriptions(palette):
+                    descriptions_data.append([palette, description])
+                    connections_msg += "\n{}/{}".format(palette, description)
+
+        message = "Connecting \"{}\" to:\n".format(
+            source_container["namespace"]
+        )
+        message += connections_msg
+        choice = self.display_warning(message, show_cancel=True)
+        if choice is False:
+            return
+
+        # Recreate the "xgenContainers" attribute to reset it.
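+        # Deleting and re-adding the compound multi attribute clears any
+        # container connections left over from a previous run, so the multi
+        # indices below start from 0 again.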
+ compound_name = "xgenContainers" + attr = "{}.{}".format(source_object, compound_name) + if cmds.objExists(attr): + cmds.deleteAttr(attr) + + cmds.addAttr( + source_object, + longName=compound_name, + attributeType="compound", + numberOfChildren=1, + multi=True + ) + + # Connect target containers. + for target_container in target_containers: + cmds.addAttr( + source_object, + longName="container", + attributeType="message", + parent=compound_name + ) + index = target_containers.index(target_container) + cmds.connectAttr( + target_container["objectName"] + ".message", + source_object + ".{}[{}].container".format( + compound_name, index + ) + ) + + # Setup cache on Xgen + object = "SplinePrimitive" + for palette, description in descriptions_data: + xgenm.setAttr("useCache", "true", palette, description, object) + xgenm.setAttr("liveMode", "false", palette, description, object) + xgenm.setAttr( + "cacheFileName", source_path, palette, description, object + ) + + # Refresh UI and viewport. + de = xgenm.xgGlobal.DescriptionEditor + de.refresh("Full") + + def display_warning(self, message, show_cancel=False): + """Show feedback to user. + + Returns: + bool + """ + + from Qt import QtWidgets + + accept = QtWidgets.QMessageBox.Ok + if show_cancel: + buttons = accept | QtWidgets.QMessageBox.Cancel + else: + buttons = accept + + state = QtWidgets.QMessageBox.warning( + None, + "", + message, + buttons=buttons, + defaultButton=accept + ) + + return state == accept diff --git a/openpype/hosts/maya/plugins/load/actions.py b/openpype/hosts/maya/plugins/load/actions.py index 98c8192294..2574624dbb 100644 --- a/openpype/hosts/maya/plugins/load/actions.py +++ b/openpype/hosts/maya/plugins/load/actions.py @@ -93,7 +93,20 @@ class ImportMayaLoader(load.LoaderPlugin): """ representations = ["ma", "mb", "obj"] - families = ["*"] + families = [ + "model", + "pointcache", + "proxyAbc", + "animation", + "mayaAscii", + "mayaScene", + "setdress", + "layout", + "camera", + "rig", + "camerarig", + "staticMesh" + ] label = "Import" order = 10 diff --git a/openpype/hosts/maya/plugins/load/load_abc_to_standin.py b/openpype/hosts/maya/plugins/load/load_abc_to_standin.py deleted file mode 100644 index 70866a3ba6..0000000000 --- a/openpype/hosts/maya/plugins/load/load_abc_to_standin.py +++ /dev/null @@ -1,132 +0,0 @@ -import os - -from openpype.pipeline import ( - legacy_io, - load, - get_representation_path -) -from openpype.settings import get_project_settings - - -class AlembicStandinLoader(load.LoaderPlugin): - """Load Alembic as Arnold Standin""" - - families = ["animation", "model", "proxyAbc", "pointcache"] - representations = ["abc"] - - label = "Import Alembic as Arnold Standin" - order = -5 - icon = "code-fork" - color = "orange" - - def load(self, context, name, namespace, options): - - import maya.cmds as cmds - import mtoa.ui.arnoldmenu - from openpype.hosts.maya.api.pipeline import containerise - from openpype.hosts.maya.api.lib import unique_namespace - - version = context["version"] - version_data = version.get("data", {}) - family = version["data"]["families"] - self.log.info("version_data: {}\n".format(version_data)) - self.log.info("family: {}\n".format(family)) - frameStart = version_data.get("frameStart", None) - - asset = context["asset"]["name"] - namespace = namespace or unique_namespace( - asset + "_", - prefix="_" if asset[0].isdigit() else "", - suffix="_", - ) - - # Root group - label = "{}:{}".format(namespace, name) - root = cmds.group(name=label, empty=True) - - settings = 
get_project_settings(os.environ['AVALON_PROJECT']) - colors = settings["maya"]["load"]["colors"] - fps = legacy_io.Session["AVALON_FPS"] - c = colors.get(family[0]) - if c is not None: - r = (float(c[0]) / 255) - g = (float(c[1]) / 255) - b = (float(c[2]) / 255) - cmds.setAttr(root + ".useOutlinerColor", 1) - cmds.setAttr(root + ".outlinerColor", - r, g, b) - - transform_name = label + "_ABC" - - standinShape = cmds.ls(mtoa.ui.arnoldmenu.createStandIn())[0] - standin = cmds.listRelatives(standinShape, parent=True, - typ="transform") - standin = cmds.rename(standin, transform_name) - standinShape = cmds.listRelatives(standin, children=True)[0] - - cmds.parent(standin, root) - - # Set the standin filepath - cmds.setAttr(standinShape + ".dso", self.fname, type="string") - cmds.setAttr(standinShape + ".abcFPS", float(fps)) - - if frameStart is None: - cmds.setAttr(standinShape + ".useFrameExtension", 0) - - elif "model" in family: - cmds.setAttr(standinShape + ".useFrameExtension", 0) - - else: - cmds.setAttr(standinShape + ".useFrameExtension", 1) - - nodes = [root, standin] - self[:] = nodes - - return containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__) - - def update(self, container, representation): - - import pymel.core as pm - - path = get_representation_path(representation) - fps = legacy_io.Session["AVALON_FPS"] - # Update the standin - standins = list() - members = pm.sets(container['objectName'], query=True) - self.log.info("container:{}".format(container)) - for member in members: - shape = member.getShape() - if (shape and shape.type() == "aiStandIn"): - standins.append(shape) - - for standin in standins: - standin.dso.set(path) - standin.abcFPS.set(float(fps)) - if "modelMain" in container['objectName']: - standin.useFrameExtension.set(0) - else: - standin.useFrameExtension.set(1) - - container = pm.PyNode(container["objectName"]) - container.representation.set(str(representation["_id"])) - - def switch(self, container, representation): - self.update(container, representation) - - def remove(self, container): - import maya.cmds as cmds - members = cmds.sets(container['objectName'], query=True) - cmds.lockNode(members, lock=False) - cmds.delete([container['objectName']] + members) - - # Clean up the namespace - try: - cmds.namespace(removeNamespace=container['namespace'], - deleteNamespaceContent=True) - except RuntimeError: - pass diff --git a/openpype/hosts/maya/plugins/load/load_arnold_standin.py b/openpype/hosts/maya/plugins/load/load_arnold_standin.py new file mode 100644 index 0000000000..ab69d62ef5 --- /dev/null +++ b/openpype/hosts/maya/plugins/load/load_arnold_standin.py @@ -0,0 +1,218 @@ +import os +import clique + +import maya.cmds as cmds +import mtoa.ui.arnoldmenu + +from openpype.settings import get_project_settings +from openpype.pipeline import ( + load, + get_representation_path +) +from openpype.hosts.maya.api.lib import ( + unique_namespace, get_attribute_input, maintained_selection +) +from openpype.hosts.maya.api.pipeline import containerise + + +def is_sequence(files): + sequence = False + collections, remainder = clique.assemble(files) + if collections: + sequence = True + + return sequence + + +class ArnoldStandinLoader(load.LoaderPlugin): + """Load as Arnold standin""" + + families = ["ass", "animation", "model", "proxyAbc", "pointcache"] + representations = ["ass", "abc"] + + label = "Load as Arnold standin" + order = -5 + icon = "code-fork" + color = "orange" + + def load(self, context, 
name, namespace, options):
+        version = context['version']
+        version_data = version.get("data", {})
+
+        self.log.info("version_data: {}\n".format(version_data))
+
+        asset = context['asset']['name']
+        namespace = namespace or unique_namespace(
+            asset + "_",
+            prefix="_" if asset[0].isdigit() else "",
+            suffix="_",
+        )
+
+        # Root group
+        label = "{}:{}".format(namespace, name)
+        root = cmds.group(name=label, empty=True)
+
+        # Set color.
+        settings = get_project_settings(context["project"]["name"])
+        color = settings['maya']['load']['colors'].get('ass')
+        if color is not None:
+            cmds.setAttr(root + ".useOutlinerColor", True)
+            cmds.setAttr(
+                root + ".outlinerColor", color[0], color[1], color[2]
+            )
+
+        with maintained_selection():
+            # Create transform with shape
+            transform_name = label + "_standin"
+
+            standin_shape = mtoa.ui.arnoldmenu.createStandIn()
+            standin = cmds.listRelatives(standin_shape, parent=True)[0]
+            standin = cmds.rename(standin, transform_name)
+            standin_shape = cmds.listRelatives(standin, shapes=True)[0]
+
+            cmds.parent(standin, root)
+
+            # Set the standin filepath
+            path, operator = self._setup_proxy(
+                standin_shape, self.fname, namespace
+            )
+            cmds.setAttr(standin_shape + ".dso", path, type="string")
+            sequence = is_sequence(os.listdir(os.path.dirname(self.fname)))
+            cmds.setAttr(standin_shape + ".useFrameExtension", sequence)
+
+        nodes = [root, standin]
+        if operator is not None:
+            nodes.append(operator)
+        self[:] = nodes
+
+        return containerise(
+            name=name,
+            namespace=namespace,
+            nodes=nodes,
+            context=context,
+            loader=self.__class__.__name__)
+
+    def get_next_free_multi_index(self, attr_name):
+        """Find the next unconnected multi index at the input attribute."""
+        for index in range(10000000):
+            connection_info = cmds.connectionInfo(
+                "{}[{}]".format(attr_name, index),
+                sourceFromDestination=True
+            )
+            if len(connection_info or []) == 0:
+                return index
+
+    def _get_proxy_path(self, path):
+        basename_split = os.path.basename(path).split(".")
+        proxy_basename = (
+            basename_split[0] + "_proxy." + ".".join(basename_split[1:])
+        )
+        proxy_path = "/".join([os.path.dirname(path), proxy_basename])
+        return proxy_basename, proxy_path
+
+    def _setup_proxy(self, shape, path, namespace):
+        proxy_basename, proxy_path = self._get_proxy_path(path)
+
+        options_node = "defaultArnoldRenderOptions"
+        merge_operator = get_attribute_input(options_node + ".operator")
+        if merge_operator is None:
+            merge_operator = cmds.createNode("aiMerge")
+            cmds.connectAttr(
+                merge_operator + ".message", options_node + ".operator"
+            )
+
+        merge_operator = merge_operator.split(".")[0]
+
+        string_replace_operator = cmds.createNode(
+            "aiStringReplace", name=namespace + ":string_replace_operator"
+        )
+        node_type = "alembic" if path.endswith(".abc") else "procedural"
+        cmds.setAttr(
+            string_replace_operator + ".selection",
+            "*.(@node=='{}')".format(node_type),
+            type="string"
+        )
+        cmds.setAttr(
+            string_replace_operator + ".match",
+            proxy_basename,
+            type="string"
+        )
+        cmds.setAttr(
+            string_replace_operator + ".replace",
+            os.path.basename(path),
+            type="string"
+        )
+
+        cmds.connectAttr(
+            string_replace_operator + ".out",
+            "{}.inputs[{}]".format(
+                merge_operator,
+                self.get_next_free_multi_index(merge_operator + ".inputs")
+            )
+        )
+
+        # We set up the string operator whether or not a proxy exists. This
+        # makes updating easier, since the string operator will always be
+        # created. Return the original path to use for the standin.
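+        # The resulting operator chain is:
+        #   aiStringReplace.out -> aiMerge.inputs[i] -> options.operator
+        # At render time the string replace swaps the proxy basename back to
+        # the full-resolution file, while the viewport keeps showing the
+        # proxy when one exists.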
+        if not os.path.exists(proxy_path):
+            return path, string_replace_operator
+
+        return proxy_path, string_replace_operator
+
+    def update(self, container, representation):
+        # Update the standin
+        members = cmds.sets(container['objectName'], query=True)
+        for member in members:
+            if cmds.nodeType(member) == "aiStringReplace":
+                string_replace_operator = member
+
+            shapes = cmds.listRelatives(member, shapes=True)
+            if not shapes:
+                continue
+            if cmds.nodeType(shapes[0]) == "aiStandIn":
+                standin = shapes[0]
+
+        path = get_representation_path(representation)
+        proxy_basename, proxy_path = self._get_proxy_path(path)
+
+        # Whether or not a proxy exists, we still update the string operator.
+        # If no proxy exists, the string operator won't replace anything.
+        cmds.setAttr(
+            string_replace_operator + ".match",
+            "resources/" + proxy_basename,
+            type="string"
+        )
+        cmds.setAttr(
+            string_replace_operator + ".replace",
+            os.path.basename(path),
+            type="string"
+        )
+
+        dso_path = path
+        if os.path.exists(proxy_path):
+            dso_path = proxy_path
+        cmds.setAttr(standin + ".dso", dso_path, type="string")
+
+        sequence = is_sequence(os.listdir(os.path.dirname(path)))
+        cmds.setAttr(standin + ".useFrameExtension", sequence)
+
+        cmds.setAttr(
+            container["objectName"] + ".representation",
+            str(representation["_id"]),
+            type="string"
+        )
+
+    def switch(self, container, representation):
+        self.update(container, representation)
+
+    def remove(self, container):
+        members = cmds.sets(container['objectName'], query=True)
+        cmds.lockNode(members, lock=False)
+        cmds.delete([container['objectName']] + members)
+
+        # Clean up the namespace
+        try:
+            cmds.namespace(removeNamespace=container['namespace'],
+                           deleteNamespaceContent=True)
+        except RuntimeError:
+            pass
diff --git a/openpype/hosts/maya/plugins/load/load_ass.py b/openpype/hosts/maya/plugins/load/load_ass.py
deleted file mode 100644
index 5db6fc3dfa..0000000000
--- a/openpype/hosts/maya/plugins/load/load_ass.py
+++ /dev/null
@@ -1,290 +0,0 @@
-import os
-import clique
-
-from openpype.settings import get_project_settings
-from openpype.pipeline import (
-    load,
-    get_representation_path
-)
-import openpype.hosts.maya.api.plugin
-from openpype.hosts.maya.api.plugin import get_reference_node
-from openpype.hosts.maya.api.lib import (
-    maintained_selection,
-    unique_namespace
-)
-from openpype.hosts.maya.api.pipeline import containerise
-
-
-class AssProxyLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
-    """Load Arnold Proxy as reference"""
-
-    families = ["ass"]
-    representations = ["ass"]
-
-    label = "Reference .ASS standin with Proxy"
-    order = -10
-    icon = "code-fork"
-    color = "orange"
-
-    def process_reference(self, context, name, namespace, options):
-
-        import maya.cmds as cmds
-        import pymel.core as pm
-
-        version = context['version']
-        version_data = version.get("data", {})
-
-        self.log.info("version_data: {}\n".format(version_data))
-
-        frameStart = version_data.get("frameStart", None)
-
-        try:
-            family = context["representation"]["context"]["family"]
-        except ValueError:
-            family = "ass"
-
-        with maintained_selection():
-
-            groupName = "{}:{}".format(namespace, name)
-            path = self.fname
-            proxyPath_base = os.path.splitext(path)[0]
-
-            if frameStart is not None:
-                proxyPath_base = os.path.splitext(proxyPath_base)[0]
-
-                publish_folder = os.path.split(path)[0]
-                files_in_folder = os.listdir(publish_folder)
-                collections, remainder = clique.assemble(files_in_folder)
-
-                if collections:
-                    hashes = collections[0].padding * '#'
-                    coll = 
collections[0].format('{head}[index]{tail}') - filename = coll.replace('[index]', hashes) - - path = os.path.join(publish_folder, filename) - - proxyPath = proxyPath_base + ".ma" - - project_name = context["project"]["name"] - file_url = self.prepare_root_value(proxyPath, - project_name) - - nodes = cmds.file(file_url, - namespace=namespace, - reference=True, - returnNewNodes=True, - groupReference=True, - groupName=groupName) - - cmds.makeIdentity(groupName, apply=False, rotate=True, - translate=True, scale=True) - - # Set attributes - proxyShape = pm.ls(nodes, type="mesh")[0] - - proxyShape.aiTranslator.set('procedural') - proxyShape.dso.set(path) - proxyShape.aiOverrideShaders.set(0) - - settings = get_project_settings(project_name) - colors = settings['maya']['load']['colors'] - - c = colors.get(family) - if c is not None: - cmds.setAttr(groupName + ".useOutlinerColor", 1) - cmds.setAttr(groupName + ".outlinerColor", - (float(c[0])/255), - (float(c[1])/255), - (float(c[2])/255) - ) - - self[:] = nodes - - return nodes - - def switch(self, container, representation): - self.update(container, representation) - - def update(self, container, representation): - from maya import cmds - import pymel.core as pm - - node = container["objectName"] - - representation["context"].pop("frame", None) - path = get_representation_path(representation) - print(path) - # path = self.fname - print(self.fname) - proxyPath = os.path.splitext(path)[0] + ".ma" - print(proxyPath) - - # Get reference node from container members - members = cmds.sets(node, query=True, nodesOnly=True) - reference_node = get_reference_node(members) - - assert os.path.exists(proxyPath), "%s does not exist." % proxyPath - - try: - file_url = self.prepare_root_value(proxyPath, - representation["context"] - ["project"] - ["name"]) - content = cmds.file(file_url, - loadReference=reference_node, - type="mayaAscii", - returnNewNodes=True) - - # Set attributes - proxyShape = pm.ls(content, type="mesh")[0] - - proxyShape.aiTranslator.set('procedural') - proxyShape.dso.set(path) - proxyShape.aiOverrideShaders.set(0) - - except RuntimeError as exc: - # When changing a reference to a file that has load errors the - # command will raise an error even if the file is still loaded - # correctly (e.g. when raising errors on Arnold attributes) - # When the file is loaded and has content, we consider it's fine. - if not cmds.referenceQuery(reference_node, isLoaded=True): - raise - - content = cmds.referenceQuery(reference_node, - nodes=True, - dagPath=True) - if not content: - raise - - self.log.warning("Ignoring file read error:\n%s", exc) - - # Add new nodes of the reference to the container - cmds.sets(content, forceElement=node) - - # Remove any placeHolderList attribute entries from the set that - # are remaining from nodes being removed from the referenced file. 
- members = cmds.sets(node, query=True) - invalid = [x for x in members if ".placeHolderList" in x] - if invalid: - cmds.sets(invalid, remove=node) - - # Update metadata - cmds.setAttr("{}.representation".format(node), - str(representation["_id"]), - type="string") - - -class AssStandinLoader(load.LoaderPlugin): - """Load .ASS file as standin""" - - families = ["ass"] - representations = ["ass"] - - label = "Load .ASS file as standin" - order = -5 - icon = "code-fork" - color = "orange" - - def load(self, context, name, namespace, options): - - import maya.cmds as cmds - import mtoa.ui.arnoldmenu - import pymel.core as pm - - version = context['version'] - version_data = version.get("data", {}) - - self.log.info("version_data: {}\n".format(version_data)) - - frameStart = version_data.get("frameStart", None) - - asset = context['asset']['name'] - namespace = namespace or unique_namespace( - asset + "_", - prefix="_" if asset[0].isdigit() else "", - suffix="_", - ) - - # cmds.loadPlugin("gpuCache", quiet=True) - - # Root group - label = "{}:{}".format(namespace, name) - root = pm.group(name=label, empty=True) - - settings = get_project_settings(os.environ['AVALON_PROJECT']) - colors = settings['maya']['load']['colors'] - - c = colors.get('ass') - if c is not None: - cmds.setAttr(root + ".useOutlinerColor", 1) - cmds.setAttr(root + ".outlinerColor", - c[0], c[1], c[2]) - - # Create transform with shape - transform_name = label + "_ASS" - # transform = pm.createNode("transform", name=transform_name, - # parent=root) - - standinShape = pm.PyNode(mtoa.ui.arnoldmenu.createStandIn()) - standin = standinShape.getParent() - standin.rename(transform_name) - - pm.parent(standin, root) - - # Set the standin filepath - standinShape.dso.set(self.fname) - if frameStart is not None: - standinShape.useFrameExtension.set(1) - - nodes = [root, standin] - self[:] = nodes - - return containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__) - - def update(self, container, representation): - - import pymel.core as pm - - path = get_representation_path(representation) - - files_in_path = os.listdir(os.path.split(path)[0]) - sequence = 0 - collections, remainder = clique.assemble(files_in_path) - if collections: - sequence = 1 - - # Update the standin - standins = list() - members = pm.sets(container['objectName'], query=True) - for member in members: - shape = member.getShape() - if (shape and shape.type() == "aiStandIn"): - standins.append(shape) - - for standin in standins: - standin.dso.set(path) - standin.useFrameExtension.set(sequence) - - container = pm.PyNode(container["objectName"]) - container.representation.set(str(representation["_id"])) - - def switch(self, container, representation): - self.update(container, representation) - - def remove(self, container): - import maya.cmds as cmds - members = cmds.sets(container['objectName'], query=True) - cmds.lockNode(members, lock=False) - cmds.delete([container['objectName']] + members) - - # Clean up the namespace - try: - cmds.namespace(removeNamespace=container['namespace'], - deleteNamespaceContent=True) - except RuntimeError: - pass diff --git a/openpype/hosts/maya/plugins/load/load_reference.py b/openpype/hosts/maya/plugins/load/load_reference.py index 96d7d5d3b2..858c9b709e 100644 --- a/openpype/hosts/maya/plugins/load/load_reference.py +++ b/openpype/hosts/maya/plugins/load/load_reference.py @@ -25,9 +25,9 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): "camera", 
"rig", "camerarig", - "xgen", "staticMesh", "mvLook"] + representations = ["ma", "abc", "fbx", "mb"] label = "Reference" diff --git a/openpype/hosts/maya/plugins/load/load_vrayproxy.py b/openpype/hosts/maya/plugins/load/load_vrayproxy.py index 720a132aa7..64184f9e7b 100644 --- a/openpype/hosts/maya/plugins/load/load_vrayproxy.py +++ b/openpype/hosts/maya/plugins/load/load_vrayproxy.py @@ -81,10 +81,11 @@ class VRayProxyLoader(load.LoaderPlugin): c = colors.get(family) if c is not None: cmds.setAttr("{0}.useOutlinerColor".format(group_node), 1) - cmds.setAttr("{0}.outlinerColor".format(group_node), - (float(c[0])/255), - (float(c[1])/255), - (float(c[2])/255) + cmds.setAttr( + "{0}.outlinerColor".format(group_node), + (float(c[0]) / 255), + (float(c[1]) / 255), + (float(c[2]) / 255) ) return containerise( @@ -101,7 +102,7 @@ class VRayProxyLoader(load.LoaderPlugin): assert cmds.objExists(node), "Missing container" members = cmds.sets(node, query=True) or [] - vraymeshes = cmds.ls(members, type="VRayMesh") + vraymeshes = cmds.ls(members, type="VRayProxy") assert vraymeshes, "Cannot find VRayMesh in container" # get all representations for this version diff --git a/openpype/hosts/maya/plugins/load/load_xgen.py b/openpype/hosts/maya/plugins/load/load_xgen.py new file mode 100644 index 0000000000..1600cd49bd --- /dev/null +++ b/openpype/hosts/maya/plugins/load/load_xgen.py @@ -0,0 +1,173 @@ +import os + +import maya.cmds as cmds +import xgenm + +from Qt import QtWidgets + +import openpype.hosts.maya.api.plugin +from openpype.hosts.maya.api.lib import ( + maintained_selection, + get_container_members, + attribute_values, + write_xgen_file +) +from openpype.hosts.maya.api import current_file +from openpype.pipeline import get_representation_path + + +class XgenLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): + """Load Xgen as reference""" + + families = ["xgen"] + representations = ["ma", "mb"] + + label = "Reference Xgen" + icon = "code-fork" + color = "orange" + + def get_xgen_xgd_paths(self, palette): + _, maya_extension = os.path.splitext(current_file()) + xgen_file = current_file().replace( + maya_extension, + "__{}.xgen".format(palette.replace("|", "").replace(":", "__")) + ) + xgd_file = xgen_file.replace(".xgen", ".xgd") + return xgen_file, xgd_file + + def process_reference(self, context, name, namespace, options): + # Validate workfile has a path. + if current_file() is None: + QtWidgets.QMessageBox.warning( + None, + "", + "Current workfile has not been saved. Please save the workfile" + " before loading an Xgen." + ) + return + + maya_filepath = self.prepare_root_value( + self.fname, context["project"]["name"] + ) + + # Reference xgen. Xgen does not like being referenced in under a group. + new_nodes = [] + + with maintained_selection(): + nodes = cmds.file( + maya_filepath, + namespace=namespace, + sharedReferenceFile=False, + reference=True, + returnNewNodes=True + ) + + xgen_palette = cmds.ls( + nodes, type="xgmPalette", long=True + )[0].replace("|", "") + + xgen_file, xgd_file = self.get_xgen_xgd_paths(xgen_palette) + self.set_palette_attributes(xgen_palette, xgen_file, xgd_file) + + # Change the cache and disk values of xgDataPath and xgProjectPath + # to ensure paths are setup correctly. 
+            project_path = os.path.dirname(current_file()).replace("\\", "/")
+            xgenm.setAttr("xgProjectPath", project_path, xgen_palette)
+            data_path = "${{PROJECT}}xgen/collections/{};{}".format(
+                xgen_palette.replace(":", "__ns__"),
+                xgenm.getAttr("xgDataPath", xgen_palette)
+            )
+            xgenm.setAttr("xgDataPath", data_path, xgen_palette)
+
+            data = {"xgProjectPath": project_path, "xgDataPath": data_path}
+            write_xgen_file(data, xgen_file)
+
+            # This creates a custom float attribute. If no changes were made
+            # to the collection, Xgen would not create an .xgd file on save,
+            # which causes errors when launching the workfile again because
+            # the .xgd file cannot be found.
+            name = "custom_float_ignore"
+            if name not in xgenm.customAttrs(xgen_palette):
+                xgenm.addCustomAttr(
+                    "custom_float_ignore", xgen_palette
+                )
+
+            shapes = cmds.ls(nodes, shapes=True, long=True)
+
+            new_nodes = (list(set(nodes) - set(shapes)))
+
+        self[:] = new_nodes
+
+        return new_nodes
+
+    def set_palette_attributes(self, xgen_palette, xgen_file, xgd_file):
+        cmds.setAttr(
+            "{}.xgBaseFile".format(xgen_palette),
+            os.path.basename(xgen_file),
+            type="string"
+        )
+        cmds.setAttr(
+            "{}.xgFileName".format(xgen_palette),
+            os.path.basename(xgd_file),
+            type="string"
+        )
+        cmds.setAttr("{}.xgExportAsDelta".format(xgen_palette), True)
+
+    def update(self, container, representation):
+        """Workflow for updating Xgen.
+
+        - Copy and potentially overwrite the workspace .xgen file.
+        - Export changes to delta file.
+        - Set collection attributes to not include delta files.
+        - Update xgen maya file reference.
+        - Apply the delta file changes.
+        - Reset collection attributes to include delta files.
+
+        We have to use this workflow because, when referencing the xgen
+        collection, Maya implicitly imports the Xgen data from the .xgen
+        file, so we don't have any control over when the delta file changes
+        are applied.
+
+        There is an implicit increment of the xgen and delta files, due to
+        using the workfile basename.
+        """
+
+        container_node = container["objectName"]
+        members = get_container_members(container_node)
+        xgen_palette = cmds.ls(
+            members, type="xgmPalette", long=True
+        )[0].replace("|", "")
+        xgen_file, xgd_file = self.get_xgen_xgd_paths(xgen_palette)
+
+        # Export current changes to apply later.
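+        # createDelta only writes the user edits (the difference from the
+        # base .xgen file) into the .xgd file, so the same edits can be
+        # re-applied once the reference has been updated.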
+ xgenm.createDelta(xgen_palette.replace("|", ""), xgd_file) + + self.set_palette_attributes(xgen_palette, xgen_file, xgd_file) + + maya_file = get_representation_path(representation) + _, extension = os.path.splitext(maya_file) + new_xgen_file = maya_file.replace(extension, ".xgen") + data_path = "" + with open(new_xgen_file, "r") as f: + for line in f: + if line.startswith("\txgDataPath"): + line = line.rstrip() + data_path = line.split("\t")[-1] + break + + project_path = os.path.dirname(current_file()).replace("\\", "/") + data_path = "${{PROJECT}}xgen/collections/{};{}".format( + xgen_palette.replace(":", "__ns__"), + data_path + ) + data = {"xgProjectPath": project_path, "xgDataPath": data_path} + write_xgen_file(data, xgen_file) + + attribute_data = { + "{}.xgFileName".format(xgen_palette): os.path.basename(xgen_file), + "{}.xgBaseFile".format(xgen_palette): "", + "{}.xgExportAsDelta".format(xgen_palette): False + } + with attribute_values(attribute_data): + super().update(container, representation) + + xgenm.applyDelta(xgen_palette.replace("|", ""), xgd_file) diff --git a/openpype/hosts/maya/plugins/publish/collect_ass.py b/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py similarity index 60% rename from openpype/hosts/maya/plugins/publish/collect_ass.py rename to openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py index b5e05d6665..0415808b7a 100644 --- a/openpype/hosts/maya/plugins/publish/collect_ass.py +++ b/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py @@ -1,19 +1,18 @@ from maya import cmds -from openpype.pipeline.publish import KnownPublishError import pyblish.api -class CollectAssData(pyblish.api.InstancePlugin): - """Collect Ass data.""" +class CollectArnoldSceneSource(pyblish.api.InstancePlugin): + """Collect Arnold Scene Source data.""" # Offset to be after renderable camera collection. order = pyblish.api.CollectorOrder + 0.2 - label = 'Collect Ass' + label = "Collect Arnold Scene Source" families = ["ass"] def process(self, instance): - objsets = instance.data['setMembers'] + objsets = instance.data["setMembers"] for objset in objsets: objset = str(objset) @@ -21,15 +20,12 @@ class CollectAssData(pyblish.api.InstancePlugin): if members is None: self.log.warning("Skipped empty instance: \"%s\" " % objset) continue - if "content_SET" in objset: - instance.data['setMembers'] = members - self.log.debug('content members: {}'.format(members)) - elif objset.startswith("proxy_SET"): - if len(members) != 1: - msg = "You have multiple proxy meshes, please only use one" - raise KnownPublishError(msg) - instance.data['proxy'] = members - self.log.debug('proxy members: {}'.format(members)) + if objset.endswith("content_SET"): + instance.data["setMembers"] = cmds.ls(members, long=True) + self.log.debug("content members: {}".format(members)) + elif objset.endswith("proxy_SET"): + instance.data["proxy"] = cmds.ls(members, long=True) + self.log.debug("proxy members: {}".format(members)) # Use camera in object set if present else default to render globals # camera. 
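For context, the content/proxy object-set layout this collector walks can be reproduced in a few lines of Maya Python. A minimal sketch with hypothetical node and set names (only the *_content_SET / *_proxy_SET suffixes matter to the collector):

    from maya import cmds

    # Hypothetical "ass" instance layout: one object set holding the
    # renderable content and one holding the proxy stand-in geometry.
    content = cmds.polyCube(name="tree_GEO")[0]
    proxy = cmds.polyCube(name="tree_proxy_GEO")[0]
    content_set = cmds.sets(content, name="assTree_content_SET")
    proxy_set = cmds.sets(proxy, name="assTree_proxy_SET")

    # Mirrors the collector: store the long names of the set members.
    print(cmds.ls(cmds.sets(content_set, query=True), long=True))
    print(cmds.ls(cmds.sets(proxy_set, query=True), long=True))
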
diff --git a/openpype/hosts/maya/plugins/publish/collect_maya_workspace.py b/openpype/hosts/maya/plugins/publish/collect_maya_workspace.py index 1250ea438f..122fabe8a1 100644 --- a/openpype/hosts/maya/plugins/publish/collect_maya_workspace.py +++ b/openpype/hosts/maya/plugins/publish/collect_maya_workspace.py @@ -12,7 +12,6 @@ class CollectMayaWorkspace(pyblish.api.ContextPlugin): label = "Maya Workspace" hosts = ['maya'] - version = (0, 1, 0) def process(self, context): workspace = cmds.workspace(rootDirectory=True, query=True) diff --git a/openpype/hosts/maya/plugins/publish/collect_pointcache.py b/openpype/hosts/maya/plugins/publish/collect_pointcache.py index a841341f72..332992ca92 100644 --- a/openpype/hosts/maya/plugins/publish/collect_pointcache.py +++ b/openpype/hosts/maya/plugins/publish/collect_pointcache.py @@ -1,3 +1,5 @@ +from maya import cmds + import pyblish.api @@ -12,3 +14,31 @@ class CollectPointcache(pyblish.api.InstancePlugin): def process(self, instance): if instance.data.get("farm"): instance.data["families"].append("publish.farm") + + proxy_set = None + for node in instance.data["setMembers"]: + if cmds.nodeType(node) != "objectSet": + continue + members = cmds.sets(node, query=True) + if members is None: + self.log.warning("Skipped empty objectset: \"%s\" " % node) + continue + if node.endswith("proxy_SET"): + proxy_set = node + instance.data["proxy"] = [] + instance.data["proxyRoots"] = [] + for member in members: + instance.data["proxy"].extend(cmds.ls(member, long=True)) + instance.data["proxyRoots"].extend( + cmds.ls(member, long=True) + ) + instance.data["proxy"].extend( + cmds.listRelatives(member, shapes=True, fullPath=True) + ) + self.log.debug( + "proxy members: {}".format(instance.data["proxy"]) + ) + + if proxy_set: + instance.remove(proxy_set) + instance.data["setMembers"].remove(proxy_set) diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index b1ad3ca58e..f2b5262187 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -42,7 +42,6 @@ Provides: import re import os import platform -import json from maya import cmds import maya.app.renderSetup.model.renderSetup as renderSetup @@ -318,6 +317,9 @@ class CollectMayaRender(pyblish.api.ContextPlugin): "aovSeparator": layer_render_products.layer_data.aov_separator, # noqa: E501 "renderSetupIncludeLights": render_instance.data.get( "renderSetupIncludeLights" + ), + "strict_error_checking": render_instance.data.get( + "strict_error_checking", True ) } diff --git a/openpype/hosts/maya/plugins/publish/collect_xgen.py b/openpype/hosts/maya/plugins/publish/collect_xgen.py new file mode 100644 index 0000000000..da0549b2d8 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/collect_xgen.py @@ -0,0 +1,71 @@ +import os + +from maya import cmds + +import pyblish.api +from openpype.hosts.maya.api.lib import get_attribute_input + + +class CollectXgen(pyblish.api.InstancePlugin): + """Collect Xgen""" + + order = pyblish.api.CollectorOrder + 0.499999 + label = "Collect Xgen" + families = ["xgen"] + + def process(self, instance): + data = { + "xgmPalettes": cmds.ls(instance, type="xgmPalette", long=True), + "xgmDescriptions": cmds.ls( + instance, type="xgmDescription", long=True + ), + "xgmSubdPatches": cmds.ls(instance, type="xgmSubdPatch", long=True) + } + data["xgenNodes"] = ( + data["xgmPalettes"] + + data["xgmDescriptions"] + + data["xgmSubdPatches"] + ) + + if 
data["xgmPalettes"]: + data["xgmPalette"] = data["xgmPalettes"][0] + + data["xgenConnections"] = {} + for node in data["xgmSubdPatches"]: + data["xgenConnections"][node] = {} + for attr in ["transform", "geometry"]: + input = get_attribute_input("{}.{}".format(node, attr)) + data["xgenConnections"][node][attr] = input + + # Collect all files under palette root as resources. + import xgenm + + data_path = xgenm.getAttr( + "xgDataPath", data["xgmPalette"].replace("|", "") + ).split(os.pathsep)[0] + data_path = data_path.replace( + "${PROJECT}", + xgenm.getAttr("xgProjectPath", data["xgmPalette"].replace("|", "")) + ) + transfers = [] + + # Since we are duplicating this palette when extracting we predict that + # the name will be the basename without namespaces. + predicted_palette_name = data["xgmPalette"].split(":")[-1] + predicted_palette_name = predicted_palette_name.replace("|", "") + + for root, _, files in os.walk(data_path): + for file in files: + source = os.path.join(root, file).replace("\\", "/") + destination = os.path.join( + instance.data["resourcesDir"], + "collections", + predicted_palette_name, + source.replace(data_path, "")[1:] + ) + transfers.append((source, destination.replace("\\", "/"))) + + data["transfers"] = transfers + + self.log.info(data) + instance.data.update(data) diff --git a/openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py b/openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py new file mode 100644 index 0000000000..924ac58c40 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py @@ -0,0 +1,160 @@ +import os + +from maya import cmds +import arnold + +from openpype.pipeline import publish +from openpype.hosts.maya.api.lib import ( + maintained_selection, attribute_values, delete_after +) + + +class ExtractArnoldSceneSource(publish.Extractor): + """Extract the content of the instance to an Arnold Scene Source file.""" + + label = "Extract Arnold Scene Source" + hosts = ["maya"] + families = ["ass"] + asciiAss = False + + def process(self, instance): + staging_dir = self.staging_dir(instance) + filename = "{}.ass".format(instance.name) + file_path = os.path.join(staging_dir, filename) + + # Mask + mask = arnold.AI_NODE_ALL + + node_types = { + "options": arnold.AI_NODE_OPTIONS, + "camera": arnold.AI_NODE_CAMERA, + "light": arnold.AI_NODE_LIGHT, + "shape": arnold.AI_NODE_SHAPE, + "shader": arnold.AI_NODE_SHADER, + "override": arnold.AI_NODE_OVERRIDE, + "driver": arnold.AI_NODE_DRIVER, + "filter": arnold.AI_NODE_FILTER, + "color_manager": arnold.AI_NODE_COLOR_MANAGER, + "operator": arnold.AI_NODE_OPERATOR + } + + for key in node_types.keys(): + if instance.data.get("mask" + key.title()): + mask = mask ^ node_types[key] + + # Motion blur + attribute_data = { + "defaultArnoldRenderOptions.motion_blur_enable": instance.data.get( + "motionBlur", True + ), + "defaultArnoldRenderOptions.motion_steps": instance.data.get( + "motionBlurKeys", 2 + ), + "defaultArnoldRenderOptions.motion_frames": instance.data.get( + "motionBlurLength", 0.5 + ) + } + + # Write out .ass file + kwargs = { + "filename": file_path, + "startFrame": instance.data.get("frameStartHandle", 1), + "endFrame": instance.data.get("frameEndHandle", 1), + "frameStep": instance.data.get("step", 1), + "selected": True, + "asciiAss": self.asciiAss, + "shadowLinks": True, + "lightLinks": True, + "boundingBox": True, + "expandProcedurals": instance.data.get("expandProcedurals", False), + "camera": instance.data["camera"], + "mask": mask + } + + 
filenames = self._extract( + instance.data["setMembers"], attribute_data, kwargs + ) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + "name": "ass", + "ext": "ass", + "files": filenames if len(filenames) > 1 else filenames[0], + "stagingDir": staging_dir, + "frameStart": kwargs["startFrame"] + } + + instance.data["representations"].append(representation) + + self.log.info( + "Extracted instance {} to: {}".format(instance.name, staging_dir) + ) + + # Extract proxy. + if not instance.data.get("proxy", []): + return + + kwargs["filename"] = file_path.replace(".ass", "_proxy.ass") + filenames = self._extract( + instance.data["proxy"], attribute_data, kwargs + ) + + representation = { + "name": "proxy", + "ext": "ass", + "files": filenames if len(filenames) > 1 else filenames[0], + "stagingDir": staging_dir, + "frameStart": kwargs["startFrame"], + "outputName": "proxy" + } + + instance.data["representations"].append(representation) + + def _extract(self, nodes, attribute_data, kwargs): + self.log.info("Writing: " + kwargs["filename"]) + filenames = [] + # Duplicating nodes so they are direct children of the world. This + # makes the hierarchy of any exported ass file the same. + with delete_after() as delete_bin: + duplicate_nodes = [] + for node in nodes: + duplicate_transform = cmds.duplicate(node)[0] + + # Discard the children. + shapes = cmds.listRelatives(duplicate_transform, shapes=True) + children = cmds.listRelatives( + duplicate_transform, children=True + ) + cmds.delete(set(children) - set(shapes)) + + duplicate_transform = cmds.parent( + duplicate_transform, world=True + )[0] + + cmds.rename(duplicate_transform, node.split("|")[-1]) + duplicate_transform = "|" + node.split("|")[-1] + + duplicate_nodes.append(duplicate_transform) + delete_bin.append(duplicate_transform) + + with attribute_values(attribute_data): + with maintained_selection(): + self.log.info( + "Writing: {}".format(duplicate_nodes) + ) + cmds.select(duplicate_nodes, noExpand=True) + + self.log.info( + "Extracting ass sequence with: {}".format(kwargs) + ) + + exported_files = cmds.arnoldExportAss(**kwargs) + + for file in exported_files: + filenames.append(os.path.split(file)[1]) + + self.log.info("Exported: {}".format(filenames)) + + return filenames diff --git a/openpype/hosts/maya/plugins/publish/extract_ass.py b/openpype/hosts/maya/plugins/publish/extract_ass.py deleted file mode 100644 index 049f256a7a..0000000000 --- a/openpype/hosts/maya/plugins/publish/extract_ass.py +++ /dev/null @@ -1,106 +0,0 @@ -import os - -from maya import cmds -import arnold - -from openpype.pipeline import publish -from openpype.hosts.maya.api.lib import maintained_selection, attribute_values - - -class ExtractAssStandin(publish.Extractor): - """Extract the content of the instance to a ass file""" - - label = "Arnold Scene Source (.ass)" - hosts = ["maya"] - families = ["ass"] - asciiAss = False - - def process(self, instance): - staging_dir = self.staging_dir(instance) - filename = "{}.ass".format(instance.name) - filenames = [] - file_path = os.path.join(staging_dir, filename) - - # Mask - mask = arnold.AI_NODE_ALL - - node_types = { - "options": arnold.AI_NODE_OPTIONS, - "camera": arnold.AI_NODE_CAMERA, - "light": arnold.AI_NODE_LIGHT, - "shape": arnold.AI_NODE_SHAPE, - "shader": arnold.AI_NODE_SHADER, - "override": arnold.AI_NODE_OVERRIDE, - "driver": arnold.AI_NODE_DRIVER, - "filter": arnold.AI_NODE_FILTER, - "color_manager": arnold.AI_NODE_COLOR_MANAGER, - "operator": 
arnold.AI_NODE_OPERATOR - } - - for key in node_types.keys(): - if instance.data.get("mask" + key.title()): - mask = mask ^ node_types[key] - - # Motion blur - values = { - "defaultArnoldRenderOptions.motion_blur_enable": instance.data.get( - "motionBlur", True - ), - "defaultArnoldRenderOptions.motion_steps": instance.data.get( - "motionBlurKeys", 2 - ), - "defaultArnoldRenderOptions.motion_frames": instance.data.get( - "motionBlurLength", 0.5 - ) - } - - # Write out .ass file - kwargs = { - "filename": file_path, - "startFrame": instance.data.get("frameStartHandle", 1), - "endFrame": instance.data.get("frameEndHandle", 1), - "frameStep": instance.data.get("step", 1), - "selected": True, - "asciiAss": self.asciiAss, - "shadowLinks": True, - "lightLinks": True, - "boundingBox": True, - "expandProcedurals": instance.data.get("expandProcedurals", False), - "camera": instance.data["camera"], - "mask": mask - } - - self.log.info("Writing: '%s'" % file_path) - with attribute_values(values): - with maintained_selection(): - self.log.info( - "Writing: {}".format(instance.data["setMembers"]) - ) - cmds.select(instance.data["setMembers"], noExpand=True) - - self.log.info( - "Extracting ass sequence with: {}".format(kwargs) - ) - - exported_files = cmds.arnoldExportAss(**kwargs) - - for file in exported_files: - filenames.append(os.path.split(file)[1]) - - self.log.info("Exported: {}".format(filenames)) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'ass', - 'ext': 'ass', - 'files': filenames if len(filenames) > 1 else filenames[0], - "stagingDir": staging_dir, - 'frameStart': kwargs["startFrame"] - } - - instance.data["representations"].append(representation) - - self.log.info("Extracted instance '%s' to: %s" - % (instance.name, staging_dir)) diff --git a/openpype/hosts/maya/plugins/publish/extract_assproxy.py b/openpype/hosts/maya/plugins/publish/extract_assproxy.py deleted file mode 100644 index 4937a28a9e..0000000000 --- a/openpype/hosts/maya/plugins/publish/extract_assproxy.py +++ /dev/null @@ -1,81 +0,0 @@ -import os -import contextlib - -from maya import cmds - -from openpype.pipeline import publish -from openpype.hosts.maya.api.lib import maintained_selection - - -class ExtractAssProxy(publish.Extractor): - """Extract proxy model as Maya Ascii to use as arnold standin - - - """ - - order = publish.Extractor.order + 0.2 - label = "Ass Proxy (Maya ASCII)" - hosts = ["maya"] - families = ["ass"] - - def process(self, instance): - - @contextlib.contextmanager - def unparent(root): - """Temporarily unparent `root`""" - parent = cmds.listRelatives(root, parent=True) - if parent: - cmds.parent(root, world=True) - yield - self.log.info("{} - {}".format(root, parent)) - cmds.parent(root, parent) - else: - yield - - # Define extract output file path - stagingdir = self.staging_dir(instance) - filename = "{0}.ma".format(instance.name) - path = os.path.join(stagingdir, filename) - - # Perform extraction - self.log.info("Performing extraction..") - - # Get only the shape contents we need in such a way that we avoid - # taking along intermediateObjects - proxy = instance.data.get('proxy', None) - - if not proxy: - self.log.info("no proxy mesh") - return - - members = cmds.ls(proxy, - dag=True, - transforms=True, - noIntermediate=True) - self.log.info(members) - - with maintained_selection(): - with unparent(members[0]): - cmds.select(members, noExpand=True) - cmds.file(path, - force=True, - typ="mayaAscii", - exportSelected=True, - 
preserveReferences=False, - channels=False, - constraints=False, - expressions=False, - constructionHistory=False) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'ma', - 'ext': 'ma', - 'files': filename, - "stagingDir": stagingdir - } - instance.data["representations"].append(representation) - - self.log.info("Extracted instance '%s' to: %s" % (instance.name, path)) diff --git a/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py b/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py index 3769ec3605..c2411ca651 100644 --- a/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py +++ b/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py @@ -20,8 +20,7 @@ class ExtractMayaSceneRaw(publish.Extractor): "mayaScene", "setdress", "layout", - "camerarig", - "xgen"] + "camerarig"] scene_type = "ma" def process(self, instance): diff --git a/openpype/hosts/maya/plugins/publish/extract_pointcache.py b/openpype/hosts/maya/plugins/publish/extract_pointcache.py index 7ed73fd5b0..0eb65e4226 100644 --- a/openpype/hosts/maya/plugins/publish/extract_pointcache.py +++ b/openpype/hosts/maya/plugins/publish/extract_pointcache.py @@ -1,4 +1,5 @@ import os +import copy from maya import cmds @@ -9,6 +10,7 @@ from openpype.hosts.maya.api.lib import ( maintained_selection, iter_visible_nodes_in_range ) +from openpype.lib import StringTemplate class ExtractAlembic(publish.Extractor): @@ -23,9 +25,7 @@ class ExtractAlembic(publish.Extractor): label = "Extract Pointcache (Alembic)" hosts = ["maya"] - families = ["pointcache", - "model", - "vrayproxy"] + families = ["pointcache", "model", "vrayproxy"] targets = ["local", "remote"] def process(self, instance): @@ -87,6 +87,7 @@ class ExtractAlembic(publish.Extractor): end=end)) suspend = not instance.data.get("refresh", False) + self.log.info(nodes) with suspended_refresh(suspend=suspend): with maintained_selection(): cmds.select(nodes, noExpand=True) @@ -101,9 +102,9 @@ class ExtractAlembic(publish.Extractor): instance.data["representations"] = [] representation = { - 'name': 'abc', - 'ext': 'abc', - 'files': filename, + "name": "abc", + "ext": "abc", + "files": filename, "stagingDir": dirname } instance.data["representations"].append(representation) @@ -112,6 +113,48 @@ class ExtractAlembic(publish.Extractor): self.log.info("Extracted {} to {}".format(instance, dirname)) + # Extract proxy. 
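+        # The proxy alembic is not registered as a separate representation;
+        # it is written next to the main cache and copied into the publish
+        # resources directory via the `transfers` list below.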
+ if not instance.data.get("proxy"): + return + + path = path.replace(".abc", "_proxy.abc") + if not instance.data.get("includeParentHierarchy", True): + # Set the root nodes if we don't want to include parents + # The roots are to be considered the ones that are the actual + # direct members of the set + options["root"] = instance.data["proxyRoots"] + + with suspended_refresh(suspend=suspend): + with maintained_selection(): + cmds.select(instance.data["proxy"]) + extract_alembic( + file=path, + startFrame=start, + endFrame=end, + **options + ) + + template_data = copy.deepcopy(instance.data["anatomyData"]) + template_data.update({"ext": "abc"}) + templates = instance.context.data["anatomy"].templates["publish"] + published_filename_without_extension = StringTemplate( + templates["file"] + ).format(template_data).replace(".abc", "_proxy") + transfers = [] + destination = os.path.join( + instance.data["resourcesDir"], + filename.replace( + filename.split(".")[0], + published_filename_without_extension + ) + ) + transfers.append((path, destination)) + + for source, destination in transfers: + self.log.debug("Transfer: {} > {}".format(source, destination)) + + instance.data["transfers"] = transfers + def get_members_and_roots(self, instance): return instance[:], instance.data.get("setMembers") diff --git a/openpype/hosts/maya/plugins/publish/extract_workfile_xgen.py b/openpype/hosts/maya/plugins/publish/extract_workfile_xgen.py new file mode 100644 index 0000000000..20e1bd37d8 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/extract_workfile_xgen.py @@ -0,0 +1,250 @@ +import os +import shutil +import copy + +from maya import cmds + +import pyblish.api +from openpype.hosts.maya.api.lib import extract_alembic +from openpype.pipeline import publish +from openpype.lib import StringTemplate + + +class ExtractWorkfileXgen(publish.Extractor): + """Extract Workfile Xgen. + + When submitting a render, we need to prep Xgen side car files. + """ + + # Offset to run before workfile scene save. + order = pyblish.api.ExtractorOrder - 0.499 + label = "Extract Workfile Xgen" + families = ["workfile"] + hosts = ["maya"] + + def get_render_max_frame_range(self, context): + """Return start to end frame range including all renderlayers in + context. + + This will return the full frame range which includes all frames of the + renderlayer instances to be published/submitted. + + Args: + context (pyblish.api.Context): Current publishing context. + + Returns: + tuple or None: Start frame, end frame tuple if any renderlayers + found. Otherwise None is returned. 
+
+        """
+
+        def _is_active_renderlayer(i):
+            """Return whether instance is an active renderlayer"""
+            if not i.data.get("publish", True):
+                return False
+
+            is_renderlayer = (
+                "renderlayer" in i.data.get("families", []) or
+                i.data["family"] == "renderlayer"
+            )
+            return is_renderlayer
+
+        start_frame = None
+        end_frame = None
+        for instance in context:
+            if not _is_active_renderlayer(instance):
+                # Only consider active renderlayer instances.
+                continue
+
+            render_start_frame = instance.data["frameStart"]
+            render_end_frame = instance.data["frameEnd"]
+
+            if start_frame is None:
+                start_frame = render_start_frame
+            else:
+                start_frame = min(start_frame, render_start_frame)
+
+            if end_frame is None:
+                end_frame = render_end_frame
+            else:
+                end_frame = max(end_frame, render_end_frame)
+
+        if start_frame is None or end_frame is None:
+            return
+
+        return start_frame, end_frame
+
+    def process(self, instance):
+        transfers = []
+
+        # Validate there are any palettes in the scene.
+        if not cmds.ls(type="xgmPalette"):
+            self.log.debug(
+                "No collections found in the scene. Skipping Xgen extraction."
+            )
+            return
+
+        import xgenm
+
+        # Only extract when we are publishing a renderlayer as well.
+        render_range = self.get_render_max_frame_range(instance.context)
+        if not render_range:
+            self.log.debug(
+                "No publishable renderlayers found in context. Skipping Xgen"
+                " extraction."
+            )
+            return
+
+        start_frame, end_frame = render_range
+
+        # We decrement the start frame and increment the end frame so motion
+        # blur will render correctly.
+        start_frame -= 1
+        end_frame += 1
+
+        # Extract patches alembic.
+        path_no_ext, _ = os.path.splitext(instance.context.data["currentFile"])
+        kwargs = {"attrPrefix": ["xgen"], "stripNamespaces": True}
+        alembic_files = []
+        for palette in cmds.ls(type="xgmPalette"):
+            patch_names = []
+            for description in xgenm.descriptions(palette):
+                for name in xgenm.boundGeometry(palette, description):
+                    patch_names.append(name)
+
+            alembic_file = "{}__{}.abc".format(
+                path_no_ext, palette.replace(":", "__ns__")
+            )
+            extract_alembic(
+                alembic_file,
+                root=patch_names,
+                selection=False,
+                startFrame=float(start_frame),
+                endFrame=float(end_frame),
+                verbose=True,
+                **kwargs
+            )
+            alembic_files.append(alembic_file)
+
+        template_data = copy.deepcopy(instance.data["anatomyData"])
+        published_maya_path = StringTemplate(
+            instance.context.data["anatomy"].templates["publish"]["file"]
+        ).format(template_data)
+        published_basename, _ = os.path.splitext(published_maya_path)
+
+        for source in alembic_files:
+            destination = os.path.join(
+                os.path.dirname(instance.data["resourcesDir"]),
+                os.path.basename(
+                    source.replace(path_no_ext, published_basename)
+                )
+            )
+            transfers.append((source, destination))
+
+        # Validate that we are using the published workfile.
+        deadline_settings = instance.context.get("deadline")
+        if deadline_settings:
+            publish_settings = deadline_settings["publish"]
+            if not publish_settings["MayaSubmitDeadline"]["use_published"]:
+                self.log.debug(
+                    "Not using the published workfile. Abort Xgen extraction."
+                )
+                return
+
+        # Collect Xgen and Delta files.
+        xgen_files = []
+        sources = []
+        current_dir = os.path.dirname(instance.context.data["currentFile"])
+        attrs = ["xgFileName", "xgBaseFile"]
+        for palette in cmds.ls(type="xgmPalette"):
+            for attr in attrs:
+                source = os.path.join(
+                    current_dir, cmds.getAttr(palette + "."
+ attr) + ) + if not os.path.exists(source): + continue + + ext = os.path.splitext(source)[1] + if ext == ".xgen": + xgen_files.append(source) + if ext == ".xgd": + sources.append(source) + + # Copy .xgen file to temporary location and modify. + staging_dir = self.staging_dir(instance) + for source in xgen_files: + destination = os.path.join(staging_dir, os.path.basename(source)) + shutil.copy(source, destination) + + lines = [] + with open(destination, "r") as f: + for line in [line.rstrip() for line in f]: + if line.startswith("\txgProjectPath"): + path = os.path.dirname(instance.data["resourcesDir"]) + line = "\txgProjectPath\t\t{}/".format( + path.replace("\\", "/") + ) + + lines.append(line) + + with open(destination, "w") as f: + f.write("\n".join(lines)) + + sources.append(destination) + + # Add resource files to workfile instance. + for source in sources: + basename = os.path.basename(source) + destination = os.path.join( + os.path.dirname(instance.data["resourcesDir"]), basename + ) + transfers.append((source, destination)) + + destination_dir = os.path.join( + instance.data["resourcesDir"], "collections" + ) + for palette in cmds.ls(type="xgmPalette"): + project_path = xgenm.getAttr("xgProjectPath", palette) + data_path = xgenm.getAttr("xgDataPath", palette) + data_path = data_path.replace("${PROJECT}", project_path) + for path in data_path.split(";"): + for root, _, files in os.walk(path): + for f in files: + source = os.path.join(root, f) + destination = "{}/{}{}".format( + destination_dir, + palette.replace(":", "__ns__"), + source.replace(path, "") + ) + transfers.append((source, destination)) + + for source, destination in transfers: + self.log.debug("Transfer: {} > {}".format(source, destination)) + + instance.data["transfers"] = transfers + + # Set palette attributes in preparation for workfile publish. + attrs = {"xgFileName": None, "xgBaseFile": ""} + data = {} + for palette in cmds.ls(type="xgmPalette"): + attrs["xgFileName"] = "resources/{}.xgen".format( + palette.replace(":", "__ns__") + ) + for attr, value in attrs.items(): + node_attr = palette + "." + attr + + old_value = cmds.getAttr(node_attr) + try: + data[palette][attr] = old_value + except KeyError: + data[palette] = {attr: old_value} + + cmds.setAttr(node_attr, value, type="string") + self.log.info( + "Setting \"{}\" on \"{}\"".format(value, node_attr) + ) + + cmds.setAttr(palette + "." + "xgExportAsDelta", False) + + instance.data["xgenAttributes"] = data diff --git a/openpype/hosts/maya/plugins/publish/extract_xgen.py b/openpype/hosts/maya/plugins/publish/extract_xgen.py new file mode 100644 index 0000000000..0cc842b4ec --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/extract_xgen.py @@ -0,0 +1,142 @@ +import os +import copy +import tempfile + +from maya import cmds +import xgenm + +from openpype.pipeline import publish +from openpype.hosts.maya.api.lib import ( + maintained_selection, attribute_values, write_xgen_file, delete_after +) +from openpype.lib import StringTemplate + + +class ExtractXgen(publish.Extractor): + """Extract Xgen + + Workflow: + - Duplicate nodes used for patches. + - Export palette and import onto duplicate nodes. + - Export/Publish duplicate nodes and palette. + - Export duplicate palette to .xgen file and add to publish. + - Publish all xgen files as resources. 
+ """ + + label = "Extract Xgen" + hosts = ["maya"] + families = ["xgen"] + scene_type = "ma" + + def process(self, instance): + if "representations" not in instance.data: + instance.data["representations"] = [] + + staging_dir = self.staging_dir(instance) + maya_filename = "{}.{}".format(instance.data["name"], self.scene_type) + maya_filepath = os.path.join(staging_dir, maya_filename) + + # Get published xgen file name. + template_data = copy.deepcopy(instance.data["anatomyData"]) + template_data.update({"ext": "xgen"}) + templates = instance.context.data["anatomy"].templates["publish"] + xgen_filename = StringTemplate(templates["file"]).format(template_data) + + xgen_path = os.path.join( + self.staging_dir(instance), xgen_filename + ).replace("\\", "/") + type = "mayaAscii" if self.scene_type == "ma" else "mayaBinary" + + # Duplicate xgen setup. + with delete_after() as delete_bin: + duplicate_nodes = [] + # Collect nodes to export. + for _, connections in instance.data["xgenConnections"].items(): + transform_name = connections["transform"].split(".")[0] + + # Duplicate_transform subd patch geometry. + duplicate_transform = cmds.duplicate(transform_name)[0] + delete_bin.append(duplicate_transform) + + # Discard the children. + shapes = cmds.listRelatives(duplicate_transform, shapes=True) + children = cmds.listRelatives( + duplicate_transform, children=True + ) + cmds.delete(set(children) - set(shapes)) + + duplicate_transform = cmds.parent( + duplicate_transform, world=True + )[0] + + duplicate_nodes.append(duplicate_transform) + + # Export temp xgen palette files. + temp_xgen_path = os.path.join( + tempfile.gettempdir(), "temp.xgen" + ).replace("\\", "/") + xgenm.exportPalette( + instance.data["xgmPalette"].replace("|", ""), temp_xgen_path + ) + self.log.info("Extracted to {}".format(temp_xgen_path)) + + # Import xgen onto the duplicate. + with maintained_selection(): + cmds.select(duplicate_nodes) + palette = xgenm.importPalette(temp_xgen_path, []) + + delete_bin.append(palette) + + # Export duplicated palettes. + xgenm.exportPalette(palette, xgen_path) + + # Export Maya file. + attribute_data = {"{}.xgFileName".format(palette): xgen_filename} + with attribute_values(attribute_data): + with maintained_selection(): + cmds.select(duplicate_nodes + [palette]) + cmds.file( + maya_filepath, + force=True, + type=type, + exportSelected=True, + preserveReferences=False, + constructionHistory=True, + shader=True, + constraints=True, + expressions=True + ) + + self.log.info("Extracted to {}".format(maya_filepath)) + + if os.path.exists(temp_xgen_path): + os.remove(temp_xgen_path) + + data = { + "xgDataPath": os.path.join( + instance.data["resourcesDir"], + "collections", + palette.replace(":", "__ns__") + ).replace("\\", "/"), + "xgProjectPath": os.path.dirname( + instance.data["resourcesDir"] + ).replace("\\", "/") + } + write_xgen_file(data, xgen_path) + + # Adding representations. 
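+        # Two representations are staged below: the exported .xgen palette
+        # file and the Maya scene containing the duplicated patch geometry.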
+        representation = {
+            "name": "xgen",
+            "ext": "xgen",
+            "files": xgen_filename,
+            "stagingDir": staging_dir,
+        }
+        instance.data["representations"].append(representation)
+
+        representation = {
+            "name": self.scene_type,
+            "ext": self.scene_type,
+            "files": maya_filename,
+            "stagingDir": staging_dir
+        }
+        instance.data["representations"].append(representation)
diff --git a/openpype/hosts/maya/plugins/publish/extract_xgen_cache.py b/openpype/hosts/maya/plugins/publish/extract_xgen_cache.py
deleted file mode 100644
index 77350f343e..0000000000
--- a/openpype/hosts/maya/plugins/publish/extract_xgen_cache.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import os
-
-from maya import cmds
-
-from openpype.pipeline import publish
-from openpype.hosts.maya.api.lib import (
-    suspended_refresh,
-    maintained_selection
-)
-
-
-class ExtractXgenCache(publish.Extractor):
-    """Produce an alembic of just xgen interactive groom
-
-    """
-
-    label = "Extract Xgen ABC Cache"
-    hosts = ["maya"]
-    families = ["xgen"]
-    optional = True
-
-    def process(self, instance):
-
-        # Collect the out set nodes
-        out_descriptions = [node for node in instance
-                            if cmds.nodeType(node) == "xgmSplineDescription"]
-
-        start = 1
-        end = 1
-
-        self.log.info("Extracting Xgen Cache..")
-        dirname = self.staging_dir(instance)
-
-        parent_dir = self.staging_dir(instance)
-        filename = "{name}.abc".format(**instance.data)
-        path = os.path.join(parent_dir, filename)
-
-        with suspended_refresh():
-            with maintained_selection():
-                command = (
-                    '-file '
-                    + path
-                    + ' -df "ogawa" -fr '
-                    + str(start)
-                    + ' '
-                    + str(end)
-                    + ' -step 1 -mxf -wfw'
-                )
-                for desc in out_descriptions:
-                    command += (" -obj " + desc)
-                cmds.xgmSplineCache(export=True, j=command)
-
-        if "representations" not in instance.data:
-            instance.data["representations"] = []
-
-        representation = {
-            'name': 'abc',
-            'ext': 'abc',
-            'files': filename,
-            "stagingDir": dirname,
-        }
-        instance.data["representations"].append(representation)
-
-        self.log.info("Extracted {} to {}".format(instance, dirname))
diff --git a/openpype/hosts/maya/plugins/publish/reset_xgen_attributes.py b/openpype/hosts/maya/plugins/publish/reset_xgen_attributes.py
new file mode 100644
index 0000000000..b90885663c
--- /dev/null
+++ b/openpype/hosts/maya/plugins/publish/reset_xgen_attributes.py
@@ -0,0 +1,36 @@
+from maya import cmds
+
+import pyblish.api
+
+
+class ResetXgenAttributes(pyblish.api.InstancePlugin):
+    """Reset Xgen attributes.
+
+    When the incremental save of the workfile triggers, the Xgen attributes
+    change, so this plugin changes them back to their pre-publish values.
+    """
+
+    label = "Reset Xgen Attributes"
+    # Offset to run after workfile increment plugin.
+    order = pyblish.api.IntegratorOrder + 10.0
+    families = ["workfile"]
+
+    def process(self, instance):
+        xgen_attributes = instance.data.get("xgenAttributes", {})
+        if not xgen_attributes:
+            return
+
+        for palette, data in xgen_attributes.items():
+            for attr, value in data.items():
+                node_attr = "{}.{}".format(palette, attr)
+                self.log.info(
+                    "Setting \"{}\" on \"{}\"".format(value, node_attr)
+                )
+                cmds.setAttr(node_attr, value, type="string")
+            cmds.setAttr(palette + ".xgExportAsDelta", True)
+
+        # Need to save the scene, because the attribute changes above do not
+        # mark the scene as modified, so the user could otherwise exit
+        # without committing the changes.
+        self.log.info("Saving changes.")
+        cmds.file(save=True)
diff --git a/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py b/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py
new file mode 100644
index 0000000000..3b0ffd52d7
--- /dev/null
+++ b/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py
@@ -0,0 +1,106 @@
+import maya.cmds as cmds
+
+import pyblish.api
+from openpype.pipeline.publish import (
+    ValidateContentsOrder, PublishValidationError
+)
+
+
+class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
+    """Validate Arnold Scene Source.
+
+    We require at least one root node/parent for the meshes. This ensures
+    we can duplicate the nodes and preserve their names.
+
+    If using proxies, the nodes need to share the same names and must not be
+    parented to the world. This requires at least two groups: the content
+    nodes in one and the proxy nodes in another.
+    """
+
+    order = ValidateContentsOrder
+    hosts = ["maya"]
+    families = ["ass"]
+    label = "Validate Arnold Scene Source"
+
+    def _get_nodes_data(self, nodes):
+        ungrouped_nodes = []
+        nodes_by_name = {}
+        parents = []
+        for node in nodes:
+            node_split = node.split("|")
+            if len(node_split) == 2:
+                ungrouped_nodes.append(node)
+
+            parent = "|".join(node_split[:-1])
+            if parent:
+                parents.append(parent)
+
+            nodes_by_name[node_split[-1]] = node
+            # 'listRelatives' returns None when there are no shapes.
+            for shape in cmds.listRelatives(node, shapes=True) or []:
+                nodes_by_name[shape.split("|")[-1]] = shape
+
+        return ungrouped_nodes, nodes_by_name, parents
+
+    def process(self, instance):
+        ungrouped_nodes = []
+
+        nodes, content_nodes_by_name, content_parents = self._get_nodes_data(
+            instance.data["setMembers"]
+        )
+        ungrouped_nodes.extend(nodes)
+
+        nodes, proxy_nodes_by_name, proxy_parents = self._get_nodes_data(
+            instance.data.get("proxy", [])
+        )
+        ungrouped_nodes.extend(nodes)
+
+        # Validate against nodes directly parented to world.
+        if ungrouped_nodes:
+            raise PublishValidationError(
+                "Found nodes parented to the world: {}\n"
+                "All nodes need to be grouped.".format(ungrouped_nodes)
+            )
+
+        # Proxy validation.
+        if not instance.data.get("proxy", []):
+            return
+
+        # Validate that the content and proxy node counts are the same.
+        if len(instance.data["setMembers"]) != len(instance.data["proxy"]):
+            raise PublishValidationError(
+                "The number of content nodes ({}) and proxy nodes ({}) needs"
+                " to be the same.".format(
+                    len(instance.data["setMembers"]),
+                    len(instance.data["proxy"])
+                )
+            )
+
+        # Validate against content and proxy nodes sharing the same parent.
+        if list(set(content_parents) & set(proxy_parents)):
+            raise PublishValidationError(
+                "Content and proxy nodes cannot share the same parent."
+            )
+
+        # Validate that content and proxy nodes share the same names.
+        sorted_content_names = sorted(content_nodes_by_name.keys())
+        sorted_proxy_names = sorted(proxy_nodes_by_name.keys())
+        odd_content_names = list(
+            set(sorted_content_names) - set(sorted_proxy_names)
+        )
+        odd_content_nodes = [
+            content_nodes_by_name[x] for x in odd_content_names
+        ]
+        odd_proxy_names = list(
+            set(sorted_proxy_names) - set(sorted_content_names)
+        )
+        odd_proxy_nodes = [
+            proxy_nodes_by_name[x] for x in odd_proxy_names
+        ]
+        if sorted_content_names != sorted_proxy_names:
+            raise PublishValidationError(
+                "Content and proxy nodes need to share the same names.\n"
+                "Content nodes not matching: {}\n"
+                "Proxy nodes not matching: {}".format(
+                    odd_content_nodes, odd_proxy_nodes
+                )
+            )
diff --git a/openpype/hosts/maya/plugins/publish/validate_ass_relative_paths.py b/openpype/hosts/maya/plugins/publish/validate_ass_relative_paths.py
index ac6ce4d22d..6975d583bb 100644
--- a/openpype/hosts/maya/plugins/publish/validate_ass_relative_paths.py
+++ b/openpype/hosts/maya/plugins/publish/validate_ass_relative_paths.py
@@ -2,11 +2,13 @@ import os
 import types
 
 import maya.cmds as cmds
+from mtoa.core import createOptions
 
 import pyblish.api
 from openpype.pipeline.publish import (
     RepairAction,
     ValidateContentsOrder,
+    PublishValidationError
 )
@@ -34,8 +36,9 @@ class ValidateAssRelativePaths(pyblish.api.InstancePlugin):
                 "defaultArnoldRenderOptions.pspath"
             )
         except ValueError:
-            assert False, ("Can not validate, render setting were not opened "
-                           "yet so Arnold setting cannot be validate")
+            raise PublishValidationError(
+                "Default Arnold options have not been created yet."
+            )
 
         scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True))
         scene_name, _ = os.path.splitext(scene_basename)
@@ -66,6 +69,8 @@ class ValidateAssRelativePaths(pyblish.api.InstancePlugin):
 
     @classmethod
     def repair(cls, instance):
+        createOptions()
+
         texture_path = cmds.getAttr("defaultArnoldRenderOptions.tspath")
         procedural_path = cmds.getAttr("defaultArnoldRenderOptions.pspath")
diff --git a/openpype/hosts/maya/plugins/publish/validate_attributes.py b/openpype/hosts/maya/plugins/publish/validate_attributes.py
index 136c38bc1d..7a1f0cf086 100644
--- a/openpype/hosts/maya/plugins/publish/validate_attributes.py
+++ b/openpype/hosts/maya/plugins/publish/validate_attributes.py
@@ -58,23 +58,23 @@ class ValidateAttributes(pyblish.api.ContextPlugin):
             # Filter families.
             families = [instance.data["family"]]
             families += instance.data.get("families", [])
-            families = list(set(families) & set(self.attributes.keys()))
+            families = list(set(families) & set(cls.attributes.keys()))
             if not families:
                 continue
 
             # Get all attributes to validate.
attributes = {} for family in families: - for preset in self.attributes[family]: + for preset in cls.attributes[family]: [node_name, attribute_name] = preset.split(".") try: attributes[node_name].update( - {attribute_name: self.attributes[family][preset]} + {attribute_name: cls.attributes[family][preset]} ) except KeyError: attributes.update({ node_name: { - attribute_name: self.attributes[family][preset] + attribute_name: cls.attributes[family][preset] } }) diff --git a/openpype/hosts/maya/plugins/publish/validate_color_sets.py b/openpype/hosts/maya/plugins/publish/validate_color_sets.py index 905417bafa..7ce3cca61a 100644 --- a/openpype/hosts/maya/plugins/publish/validate_color_sets.py +++ b/openpype/hosts/maya/plugins/publish/validate_color_sets.py @@ -19,7 +19,6 @@ class ValidateColorSets(pyblish.api.Validator): order = ValidateMeshOrder hosts = ['maya'] families = ['model'] - category = 'geometry' label = 'Mesh ColorSets' actions = [openpype.hosts.maya.api.action.SelectInvalidAction, RepairAction] diff --git a/openpype/hosts/maya/plugins/publish/validate_maya_units.py b/openpype/hosts/maya/plugins/publish/validate_maya_units.py index 5698d795ff..ad256b6a72 100644 --- a/openpype/hosts/maya/plugins/publish/validate_maya_units.py +++ b/openpype/hosts/maya/plugins/publish/validate_maya_units.py @@ -11,10 +11,6 @@ from openpype.pipeline.publish import ( ) -def float_round(num, places=0, direction=ceil): - return direction(num * (10**places)) / float(10**places) - - class ValidateMayaUnits(pyblish.api.ContextPlugin): """Check if the Maya units are set correct""" @@ -36,18 +32,12 @@ class ValidateMayaUnits(pyblish.api.ContextPlugin): # Collected units linearunits = context.data.get('linearUnits') angularunits = context.data.get('angularUnits') - # TODO(antirotor): This is hack as for framerates having multiple - # decimal places. FTrack is ceiling decimal values on - # fps to two decimal places but Maya 2019+ is reporting those fps - # with much higher resolution. As we currently cannot fix Ftrack - # rounding, we have to round those numbers coming from Maya. - # NOTE: this must be revisited yet again as it seems that Ftrack is - # now flooring the value? 
- fps = float_round(context.data.get('fps'), 2, ceil) + + fps = context.data.get('fps') # TODO repace query with using 'context.data["assetEntity"]' asset_doc = get_current_project_asset() - asset_fps = asset_doc["data"]["fps"] + asset_fps = mayalib.convert_to_maya_fps(asset_doc["data"]["fps"]) self.log.info('Units (linear): {0}'.format(linearunits)) self.log.info('Units (angular): {0}'.format(angularunits)) diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py b/openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py index c1c0636b9e..fa4c66952c 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py @@ -19,7 +19,6 @@ class ValidateMeshArnoldAttributes(pyblish.api.InstancePlugin): order = ValidateMeshOrder hosts = ["maya"] families = ["model"] - category = "geometry" label = "Mesh Arnold Attributes" actions = [ openpype.hosts.maya.api.action.SelectInvalidAction, diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py b/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py index 36a0da7a59..0eece1014e 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py @@ -48,7 +48,6 @@ class ValidateMeshHasUVs(pyblish.api.InstancePlugin): order = ValidateMeshOrder hosts = ['maya'] families = ['model'] - category = 'geometry' label = 'Mesh Has UVs' actions = [openpype.hosts.maya.api.action.SelectInvalidAction] optional = True diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_lamina_faces.py b/openpype/hosts/maya/plugins/publish/validate_mesh_lamina_faces.py index 4427c6eece..f120361583 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_lamina_faces.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_lamina_faces.py @@ -15,8 +15,6 @@ class ValidateMeshLaminaFaces(pyblish.api.InstancePlugin): order = ValidateMeshOrder hosts = ['maya'] families = ['model'] - category = 'geometry' - version = (0, 1, 0) label = 'Mesh Lamina Faces' actions = [openpype.hosts.maya.api.action.SelectInvalidAction] diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py b/openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py index 0ef2716559..78e844d201 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py @@ -19,8 +19,6 @@ class ValidateMeshNonZeroEdgeLength(pyblish.api.InstancePlugin): order = ValidateMeshOrder families = ['model'] hosts = ['maya'] - category = 'geometry' - version = (0, 1, 0) label = 'Mesh Edge Length Non Zero' actions = [openpype.hosts.maya.api.action.SelectInvalidAction] optional = True diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_normals_unlocked.py b/openpype/hosts/maya/plugins/publish/validate_mesh_normals_unlocked.py index c8892a8e59..1b754a9829 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_normals_unlocked.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_normals_unlocked.py @@ -20,8 +20,6 @@ class ValidateMeshNormalsUnlocked(pyblish.api.Validator): order = ValidateMeshOrder hosts = ['maya'] families = ['model'] - category = 'geometry' - version = (0, 1, 0) label = 'Mesh Normals Unlocked' actions = [openpype.hosts.maya.api.action.SelectInvalidAction, RepairAction] diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py 
b/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py index be7324a68f..be23f61ec5 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py @@ -235,7 +235,6 @@ class ValidateMeshHasOverlappingUVs(pyblish.api.InstancePlugin): order = ValidateMeshOrder hosts = ['maya'] families = ['model'] - category = 'geometry' label = 'Mesh Has Overlapping UVs' actions = [openpype.hosts.maya.api.action.SelectInvalidAction] optional = True diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_single_uv_set.py b/openpype/hosts/maya/plugins/publish/validate_mesh_single_uv_set.py index 6ca8c06ba5..faa360380e 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_single_uv_set.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_single_uv_set.py @@ -21,9 +21,7 @@ class ValidateMeshSingleUVSet(pyblish.api.InstancePlugin): order = ValidateMeshOrder hosts = ['maya'] families = ['model', 'pointcache'] - category = 'uv' optional = True - version = (0, 1, 0) label = "Mesh Single UV Set" actions = [openpype.hosts.maya.api.action.SelectInvalidAction, RepairAction] diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_vertices_have_edges.py b/openpype/hosts/maya/plugins/publish/validate_mesh_vertices_have_edges.py index 1e6d290ae7..9ac7735501 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_vertices_have_edges.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_vertices_have_edges.py @@ -63,7 +63,6 @@ class ValidateMeshVerticesHaveEdges(pyblish.api.InstancePlugin): order = ValidateMeshOrder hosts = ['maya'] families = ['model'] - category = 'geometry' label = 'Mesh Vertices Have Edges' actions = [openpype.hosts.maya.api.action.SelectInvalidAction, RepairAction] diff --git a/openpype/hosts/maya/plugins/publish/validate_no_default_camera.py b/openpype/hosts/maya/plugins/publish/validate_no_default_camera.py index 1a5773e6a7..a4fb938d43 100644 --- a/openpype/hosts/maya/plugins/publish/validate_no_default_camera.py +++ b/openpype/hosts/maya/plugins/publish/validate_no_default_camera.py @@ -16,7 +16,6 @@ class ValidateNoDefaultCameras(pyblish.api.InstancePlugin): order = ValidateContentsOrder hosts = ['maya'] families = ['camera'] - version = (0, 1, 0) label = "No Default Cameras" actions = [openpype.hosts.maya.api.action.SelectInvalidAction] diff --git a/openpype/hosts/maya/plugins/publish/validate_no_namespace.py b/openpype/hosts/maya/plugins/publish/validate_no_namespace.py index 01c77e5b2e..e91b99359d 100644 --- a/openpype/hosts/maya/plugins/publish/validate_no_namespace.py +++ b/openpype/hosts/maya/plugins/publish/validate_no_namespace.py @@ -23,8 +23,6 @@ class ValidateNoNamespace(pyblish.api.InstancePlugin): order = ValidateContentsOrder hosts = ['maya'] families = ['model'] - category = 'cleanup' - version = (0, 1, 0) label = 'No Namespaces' actions = [openpype.hosts.maya.api.action.SelectInvalidAction, RepairAction] diff --git a/openpype/hosts/maya/plugins/publish/validate_no_null_transforms.py b/openpype/hosts/maya/plugins/publish/validate_no_null_transforms.py index b430c2b63c..f77fc81dc1 100644 --- a/openpype/hosts/maya/plugins/publish/validate_no_null_transforms.py +++ b/openpype/hosts/maya/plugins/publish/validate_no_null_transforms.py @@ -43,8 +43,6 @@ class ValidateNoNullTransforms(pyblish.api.InstancePlugin): order = ValidateContentsOrder hosts = ['maya'] families = ['model'] - category = 'cleanup' - version = (0, 1, 0) label = 'No 
Empty/Null Transforms' actions = [RepairAction, openpype.hosts.maya.api.action.SelectInvalidAction] diff --git a/openpype/hosts/maya/plugins/publish/validate_rig_joints_hidden.py b/openpype/hosts/maya/plugins/publish/validate_rig_joints_hidden.py index d5bf7fd1cf..30d95128a2 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rig_joints_hidden.py +++ b/openpype/hosts/maya/plugins/publish/validate_rig_joints_hidden.py @@ -24,7 +24,6 @@ class ValidateRigJointsHidden(pyblish.api.InstancePlugin): order = ValidateContentsOrder hosts = ['maya'] families = ['rig'] - version = (0, 1, 0) label = "Joints Hidden" actions = [openpype.hosts.maya.api.action.SelectInvalidAction, RepairAction] diff --git a/openpype/hosts/maya/plugins/publish/validate_scene_set_workspace.py b/openpype/hosts/maya/plugins/publish/validate_scene_set_workspace.py index ec2bea220d..f1fa4d3c4c 100644 --- a/openpype/hosts/maya/plugins/publish/validate_scene_set_workspace.py +++ b/openpype/hosts/maya/plugins/publish/validate_scene_set_workspace.py @@ -31,8 +31,6 @@ class ValidateSceneSetWorkspace(pyblish.api.ContextPlugin): order = ValidatePipelineOrder hosts = ['maya'] - category = 'scene' - version = (0, 1, 0) label = 'Maya Workspace Set' def process(self, context): diff --git a/openpype/hosts/maya/plugins/publish/validate_shape_default_names.py b/openpype/hosts/maya/plugins/publish/validate_shape_default_names.py index 651c6bcec9..4ab669f46b 100644 --- a/openpype/hosts/maya/plugins/publish/validate_shape_default_names.py +++ b/openpype/hosts/maya/plugins/publish/validate_shape_default_names.py @@ -38,9 +38,7 @@ class ValidateShapeDefaultNames(pyblish.api.InstancePlugin): order = ValidateContentsOrder hosts = ['maya'] families = ['model'] - category = 'cleanup' optional = True - version = (0, 1, 0) label = "Shape Default Naming" actions = [openpype.hosts.maya.api.action.SelectInvalidAction, RepairAction] diff --git a/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py b/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py index 65551c8d5e..0147aa8a52 100644 --- a/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py +++ b/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py @@ -32,9 +32,7 @@ class ValidateTransformNamingSuffix(pyblish.api.InstancePlugin): order = ValidateContentsOrder hosts = ['maya'] families = ['model'] - category = 'cleanup' optional = True - version = (0, 1, 0) label = 'Suffix Naming Conventions' actions = [openpype.hosts.maya.api.action.SelectInvalidAction] SUFFIX_NAMING_TABLE = {"mesh": ["_GEO", "_GES", "_GEP", "_OSD"], diff --git a/openpype/hosts/maya/plugins/publish/validate_transform_zero.py b/openpype/hosts/maya/plugins/publish/validate_transform_zero.py index da569195e8..abd9e00af1 100644 --- a/openpype/hosts/maya/plugins/publish/validate_transform_zero.py +++ b/openpype/hosts/maya/plugins/publish/validate_transform_zero.py @@ -18,8 +18,6 @@ class ValidateTransformZero(pyblish.api.Validator): order = ValidateContentsOrder hosts = ["maya"] families = ["model"] - category = "geometry" - version = (0, 1, 0) label = "Transform Zero (Freeze)" actions = [openpype.hosts.maya.api.action.SelectInvalidAction] diff --git a/openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py b/openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py index 4211e76a73..e78962bf97 100644 --- a/openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py +++ 
b/openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py
@@ -13,7 +13,6 @@ class ValidateUnrealMeshTriangulated(pyblish.api.InstancePlugin):
     order = ValidateMeshOrder
     hosts = ["maya"]
     families = ["staticMesh"]
-    category = "geometry"
     label = "Mesh is Triangulated"
     actions = [openpype.hosts.maya.api.action.SelectInvalidAction]
     active = False
diff --git a/openpype/hosts/maya/plugins/publish/validate_vray.py b/openpype/hosts/maya/plugins/publish/validate_vray.py
new file mode 100644
index 0000000000..045ac258a1
--- /dev/null
+++ b/openpype/hosts/maya/plugins/publish/validate_vray.py
@@ -0,0 +1,18 @@
+from maya import cmds
+
+import pyblish.api
+from openpype.pipeline import PublishValidationError
+
+
+class ValidateVray(pyblish.api.InstancePlugin):
+    """Validate general V-Ray setup."""
+
+    order = pyblish.api.ValidatorOrder
+    label = 'VRay'
+    hosts = ["maya"]
+    families = ["vrayproxy"]
+
+    def process(self, instance):
+        # Validate that the V-Ray plugin is loaded.
+        if not cmds.pluginInfo("vrayformaya", query=True, loaded=True):
+            raise PublishValidationError("V-Ray plugin is not loaded.")
diff --git a/openpype/hosts/maya/plugins/publish/validate_xgen.py b/openpype/hosts/maya/plugins/publish/validate_xgen.py
new file mode 100644
index 0000000000..2870909974
--- /dev/null
+++ b/openpype/hosts/maya/plugins/publish/validate_xgen.py
@@ -0,0 +1,59 @@
+import json
+
+import maya.cmds as cmds
+import xgenm
+
+import pyblish.api
+from openpype.pipeline.publish import PublishValidationError
+
+
+class ValidateXgen(pyblish.api.InstancePlugin):
+    """Validate Xgen data."""
+
+    label = "Validate Xgen"
+    order = pyblish.api.ValidatorOrder
+    hosts = ["maya"]
+    families = ["xgen"]
+
+    def process(self, instance):
+        set_members = instance.data.get("setMembers")
+
+        # Only 1 collection/node per instance.
+        if len(set_members) != 1:
+            raise PublishValidationError(
+                "Only one collection per instance is allowed."
+                " Found:\n{}".format(set_members)
+            )
+
+        # Only an xgen palette node is allowed.
+        node_type = cmds.nodeType(set_members[0])
+        if node_type != "xgmPalette":
+            raise PublishValidationError(
+                "Only nodes of type \"xgmPalette\" are allowed. Referred to"
+                " as \"collection\" in the Maya UI."
+                " Node type found: {}".format(node_type)
+            )
+
+        # The collection cannot have inactive modifiers, because Xgen will
+        # try to look for them when loading.
+        palette = instance.data["xgmPalette"].replace("|", "")
+        inactive_modifiers = {}
+        for description in instance.data["xgmDescriptions"]:
+            description = description.split("|")[-2]
+            modifier_names = xgenm.fxModules(palette, description)
+            for name in modifier_names:
+                attr = xgenm.getAttr("active", palette, description, name)
+                # Attribute values are lowercase strings of false/true.
+                if attr == "false":
+                    try:
+                        inactive_modifiers[description].append(name)
+                    except KeyError:
+                        inactive_modifiers[description] = [name]
+
+        if inactive_modifiers:
+            raise PublishValidationError(
+                "There are inactive modifiers on the collection. "
+                "Please delete these:\n{}".format(
+                    json.dumps(inactive_modifiers, indent=4, sort_keys=True)
+                )
+            )
diff --git a/openpype/hosts/maya/startup/userSetup.py b/openpype/hosts/maya/startup/userSetup.py
index 40cd51f2d8..bfa5e6e60d 100644
--- a/openpype/hosts/maya/startup/userSetup.py
+++ b/openpype/hosts/maya/startup/userSetup.py
@@ -1,16 +1,33 @@
 import os
+from functools import partial
+
 from openpype.settings import get_project_settings
 from openpype.pipeline import install_host
 from openpype.hosts.maya.api import MayaHost
+
 from maya import cmds
 
 host = MayaHost()
 install_host(host)
 
-print("starting OpenPype usersetup")
+print("Starting OpenPype usersetup...")
 
-# build a shelf
+# Open Workfile Post Initialization.
+key = "OPENPYPE_OPEN_WORKFILE_POST_INITIALIZATION"
+if bool(int(os.environ.get(key, "0"))):
+    cmds.evalDeferred(
+        partial(
+            cmds.file,
+            os.environ["AVALON_LAST_WORKFILE"],
+            open=True,
+            force=True
+        ),
+        lowestPriority=True
+    )
+
+
+# Build a shelf.
 settings = get_project_settings(os.environ['AVALON_PROJECT'])
 shelf_preset = settings['maya'].get('project_shelf')
@@ -26,7 +43,10 @@ if shelf_preset:
 
         print(import_string)
         exec(import_string)
-    cmds.evalDeferred("mlib.shelf(name=shelf_preset['name'], iconPath=icon_path, preset=shelf_preset)")
+    cmds.evalDeferred(
+        "mlib.shelf(name=shelf_preset['name'], iconPath=icon_path,"
+        " preset=shelf_preset)"
+    )
 
-print("finished OpenPype usersetup")
+print("Finished OpenPype usersetup.")
diff --git a/openpype/hosts/nuke/api/gizmo_menu.py b/openpype/hosts/nuke/api/gizmo_menu.py
index 9edfc62e3b..5838ee8a8a 100644
--- a/openpype/hosts/nuke/api/gizmo_menu.py
+++ b/openpype/hosts/nuke/api/gizmo_menu.py
@@ -53,12 +53,18 @@ class GizmoMenu():
 
             item_type = item.get("sourcetype")
 
-            if item_type == ("python" or "file"):
+            if item_type == "python":
                 parent.addCommand(
                     item["title"],
                     command=str(item["command"]),
                     icon=item.get("icon"),
-                    shortcut=item.get("hotkey")
+                    shortcut=item.get("shortcut")
+                )
+            elif item_type == "file":
+                parent.addCommand(
+                    item['title'],
+                    "nuke.createNode('{}')".format(item.get('file_name')),
+                    shortcut=item.get('shortcut')
                 )
 
             # add separator
diff --git a/openpype/hosts/nuke/plugins/publish/collect_context_data.py b/openpype/hosts/nuke/plugins/publish/collect_context_data.py
index 5a1cdcf49e..b487c946f0 100644
--- a/openpype/hosts/nuke/plugins/publish/collect_context_data.py
+++ b/openpype/hosts/nuke/plugins/publish/collect_context_data.py
@@ -1,7 +1,7 @@
 import os
 import nuke
 import pyblish.api
-import openpype.api as api
+from openpype.lib import get_version_from_path
 import openpype.hosts.nuke.api as napi
 from openpype.pipeline import KnownPublishError
 
@@ -57,7 +57,7 @@ class CollectContextData(pyblish.api.ContextPlugin):
             "fps": root_node['fps'].value(),
 
             "currentFile": current_file,
-            "version": int(api.get_version_from_path(current_file)),
+            "version": int(get_version_from_path(current_file)),
 
             "host": pyblish.api.current_host(),
             "hostVersion": nuke.NUKE_VERSION_STRING
diff --git a/openpype/hosts/photoshop/plugins/create/create_image.py b/openpype/hosts/photoshop/plugins/create/create_image.py
index ca3bbfd27c..3d82d6b6f0 100644
--- a/openpype/hosts/photoshop/plugins/create/create_image.py
+++ b/openpype/hosts/photoshop/plugins/create/create_image.py
@@ -5,7 +5,7 @@ from openpype.lib import BoolDef
 from openpype.pipeline import (
     Creator,
     CreatedInstance,
-    legacy_io
+    CreatorError
 )
 from openpype.lib import prepare_template_data
 from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS
@@ -13,27 +13,16 @@
 from openpype.hosts.photoshop.api.pipeline import cache_and_get_instances
 
 
 class ImageCreator(Creator):
-    """Creates image instance for publishing."""
+    """Creates image instance for publishing.
+
+    The result of an 'image' instance is an image of all visible layers, or
+    image(s) of the selected layers.
+    """
     identifier = "image"
     label = "Image"
     family = "image"
     description = "Image creator"
 
-    def collect_instances(self):
-        for instance_data in cache_and_get_instances(self):
-            # legacy instances have family=='image'
-            creator_id = (instance_data.get("creator_identifier") or
-                          instance_data.get("family"))
-
-            if creator_id == self.identifier:
-                instance_data = self._handle_legacy(instance_data)
-                layer = api.stub().get_layer(instance_data["members"][0])
-                instance_data["layer"] = layer
-                instance = CreatedInstance.from_existing(
-                    instance_data, self
-                )
-                self._add_instance_to_context(instance)
-
     def create(self, subset_name_from_ui, data, pre_create_data):
         groups_to_create = []
         top_layers_to_wrap = []
@@ -59,9 +48,10 @@ class ImageCreator(Creator):
             try:
                 group = stub.group_selected_layers(subset_name_from_ui)
             except:
-                raise ValueError("Cannot group locked Bakcground layer!")
+                raise CreatorError("Cannot group locked Background layer!")
             groups_to_create.append(group)
 
+        # create empty group if nothing selected
         if not groups_to_create and not top_layers_to_wrap:
             group = stub.create_group(subset_name_from_ui)
             groups_to_create.append(group)
@@ -73,13 +63,16 @@ class ImageCreator(Creator):
             groups_to_create.append(group)
 
         layer_name = ''
-        creating_multiple_groups = len(groups_to_create) > 1
+        # use artist chosen option OR force layer if more subsets are created
+        # to differentiate them
+        use_layer_name = (pre_create_data.get("use_layer_name") or
+                          len(groups_to_create) > 1)
         for group in groups_to_create:
             subset_name = subset_name_from_ui  # reset to name from creator UI
             layer_names_in_hierarchy = []
             created_group_name = self._clean_highlights(stub, group.name)
 
-            if creating_multiple_groups:
+            if use_layer_name:
                 layer_name = re.sub(
                     "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS),
                     "",
@@ -112,6 +105,21 @@ class ImageCreator(Creator):
             stub.rename_layer(group.id,
                               stub.PUBLISH_ICON + created_group_name)
 
+    def collect_instances(self):
+        for instance_data in cache_and_get_instances(self):
+            # legacy instances have family=='image'
+            creator_id = (instance_data.get("creator_identifier") or
+                          instance_data.get("family"))
+
+            if creator_id == self.identifier:
+                instance_data = self._handle_legacy(instance_data)
+                layer = api.stub().get_layer(instance_data["members"][0])
+                instance_data["layer"] = layer
+                instance = CreatedInstance.from_existing(
+                    instance_data, self
+                )
+                self._add_instance_to_context(instance)
+
     def update_instances(self, update_list):
         self.log.debug("update_list:: {}".format(update_list))
         for created_inst, _changes in update_list:
@@ -137,12 +145,42 @@ class ImageCreator(Creator):
                     label="Create only for selected"),
             BoolDef("create_multiple",
                     default=True,
-                    label="Create separate instance for each selected")
+                    label="Create separate instance for each selected"),
+            BoolDef("use_layer_name",
+                    default=False,
+                    label="Use layer name in subset")
         ]
 
         return output
 
     def get_detail_description(self):
-        return """Creator for Image instances"""
+        return """Creator for Image instances
+
+        The main publishable item in Photoshop is of the `image` family. The
+        result of this item (instance) is a picture that can be loaded and
+        used in other DCCs (for example as a single layer in a composition in
+        AfterEffects, or as a reference in Maya etc.).
+
+        There are a couple of options for what to publish:
+        - separate image per selected layer (or group of layers)
+        - one image for all selected layers
+        - all visible layers (groups) flattened into a single image
+
+        In most cases you would like to keep `Create only for selected`
+        toggled on and select what you would like to publish.
+        Toggling this option off allows you to create an instance for all
+        visible layers without needing to select them explicitly.
+
+        Use 'Create separate instance for each selected' to create separate
+        images per selected layer (group of layers).
+
+        'Use layer name in subset' explicitly adds the layer name to the
+        subset name. The position of this name is configurable in
+        `project_settings/global/tools/creator/subset_name_profiles`.
+        If the layer placeholder ({layer}) is not used in
+        `subset_name_profiles` but the layer name should be used (set
+        explicitly in the UI or implicitly if multiple images are created),
+        it is added in capitalized form as a suffix to the subset name.
+        """
 
     def _handle_legacy(self, instance_data):
         """Converts old instances to new format."""
@@ -155,7 +193,7 @@ class ImageCreator(Creator):
             instance_data.pop("uuid")
 
         if not instance_data.get("task"):
-            instance_data["task"] = legacy_io.Session.get("AVALON_TASK")
+            instance_data["task"] = self.create_context.get_current_task_name()
 
         if not instance_data.get("variant"):
             instance_data["variant"] = ''
diff --git a/openpype/hosts/photoshop/plugins/create/workfile_creator.py b/openpype/hosts/photoshop/plugins/create/workfile_creator.py
index 8ee9a0d832..f5d56adcbc 100644
--- a/openpype/hosts/photoshop/plugins/create/workfile_creator.py
+++ b/openpype/hosts/photoshop/plugins/create/workfile_creator.py
@@ -2,8 +2,7 @@ import openpype.hosts.photoshop.api as api
 from openpype.client import get_asset_by_name
 from openpype.pipeline import (
     AutoCreator,
-    CreatedInstance,
-    legacy_io
+    CreatedInstance
 )
 from openpype.hosts.photoshop.api.pipeline import cache_and_get_instances
 
@@ -38,10 +37,11 @@ class PSWorkfileCreator(AutoCreator):
                 existing_instance = instance
                 break
 
-        project_name = legacy_io.Session["AVALON_PROJECT"]
-        asset_name = legacy_io.Session["AVALON_ASSET"]
-        task_name = legacy_io.Session["AVALON_TASK"]
-        host_name = legacy_io.Session["AVALON_APP"]
+        context = self.create_context
+        project_name = context.get_current_project_name()
+        asset_name = context.get_current_asset_name()
+        task_name = context.get_current_task_name()
+        host_name = context.host_name
 
         if existing_instance is None:
             asset_doc = get_asset_by_name(project_name, asset_name)
             subset_name = self.get_subset_name(
diff --git a/openpype/hosts/traypublisher/api/pipeline.py b/openpype/hosts/traypublisher/api/pipeline.py
index 0a8ddaa343..3264f52b0f 100644
--- a/openpype/hosts/traypublisher/api/pipeline.py
+++ b/openpype/hosts/traypublisher/api/pipeline.py
@@ -37,7 +37,7 @@ class TrayPublisherHost(HostBase, IPublishHost):
         return HostContext.get_context_data()
 
     def update_context_data(self, data, changes):
-        HostContext.save_context_data(data, changes)
+        HostContext.save_context_data(data)
 
     def set_project_name(self, project_name):
         # TODO Deregister project specific plugins and register new project
diff --git a/openpype/hosts/traypublisher/plugins/create/create_movie_batch.py b/openpype/hosts/traypublisher/plugins/create/create_movie_batch.py
index 1dc4bad9b3..d077131e4c 100644
--- a/openpype/hosts/traypublisher/plugins/create/create_movie_batch.py +++ b/openpype/hosts/traypublisher/plugins/create/create_movie_batch.py @@ -33,6 +33,8 @@ class BatchMovieCreator(TrayPublishCreator): create_allow_context_change = False version_regex = re.compile(r"^(.+)_v([0-9]+)$") + # Position batch creator after simple creators + order = 110 def __init__(self, project_settings, *args, **kwargs): super(BatchMovieCreator, self).__init__(project_settings, diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py index a64b7c2911..9eb7724a60 100644 --- a/openpype/lib/__init__.py +++ b/openpype/lib/__init__.py @@ -30,7 +30,7 @@ from .vendor_bin_utils import ( ) from .attribute_definitions import ( - AbtractAttrDef, + AbstractAttrDef, UIDef, UISeparatorDef, @@ -82,9 +82,6 @@ from .mongo import ( validate_mongo_connection, OpenPypeMongoConnection ) -from .anatomy import ( - Anatomy -) from .dateutils import ( get_datetime_data, @@ -119,36 +116,19 @@ from .transcoding import ( ) from .avalon_context import ( CURRENT_DOC_SCHEMAS, - PROJECT_NAME_ALLOWED_SYMBOLS, - PROJECT_NAME_REGEX, create_project, - is_latest, - any_outdated, - get_asset, - get_linked_assets, - get_latest_version, - get_system_general_anatomy_data, get_workfile_template_key, get_workfile_template_key_from_context, - get_workdir_data, - get_workdir, - get_workdir_with_workdir_data, get_last_workfile_with_version, get_last_workfile, - create_workfile_doc, - save_workfile_data_to_doc, - get_workfile_doc, - BuildWorkfile, get_creator_by_name, get_custom_workfile_template, - change_timer_to_current_context, - get_custom_workfile_template_by_context, get_custom_workfile_template_by_string_context, get_custom_workfile_template @@ -186,8 +166,6 @@ from .plugin_tools import ( get_subset_name, get_subset_name_with_asset_doc, prepare_template_data, - filter_pyblish_plugins, - set_plugin_attributes_from_settings, source_hash, ) @@ -246,7 +224,7 @@ __all__ = [ "get_ffmpeg_tool_path", "is_oiio_supported", - "AbtractAttrDef", + "AbstractAttrDef", "UIDef", "UISeparatorDef", @@ -278,34 +256,17 @@ __all__ = [ "convert_ffprobe_fps_to_float", "CURRENT_DOC_SCHEMAS", - "PROJECT_NAME_ALLOWED_SYMBOLS", - "PROJECT_NAME_REGEX", "create_project", - "is_latest", - "any_outdated", - "get_asset", - "get_linked_assets", - "get_latest_version", - "get_system_general_anatomy_data", "get_workfile_template_key", "get_workfile_template_key_from_context", - "get_workdir_data", - "get_workdir", - "get_workdir_with_workdir_data", "get_last_workfile_with_version", "get_last_workfile", - "create_workfile_doc", - "save_workfile_data_to_doc", - "get_workfile_doc", - "BuildWorkfile", "get_creator_by_name", - "change_timer_to_current_context", - "get_custom_workfile_template_by_context", "get_custom_workfile_template_by_string_context", "get_custom_workfile_template", @@ -338,8 +299,6 @@ __all__ = [ "TaskNotSetError", "get_subset_name", "get_subset_name_with_asset_doc", - "filter_pyblish_plugins", - "set_plugin_attributes_from_settings", "source_hash", "format_file_size", @@ -358,8 +317,6 @@ __all__ = [ "terminal", - "Anatomy", - "get_datetime_data", "get_formatted_current_time", diff --git a/openpype/lib/anatomy.py b/openpype/lib/anatomy.py deleted file mode 100644 index 6d339f058f..0000000000 --- a/openpype/lib/anatomy.py +++ /dev/null @@ -1,38 +0,0 @@ -"""Code related to project Anatomy was moved -to 'openpype.pipeline.anatomy' please change your imports as soon as -possible. 
File will be probably removed in OpenPype 3.14.* -""" - -import warnings -import functools - - -class AnatomyDeprecatedWarning(DeprecationWarning): - pass - - -def anatomy_deprecated(func): - """Mark functions as deprecated. - - It will result in a warning being emitted when the function is used. - """ - - @functools.wraps(func) - def new_func(*args, **kwargs): - warnings.simplefilter("always", AnatomyDeprecatedWarning) - warnings.warn( - ( - "Deprecated import of 'Anatomy'." - " Class was moved to 'openpype.pipeline.anatomy'." - " Please change your imports of Anatomy in codebase." - ), - category=AnatomyDeprecatedWarning - ) - return func(*args, **kwargs) - return new_func - - -@anatomy_deprecated -def Anatomy(*args, **kwargs): - from openpype.pipeline.anatomy import Anatomy - return Anatomy(*args, **kwargs) diff --git a/openpype/lib/attribute_definitions.py b/openpype/lib/attribute_definitions.py index 04db0edc64..b5cd15f41a 100644 --- a/openpype/lib/attribute_definitions.py +++ b/openpype/lib/attribute_definitions.py @@ -20,7 +20,7 @@ def register_attr_def_class(cls): Currently are registered definitions used to deserialize data to objects. Attrs: - cls (AbtractAttrDef): Non-abstract class to be registered with unique + cls (AbstractAttrDef): Non-abstract class to be registered with unique 'type' attribute. Raises: @@ -36,7 +36,7 @@ def get_attributes_keys(attribute_definitions): """Collect keys from list of attribute definitions. Args: - attribute_definitions (List[AbtractAttrDef]): Objects of attribute + attribute_definitions (List[AbstractAttrDef]): Objects of attribute definitions. Returns: @@ -57,8 +57,8 @@ def get_default_values(attribute_definitions): """Receive default values for attribute definitions. Args: - attribute_definitions (List[AbtractAttrDef]): Attribute definitions for - which default values should be collected. + attribute_definitions (List[AbstractAttrDef]): Attribute definitions + for which default values should be collected. Returns: Dict[str, Any]: Default values for passet attribute definitions. @@ -76,15 +76,15 @@ def get_default_values(attribute_definitions): class AbstractAttrDefMeta(ABCMeta): - """Meta class to validate existence of 'key' attribute. + """Metaclass to validate existence of 'key' attribute. - Each object of `AbtractAttrDef` mus have defined 'key' attribute. + Each object of `AbstractAttrDef` mus have defined 'key' attribute. """ def __call__(self, *args, **kwargs): obj = super(AbstractAttrDefMeta, self).__call__(*args, **kwargs) init_class = getattr(obj, "__init__class__", None) - if init_class is not AbtractAttrDef: + if init_class is not AbstractAttrDef: raise TypeError("{} super was not called in __init__.".format( type(obj) )) @@ -92,7 +92,7 @@ class AbstractAttrDefMeta(ABCMeta): @six.add_metaclass(AbstractAttrDefMeta) -class AbtractAttrDef(object): +class AbstractAttrDef(object): """Abstraction of attribute definiton. 
Each attribute definition must have implemented validation and @@ -145,7 +145,7 @@ class AbtractAttrDef(object): self.disabled = disabled self._id = uuid.uuid4().hex - self.__init__class__ = AbtractAttrDef + self.__init__class__ = AbstractAttrDef @property def id(self): @@ -154,7 +154,15 @@ class AbtractAttrDef(object): def __eq__(self, other): if not isinstance(other, self.__class__): return False - return self.key == other.key + return ( + self.key == other.key + and self.hidden == other.hidden + and self.default == other.default + and self.disabled == other.disabled + ) + + def __ne__(self, other): + return not self.__eq__(other) @abstractproperty def type(self): @@ -212,7 +220,7 @@ class AbtractAttrDef(object): # UI attribute definitoins won't hold value # ----------------------------------------- -class UIDef(AbtractAttrDef): +class UIDef(AbstractAttrDef): is_value_def = False def __init__(self, key=None, default=None, *args, **kwargs): @@ -237,7 +245,7 @@ class UILabelDef(UIDef): # Attribute defintioins should hold value # --------------------------------------- -class UnknownDef(AbtractAttrDef): +class UnknownDef(AbstractAttrDef): """Definition is not known because definition is not available. This attribute can be used to keep existing data unchanged but does not @@ -254,7 +262,7 @@ class UnknownDef(AbtractAttrDef): return value -class HiddenDef(AbtractAttrDef): +class HiddenDef(AbstractAttrDef): """Hidden value of Any type. This attribute can be used for UI purposes to pass values related @@ -274,7 +282,7 @@ class HiddenDef(AbtractAttrDef): return value -class NumberDef(AbtractAttrDef): +class NumberDef(AbstractAttrDef): """Number definition. Number can have defined minimum/maximum value and decimal points. Value @@ -350,7 +358,7 @@ class NumberDef(AbtractAttrDef): return round(float(value), self.decimals) -class TextDef(AbtractAttrDef): +class TextDef(AbstractAttrDef): """Text definition. Text can have multiline option so endline characters are allowed regex @@ -415,7 +423,7 @@ class TextDef(AbtractAttrDef): return data -class EnumDef(AbtractAttrDef): +class EnumDef(AbstractAttrDef): """Enumeration of single item from items. Args: @@ -457,7 +465,7 @@ class EnumDef(AbtractAttrDef): return self.default def serialize(self): - data = super(TextDef, self).serialize() + data = super(EnumDef, self).serialize() data["items"] = copy.deepcopy(self.items) return data @@ -523,7 +531,8 @@ class EnumDef(AbtractAttrDef): return output -class BoolDef(AbtractAttrDef): + +class BoolDef(AbstractAttrDef): """Boolean representation. Args: @@ -768,7 +777,7 @@ class FileDefItem(object): return output -class FileDef(AbtractAttrDef): +class FileDef(AbstractAttrDef): """File definition. It is possible to define filters of allowed file extensions and if supports folders. @@ -886,7 +895,7 @@ def serialize_attr_def(attr_def): """Serialize attribute definition to data. Args: - attr_def (AbtractAttrDef): Attribute definition to serialize. + attr_def (AbstractAttrDef): Attribute definition to serialize. Returns: Dict[str, Any]: Serialized data. @@ -899,7 +908,7 @@ def serialize_attr_defs(attr_defs): """Serialize attribute definitions to data. Args: - attr_defs (List[AbtractAttrDef]): Attribute definitions to serialize. + attr_defs (List[AbstractAttrDef]): Attribute definitions to serialize. Returns: List[Dict[str, Any]]: Serialized data. 
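A minimal sketch of the stricter equality introduced above for
`AbstractAttrDef` subclasses; the `BoolDef` signature is assumed from its
usage in the Photoshop creator changes in this same diff, not re-verified
against the full class definition:

    # Sketch only; assumes BoolDef(key, default=..., label=...) as used in
    # create_image.py elsewhere in this diff.
    from openpype.lib import BoolDef

    a = BoolDef("use_layer_name", default=False, label="Use layer name")
    b = BoolDef("use_layer_name", default=True, label="Use layer name")

    # __eq__ now also compares 'hidden', 'default' and 'disabled' instead of
    # only 'key', and __ne__ is defined explicitly so Python 2 behaves
    # consistently with Python 3.
    assert a != b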
diff --git a/openpype/lib/avalon_context.py b/openpype/lib/avalon_context.py index 12f4a5198b..a9ae27cb79 100644 --- a/openpype/lib/avalon_context.py +++ b/openpype/lib/avalon_context.py @@ -1,6 +1,5 @@ """Should be used only inside of hosts.""" -import os -import copy + import platform import logging import functools @@ -10,17 +9,12 @@ import six from openpype.client import ( get_project, - get_assets, get_asset_by_name, - get_last_version_by_subset_name, - get_workfile_info, ) from openpype.client.operations import ( CURRENT_ASSET_DOC_SCHEMA, CURRENT_PROJECT_SCHEMA, CURRENT_PROJECT_CONFIG_SCHEMA, - PROJECT_NAME_ALLOWED_SYMBOLS, - PROJECT_NAME_REGEX, ) from .profiles_filtering import filter_profiles from .path_templates import StringTemplate @@ -128,70 +122,6 @@ def with_pipeline_io(func): return wrapped -@deprecated("openpype.pipeline.context_tools.is_representation_from_latest") -def is_latest(representation): - """Return whether the representation is from latest version - - Args: - representation (dict): The representation document from the database. - - Returns: - bool: Whether the representation is of latest version. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline.context_tools import is_representation_from_latest - - return is_representation_from_latest(representation) - - -@deprecated("openpype.pipeline.load.any_outdated_containers") -def any_outdated(): - """Return whether the current scene has any outdated content. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline.load import any_outdated_containers - - return any_outdated_containers() - - -@deprecated("openpype.pipeline.context_tools.get_current_project_asset") -def get_asset(asset_name=None): - """ Returning asset document from database by its name. - - Doesn't count with duplicities on asset names! - - Args: - asset_name (str) - - Returns: - (MongoDB document) - - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline.context_tools import get_current_project_asset - - return get_current_project_asset(asset_name=asset_name) - - -@deprecated("openpype.pipeline.template_data.get_general_template_data") -def get_system_general_anatomy_data(system_settings=None): - """ - Deprecated: - Function will be removed after release version 3.15.* - """ - from openpype.pipeline.template_data import get_general_template_data - - return get_general_template_data(system_settings) - - @deprecated("openpype.client.get_linked_asset_ids") def get_linked_asset_ids(asset_doc): """Return linked asset ids for `asset_doc` from DB @@ -214,66 +144,6 @@ def get_linked_asset_ids(asset_doc): return get_linked_asset_ids(project_name, asset_doc=asset_doc) -@deprecated("openpype.client.get_linked_assets") -def get_linked_assets(asset_doc): - """Return linked assets for `asset_doc` from DB - - Args: - asset_doc (dict): Asset document from DB - - Returns: - (list) Asset documents of input links for passed asset doc. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline import legacy_io - from openpype.client import get_linked_assets - - project_name = legacy_io.active_project() - - return get_linked_assets(project_name, asset_doc=asset_doc) - - -@deprecated("openpype.client.get_last_version_by_subset_name") -def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): - """Retrieve latest version from `asset_name`, and `subset_name`. 
- - Do not use if you want to query more than 5 latest versions as this method - query 3 times to mongo for each call. For those cases is better to use - more efficient way, e.g. with help of aggregations. - - Args: - asset_name (str): Name of asset. - subset_name (str): Name of subset. - dbcon (AvalonMongoDB, optional): Avalon Mongo connection with Session. - project_name (str, optional): Find latest version in specific project. - - Returns: - None: If asset, subset or version were not found. - dict: Last version document for entered. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - if not project_name: - if not dbcon: - from openpype.pipeline import legacy_io - - log.debug("Using `legacy_io` for query.") - dbcon = legacy_io - # Make sure is installed - dbcon.install() - - project_name = dbcon.active_project() - - return get_last_version_by_subset_name( - project_name, subset_name, asset_name=asset_name - ) - - @deprecated( "openpype.pipeline.workfile.get_workfile_template_key_from_context") def get_workfile_template_key_from_context( @@ -361,142 +231,6 @@ def get_workfile_template_key( ) -@deprecated("openpype.pipeline.template_data.get_template_data") -def get_workdir_data(project_doc, asset_doc, task_name, host_name): - """Prepare data for workdir template filling from entered information. - - Args: - project_doc (dict): Mongo document of project from MongoDB. - asset_doc (dict): Mongo document of asset from MongoDB. - task_name (str): Task name for which are workdir data preapred. - host_name (str): Host which is used to workdir. This is required - because workdir template may contain `{app}` key. - - Returns: - dict: Data prepared for filling workdir template. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline.template_data import get_template_data - - return get_template_data( - project_doc, asset_doc, task_name, host_name - ) - - -@deprecated("openpype.pipeline.workfile.get_workdir_with_workdir_data") -def get_workdir_with_workdir_data( - workdir_data, anatomy=None, project_name=None, template_key=None -): - """Fill workdir path from entered data and project's anatomy. - - It is possible to pass only project's name instead of project's anatomy but - one of them **must** be entered. It is preferred to enter anatomy if is - available as initialization of a new Anatomy object may be time consuming. - - Args: - workdir_data (dict): Data to fill workdir template. - anatomy (Anatomy): Anatomy object for specific project. Optional if - `project_name` is entered. - project_name (str): Project's name. Optional if `anatomy` is entered - otherwise Anatomy object is created with using the project name. - template_key (str): Key of work templates in anatomy templates. If not - passed `get_workfile_template_key_from_context` is used to get it. - dbcon(AvalonMongoDB): Mongo connection. Required only if 'template_key' - and 'project_name' are not passed. - - Returns: - TemplateResult: Workdir path. - - Raises: - ValueError: When both `anatomy` and `project_name` are set to None. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - if not anatomy and not project_name: - raise ValueError(( - "Missing required arguments one of `project_name` or `anatomy`" - " must be entered." 
- )) - - if not project_name: - project_name = anatomy.project_name - - from openpype.pipeline.workfile import get_workdir_with_workdir_data - - return get_workdir_with_workdir_data( - workdir_data, project_name, anatomy, template_key - ) - - -@deprecated("openpype.pipeline.workfile.get_workdir_with_workdir_data") -def get_workdir( - project_doc, - asset_doc, - task_name, - host_name, - anatomy=None, - template_key=None -): - """Fill workdir path from entered data and project's anatomy. - - Args: - project_doc (dict): Mongo document of project from MongoDB. - asset_doc (dict): Mongo document of asset from MongoDB. - task_name (str): Task name for which are workdir data preapred. - host_name (str): Host which is used to workdir. This is required - because workdir template may contain `{app}` key. In `Session` - is stored under `AVALON_APP` key. - anatomy (Anatomy): Optional argument. Anatomy object is created using - project name from `project_doc`. It is preferred to pass this - argument as initialization of a new Anatomy object may be time - consuming. - template_key (str): Key of work templates in anatomy templates. Default - value is defined in `get_workdir_with_workdir_data`. - - Returns: - TemplateResult: Workdir path. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline.workfile import get_workdir - # Output is TemplateResult object which contain useful data - return get_workdir( - project_doc, - asset_doc, - task_name, - host_name, - anatomy, - template_key - ) - - -@deprecated("openpype.pipeline.context_tools.get_template_data_from_session") -def template_data_from_session(session=None): - """ Return dictionary with template from session keys. - - Args: - session (dict, Optional): The Session to use. If not provided use the - currently active global Session. - - Returns: - dict: All available data from session. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline.context_tools import get_template_data_from_session - - return get_template_data_from_session(session) - - @deprecated("openpype.pipeline.context_tools.compute_session_changes") def compute_session_changes( session, task=None, asset=None, app=None, template_key=None @@ -588,133 +322,6 @@ def update_current_task(task=None, asset=None, app=None, template_key=None): return change_current_context(asset, task, template_key) -@deprecated("openpype.client.get_workfile_info") -def get_workfile_doc(asset_id, task_name, filename, dbcon=None): - """Return workfile document for entered context. - - Do not use this method to get more than one document. In that cases use - custom query as this will return documents from database one by one. - - Args: - asset_id (ObjectId): Mongo ID of an asset under which workfile belongs. - task_name (str): Name of task under which the workfile belongs. - filename (str): Name of a workfile. - dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and - `legacy_io` is used if not entered. - - Returns: - dict: Workfile document or None. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - # Use legacy_io if dbcon is not entered - if not dbcon: - from openpype.pipeline import legacy_io - dbcon = legacy_io - - project_name = dbcon.active_project() - return get_workfile_info(project_name, asset_id, task_name, filename) - - -@deprecated -def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): - """Creates or replace workfile document in mongo. 
- - Do not use this method to update data. This method will remove all - additional data from existing document. - - Args: - asset_doc (dict): Document of asset under which workfile belongs. - task_name (str): Name of task for which is workfile related to. - filename (str): Filename of workfile. - workdir (str): Path to directory where `filename` is located. - dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and - `legacy_io` is used if not entered. - """ - - from openpype.pipeline import Anatomy - from openpype.pipeline.template_data import get_template_data - - # Use legacy_io if dbcon is not entered - if not dbcon: - from openpype.pipeline import legacy_io - dbcon = legacy_io - - # Filter of workfile document - doc_filter = { - "type": "workfile", - "parent": asset_doc["_id"], - "task_name": task_name, - "filename": filename - } - # Document data are copy of filter - doc_data = copy.deepcopy(doc_filter) - - # Prepare project for workdir data - project_name = dbcon.active_project() - project_doc = get_project(project_name) - workdir_data = get_template_data( - project_doc, asset_doc, task_name, dbcon.Session["AVALON_APP"] - ) - # Prepare anatomy - anatomy = Anatomy(project_name) - # Get workdir path (result is anatomy.TemplateResult) - template_workdir = get_workdir_with_workdir_data( - workdir_data, anatomy - ) - template_workdir_path = str(template_workdir).replace("\\", "/") - - # Replace slashses in workdir path where workfile is located - mod_workdir = workdir.replace("\\", "/") - - # Replace workdir from templates with rootless workdir - rootles_workdir = mod_workdir.replace( - template_workdir_path, - template_workdir.rootless.replace("\\", "/") - ) - - doc_data["schema"] = "pype:workfile-1.0" - doc_data["files"] = ["/".join([rootles_workdir, filename])] - doc_data["data"] = {} - - dbcon.replace_one( - doc_filter, - doc_data, - upsert=True - ) - - -@deprecated -def save_workfile_data_to_doc(workfile_doc, data, dbcon=None): - if not workfile_doc: - # TODO add log message - return - - if not data: - return - - # Use legacy_io if dbcon is not entered - if not dbcon: - from openpype.pipeline import legacy_io - dbcon = legacy_io - - # Convert data to mongo modification keys/values - # - this is naive implementation which does not expect nested - # dictionaries - set_data = {} - for key, value in data.items(): - new_key = "data.{}".format(key) - set_data[new_key] = value - - # Update workfile document with data - dbcon.update_one( - {"_id": workfile_doc["_id"]}, - {"$set": set_data} - ) - - @deprecated("openpype.pipeline.workfile.BuildWorkfile") def BuildWorkfile(): """Build workfile class was moved to workfile pipeline. @@ -747,38 +354,6 @@ def get_creator_by_name(creator_name, case_sensitive=False): return get_legacy_creator_by_name(creator_name, case_sensitive) -@deprecated -def change_timer_to_current_context(): - """Called after context change to change timers. - - Deprecated: - This method is specific for TimersManager module so please use the - functionality from there. 
Function will be removed after release - version 3.15.* - """ - - from openpype.pipeline import legacy_io - - webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") - if not webserver_url: - log.warning("Couldn't find webserver url") - return - - rest_api_url = "{}/timers_manager/start_timer".format(webserver_url) - try: - import requests - except Exception: - log.warning("Couldn't start timer") - return - data = { - "project_name": legacy_io.Session["AVALON_PROJECT"], - "asset_name": legacy_io.Session["AVALON_ASSET"], - "task_name": legacy_io.Session["AVALON_TASK"] - } - - requests.post(rest_api_url, json=data) - - def _get_task_context_data_for_anatomy( project_doc, asset_doc, task_name, anatomy=None ): @@ -800,6 +375,8 @@ def _get_task_context_data_for_anatomy( dict: With Anatomy context data. """ + from openpype.pipeline.template_data import get_general_template_data + if anatomy is None: from openpype.pipeline import Anatomy anatomy = Anatomy(project_doc["name"]) @@ -840,7 +417,7 @@ def _get_task_context_data_for_anatomy( } } - system_general_data = get_system_general_anatomy_data() + system_general_data = get_general_template_data() data.update(system_general_data) return data diff --git a/openpype/lib/execute.py b/openpype/lib/execute.py index f1f2a4fa0a..39532b7aa5 100644 --- a/openpype/lib/execute.py +++ b/openpype/lib/execute.py @@ -117,12 +117,12 @@ def run_subprocess(*args, **kwargs): full_output = "" _stdout, _stderr = proc.communicate() if _stdout: - _stdout = _stdout.decode("utf-8") + _stdout = _stdout.decode("utf-8", errors="backslashreplace") full_output += _stdout logger.debug(_stdout) if _stderr: - _stderr = _stderr.decode("utf-8") + _stderr = _stderr.decode("utf-8", errors="backslashreplace") # Add additional line break if output already contains stdout if full_output: full_output += "\n" diff --git a/openpype/lib/plugin_tools.py b/openpype/lib/plugin_tools.py index 1e157dfbfd..10fd3940b8 100644 --- a/openpype/lib/plugin_tools.py +++ b/openpype/lib/plugin_tools.py @@ -8,7 +8,6 @@ import warnings import functools from openpype.client import get_asset_by_id -from openpype.settings import get_project_settings log = logging.getLogger(__name__) @@ -101,8 +100,6 @@ def get_subset_name_with_asset_doc( is not passed. dynamic_data (dict): Dynamic data specific for a creator which creates instance. - dbcon (AvalonMongoDB): Mongo connection to be able query asset document - if 'asset_doc' is not passed. """ from openpype.pipeline.create import get_subset_name @@ -202,122 +199,6 @@ def prepare_template_data(fill_pairs): return fill_data -@deprecated("openpype.pipeline.publish.lib.filter_pyblish_plugins") -def filter_pyblish_plugins(plugins): - """Filter pyblish plugins by presets. - - This servers as plugin filter / modifier for pyblish. It will load plugin - definitions from presets and filter those needed to be excluded. - - Args: - plugins (dict): Dictionary of plugins produced by :mod:`pyblish-base` - `discover()` method. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline.publish.lib import filter_pyblish_plugins - - filter_pyblish_plugins(plugins) - - -@deprecated -def set_plugin_attributes_from_settings( - plugins, superclass, host_name=None, project_name=None -): - """Change attribute values on Avalon plugins by project settings. - - This function should be used only in host context. Modify - behavior of plugins. - - Args: - plugins (list): Plugins discovered by origin avalon discover method. 
- superclass (object): Superclass of plugin type (e.g. Cretor, Loader). - host_name (str): Name of host for which plugins are loaded and from. - Value from environment `AVALON_APP` is used if not entered. - project_name (str): Name of project for which settings will be loaded. - Value from environment `AVALON_PROJECT` is used if not entered. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - # Function is not used anymore - from openpype.pipeline import LegacyCreator, LoaderPlugin - - # determine host application to use for finding presets - if host_name is None: - host_name = os.environ.get("AVALON_APP") - - if project_name is None: - project_name = os.environ.get("AVALON_PROJECT") - - # map plugin superclass to preset json. Currently supported is load and - # create (LoaderPlugin and LegacyCreator) - plugin_type = None - if superclass is LoaderPlugin or issubclass(superclass, LoaderPlugin): - plugin_type = "load" - elif superclass is LegacyCreator or issubclass(superclass, LegacyCreator): - plugin_type = "create" - - if not host_name or not project_name or plugin_type is None: - msg = "Skipped attributes override from settings." - if not host_name: - msg += " Host name is not defined." - - if not project_name: - msg += " Project name is not defined." - - if plugin_type is None: - msg += " Plugin type is unsupported for class {}.".format( - superclass.__name__ - ) - - print(msg) - return - - print(">>> Finding presets for {}:{} ...".format(host_name, plugin_type)) - - project_settings = get_project_settings(project_name) - plugin_type_settings = ( - project_settings - .get(host_name, {}) - .get(plugin_type, {}) - ) - global_type_settings = ( - project_settings - .get("global", {}) - .get(plugin_type, {}) - ) - if not global_type_settings and not plugin_type_settings: - return - - for plugin in plugins: - plugin_name = plugin.__name__ - - plugin_settings = None - # Look for plugin settings in host specific settings - if plugin_name in plugin_type_settings: - plugin_settings = plugin_type_settings[plugin_name] - - # Look for plugin settings in global settings - elif plugin_name in global_type_settings: - plugin_settings = global_type_settings[plugin_name] - - if not plugin_settings: - continue - - print(">>> We have preset for {}".format(plugin_name)) - for option, value in plugin_settings.items(): - if option == "enabled" and value is False: - setattr(plugin, "active", False) - print(" - is disabled by preset") - else: - setattr(plugin, option, value) - print(" - setting `{}`: `{}`".format(option, value)) - - def source_hash(filepath, *args): """Generate simple identifier for a source file. 
This is used to identify whether a source file has previously been diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py index 070d4eab18..ed37ff1897 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -64,6 +64,7 @@ class MayaPluginInfo(object): # Include all lights flag RenderSetupIncludeLights = attr.ib( default="1", validator=_validate_deadline_bool_value) + StrictErrorChecking = attr.ib(default=True) @attr.s @@ -219,6 +220,8 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline): "renderSetupIncludeLights", default_rs_include_lights) if rs_include_lights not in {"1", "0", True, False}: rs_include_lights = default_rs_include_lights + strict_error_checking = instance.data.get("strict_error_checking", + True) plugin_info = MayaPluginInfo( SceneFile=self.scene_path, Version=cmds.about(version=True), @@ -227,6 +230,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline): RenderSetupIncludeLights=rs_include_lights, # noqa ProjectPath=context.data["workspaceDir"], UsingRenderLayers=True, + StrictErrorChecking=strict_error_checking ) plugin_payload = attr.asdict(plugin_info) diff --git a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py index 984590ddba..b0560ce1e8 100644 --- a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py +++ b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py @@ -35,7 +35,7 @@ class OpenPypeVersion: self.prerelease = prerelease is_valid = True - if not major or not minor or not patch: + if major is None or minor is None or patch is None: is_valid = False self.is_valid = is_valid @@ -157,7 +157,7 @@ def get_openpype_version_from_path(path, build=True): # fix path for application bundle on macos if platform.system().lower() == "darwin": - path = os.path.join(path, "Contents", "MacOS", "lib", "Python") + path = os.path.join(path, "MacOS") version_file = os.path.join(path, "openpype", "version.py") if not os.path.isfile(version_file): @@ -189,6 +189,11 @@ def get_openpype_executable(): exe_list = config.GetConfigEntryWithDefault("OpenPypeExecutable", "") dir_list = config.GetConfigEntryWithDefault( "OpenPypeInstallationDirs", "") + + # clean '\ ' for MacOS pasting + if platform.system().lower() == "darwin": + exe_list = exe_list.replace("\\ ", " ") + dir_list = dir_list.replace("\\ ", " ") return exe_list, dir_list @@ -218,8 +223,8 @@ def get_requested_openpype_executable( requested_version_obj = OpenPypeVersion.from_string(requested_version) if not requested_version_obj: print(( - ">>> Requested version does not match version regex \"{}\"" - ).format(VERSION_REGEX)) + ">>> Requested version '{}' does not match version regex '{}'" + ).format(requested_version, VERSION_REGEX)) return None print(( @@ -272,7 +277,8 @@ def get_requested_openpype_executable( # Deadline decide. 
exe_list = [ os.path.join(version_dir, "openpype_console.exe"), - os.path.join(version_dir, "openpype_console") + os.path.join(version_dir, "openpype_console"), + os.path.join(version_dir, "MacOS", "openpype_console") ] return FileUtils.SearchFileList(";".join(exe_list)) diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py index 6b0f69d98f..ab4a3d5e9b 100644 --- a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py +++ b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py @@ -73,7 +73,7 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin): """ # fix path for application bundle on macos if platform.system().lower() == "darwin": - path = os.path.join(path, "Contents", "MacOS", "lib", "Python") + path = os.path.join(path, "MacOS") version_file = os.path.join(path, "openpype", "version.py") if not os.path.isfile(version_file): @@ -107,8 +107,11 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin): "Scanning for compatible requested " f"version {requested_version}")) dir_list = self.GetConfigEntry("OpenPypeInstallationDirs") + # clean '\ ' for MacOS pasting + if platform.system().lower() == "darwin": + dir_list = dir_list.replace("\\ ", " ") install_dir = DirectoryUtils.SearchDirectoryList(dir_list) - if dir: + if install_dir: sub_dirs = [ f.path for f in os.scandir(install_dir) if f.is_dir() @@ -120,6 +123,9 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin): openpype_versions.append((version, subdir)) exe_list = self.GetConfigEntry("OpenPypeExecutable") + # clean '\ ' for MacOS pasting + if platform.system().lower() == "darwin": + exe_list = exe_list.replace("\\ ", " ") exe = FileUtils.SearchFileList(exe_list) if openpype_versions: # if looking for requested compatible version, @@ -161,7 +167,9 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin): os.path.join( compatible_versions[-1][1], "openpype_console.exe"), os.path.join( - compatible_versions[-1][1], "openpype_console") + compatible_versions[-1][1], "openpype_console"), + os.path.join( + compatible_versions[-1][1], "MacOS", "openpype_console") ] exe = FileUtils.SearchFileList(";".join(exe_list)) diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py b/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py index 625a3f1a28..861f16518c 100644 --- a/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py +++ b/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py @@ -204,10 +204,10 @@ def info_about_input(oiiotool_path, filepath): _stdout, _stderr = popen.communicate() output = "" if _stdout: - output += _stdout.decode("utf-8") + output += _stdout.decode("utf-8", errors="backslashreplace") if _stderr: - output += _stderr.decode("utf-8") + output += _stderr.decode("utf-8", errors="backslashreplace") output = output.replace("\r\n", "\n") xml_started = False diff --git a/openpype/modules/ftrack/ftrack_module.py b/openpype/modules/ftrack/ftrack_module.py index 6f14f8428d..d61b5f0b26 100644 --- a/openpype/modules/ftrack/ftrack_module.py +++ b/openpype/modules/ftrack/ftrack_module.py @@ -64,6 +64,16 @@ class FtrackModule( self._timers_manager_module = None def get_ftrack_url(self): + """Resolved ftrack url. + + Resolving is trying to fill missing information in url and tried to + connect to the server. 
+ + Returns: + Union[str, None]: Final variant of url or None if url could not be + reached. + """ + if self._ftrack_url is _URL_NOT_SET: self._ftrack_url = resolve_ftrack_url( self._settings_ftrack_url, @@ -73,8 +83,19 @@ class FtrackModule( ftrack_url = property(get_ftrack_url) + @property + def settings_ftrack_url(self): + """Ftrack url from settings in a format as it is. + + Returns: + str: Ftrack url from settings. + """ + + return self._settings_ftrack_url + def get_global_environments(self): """Ftrack's global environments.""" + return { "FTRACK_SERVER": self.ftrack_url } @@ -510,7 +531,10 @@ def resolve_ftrack_url(url, logger=None): url = "https://" + url ftrack_url = None - if not url.endswith("ftrackapp.com"): + if url and _check_ftrack_url(url): + ftrack_url = url + + if not ftrack_url and not url.endswith("ftrackapp.com"): ftrackapp_url = url + ".ftrackapp.com" if _check_ftrack_url(ftrackapp_url): ftrack_url = ftrackapp_url diff --git a/openpype/modules/ftrack/ftrack_server/event_server_cli.py b/openpype/modules/ftrack/ftrack_server/event_server_cli.py index 25ebad6658..ad7ffd8e25 100644 --- a/openpype/modules/ftrack/ftrack_server/event_server_cli.py +++ b/openpype/modules/ftrack/ftrack_server/event_server_cli.py @@ -316,7 +316,7 @@ def main_loop(ftrack_url): statuser_failed_count = 0 # If thread failed test Ftrack and Mongo connection - elif not statuser_thread.isAlive(): + elif not statuser_thread.is_alive(): statuser_thread.join() statuser_thread = None ftrack_accessible = False @@ -359,7 +359,7 @@ def main_loop(ftrack_url): storer_failed_count = 0 # If thread failed test Ftrack and Mongo connection - elif not storer_thread.isAlive(): + elif not storer_thread.is_alive(): if storer_thread.mongo_error: raise MongoPermissionsError() storer_thread.join() @@ -396,7 +396,7 @@ def main_loop(ftrack_url): processor_failed_count = 0 # If thread failed test Ftrack and Mongo connection - elif not processor_thread.isAlive(): + elif not processor_thread.is_alive(): if processor_thread.mongo_error: raise Exception( "Exiting because have issue with acces to MongoDB" diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py index 2d06e2ab02..d6cb3daf0d 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py @@ -3,6 +3,7 @@ import json import copy import pyblish.api +from openpype.pipeline.publish import get_publish_repre_path from openpype.lib.openpype_version import get_openpype_version from openpype.lib.transcoding import ( get_ffprobe_streams, @@ -153,7 +154,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): if not review_representations or has_movie_review: for repre in thumbnail_representations: - repre_path = self._get_repre_path(instance, repre, False) + repre_path = get_publish_repre_path(instance, repre, False) if not repre_path: self.log.warning( "Published path is not set and source was removed." @@ -210,7 +211,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): "from {}".format(repre)) continue - repre_path = self._get_repre_path(instance, repre, False) + repre_path = get_publish_repre_path(instance, repre, False) if not repre_path: self.log.warning( "Published path is not set and source was removed." 
@@ -324,7 +325,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): # Add others representations as component for repre in other_representations: - published_path = self._get_repre_path(instance, repre, True) + published_path = get_publish_repre_path(instance, repre, True) if not published_path: continue # Create copy of base comp item and append it @@ -364,51 +365,6 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): def _collect_additional_metadata(self, streams): pass - def _get_repre_path(self, instance, repre, only_published): - """Get representation path that can be used for integration. - - When 'only_published' is set to true the validation of path is not - relevant. In that case we just need what is set in 'published_path' - as "reference". The reference is not used to get or upload the file but - for reference where the file was published. - - Args: - instance (pyblish.Instance): Processed instance object. Used - for source of staging dir if representation does not have - filled it. - repre (dict): Representation on instance which could be and - could not be integrated with main integrator. - only_published (bool): Care only about published paths and - ignore if filepath is not existing anymore. - - Returns: - str: Path to representation file. - None: Path is not filled or does not exists. - """ - - published_path = repre.get("published_path") - if published_path: - published_path = os.path.normpath(published_path) - if os.path.exists(published_path): - return published_path - - if only_published: - return published_path - - comp_files = repre["files"] - if isinstance(comp_files, (tuple, list, set)): - filename = comp_files[0] - else: - filename = comp_files - - staging_dir = repre.get("stagingDir") - if not staging_dir: - staging_dir = instance.data["stagingDir"] - src_path = os.path.normpath(os.path.join(staging_dir, filename)) - if os.path.exists(src_path): - return src_path - return None - def _get_asset_version_status_name(self, instance): if not self.asset_versions_status_profiles: return None diff --git a/openpype/modules/ftrack/tray/login_dialog.py b/openpype/modules/ftrack/tray/login_dialog.py index fbb3455775..f374a71178 100644 --- a/openpype/modules/ftrack/tray/login_dialog.py +++ b/openpype/modules/ftrack/tray/login_dialog.py @@ -139,8 +139,7 @@ class CredentialsDialog(QtWidgets.QDialog): self.fill_ftrack_url() def fill_ftrack_url(self): - url = os.getenv("FTRACK_SERVER") - checked_url = self.check_url(url) + checked_url = self.check_url() if checked_url == self.ftsite_input.text(): return @@ -154,7 +153,7 @@ class CredentialsDialog(QtWidgets.QDialog): self.api_input.setEnabled(enabled) self.user_input.setEnabled(enabled) - if not url: + if not checked_url: self.btn_advanced.hide() self.btn_simple.hide() self.btn_ftrack_login.hide() @@ -254,13 +253,13 @@ class CredentialsDialog(QtWidgets.QDialog): ) def _on_ftrack_login_clicked(self): - url = self.check_url(self.ftsite_input.text()) + url = self.check_url() if not url: return # If there is an existing server thread running we need to stop it. 
if self._login_server_thread: - if self._login_server_thread.isAlive(): + if self._login_server_thread.is_alive(): self._login_server_thread.stop() self._login_server_thread.join() self._login_server_thread = None @@ -302,21 +301,21 @@ class CredentialsDialog(QtWidgets.QDialog): if is_logged is not None: self.set_is_logged(is_logged) - def check_url(self, url): - if url is not None: - url = url.strip("/ ") - - if not url: + def check_url(self): + settings_url = self._module.settings_ftrack_url + url = self._module.ftrack_url + if not settings_url: self.set_error( "Ftrack URL is not defined in settings!" ) return - if "http" not in url: - if url.endswith("ftrackapp.com"): - url = "https://" + url - else: - url = "https://{}.ftrackapp.com".format(url) + if url is None: + self.set_error( + "Specified URL does not lead to a valid Ftrack server." + ) + return + try: result = requests.get( url, diff --git a/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py index cfd2d10fd9..fc15d5515f 100644 --- a/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py +++ b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py @@ -1,6 +1,8 @@ import os import pyblish.api +from openpype.pipeline.publish import get_publish_repre_path + class IntegrateShotgridPublish(pyblish.api.InstancePlugin): """ @@ -22,7 +24,9 @@ class IntegrateShotgridPublish(pyblish.api.InstancePlugin): for representation in instance.data.get("representations", []): - local_path = representation.get("published_path") + local_path = get_publish_repre_path( + instance, representation, False + ) code = os.path.basename(local_path) if representation.get("tags", []): diff --git a/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py index a1b7140e22..adfdca718c 100644 --- a/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py +++ b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py @@ -1,6 +1,7 @@ -import os import pyblish.api +from openpype.pipeline.publish import get_publish_repre_path + class IntegrateShotgridVersion(pyblish.api.InstancePlugin): """Integrate Shotgrid Version""" @@ -41,8 +42,9 @@ class IntegrateShotgridVersion(pyblish.api.InstancePlugin): data_to_update["sg_status_list"] = status for representation in instance.data.get("representations", []): - local_path = representation.get("published_path") - code = os.path.basename(local_path) + local_path = get_publish_repre_path( + instance, representation, False + ) if "shotgridreview" in representation.get("tags", []): diff --git a/openpype/modules/slack/manifest.yml b/openpype/modules/slack/manifest.yml index 7a65cc5915..233c39fbaf 100644 --- a/openpype/modules/slack/manifest.yml +++ b/openpype/modules/slack/manifest.yml @@ -19,6 +19,8 @@ oauth_config: - chat:write.public - files:write - channels:read + - users:read + - usergroups:read settings: org_deploy_enabled: false socket_mode_enabled: false diff --git a/openpype/modules/slack/plugins/publish/integrate_slack_api.py b/openpype/modules/slack/plugins/publish/integrate_slack_api.py index 612031efac..4e2557ccc7 100644 --- a/openpype/modules/slack/plugins/publish/integrate_slack_api.py +++ b/openpype/modules/slack/plugins/publish/integrate_slack_api.py @@ -8,6 +8,7 @@ from abc import ABCMeta, abstractmethod import time from openpype.client import OpenPypeMongoConnection +from 
openpype.pipeline.publish import get_publish_repre_path from openpype.lib.plugin_tools import prepare_template_data @@ -167,9 +168,8 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin): thumbnail_path = None for repre in instance.data.get("representations", []): if repre.get('thumbnail') or "thumbnail" in repre.get('tags', []): - repre_thumbnail_path = ( - repre.get("published_path") or - os.path.join(repre["stagingDir"], repre["files"]) + repre_thumbnail_path = get_publish_repre_path( + instance, repre, False ) if os.path.exists(repre_thumbnail_path): thumbnail_path = repre_thumbnail_path @@ -184,9 +184,8 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin): if (repre.get("review") or "review" in tags or "burnin" in tags): - repre_review_path = ( - repre.get("published_path") or - os.path.join(repre["stagingDir"], repre["files"]) + repre_review_path = get_publish_repre_path( + instance, repre, False ) if os.path.exists(repre_review_path): review_path = repre_review_path diff --git a/openpype/pipeline/__init__.py b/openpype/pipeline/__init__.py index f5319c5a48..7a2ef59a5a 100644 --- a/openpype/pipeline/__init__.py +++ b/openpype/pipeline/__init__.py @@ -86,6 +86,12 @@ from .context_tools import ( registered_host, deregister_host, get_process_id, + + get_current_context, + get_current_host_name, + get_current_project_name, + get_current_asset_name, + get_current_task_name, ) install = install_host uninstall = uninstall_host @@ -176,6 +182,13 @@ __all__ = ( "register_host", "registered_host", "deregister_host", + "get_process_id", + + "get_current_context", + "get_current_host_name", + "get_current_project_name", + "get_current_asset_name", + "get_current_task_name", # Backwards compatible function names "install", diff --git a/openpype/pipeline/anatomy.py b/openpype/pipeline/anatomy.py index a18b46d9ac..49d86d69d6 100644 --- a/openpype/pipeline/anatomy.py +++ b/openpype/pipeline/anatomy.py @@ -60,6 +60,7 @@ class BaseAnatomy(object): def __init__(self, project_doc, local_settings, site_name): project_name = project_doc["name"] self.project_name = project_name + self.project_code = project_doc["data"]["code"] if (site_name and site_name not in ["studio", "local", get_local_site_id()]): diff --git a/openpype/pipeline/colorspace.py b/openpype/pipeline/colorspace.py index e1ffe9d333..cb37b2c4ae 100644 --- a/openpype/pipeline/colorspace.py +++ b/openpype/pipeline/colorspace.py @@ -438,13 +438,14 @@ def get_imageio_file_rules(project_name, host_name, project_settings=None): # get file rules from global and host_name frules_global = imageio_global["file_rules"] - frules_host = imageio_host["file_rules"] + # host is optional, some might not have any settings + frules_host = imageio_host.get("file_rules", {}) # compile file rules dictionary file_rules = {} if frules_global["enabled"]: file_rules.update(frules_global["rules"]) - if frules_host["enabled"]: + if frules_host and frules_host["enabled"]: file_rules.update(frules_host["rules"]) return file_rules @@ -455,7 +456,7 @@ def _get_imageio_settings(project_settings, host_name): Args: project_settings (dict): project settings. - Defaults to None. + Defaults to None. 
host_name (str): host name Returns: @@ -463,6 +464,7 @@ def _get_imageio_settings(project_settings, host_name): """ # get image io from global and host_name imageio_global = project_settings["global"]["imageio"] - imageio_host = project_settings[host_name]["imageio"] + # host is optional, some might not have any settings + imageio_host = project_settings.get(host_name, {}).get("imageio", {}) return imageio_global, imageio_host diff --git a/openpype/pipeline/context_tools.py b/openpype/pipeline/context_tools.py index da0ce8ecf4..6610fd7da7 100644 --- a/openpype/pipeline/context_tools.py +++ b/openpype/pipeline/context_tools.py @@ -11,6 +11,7 @@ import pyblish.api from pyblish.lib import MessageHandler import openpype +from openpype.host import HostBase from openpype.client import ( get_project, get_asset_by_id, @@ -306,6 +307,58 @@ def debug_host(): return host +def get_current_host_name(): + """Current host name. + + Function is based on currently registered host integration or environment + variant 'AVALON_APP'. + + Returns: + Union[str, None]: Name of host integration in current process or None. + """ + + host = registered_host() + if isinstance(host, HostBase): + return host.name + return os.environ.get("AVALON_APP") + + +def get_global_context(): + return { + "project_name": os.environ.get("AVALON_PROJECT"), + "asset_name": os.environ.get("AVALON_ASSET"), + "task_name": os.environ.get("AVALON_TASK"), + } + + +def get_current_context(): + host = registered_host() + if isinstance(host, HostBase): + return host.get_current_context() + return get_global_context() + + +def get_current_project_name(): + host = registered_host() + if isinstance(host, HostBase): + return host.get_current_project_name() + return get_global_context()["project_name"] + + +def get_current_asset_name(): + host = registered_host() + if isinstance(host, HostBase): + return host.get_current_asset_name() + return get_global_context()["asset_name"] + + +def get_current_task_name(): + host = registered_host() + if isinstance(host, HostBase): + return host.get_current_task_name() + return get_global_context()["task_name"] + + def get_current_project(fields=None): """Helper function to get project document based on global Session. @@ -316,7 +369,7 @@ def get_current_project(fields=None): None: Project is not set. """ - project_name = legacy_io.active_project() + project_name = get_current_project_name() return get_project(project_name, fields=fields) @@ -341,12 +394,12 @@ def get_current_project_asset(asset_name=None, asset_id=None, fields=None): None: Asset is not set or not exist. """ - project_name = legacy_io.active_project() + project_name = get_current_project_name() if asset_id: return get_asset_by_id(project_name, asset_id, fields=fields) if not asset_name: - asset_name = legacy_io.Session.get("AVALON_ASSET") + asset_name = get_current_asset_name() # Skip if is not set even on context if not asset_name: return None @@ -363,7 +416,7 @@ def is_representation_from_latest(representation): bool: Whether the representation is of latest version. 
""" - project_name = legacy_io.active_project() + project_name = get_current_project_name() return version_is_latest(project_name, representation["parent"]) diff --git a/openpype/pipeline/create/context.py b/openpype/pipeline/create/context.py index a0cdb7dfea..7672c49eb3 100644 --- a/openpype/pipeline/create/context.py +++ b/openpype/pipeline/create/context.py @@ -8,17 +8,23 @@ import inspect from uuid import uuid4 from contextlib import contextmanager -from openpype.client import get_assets +import pyblish.logic +import pyblish.api + +from openpype.client import get_assets, get_asset_by_name from openpype.settings import ( get_system_settings, get_project_settings ) +from openpype.lib.attribute_definitions import ( + UnknownDef, + serialize_attr_defs, + deserialize_attr_defs, + get_default_values, +) from openpype.host import IPublishHost from openpype.pipeline import legacy_io -from openpype.pipeline.mongodb import ( - AvalonMongoDB, - session_data_from_environment, -) +from openpype.pipeline.plugin_discover import DiscoverResult from .creator_plugins import ( Creator, @@ -28,6 +34,7 @@ from .creator_plugins import ( CreatorError, ) +# Changes of instances and context are send as tuple of 2 information UpdateData = collections.namedtuple("UpdateData", ["instance", "changes"]) @@ -177,6 +184,319 @@ def prepare_failed_creator_operation_info( } +_EMPTY_VALUE = object() + + +class TrackChangesItem(object): + """Helper object to track changes in data. + + Has access to full old and new data and will create deep copy of them, + so it is not needed to create copy before passed in. + + Can work as a dictionary if old or new value is a dictionary. In + that case received object is another object of 'TrackChangesItem'. + + Goal is to be able to get old or new value as was or only changed values + or get information about removed/changed keys, and all of that on + any "dictionary level". + + ``` + # Example of possible usages + >>> old_value = { + ... "key_1": "value_1", + ... "key_2": { + ... "key_sub_1": 1, + ... "key_sub_2": { + ... "enabled": True + ... } + ... }, + ... "key_3": "value_2" + ... } + >>> new_value = { + ... "key_1": "value_1", + ... "key_2": { + ... "key_sub_2": { + ... "enabled": False + ... }, + ... "key_sub_3": 3 + ... }, + ... "key_3": "value_3" + ... } + + >>> changes = TrackChangesItem(old_value, new_value) + >>> changes.changed + True + + >>> changes["key_2"]["key_sub_1"].new_value is None + True + + >>> list(sorted(changes.changed_keys)) + ['key_2', 'key_3'] + + >>> changes["key_2"]["key_sub_2"]["enabled"].changed + True + + >>> changes["key_2"].removed_keys + {'key_sub_1'} + + >>> list(sorted(changes["key_2"].available_keys)) + ['key_sub_1', 'key_sub_2', 'key_sub_3'] + + >>> changes.new_value == new_value + True + + # Get only changed values + only_changed_new_values = { + key: changes[key].new_value + for key in changes.changed_keys + } + ``` + + Args: + old_value (Any): Old value. + new_value (Any): New value. 
+ """ + + def __init__(self, old_value, new_value): + self._changed = old_value != new_value + # Resolve if value is '_EMPTY_VALUE' after comparison of the values + if old_value is _EMPTY_VALUE: + old_value = None + if new_value is _EMPTY_VALUE: + new_value = None + self._old_value = copy.deepcopy(old_value) + self._new_value = copy.deepcopy(new_value) + + self._old_is_dict = isinstance(old_value, dict) + self._new_is_dict = isinstance(new_value, dict) + + self._old_keys = None + self._new_keys = None + self._available_keys = None + self._removed_keys = None + + self._changed_keys = None + + self._sub_items = None + + def __getitem__(self, key): + """Getter looks into subitems if object is dictionary.""" + + if self._sub_items is None: + self._prepare_sub_items() + return self._sub_items[key] + + def __bool__(self): + """Boolean of object is if old and new value are the same.""" + + return self._changed + + def get(self, key, default=None): + """Try to get sub item.""" + + if self._sub_items is None: + self._prepare_sub_items() + return self._sub_items.get(key, default) + + @property + def old_value(self): + """Get copy of old value. + + Returns: + Any: Whatever old value was. + """ + + return copy.deepcopy(self._old_value) + + @property + def new_value(self): + """Get copy of new value. + + Returns: + Any: Whatever new value was. + """ + + return copy.deepcopy(self._new_value) + + @property + def changed(self): + """Value changed. + + Returns: + bool: If data changed. + """ + + return self._changed + + @property + def is_dict(self): + """Object can be used as dictionary. + + Returns: + bool: When can be used that way. + """ + + return self._old_is_dict or self._new_is_dict + + @property + def changes(self): + """Get changes in raw data. + + This method should be used only if 'is_dict' value is 'True'. + + Returns: + Dict[str, Tuple[Any, Any]]: Changes are by key in tuple + (, ). If 'is_dict' is 'False' then + output is always empty dictionary. + """ + + output = {} + if not self.is_dict: + return output + + old_value = self.old_value + new_value = self.new_value + for key in self.changed_keys: + _old = None + _new = None + if self._old_is_dict: + _old = old_value.get(key) + if self._new_is_dict: + _new = new_value.get(key) + output[key] = (_old, _new) + return output + + # Methods/properties that can be used when 'is_dict' is 'True' + @property + def old_keys(self): + """Keys from old value. + + Empty set is returned if old value is not a dict. + + Returns: + Set[str]: Keys from old value. + """ + + if self._old_keys is None: + self._prepare_keys() + return set(self._old_keys) + + @property + def new_keys(self): + """Keys from new value. + + Empty set is returned if old value is not a dict. + + Returns: + Set[str]: Keys from new value. + """ + + if self._new_keys is None: + self._prepare_keys() + return set(self._new_keys) + + @property + def changed_keys(self): + """Keys that has changed from old to new value. + + Empty set is returned if both old and new value are not a dict. + + Returns: + Set[str]: Keys of changed keys. + """ + + if self._changed_keys is None: + self._prepare_sub_items() + return set(self._changed_keys) + + @property + def available_keys(self): + """All keys that are available in old and new value. + + Empty set is returned if both old and new value are not a dict. + Output is Union of 'old_keys' and 'new_keys'. + + Returns: + Set[str]: All keys from old and new value. 
+ """ + + if self._available_keys is None: + self._prepare_keys() + return set(self._available_keys) + + @property + def removed_keys(self): + """Key that are not available in new value but were in old value. + + Returns: + Set[str]: All removed keys. + """ + + if self._removed_keys is None: + self._prepare_sub_items() + return set(self._removed_keys) + + def _prepare_keys(self): + old_keys = set() + new_keys = set() + if self._old_is_dict and self._new_is_dict: + old_keys = set(self._old_value.keys()) + new_keys = set(self._new_value.keys()) + + elif self._old_is_dict: + old_keys = set(self._old_value.keys()) + + elif self._new_is_dict: + new_keys = set(self._new_value.keys()) + + self._old_keys = old_keys + self._new_keys = new_keys + self._available_keys = old_keys | new_keys + self._removed_keys = old_keys - new_keys + + def _prepare_sub_items(self): + sub_items = {} + changed_keys = set() + + old_keys = self.old_keys + new_keys = self.new_keys + new_value = self.new_value + old_value = self.old_value + if self._old_is_dict and self._new_is_dict: + for key in self.available_keys: + item = TrackChangesItem( + old_value.get(key), new_value.get(key) + ) + sub_items[key] = item + if item.changed or key not in old_keys or key not in new_keys: + changed_keys.add(key) + + elif self._old_is_dict: + old_keys = set(old_value.keys()) + available_keys = set(old_keys) + changed_keys = set(available_keys) + for key in available_keys: + # NOTE Use '_EMPTY_VALUE' because old value could be 'None' + # which would result in "unchanged" item + sub_items[key] = TrackChangesItem( + old_value.get(key), _EMPTY_VALUE + ) + + elif self._new_is_dict: + new_keys = set(new_value.keys()) + available_keys = set(new_keys) + changed_keys = set(available_keys) + for key in available_keys: + # NOTE Use '_EMPTY_VALUE' because new value could be 'None' + # which would result in "unchanged" item + sub_items[key] = TrackChangesItem( + _EMPTY_VALUE, new_value.get(key) + ) + + self._sub_items = sub_items + self._changed_keys = changed_keys + + class InstanceMember: """Representation of instance member. @@ -208,14 +528,12 @@ class AttributeValues(object): Has dictionary like methods. Not all of them are allowed all the time. Args: - attr_defs(AbtractAttrDef): Defintions of value type and properties. + attr_defs(AbstractAttrDef): Defintions of value type and properties. values(dict): Values after possible conversion. origin_data(dict): Values loaded from host before conversion. """ def __init__(self, attr_defs, values, origin_data=None): - from openpype.lib.attribute_definitions import UnknownDef - if origin_data is None: origin_data = copy.deepcopy(values) self._origin_data = origin_data @@ -288,11 +606,25 @@ class AttributeValues(object): @property def attr_defs(self): - """Pointer to attribute definitions.""" - return self._attr_defs + """Pointer to attribute definitions. + + Returns: + List[AbstractAttrDef]: Attribute definitions. + """ + + return list(self._attr_defs) + + @property + def origin_data(self): + return copy.deepcopy(self._origin_data) def data_to_store(self): - """Create new dictionary with data to store.""" + """Create new dictionary with data to store. + + Returns: + Dict[str, Any]: Attribute values that should be stored. 
+ """ + output = {} for key in self._data: output[key] = self[key] @@ -302,28 +634,14 @@ class AttributeValues(object): output[key] = attr_def.default return output - @staticmethod - def calculate_changes(new_data, old_data): - """Calculate changes of 2 dictionary objects.""" - changes = {} - for key, new_value in new_data.items(): - old_value = old_data.get(key) - if old_value != new_value: - changes[key] = (old_value, new_value) - return changes + def get_serialized_attr_defs(self): + """Serialize attribute definitions to json serializable types. - def changes(self): - return self.calculate_changes(self._data, self._origin_data) + Returns: + List[Dict[str, Any]]: Serialized attribute definitions. + """ - def apply_changes(self, changes): - for key, item in changes.items(): - old_value, new_value = item - if new_value is None: - if key in self: - self.pop(key) - - elif self.get(key) != new_value: - self[key] = new_value + return serialize_attr_defs(self._attr_defs) class CreatorAttributeValues(AttributeValues): @@ -362,13 +680,14 @@ class PublishAttributes: """Wrapper for publish plugin attribute definitions. Cares about handling attribute definitions of multiple publish plugins. + Keep information about attribute definitions and their values. Args: parent(CreatedInstance, CreateContext): Parent for which will be data stored and from which are data loaded. origin_data(dict): Loaded data by plugin class name. - attr_plugins(list): List of publish plugins that may have defined - attribute definitions. + attr_plugins(Union[List[pyblish.api.Plugin], None]): List of publish + plugins that may have defined attribute definitions. """ def __init__(self, parent, origin_data, attr_plugins=None): @@ -442,36 +761,9 @@ class PublishAttributes: output[key] = attr_value.data_to_store() return output - def changes(self): - """Return changes per each key.""" - - changes = {} - for key, attr_val in self._data.items(): - attr_changes = attr_val.changes() - if attr_changes: - if key not in changes: - changes[key] = {} - changes[key].update(attr_val) - - for key, value in self._origin_data.items(): - if key not in self._data: - changes[key] = (value, None) - return changes - - def apply_changes(self, changes): - for key, item in changes.items(): - if isinstance(item, dict): - self._data[key].apply_changes(item) - continue - - old_value, new_value = item - if new_value is not None: - raise ValueError( - "Unexpected type \"{}\" expected None".format( - str(type(new_value)) - ) - ) - self.pop(key) + @property + def origin_data(self): + return copy.deepcopy(self._origin_data) def set_publish_plugins(self, attr_plugins): """Set publish plugins attribute definitions.""" @@ -509,6 +801,42 @@ class PublishAttributes: self, [], value, value ) + def serialize_attributes(self): + return { + "attr_defs": { + plugin_name: attrs_value.get_serialized_attr_defs() + for plugin_name, attrs_value in self._data.items() + }, + "plugin_names_order": self._plugin_names_order, + "missing_plugins": self._missing_plugins + } + + def deserialize_attributes(self, data): + self._plugin_names_order = data["plugin_names_order"] + self._missing_plugins = data["missing_plugins"] + + attr_defs = deserialize_attr_defs(data["attr_defs"]) + + origin_data = self._origin_data + data = self._data + self._data = {} + + added_keys = set() + for plugin_name, attr_defs_data in attr_defs.items(): + attr_defs = deserialize_attr_defs(attr_defs_data) + value = data.get(plugin_name) or {} + orig_value = copy.deepcopy(origin_data.get(plugin_name) or {}) + 
self._data[plugin_name] = PublishAttributeValues( + self, attr_defs, value, orig_value + ) + + for key, value in data.items(): + if key not in added_keys: + self._missing_plugins.append(key) + self._data[key] = PublishAttributeValues( + self, [], value, value + ) + class CreatedInstance: """Instance entity with data that will be stored to workfile. @@ -517,15 +845,22 @@ class CreatedInstance: about instance like "asset" and "task" and all data used for filling subset name as creators may have custom data for subset name filling. + Notes: + Object have 2 possible initialization. One using 'creator' object which + is recommended for api usage. Second by passing information about + creator. + Args: - family(str): Name of family that will be created. - subset_name(str): Name of subset that will be created. - data(dict): Data used for filling subset name or override data from - already existing instance. - creator(BaseCreator): Creator responsible for instance. - host(ModuleType): Host implementation loaded with - `openpype.pipeline.registered_host`. - new(bool): Is instance new. + family (str): Name of family that will be created. + subset_name (str): Name of subset that will be created. + data (Dict[str, Any]): Data used for filling subset name or override + data from already existing instance. + creator (Union[BaseCreator, None]): Creator responsible for instance. + creator_identifier (str): Identifier of creator plugin. + creator_label (str): Creator plugin label. + group_label (str): Default group label from creator plugin. + creator_attr_defs (List[AbstractAttrDef]): Attribute definitions from + creator. """ # Keys that can't be changed or removed from data after loading using @@ -542,9 +877,24 @@ class CreatedInstance: ) def __init__( - self, family, subset_name, data, creator, new=True + self, + family, + subset_name, + data, + creator=None, + creator_identifier=None, + creator_label=None, + group_label=None, + creator_attr_defs=None, ): - self.creator = creator + if creator is not None: + creator_identifier = creator.identifier + group_label = creator.get_group_label() + creator_label = creator.label + creator_attr_defs = creator.get_instance_attr_defs() + + self._creator_label = creator_label + self._group_label = group_label or creator_identifier # Instance members may have actions on them # TODO implement members logic @@ -574,7 +924,7 @@ class CreatedInstance: self._data["family"] = family self._data["subset"] = subset_name self._data["active"] = data.get("active", True) - self._data["creator_identifier"] = creator.identifier + self._data["creator_identifier"] = creator_identifier # Pop from source data all keys that are defined in `_data` before # this moment and through their values away @@ -588,10 +938,12 @@ class CreatedInstance: # Stored creator specific attribute values # {key: value} creator_values = copy.deepcopy(orig_creator_attributes) - creator_attr_defs = creator.get_instance_attr_defs() self._data["creator_attributes"] = CreatorAttributeValues( - self, creator_attr_defs, creator_values, orig_creator_attributes + self, + list(creator_attr_defs), + creator_values, + orig_creator_attributes ) # Stored publish specific attribute values @@ -676,64 +1028,27 @@ class CreatedInstance: label = self._data.get("group") if label: return label - return self.creator.get_group_label() + return self._group_label + + @property + def origin_data(self): + return copy.deepcopy(self._orig_data) @property def creator_identifier(self): - return self.creator.identifier + return 
self._data["creator_identifier"] @property def creator_label(self): - return self.creator.label or self.creator_identifier - - @property - def create_context(self): - return self.creator.create_context - - @property - def host(self): - return self.create_context.host - - @property - def has_set_asset(self): - """Asset name is set in data.""" - return "asset" in self._data - - @property - def has_set_task(self): - """Task name is set in data.""" - return "task" in self._data - - @property - def has_valid_context(self): - """Context data are valid for publishing.""" - return self.has_valid_asset and self.has_valid_task - - @property - def has_valid_asset(self): - """Asset set in context exists in project.""" - if not self.has_set_asset: - return False - return self._asset_is_valid - - @property - def has_valid_task(self): - """Task set in context exists in project.""" - if not self.has_set_task: - return False - return self._task_is_valid - - def set_asset_invalid(self, invalid): - # TODO replace with `set_asset_name` - self._asset_is_valid = not invalid - - def set_task_invalid(self, invalid): - # TODO replace with `set_task_name` - self._task_is_valid = not invalid + return self._creator_label or self.creator_identifier @property def id(self): - """Instance identifier.""" + """Instance identifier. + + Returns: + str: UUID of instance. + """ return self._data["instance_id"] @@ -742,6 +1057,10 @@ class CreatedInstance: """Legacy access to data. Access to data is needed to modify values. + + Returns: + CreatedInstance: Object can be used as dictionary but with + validations of immutable keys. """ return self @@ -769,29 +1088,7 @@ class CreatedInstance: def changes(self): """Calculate and return changes.""" - changes = {} - new_keys = set() - for key, new_value in self._data.items(): - new_keys.add(key) - if key in ("creator_attributes", "publish_attributes"): - continue - - old_value = self._orig_data.get(key) - if old_value != new_value: - changes[key] = (old_value, new_value) - - creator_attr_changes = self.creator_attributes.changes() - if creator_attr_changes: - changes["creator_attributes"] = creator_attr_changes - - publish_attr_changes = self.publish_attributes.changes() - if publish_attr_changes: - changes["publish_attributes"] = publish_attr_changes - - for key, old_value in self._orig_data.items(): - if key not in new_keys: - changes[key] = (old_value, None) - return changes + return TrackChangesItem(self._orig_data, self.data_to_store()) def mark_as_stored(self): """Should be called when instance data are stored. @@ -818,6 +1115,12 @@ class CreatedInstance: @property def creator_attribute_defs(self): + """Attribute defintions defined by creator plugin. + + Returns: + List[AbstractAttrDef]: Attribute defitions. + """ + return self.creator_attributes.attr_defs @property @@ -829,7 +1132,7 @@ class CreatedInstance: It is possible to recreate the instance using these data. - Todo: + Todos: We probably don't need OrderedDict. When data are loaded they are not ordered anymore. @@ -850,7 +1153,15 @@ class CreatedInstance: @classmethod def from_existing(cls, instance_data, creator): - """Convert instance data from workfile to CreatedInstance.""" + """Convert instance data from workfile to CreatedInstance. + + Args: + instance_data (Dict[str, Any]): Data in a structure ready for + 'CreatedInstance' object. + creator (Creator): Creator plugin which is creating the instance + of for which the instance belong. 
+ """ + instance_data = copy.deepcopy(instance_data) family = instance_data.get("family", None) @@ -859,26 +1170,49 @@ class CreatedInstance: subset_name = instance_data.get("subset", None) return cls( - family, subset_name, instance_data, creator, new=False + family, subset_name, instance_data, creator ) def set_publish_plugins(self, attr_plugins): + """Set publish plugins with attribute definitions. + + This method should be called only from 'CreateContext'. + + Args: + attr_plugins (List[pyblish.api.Plugin]): Pyblish plugins which + inherit from 'OpenPypePyblishPluginMixin' and may contain + attribute definitions. + """ + self.publish_attributes.set_publish_plugins(attr_plugins) def add_members(self, members): """Currently unused method.""" + for member in members: if member not in self._members: self._members.append(member) def serialize_for_remote(self): + """Serialize object into data to be possible recreated object. + + Returns: + Dict[str, Any]: Serialized data. + """ + + creator_attr_defs = self.creator_attributes.get_serialized_attr_defs() + publish_attributes = self.publish_attributes.serialize_attributes() return { "data": self.data_to_store(), - "orig_data": copy.deepcopy(self._orig_data) + "orig_data": copy.deepcopy(self._orig_data), + "creator_attr_defs": creator_attr_defs, + "publish_attributes": publish_attributes, + "creator_label": self._creator_label, + "group_label": self._group_label, } @classmethod - def deserialize_on_remote(cls, serialized_data, creator_items): + def deserialize_on_remote(cls, serialized_data): """Convert instance data to CreatedInstance. This is fake instance in remote process e.g. in UI process. The creator @@ -888,79 +1222,77 @@ class CreatedInstance: Args: serialized_data (Dict[str, Any]): Serialized data for remote recreating. Should contain 'data' and 'orig_data'. - creator_items (Dict[str, Any]): Mapping of creator identifier and - objects that behave like a creator for most of attribute - access. """ instance_data = copy.deepcopy(serialized_data["data"]) creator_identifier = instance_data["creator_identifier"] - creator_item = creator_items[creator_identifier] - family = instance_data.get("family", None) - if family is None: - family = creator_item.family + family = instance_data["family"] subset_name = instance_data.get("subset", None) + creator_label = serialized_data["creator_label"] + group_label = serialized_data["group_label"] + creator_attr_defs = deserialize_attr_defs( + serialized_data["creator_attr_defs"] + ) + publish_attributes = serialized_data["publish_attributes"] + obj = cls( - family, subset_name, instance_data, creator_item, new=False + family, + subset_name, + instance_data, + creator_identifier=creator_identifier, + creator_label=creator_label, + group_label=group_label, + creator_attributes=creator_attr_defs ) obj._orig_data = serialized_data["orig_data"] + obj.publish_attributes.deserialize_attributes(publish_attributes) return obj - def remote_changes(self): - """Prepare serializable changes on remote side. + # Context validation related methods/properties + @property + def has_set_asset(self): + """Asset name is set in data.""" - Returns: - Dict[str, Any]: Prepared changes that can be send to client side. 
- """ + return "asset" in self._data - return { - "changes": self.changes(), - "asset_is_valid": self._asset_is_valid, - "task_is_valid": self._task_is_valid, - } + @property + def has_set_task(self): + """Task name is set in data.""" - def update_from_remote(self, remote_changes): - """Apply changes from remote side on client side. + return "task" in self._data - Args: - remote_changes (Dict[str, Any]): Changes created on remote side. - """ + @property + def has_valid_context(self): + """Context data are valid for publishing.""" - self._asset_is_valid = remote_changes["asset_is_valid"] - self._task_is_valid = remote_changes["task_is_valid"] + return self.has_valid_asset and self.has_valid_task - changes = remote_changes["changes"] - creator_attributes = changes.pop("creator_attributes", None) or {} - publish_attributes = changes.pop("publish_attributes", None) or {} - if changes: - self.apply_changes(changes) + @property + def has_valid_asset(self): + """Asset set in context exists in project.""" - if creator_attributes: - self.creator_attributes.apply_changes(creator_attributes) + if not self.has_set_asset: + return False + return self._asset_is_valid - if publish_attributes: - self.publish_attributes.apply_changes(publish_attributes) + @property + def has_valid_task(self): + """Task set in context exists in project.""" - def apply_changes(self, changes): - """Apply changes created via 'changes'. + if not self.has_set_task: + return False + return self._task_is_valid - Args: - Dict[str, Tuple[Any, Any]]: Instance changes to apply. Same values - are kept untouched. - """ + def set_asset_invalid(self, invalid): + # TODO replace with `set_asset_name` + self._asset_is_valid = not invalid - for key, item in changes.items(): - old_value, new_value = item - if new_value is None: - if key in self: - self.pop(key) - else: - current_value = self.get(key) - if current_value != new_value: - self[key] = new_value + def set_task_invalid(self, invalid): + # TODO replace with `set_task_name` + self._task_is_valid = not invalid class ConvertorItem(object): @@ -1000,11 +1332,13 @@ class CreateContext: Context itself also can store data related to whole creation (workfile). - those are mainly for Context publish plugins + Todos: + Don't use 'AvalonMongoDB'. It's used only to keep track about current + context which should be handled by host. + Args: host(ModuleType): Host implementation which handles implementation and global metadata. - dbcon(AvalonMongoDB): Connection to mongo with context (at least - project). headless(bool): Context is created out of UI (Current not used). reset(bool): Reset context on initialization. 
discover_publish_plugins(bool): Discover publish plugins during reset @@ -1012,16 +1346,8 @@ class CreateContext: """ def __init__( - self, host, dbcon=None, headless=False, reset=True, - discover_publish_plugins=True + self, host, headless=False, reset=True, discover_publish_plugins=True ): - # Create conncetion if is not passed - if dbcon is None: - session = session_data_from_environment(True) - dbcon = AvalonMongoDB(session) - dbcon.install() - - self.dbcon = dbcon self.host = host # Prepare attribute for logger (Created on demand in `log` property) @@ -1045,6 +1371,10 @@ class CreateContext: " Missing methods: {}" ).format(joined_methods)) + self._current_project_name = None + self._current_asset_name = None + self._current_task_name = None + self._host_is_valid = host_is_valid # Currently unused variable self.headless = headless @@ -1052,6 +1382,8 @@ class CreateContext: # Instances by their ID self._instances_by_id = {} + self.creator_discover_result = None + self.convertor_discover_result = None # Discovered creators self.creators = {} # Prepare categories of creators @@ -1097,6 +1429,53 @@ class CreateContext: """Access to global publish attributes.""" return self._publish_attributes + def get_sorted_creators(self, identifiers=None): + """Sorted creators by 'order' attribute. + + Args: + identifiers (Iterable[str]): Filter creators by identifiers. All + creators are returned if 'None' is passed. + + Returns: + List[BaseCreator]: Sorted creator plugins by 'order' value. + """ + + if identifiers is not None: + identifiers = set(identifiers) + creators = [ + creator + for identifier, creator in self.creators.items() + if identifier in identifiers + ] + else: + creators = self.creators.values() + + return sorted( + creators, key=lambda creator: creator.order + ) + + @property + def sorted_creators(self): + """Sorted creators by 'order' attribute. + + Returns: + List[BaseCreator]: Sorted creator plugins by 'order' value. + """ + + return self.get_sorted_creators() + + @property + def sorted_autocreators(self): + """Sorted auto-creators by 'order' attribute. + + Returns: + List[AutoCreator]: Sorted plugins by 'order' value. + """ + + return sorted( + self.autocreators.values(), key=lambda creator: creator.order + ) + @classmethod def get_host_misssing_methods(cls, host): """Collect missing methods from host. @@ -1117,11 +1496,20 @@ class CreateContext: @property def host_name(self): + if hasattr(self.host, "name"): + return self.host.name return os.environ["AVALON_APP"] - @property - def project_name(self): - return self.dbcon.active_project() + def get_current_project_name(self): + return self._current_project_name + + def get_current_asset_name(self): + return self._current_asset_name + + def get_current_task_name(self): + return self._current_task_name + + project_name = property(get_current_project_name) @property def log(self): @@ -1138,7 +1526,7 @@ class CreateContext: self.reset_preparation() - self.reset_avalon_context() + self.reset_current_context() self.reset_plugins(discover_publish_plugins) self.reset_context_data() @@ -1185,14 +1573,22 @@ class CreateContext: self._collection_shared_data = None self.refresh_thumbnails() - def reset_avalon_context(self): - """Give ability to reset avalon context. + def reset_current_context(self): + """Refresh current context. Reset is based on optional host implementation of `get_current_context` function or using `legacy_io.Session`. 
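
`reset_current_context` (below) caches the current project, asset and task on the `CreateContext` itself. It prefers an optional `get_current_context` method on the host and only falls back to `legacy_io.Session`. A sketch of the host-side hook this lookup expects, with placeholder values:

```python
class ExampleHost:
    """Illustrative host; only the hook relevant here is shown."""

    def get_current_context(self):
        # Keys mirror the AVALON_* session fallbacks used when this
        # method is not implemented by the host.
        return {
            "project_name": "demo_project",
            "asset_name": "sh010",
            "task_name": "compositing",
        }
```
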
Some hosts have ability to change context file without using workfiles - tool but that change is not propagated to + tool but that change is not propagated to 'legacy_io.Session' + nor 'os.environ'. + + Todos: + UI: Current context should be also checked on save - compare + initial values vs. current values. + Related to UI checks: Current workfile can be also considered + as current context information as that's where the metadata + are stored. We should store the workfile (if is available) too. """ project_name = asset_name = task_name = None @@ -1210,12 +1606,9 @@ class CreateContext: if not task_name: task_name = legacy_io.Session.get("AVALON_TASK") - if project_name: - self.dbcon.Session["AVALON_PROJECT"] = project_name - if asset_name: - self.dbcon.Session["AVALON_ASSET"] = asset_name - if task_name: - self.dbcon.Session["AVALON_TASK"] = task_name + self._current_project_name = project_name + self._current_asset_name = asset_name + self._current_task_name = task_name def reset_plugins(self, discover_publish_plugins=True): """Reload plugins. @@ -1229,18 +1622,15 @@ class CreateContext: self._reset_convertor_plugins() def _reset_publish_plugins(self, discover_publish_plugins): - import pyblish.logic - from openpype.pipeline import OpenPypePyblishPluginMixin from openpype.pipeline.publish import ( - publish_plugins_discover, - DiscoverResult + publish_plugins_discover ) # Reset publish plugins self._attr_plugins_by_family = {} - discover_result = DiscoverResult() + discover_result = DiscoverResult(pyblish.api.Plugin) plugins_with_defs = [] plugins_by_targets = [] plugins_mismatch_targets = [] @@ -1279,7 +1669,9 @@ class CreateContext: creators = {} autocreators = {} manual_creators = {} - for creator_class in discover_creator_plugins(): + report = discover_creator_plugins(return_report=True) + self.creator_discover_result = report + for creator_class in report.plugins: if inspect.isabstract(creator_class): self.log.info( "Skipping abstract Creator {}".format(str(creator_class)) @@ -1324,7 +1716,9 @@ class CreateContext: def _reset_convertor_plugins(self): convertors_plugins = {} - for convertor_class in discover_convertor_plugins(): + report = discover_convertor_plugins(return_report=True) + self.convertor_discover_result = report + for convertor_class in report.plugins: if inspect.isabstract(convertor_class): self.log.info( "Skipping abstract Creator {}".format(str(convertor_class)) @@ -1375,11 +1769,10 @@ class CreateContext: def context_data_changes(self): """Changes of attributes.""" - changes = {} - publish_attribute_changes = self._publish_attributes.changes() - if publish_attribute_changes: - changes["publish_attributes"] = publish_attribute_changes - return changes + + return TrackChangesItem( + self._original_context_data, self.context_data_to_store() + ) def creator_adds_instance(self, instance): """Creator adds new instance to context. @@ -1402,7 +1795,7 @@ class CreateContext: self._instances_by_id[instance.id] = instance # Prepare publish plugin attributes and set it on instance attr_plugins = self._get_publish_plugins_with_attr_for_family( - instance.creator.family + instance.family ) instance.set_publish_plugins(attr_plugins) @@ -1411,40 +1804,128 @@ class CreateContext: with self.bulk_instances_collection(): self._bulk_instances_to_process.append(instance) - def create(self, identifier, *args, **kwargs): - """Wrapper for creators to trigger created. + def _get_creator_in_create(self, identifier): + """Creator by identifier with unified error. 
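
The `create` method defined a little further below replaces the old blind `*args`/`**kwargs` pass-through with a standardized signature. A hedged usage sketch; the identifier, variant and pre-create keys are placeholders, and the current context is used when `asset_doc` and `task_name` are omitted:

```python
from openpype.pipeline import registered_host
from openpype.pipeline.create import CreateContext

create_context = CreateContext(registered_host())

create_context.create(
    creator_identifier="io.openpype.creators.example",  # hypothetical id
    variant="Main",
    # 'asset_doc'/'task_name' default to the current context
    pre_create_data={"use_selection": True},  # creator-specific keys
)
```

UI code can instead call `create_with_unified_error` (also below), which wraps any failure into a single `CreatorsCreateFailed` exception.
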
- Different types of creators may expect different arguments thus the - hints for args are blind. + Helper method to get creator by identifier with same error when creator + is not available. Args: - identifier (str): Creator's identifier. - *args (Tuple[Any]): Arguments for create method. - **kwargs (Dict[Any, Any]): Keyword argument for create method. + identifier (str): Identifier of creator plugin. + + Returns: + BaseCreator: Creator found by identifier. + + Raises: + CreatorError: When identifier is not known. """ - error_message = "Failed to run Creator with identifier \"{}\". {}" creator = self.creators.get(identifier) - label = getattr(creator, "label", None) - failed = False - add_traceback = False - exc_info = None - try: - # Fake CreatorError (Could be maybe specific exception?) - if creator is None: + # Fake CreatorError (Could be maybe specific exception?) + if creator is None: + raise CreatorError( + "Creator {} was not found".format(identifier) + ) + return creator + + def create( + self, + creator_identifier, + variant, + asset_doc=None, + task_name=None, + pre_create_data=None + ): + """Trigger create of plugins with standartized arguments. + + Arguments 'asset_doc' and 'task_name' use current context as default + values. If only 'task_name' is provided it will be overriden by + task name from current context. If 'task_name' is not provided + when 'asset_doc' is, it is considered that task name is not specified, + which can lead to error if subset name template requires task name. + + Args: + creator_identifier (str): Identifier of creator plugin. + variant (str): Variant used for subset name. + asset_doc (Dict[str, Any]): Asset document which define context of + creation (possible context of created instance/s). + task_name (str): Name of task to which is context related. + pre_create_data (Dict[str, Any]): Pre-create attribute values. + + Returns: + Any: Output of triggered creator's 'create' method. + + Raises: + CreatorError: If creator was not found or asset is empty. + """ + + creator = self._get_creator_in_create(creator_identifier) + + project_name = self.project_name + if asset_doc is None: + asset_name = self.get_current_asset_name() + asset_doc = get_asset_by_name(project_name, asset_name) + task_name = self.get_current_task_name() + if asset_doc is None: raise CreatorError( - "Creator {} was not found".format(identifier) + "Asset with name {} was not found".format(asset_name) ) - creator.create(*args, **kwargs) + if pre_create_data is None: + pre_create_data = {} + + precreate_attr_defs = creator.get_pre_create_attr_defs() or [] + # Create default values of precreate data + _pre_create_data = get_default_values(precreate_attr_defs) + # Update passed precreate data to default values + # TODO validate types + _pre_create_data.update(pre_create_data) + + subset_name = creator.get_subset_name( + variant, + task_name, + asset_doc, + project_name, + self.host_name + ) + instance_data = { + "asset": asset_doc["name"], + "task": task_name, + "family": creator.family, + "variant": variant + } + return creator.create( + subset_name, + instance_data, + _pre_create_data + ) + + def _create_with_unified_error( + self, identifier, creator, *args, **kwargs + ): + error_message = "Failed to run Creator with identifier \"{}\". 
{}" + + label = None + add_traceback = False + result = None + fail_info = None + success = False + + try: + # Try to get creator and his label + if creator is None: + creator = self._get_creator_in_create(identifier) + label = getattr(creator, "label", label) + + # Run create + result = creator.create(*args, **kwargs) + success = True except CreatorError: - failed = True exc_info = sys.exc_info() self.log.warning(error_message.format(identifier, exc_info[1])) except: - failed = True add_traceback = True exc_info = sys.exc_info() self.log.warning( @@ -1452,12 +1933,38 @@ class CreateContext: exc_info=True ) - if failed: - raise CreatorsCreateFailed([ - prepare_failed_creator_operation_info( - identifier, label, exc_info, add_traceback - ) - ]) + if not success: + fail_info = prepare_failed_creator_operation_info( + identifier, label, exc_info, add_traceback + ) + return result, fail_info + + def create_with_unified_error(self, identifier, *args, **kwargs): + """Trigger create but raise only one error if anything fails. + + Added to raise unified exception. Capture any possible issues and + reraise it with unified information. + + Args: + identifier (str): Identifier of creator. + *args (Tuple[Any]): Arguments for create method. + **kwargs (Dict[Any, Any]): Keyword argument for create method. + + Raises: + CreatorsCreateFailed: When creation fails due to any possible + reason. If anything goes wrong this is only possible exception + the method should raise. + """ + + result, fail_info = self._create_with_unified_error( + identifier, None, *args, **kwargs + ) + if fail_info is not None: + raise CreatorsCreateFailed([fail_info]) + return result + + def _remove_instance(self, instance): + self._instances_by_id.pop(instance.id, None) def creator_removed_instance(self, instance): """When creator removes instance context should be acknowledged. @@ -1470,7 +1977,7 @@ class CreateContext: from scene metadata. """ - self._instances_by_id.pop(instance.id, None) + self._remove_instance(instance) def add_convertor_item(self, convertor_identifier, label): self.convertor_items_by_id[convertor_identifier] = ConvertorItem( @@ -1514,7 +2021,7 @@ class CreateContext: # Collect instances error_message = "Collection of instances for creator {} failed. {}" failed_info = [] - for creator in self.creators.values(): + for creator in self.sorted_creators: label = creator.label identifier = creator.identifier failed = False @@ -1584,37 +2091,12 @@ class CreateContext: Reset instances if any autocreator executed properly. """ - error_message = "Failed to run AutoCreator with identifier \"{}\". 
{}" failed_info = [] - for identifier, creator in self.autocreators.items(): - label = creator.label - failed = False - add_traceback = False - try: - creator.create() - - except CreatorError: - failed = True - exc_info = sys.exc_info() - self.log.warning(error_message.format(identifier, exc_info[1])) - - # Use bare except because some hosts raise their exceptions that - # do not inherit from python's `BaseException` - except: - failed = True - add_traceback = True - exc_info = sys.exc_info() - self.log.warning( - error_message.format(identifier, ""), - exc_info=True - ) - - if failed: - failed_info.append( - prepare_failed_creator_operation_info( - identifier, label, exc_info, add_traceback - ) - ) + for creator in self.sorted_autocreators: + identifier = creator.identifier + _, fail_info = self._create_with_unified_error(identifier, creator) + if fail_info is not None: + failed_info.append(fail_info) if failed_info: raise CreatorsCreateFailed(failed_info) @@ -1691,19 +2173,26 @@ class CreateContext: """Save instance specific values.""" instances_by_identifier = collections.defaultdict(list) for instance in self._instances_by_id.values(): + instance_changes = instance.changes() + if not instance_changes: + continue + identifier = instance.creator_identifier - instances_by_identifier[identifier].append(instance) + instances_by_identifier[identifier].append( + UpdateData(instance, instance_changes) + ) + + if not instances_by_identifier: + return error_message = "Instances update of creator \"{}\" failed. {}" failed_info = [] - for identifier, creator_instances in instances_by_identifier.items(): - update_list = [] - for instance in creator_instances: - instance_changes = instance.changes() - if instance_changes: - update_list.append(UpdateData(instance, instance_changes)) - creator = self.creators[identifier] + for creator in self.get_sorted_creators( + instances_by_identifier.keys() + ): + identifier = creator.identifier + update_list = instances_by_identifier[identifier] if not update_list: continue @@ -1739,9 +2228,13 @@ class CreateContext: def remove_instances(self, instances): """Remove instances from context. + All instances that don't have creator identifier leading to existing + creator are just removed from context. + Args: - instances(list): Instances that should be removed - from context. + instances(List[CreatedInstance]): Instances that should be removed. + Remove logic is done using creator, which may require to + do other cleanup than just remove instance from context. """ instances_by_identifier = collections.defaultdict(list) @@ -1749,10 +2242,21 @@ class CreateContext: identifier = instance.creator_identifier instances_by_identifier[identifier].append(instance) + # Just remove instances from context if creator is not available + missing_creators = set(instances_by_identifier) - set(self.creators) + for identifier in missing_creators: + for instance in instances_by_identifier[identifier]: + self._remove_instance(instance) + error_message = "Instances removement of creator \"{}\" failed. 
{}" failed_info = [] - for identifier, creator_instances in instances_by_identifier.items(): - creator = self.creators.get(identifier) + # Remove instances by creator plugin order + for creator in self.get_sorted_creators( + instances_by_identifier.keys() + ): + identifier = creator.identifier + creator_instances = instances_by_identifier[identifier] + label = creator.label failed = False add_traceback = False @@ -1795,6 +2299,7 @@ class CreateContext: family(str): Instance family for which should be attribute definitions returned. """ + if family not in self._attr_plugins_by_family: import pyblish.logic @@ -1810,7 +2315,13 @@ class CreateContext: return self._attr_plugins_by_family[family] def _get_publish_plugins_with_attr_for_context(self): - """Publish plugins attributes for Context plugins.""" + """Publish plugins attributes for Context plugins. + + Returns: + List[pyblish.api.Plugin]: Publish plugins that have attribute + definitions for context. + """ + plugins = [] for plugin in self.plugins_with_defs: if not plugin.__instanceEnabled__: @@ -1835,7 +2346,7 @@ class CreateContext: return self._collection_shared_data def run_convertor(self, convertor_identifier): - """Run convertor plugin by it's idenfitifier. + """Run convertor plugin by identifier. Conversion is skipped if convertor is not available. @@ -1848,7 +2359,7 @@ class CreateContext: convertor.convert() def run_convertors(self, convertor_identifiers): - """Run convertor plugins by idenfitifiers. + """Run convertor plugins by identifiers. Conversion is skipped if convertor is not available. It is recommended to trigger reset after conversion to reload instances. diff --git a/openpype/pipeline/create/creator_plugins.py b/openpype/pipeline/create/creator_plugins.py index 8500dd1e22..628245faf2 100644 --- a/openpype/pipeline/create/creator_plugins.py +++ b/openpype/pipeline/create/creator_plugins.py @@ -107,7 +107,11 @@ class SubsetConvertorPlugin(object): @property def create_context(self): - """Quick access to create context.""" + """Quick access to create context. + + Returns: + CreateContext: Context which initialized the plugin. + """ return self._create_context @@ -149,6 +153,12 @@ class BaseCreator: Single object should be used for multiple instances instead of single instance per one creator object. Do not store temp data or mid-process data to `self` if it's not Plugin specific. + + Args: + project_settings (Dict[str, Any]): Project settings. + system_settings (Dict[str, Any]): System settings. + create_context (CreateContext): Context which initialized creator. + headless (bool): Running in headless mode. """ # Label shown in UI @@ -157,6 +167,10 @@ class BaseCreator: # Cached group label after first call 'get_group_label' _cached_group_label = None + # Order in which will be plugin executed (collect & update instances) + # less == earlier -> Order '90' will be processed before '100' + order = 100 + # Variable to store logger _log = None @@ -425,8 +439,8 @@ class BaseCreator: keys/values when plugin attributes change. Returns: - List[AbtractAttrDef]: Attribute definitions that can be tweaked for - created instance. + List[AbstractAttrDef]: Attribute definitions that can be tweaked + for created instance. """ return self.instance_attr_defs @@ -489,6 +503,17 @@ class Creator(BaseCreator): # - similar to instance attribute definitions pre_create_attr_defs = [] + @property + def show_order(self): + """Order in which is creator shown in UI. + + Returns: + int: Order in which is creator shown (less == earlier). 
By default + is using Creator's 'order' or processing. + """ + + return self.order + @abstractmethod def create(self, subset_name, instance_data, pre_create_data): """Create new instance and store it. @@ -563,8 +588,8 @@ class Creator(BaseCreator): updating keys/values when plugin attributes change. Returns: - List[AbtractAttrDef]: Attribute definitions that can be tweaked for - created instance. + List[AbstractAttrDef]: Attribute definitions that can be tweaked + for created instance. """ return self.pre_create_attr_defs @@ -586,12 +611,12 @@ class AutoCreator(BaseCreator): pass -def discover_creator_plugins(): - return discover(BaseCreator) +def discover_creator_plugins(*args, **kwargs): + return discover(BaseCreator, *args, **kwargs) -def discover_convertor_plugins(): - return discover(SubsetConvertorPlugin) +def discover_convertor_plugins(*args, **kwargs): + return discover(SubsetConvertorPlugin, *args, **kwargs) def discover_legacy_creator_plugins(): diff --git a/openpype/pipeline/load/plugins.py b/openpype/pipeline/load/plugins.py index b5e55834db..9b891a4da3 100644 --- a/openpype/pipeline/load/plugins.py +++ b/openpype/pipeline/load/plugins.py @@ -2,7 +2,10 @@ import os import logging from openpype.settings import get_system_settings, get_project_settings -from openpype.pipeline import legacy_io +from openpype.pipeline import ( + schema, + legacy_io, +) from openpype.pipeline.plugin_discover import ( discover, register_plugin, @@ -79,6 +82,45 @@ class LoaderPlugin(list): print(" - setting `{}`: `{}`".format(option, value)) setattr(cls, option, value) + @classmethod + def is_compatible_loader(cls, context): + """Return whether a loader is compatible with a context. + + This checks the version's families and the representation for the given + Loader. 
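
Compatibility is now answered by the plugin itself through the `is_compatible_loader` classmethod (the module-level helper in `load/utils.py` later in this diff just delegates to it), so subclasses can layer extra rules on top of the standard family and representation checks. A hedged sketch with an invented extra rule:

```python
from openpype.pipeline import load


class SceneLoader(load.LoaderPlugin):  # hypothetical subclass
    families = ["workfile", "camera"]
    representations = ["ma", "abc"]

    @classmethod
    def is_compatible_loader(cls, context):
        # Keep the standard family/representation filtering.
        if not super(SceneLoader, cls).is_compatible_loader(context):
            return False
        # Illustrative additional constraint.
        return context["version"]["data"].get("frameStart") is not None
```
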
+ + Returns: + bool + """ + + plugin_repre_names = cls.get_representations() + plugin_families = cls.families + if not plugin_repre_names or not plugin_families: + return False + + repre_doc = context.get("representation") + if not repre_doc: + return False + + plugin_repre_names = set(plugin_repre_names) + if ( + "*" not in plugin_repre_names + and repre_doc["name"] not in plugin_repre_names + ): + return False + + maj_version, _ = schema.get_schema_version(context["subset"]["schema"]) + if maj_version < 3: + families = context["version"]["data"].get("families", []) + else: + families = context["subset"]["data"]["families"] + + plugin_families = set(plugin_families) + return ( + "*" in plugin_families + or any(family in plugin_families for family in families) + ) + @classmethod def get_representations(cls): return cls.representations diff --git a/openpype/pipeline/load/utils.py b/openpype/pipeline/load/utils.py index e2b3675115..fefdb8537b 100644 --- a/openpype/pipeline/load/utils.py +++ b/openpype/pipeline/load/utils.py @@ -28,7 +28,6 @@ from openpype.lib import ( TemplateUnsolved, ) from openpype.pipeline import ( - schema, legacy_io, Anatomy, ) @@ -643,7 +642,10 @@ def get_representation_path(representation, root=None, dbcon=None): def path_from_config(): try: - version_, subset, asset, project = dbcon.parenthood(representation) + project_name = dbcon.active_project() + version_, subset, asset, project = get_representation_parents( + project_name, representation + ) except ValueError: log.debug( "Representation %s wasn't found in database, " @@ -748,25 +750,9 @@ def is_compatible_loader(Loader, context): Returns: bool - """ - maj_version, _ = schema.get_schema_version(context["subset"]["schema"]) - if maj_version < 3: - families = context["version"]["data"].get("families", []) - else: - families = context["subset"]["data"]["families"] - representation = context["representation"] - has_family = ( - "*" in Loader.families or any( - family in Loader.families for family in families - ) - ) - representations = Loader.get_representations() - has_representation = ( - "*" in representations or representation["name"] in representations - ) - return has_family and has_representation + return Loader.is_compatible_loader(context) def loaders_from_repre_context(loaders, repre_context): diff --git a/openpype/pipeline/plugin_discover.py b/openpype/pipeline/plugin_discover.py index 7edd9ac290..e5257b801a 100644 --- a/openpype/pipeline/plugin_discover.py +++ b/openpype/pipeline/plugin_discover.py @@ -135,11 +135,12 @@ class PluginDiscoverContext(object): allow_duplicates (bool): Validate class name duplications. ignore_classes (list): List of classes that will be ignored and not added to result. + return_report (bool): Output will be full report if set to 'True'. Returns: - DiscoverResult: Object holding succesfully discovered plugins, - ignored plugins, plugins with missing abstract implementation - and duplicated plugin. + Union[DiscoverResult, list[Any]]: Object holding successfully + discovered plugins, ignored plugins, plugins with missing + abstract implementation and duplicated plugin. """ if not ignore_classes: @@ -268,9 +269,34 @@ class _GlobalDiscover: return cls._context -def discover(superclass, allow_duplicates=True): +def discover( + superclass, + allow_duplicates=True, + ignore_classes=None, + return_report=False +): + """Find and return subclasses of `superclass` + + Args: + superclass (type): Class which determines discovered subclasses. 
+ allow_duplicates (bool): Validate class name duplications. + ignore_classes (list): List of classes that will be ignored + and not added to result. + return_report (bool): Output will be full report if set to 'True'. + + Returns: + Union[DiscoverResult, list[Any]]: Object holding successfully + discovered plugins, ignored plugins, plugins with missing + abstract implementation and duplicated plugin. + """ + context = _GlobalDiscover.get_context() - return context.discover(superclass, allow_duplicates) + return context.discover( + superclass, + allow_duplicates, + ignore_classes, + return_report + ) def get_last_discovered_plugins(superclass): diff --git a/openpype/pipeline/publish/__init__.py b/openpype/pipeline/publish/__init__.py index dc6fc0f97a..05ba1c9c33 100644 --- a/openpype/pipeline/publish/__init__.py +++ b/openpype/pipeline/publish/__init__.py @@ -25,7 +25,6 @@ from .publish_plugins import ( from .lib import ( get_publish_template_name, - DiscoverResult, publish_plugins_discover, load_help_content_from_plugin, load_help_content_from_filepath, @@ -36,6 +35,7 @@ from .lib import ( filter_instances_for_context_plugin, context_plugin_should_run, get_instance_staging_dir, + get_publish_repre_path, ) from .abstract_expected_files import ExpectedFiles @@ -68,7 +68,6 @@ __all__ = ( "get_publish_template_name", - "DiscoverResult", "publish_plugins_discover", "load_help_content_from_plugin", "load_help_content_from_filepath", @@ -79,6 +78,7 @@ __all__ = ( "filter_instances_for_context_plugin", "context_plugin_should_run", "get_instance_staging_dir", + "get_publish_repre_path", "ExpectedFiles", diff --git a/openpype/pipeline/publish/lib.py b/openpype/pipeline/publish/lib.py index c76671fa39..bbc511fc5a 100644 --- a/openpype/pipeline/publish/lib.py +++ b/openpype/pipeline/publish/lib.py @@ -10,11 +10,18 @@ import six import pyblish.plugin import pyblish.api -from openpype.lib import Logger, filter_profiles +from openpype.lib import ( + Logger, + filter_profiles +) from openpype.settings import ( get_project_settings, get_system_settings, ) +from openpype.pipeline import ( + tempdir +) +from openpype.pipeline.plugin_discover import DiscoverResult from .contants import ( DEFAULT_PUBLISH_TEMPLATE, @@ -196,28 +203,6 @@ def get_publish_template_name( return template or default_template -class DiscoverResult: - """Hold result of publish plugins discovery. - - Stores discovered plugins duplicated plugins and file paths which - crashed on execution of file. - """ - def __init__(self): - self.plugins = [] - self.crashed_file_paths = {} - self.duplicated_plugins = [] - - def __iter__(self): - for plugin in self.plugins: - yield plugin - - def __getitem__(self, item): - return self.plugins[item] - - def __setitem__(self, item, value): - self.plugins[item] = value - - class HelpContent: def __init__(self, title, description, detail=None): self.title = title @@ -285,7 +270,7 @@ def publish_plugins_discover(paths=None): """ # The only difference with `pyblish.api.discover` - result = DiscoverResult() + result = DiscoverResult(pyblish.api.Plugin) plugins = dict() plugin_names = [] @@ -595,7 +580,7 @@ def context_plugin_should_run(plugin, context): Args: plugin (pyblish.api.Plugin): Plugin with filters. - context (pyblish.api.Context): Pyblish context with insances. + context (pyblish.api.Context): Pyblish context with instances. Returns: bool: Context plugin should run based on valid instances. 
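
With `return_report=True` the discovery call hands back the `DiscoverResult` wrapper instead of a plain list, which is how `CreateContext` now keeps crash details for the publisher report. A usage sketch using the creator wrapper from this diff:

```python
from openpype.pipeline.create import discover_creator_plugins

# Backwards-compatible default: a plain list of plugin classes.
plugins = discover_creator_plugins()

# Full report: discovered plugins plus files that failed to load.
report = discover_creator_plugins(return_report=True)
for cls in report.plugins:
    print("Discovered:", cls.__name__)
for filepath, exc_info in report.crashed_file_paths.items():
    print("Crashed on import:", filepath)
```
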
@@ -609,12 +594,21 @@ def context_plugin_should_run(plugin, context): def get_instance_staging_dir(instance): """Unified way how staging dir is stored and created on instances. - First check if 'stagingDir' is already set in instance data. If there is - not create new in tempdir. + First check if 'stagingDir' is already set in instance data. + In case there already is new tempdir will not be created. + + It also supports `OPENPYPE_TMPDIR`, so studio can define own temp + shared repository per project or even per more granular context. + Template formatting is supported also with optional keys. Folder is + created in case it doesn't exists. + + Available anatomy formatting keys: + - root[work | ] + - project[name | code] Note: - Staging dir does not have to be necessarily in tempdir so be carefull - about it's usage. + Staging dir does not have to be necessarily in tempdir so be careful + about its usage. Args: instance (pyblish.lib.Instance): Instance for which we want to get @@ -623,12 +617,73 @@ def get_instance_staging_dir(instance): Returns: str: Path to staging dir of instance. """ + staging_dir = instance.data.get('stagingDir') + if staging_dir: + return staging_dir - staging_dir = instance.data.get("stagingDir") - if not staging_dir: + anatomy = instance.context.data.get("anatomy") + + # get customized tempdir path from `OPENPYPE_TMPDIR` env var + custom_temp_dir = tempdir.create_custom_tempdir( + anatomy.project_name, anatomy) + + if custom_temp_dir: + staging_dir = os.path.normpath( + tempfile.mkdtemp( + prefix="pyblish_tmp_", + dir=custom_temp_dir + ) + ) + else: staging_dir = os.path.normpath( tempfile.mkdtemp(prefix="pyblish_tmp_") ) - instance.data["stagingDir"] = staging_dir + instance.data['stagingDir'] = staging_dir return staging_dir + + +def get_publish_repre_path(instance, repre, only_published=False): + """Get representation path that can be used for integration. + + When 'only_published' is set to true the validation of path is not + relevant. In that case we just need what is set in 'published_path' + as "reference". The reference is not used to get or upload the file but + for reference where the file was published. + + Args: + instance (pyblish.Instance): Processed instance object. Used + for source of staging dir if representation does not have + filled it. + repre (dict): Representation on instance which could be and + could not be integrated with main integrator. + only_published (bool): Care only about published paths and + ignore if filepath is not existing anymore. + + Returns: + str: Path to representation file. + None: Path is not filled or does not exists. 
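
`get_publish_repre_path` (newly exported above) first trusts an already integrated `published_path` and otherwise falls back to a file in the representation's staging directory, which itself may be redirected by the `OPENPYPE_TMPDIR` template described in `get_instance_staging_dir`. A hedged consumer sketch; the uploader scenario is illustrative:

```python
from openpype.pipeline.publish import get_publish_repre_path


def collect_upload_paths(instance):
    """Gather transferable paths, e.g. for a site-sync uploader."""
    paths = []
    for repre in instance.data.get("representations", []):
        # Returns None when nothing usable exists on disk.
        path = get_publish_repre_path(instance, repre)
        if path:
            paths.append(path)
    return paths
```
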
+ """ + + published_path = repre.get("published_path") + if published_path: + published_path = os.path.normpath(published_path) + if os.path.exists(published_path): + return published_path + + if only_published: + return published_path + + comp_files = repre["files"] + if isinstance(comp_files, (tuple, list, set)): + filename = comp_files[0] + else: + filename = comp_files + + staging_dir = repre.get("stagingDir") + if not staging_dir: + staging_dir = get_instance_staging_dir(instance) + src_path = os.path.normpath(os.path.join(staging_dir, filename)) + if os.path.exists(src_path): + return src_path + return None diff --git a/openpype/pipeline/publish/publish_plugins.py b/openpype/pipeline/publish/publish_plugins.py index 5ba3ded475..e2ae893aa9 100644 --- a/openpype/pipeline/publish/publish_plugins.py +++ b/openpype/pipeline/publish/publish_plugins.py @@ -118,7 +118,7 @@ class OpenPypePyblishPluginMixin: Attributes available for all families in plugin's `families` attribute. Returns: - list: Attribute definitions for plugin. + list: Attribute definitions for plugin. """ return [] diff --git a/openpype/pipeline/tempdir.py b/openpype/pipeline/tempdir.py new file mode 100644 index 0000000000..55a1346b08 --- /dev/null +++ b/openpype/pipeline/tempdir.py @@ -0,0 +1,59 @@ +""" +Temporary folder operations +""" + +import os +from openpype.lib import StringTemplate +from openpype.pipeline import Anatomy + + +def create_custom_tempdir(project_name, anatomy=None): + """ Create custom tempdir + + Template path formatting is supporting: + - optional key formatting + - available keys: + - root[work | ] + - project[name | code] + + Args: + project_name (str): project name + anatomy (openpype.pipeline.Anatomy)[optional]: Anatomy object + + Returns: + str | None: formatted path or None + """ + openpype_tempdir = os.getenv("OPENPYPE_TMPDIR") + if not openpype_tempdir: + return + + custom_tempdir = None + if "{" in openpype_tempdir: + if anatomy is None: + anatomy = Anatomy(project_name) + # create base formate data + data = { + "root": anatomy.roots, + "project": { + "name": anatomy.project_name, + "code": anatomy.project_code, + } + } + # path is anatomy template + custom_tempdir = StringTemplate.format_template( + openpype_tempdir, data).normalized() + + else: + # path is absolute + custom_tempdir = openpype_tempdir + + # create the dir path if it doesn't exists + if not os.path.exists(custom_tempdir): + try: + # create it if it doesn't exists + os.makedirs(custom_tempdir) + except IOError as error: + raise IOError( + "Path couldn't be created: {}".format(error)) + + return custom_tempdir diff --git a/openpype/pipeline/workfile/workfile_template_builder.py b/openpype/pipeline/workfile/workfile_template_builder.py index 1266c27fd7..119e4aaeb7 100644 --- a/openpype/pipeline/workfile/workfile_template_builder.py +++ b/openpype/pipeline/workfile/workfile_template_builder.py @@ -842,7 +842,8 @@ class PlaceholderPlugin(object): """Placeholder options for data showed. Returns: - List[AbtractAttrDef]: Attribute definitions of placeholder options. + List[AbstractAttrDef]: Attribute definitions of + placeholder options. """ return [] @@ -1143,7 +1144,7 @@ class PlaceholderLoadMixin(object): as defaults for attributes. Returns: - List[AbtractAttrDef]: Attribute definitions common for load + List[AbstractAttrDef]: Attribute definitions common for load plugins. """ @@ -1513,7 +1514,7 @@ class PlaceholderCreateMixin(object): as defaults for attributes. 
 
         Returns:
-            List[AbtractAttrDef]: Attribute definitions common for create
+            List[AbstractAttrDef]: Attribute definitions common for create
                 plugins.
         """
 
diff --git a/openpype/plugins/load/add_site.py b/openpype/plugins/load/add_site.py
index ac931e41db..e31f746f51 100644
--- a/openpype/plugins/load/add_site.py
+++ b/openpype/plugins/load/add_site.py
@@ -34,12 +34,24 @@ class AddSyncSite(load.LoaderPlugin):
         return self._sync_server
 
     def load(self, context, name=None, namespace=None, data=None):
-        self.log.info("Adding {} to representation: {}".format(
-            data["site_name"], data["_id"]))
-        family = context["representation"]["context"]["family"]
-        project_name = data["project_name"]
-        repre_id = data["_id"]
+        """Add site skeleton information to the representation.
+
+        Looks for containers loaded in the workfile and adds the site
+        skeleton to them too (e.g. they should be downloaded as well).
+        Args:
+            context (dict): Loaded representation context.
+            name (str): Subset name.
+            namespace (str): Optional namespace.
+            data (dict): Expects {"site_name": SITE_NAME_TO_ADD}.
+        """
+        # self.log won't propagate
+        project_name = context["project"]["name"]
+        repre_doc = context["representation"]
+        family = repre_doc["context"]["family"]
+        repre_id = repre_doc["_id"]
         site_name = data["site_name"]
+        print("Adding {} to representation: {}".format(
+            data["site_name"], repre_id))
 
         self.sync_server.add_site(project_name, repre_id, site_name,
                                   force=True)
@@ -52,6 +64,8 @@ class AddSyncSite(load.LoaderPlugin):
             )
             for link_repre_id in links:
                 try:
+                    print("Adding {} to linked representation: {}".format(
+                        data["site_name"], link_repre_id))
                     self.sync_server.add_site(project_name,
                                               link_repre_id,
                                               site_name,
                                               force=False)
diff --git a/openpype/plugins/load/remove_site.py b/openpype/plugins/load/remove_site.py
index c5f442b2f5..bea8b1b346 100644
--- a/openpype/plugins/load/remove_site.py
+++ b/openpype/plugins/load/remove_site.py
@@ -3,7 +3,10 @@ from openpype.pipeline import load
 
 
 class RemoveSyncSite(load.LoaderPlugin):
-    """Remove sync site and its files on representation"""
+    """Remove sync site and its files on representation.
+
+    Removes files only on local site!
+    """
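
Both sync-site loaders now take the project name and representation id from the loader context and expect only `site_name` in `data`. A hedged sketch of triggering one programmatically the way the Loader tool does; `repre_context` is assumed to be one value from `get_repres_contexts`:

```python
from openpype.pipeline.load import (
    discover_loader_plugins,
    load_with_repre_context,
)

add_site_loader = next(
    cls for cls in discover_loader_plugins()
    if cls.__name__ == "AddSyncSite"
)
load_with_repre_context(
    add_site_loader,
    repre_context,  # assumed given, see 'get_repres_contexts'
    options={"site_name": "studio"},  # the only key the loader reads
)
```
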
+ """ representations = ["*"] families = ["*"] @@ -24,13 +27,18 @@ class RemoveSyncSite(load.LoaderPlugin): return self._sync_server def load(self, context, name=None, namespace=None, data=None): - self.log.info("Removing {} on representation: {}".format( - data["site_name"], data["_id"])) - self.sync_server.remove_site(data["project_name"], - data["_id"], - data["site_name"], + project_name = context["project"]["name"] + repre_doc = context["representation"] + repre_id = repre_doc["_id"] + site_name = data["site_name"] + + print("Removing {} on representation: {}".format(site_name, repre_id)) + + self.sync_server.remove_site(project_name, + repre_id, + site_name, True) - self.log.debug("Site added.") + self.log.debug("Site removed.") def filepath_from_context(self, context): """No real file loading""" diff --git a/openpype/plugins/publish/collect_from_create_context.py b/openpype/plugins/publish/collect_from_create_context.py index d3398c885e..5fcf8feb56 100644 --- a/openpype/plugins/publish/collect_from_create_context.py +++ b/openpype/plugins/publish/collect_from_create_context.py @@ -32,7 +32,7 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin): thumbnail_paths_by_instance_id.get(None) ) - project_name = create_context.project_name + project_name = create_context.get_current_project_name() if project_name: context.data["projectName"] = project_name @@ -53,11 +53,15 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin): context.data.update(create_context.context_data_to_store()) context.data["newPublishing"] = True # Update context data - for key in ("AVALON_PROJECT", "AVALON_ASSET", "AVALON_TASK"): - value = create_context.dbcon.Session.get(key) - if value is not None: - legacy_io.Session[key] = value - os.environ[key] = value + asset_name = create_context.get_current_asset_name() + task_name = create_context.get_current_task_name() + for key, value in ( + ("AVALON_PROJECT", project_name), + ("AVALON_ASSET", asset_name), + ("AVALON_TASK", task_name) + ): + legacy_io.Session[key] = value + os.environ[key] = value def create_instance( self, diff --git a/openpype/plugins/publish/collect_resources_path.py b/openpype/plugins/publish/collect_resources_path.py index dcd80fbbdf..4a5f9f1cc2 100644 --- a/openpype/plugins/publish/collect_resources_path.py +++ b/openpype/plugins/publish/collect_resources_path.py @@ -61,7 +61,8 @@ class CollectResourcesPath(pyblish.api.InstancePlugin): "background", "effect", "staticMesh", - "skeletalMesh" + "skeletalMesh", + "xgen" ] def process(self, instance): diff --git a/openpype/plugins/publish/integrate.py b/openpype/plugins/publish/integrate.py index 7b73943c37..b117006871 100644 --- a/openpype/plugins/publish/integrate.py +++ b/openpype/plugins/publish/integrate.py @@ -506,6 +506,43 @@ class IntegrateAsset(pyblish.api.InstancePlugin): return version_doc + def _validate_repre_files(self, files, is_sequence_representation): + """Validate representation files before transfer preparation. + + Check if files contain only filenames instead of full paths and check + if sequence don't contain more than one sequence or has remainders. + + Args: + files (Union[str, List[str]]): Files from representation. + is_sequence_representation (bool): Files are for sequence. + + Raises: + KnownPublishError: If validations don't pass. 
+ """ + + if not files: + return + + if not is_sequence_representation: + files = [files] + + if any(os.path.isabs(fname) for fname in files): + raise KnownPublishError("Given file names contain full paths") + + if not is_sequence_representation: + return + + src_collections, remainders = clique.assemble(files) + if len(files) < 2 or len(src_collections) != 1 or remainders: + raise KnownPublishError(( + "Files of representation does not contain proper" + " sequence files.\nCollected collections: {}" + "\nCollected remainders: {}" + ).format( + ", ".join([str(col) for col in src_collections]), + ", ".join([str(rem) for rem in remainders]) + )) + def prepare_representation(self, repre, template_name, existing_repres_by_name, @@ -587,7 +624,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin): is_udim = bool(repre.get("udim")) # handle publish in place - if "originalDirname" in template: + if "{originalDirname}" in template: # store as originalDirname only original value without project root # if instance collected originalDirname is present, it should be # used for all represe @@ -606,24 +643,64 @@ class IntegrateAsset(pyblish.api.InstancePlugin): template_data["originalDirname"] = without_root is_sequence_representation = isinstance(files, (list, tuple)) - if is_sequence_representation: - # Collection of files (sequence) - if any(os.path.isabs(fname) for fname in files): - raise KnownPublishError("Given file names contain full paths") + self._validate_repre_files(files, is_sequence_representation) + # Output variables of conditions below: + # - transfers (List[Tuple[str, str]]): src -> dst filepaths to copy + # - repre_context (Dict[str, Any]): context data used to fill template + # - template_data (Dict[str, Any]): source data used to fill template + # - to add required data to 'repre_context' not used for + # formatting + # - anatomy_filled (Dict[str, Any]): filled anatomy of last file + # - to fill 'publishDir' on instance.data -> not ideal + + # Treat template with 'orignalBasename' in special way + if "{originalBasename}" in template: + # Remove 'frame' from template data + template_data.pop("frame", None) + + # Find out first frame string value + first_index_padded = None + if not is_udim and is_sequence_representation: + col = clique.assemble(files)[0][0] + sorted_frames = tuple(sorted(col.indexes)) + # First frame used for end value + first_frame = sorted_frames[0] + # Get last frame for padding + last_frame = sorted_frames[-1] + # Use padding from collection of length of last frame as string + padding = max(col.padding, len(str(last_frame))) + first_index_padded = get_frame_padded( + frame=first_frame, + padding=padding + ) + + # Convert files to list for single file as remaining part is only + # transfers creation (iteration over files) + if not is_sequence_representation: + files = [files] + + repre_context = None + transfers = [] + for src_file_name in files: + template_data["originalBasename"], _ = os.path.splitext( + src_file_name) + + anatomy_filled = anatomy.format(template_data) + dst = anatomy_filled[template_name]["path"] + src = os.path.join(stagingdir, src_file_name) + transfers.append((src, dst)) + if repre_context is None: + repre_context = dst.used_values + + if not is_udim and first_index_padded is not None: + repre_context["frame"] = first_index_padded + + elif is_sequence_representation: + # Collection of files (sequence) src_collections, remainders = clique.assemble(files) - if len(files) < 2 or len(src_collections) != 1 or remainders: - raise 
KnownPublishError(( - "Files of representation does not contain proper" - " sequence files.\nCollected collections: {}" - "\nCollected remainders: {}" - ).format( - ", ".join([str(col) for col in src_collections]), - ", ".join([str(rem) for rem in remainders]) - )) src_collection = src_collections[0] - template_data["originalBasename"] = src_collection.head[:-1] destination_indexes = list(src_collection.indexes) # Use last frame for minimum padding # - that should cover both 'udim' and 'frame' minimum padding @@ -645,11 +722,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin): # In case source are published in place we need to # skip renumbering repre_frame_start = repre.get("frameStart") - if ( - "originalBasename" not in template - and repre_frame_start is not None - ): - index_frame_start = int(repre["frameStart"]) + if repre_frame_start is not None: + index_frame_start = int(repre_frame_start) # Shift destination sequence to the start frame destination_indexes = [ index_frame_start + idx @@ -705,15 +779,6 @@ class IntegrateAsset(pyblish.api.InstancePlugin): else: # Single file - fname = files - if os.path.isabs(fname): - self.log.error( - "Filename in representation is filepath {}".format(fname) - ) - raise KnownPublishError( - "This is a bug. Representation file name is full path" - ) - template_data["originalBasename"], _ = os.path.splitext(fname) # Manage anatomy template data template_data.pop("frame", None) if is_udim: @@ -725,7 +790,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin): dst = os.path.normpath(template_filled) # Single file transfer - src = os.path.join(stagingdir, fname) + src = os.path.join(stagingdir, files) transfers = [(src, dst)] # todo: Are we sure the assumption each representation diff --git a/openpype/plugins/publish/integrate_hero_version.py b/openpype/plugins/publish/integrate_hero_version.py index 05d383415b..e796f7b376 100644 --- a/openpype/plugins/publish/integrate_hero_version.py +++ b/openpype/plugins/publish/integrate_hero_version.py @@ -386,6 +386,25 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): repre["_id"] = old_repre["_id"] update_data = prepare_representation_update_data( old_repre, repre) + + # Keep previously synchronized sites up-to-date + # by comparing old and new sites and adding old sites + # if missing in new ones + old_repre_files_sites = [ + f.get("sites", []) for f in old_repre.get("files", []) + ] + for i, file in enumerate(repre.get("files", [])): + repre_sites_names = { + s["name"] for s in file.get("sites", []) + } + for site in old_repre_files_sites[i]: + if site["name"] not in repre_sites_names: + # Pop the date to tag for sync + site.pop("created_dt", None) + file["sites"].append(site) + + update_data["files"][i] = file + op_session.update_entity( project_name, old_repre["type"], diff --git a/openpype/scripts/otio_burnin.py b/openpype/scripts/otio_burnin.py index 7223e8d4de..3e40bf0c8b 100644 --- a/openpype/scripts/otio_burnin.py +++ b/openpype/scripts/otio_burnin.py @@ -340,13 +340,11 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): _stdout, _stderr = proc.communicate() if _stdout: - for line in _stdout.split(b"\r\n"): - print(line.decode("utf-8")) + print(_stdout.decode("utf-8", errors="backslashreplace")) # This will probably never happen as ffmpeg use stdout if _stderr: - for line in _stderr.split(b"\r\n"): - print(line.decode("utf-8")) + print(_stderr.decode("utf-8", errors="backslashreplace")) if proc.returncode != 0: raise RuntimeError( diff --git 
a/openpype/settings/defaults/project_settings/deadline.json b/openpype/settings/defaults/project_settings/deadline.json index 25d2988982..24d1f7405b 100644 --- a/openpype/settings/defaults/project_settings/deadline.json +++ b/openpype/settings/defaults/project_settings/deadline.json @@ -33,7 +33,8 @@ "limit": [], "jobInfo": {}, "pluginInfo": {}, - "scene_patches": [] + "scene_patches": [], + "strict_error_checking": true }, "MaxSubmitRenderDeadline": { "enabled": true, diff --git a/openpype/settings/defaults/project_settings/maya.json b/openpype/settings/defaults/project_settings/maya.json index 23bd3e085e..64bba7b28c 100644 --- a/openpype/settings/defaults/project_settings/maya.json +++ b/openpype/settings/defaults/project_settings/maya.json @@ -1,4 +1,5 @@ { + "open_workfile_post_initialization": false, "imageio": { "ocio_config": { "enabled": false, diff --git a/openpype/settings/defaults/project_settings/nuke.json b/openpype/settings/defaults/project_settings/nuke.json index cd8ea02272..2999d1427d 100644 --- a/openpype/settings/defaults/project_settings/nuke.json +++ b/openpype/settings/defaults/project_settings/nuke.json @@ -246,6 +246,7 @@ "sourcetype": "python", "title": "Gizmo Note", "command": "nuke.nodes.StickyNote(label='You can create your own toolbar menu in the Nuke GizmoMenu of OpenPype')", + "icon": "", "shortcut": "" } ] diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json index f71a253105..3d1b413d6c 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json @@ -195,6 +195,12 @@ ] } + }, + { + "type": "boolean", + "key": "strict_error_checking", + "label": "Strict Error Checking", + "default": true } ] }, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json b/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json index d83666b5b2..47dfb37024 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json @@ -5,6 +5,11 @@ "label": "Maya", "is_file": true, "children": [ + { + "type": "boolean", + "key": "open_workfile_post_initialization", + "label": "Open Workfile Post Initialization" + }, { "key": "imageio", "type": "dict", diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json b/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json index b1a8cc1812..26c64e6219 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json @@ -17,6 +17,11 @@ "key": "menu", "label": "OpenPype Menu shortcuts", "children": [ + { + "type": "text", + "key": "create", + "label": "Create..." 
+ }, { "type": "text", "key": "publish", @@ -288,4 +293,4 @@ "name": "schema_publish_gui_filter" } ] -} \ No newline at end of file +} diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_scriptsgizmo.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_scriptsgizmo.json index abe14970c5..e4c65177a7 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_scriptsgizmo.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_scriptsgizmo.json @@ -72,6 +72,11 @@ "key": "command", "label": "Python command" }, + { + "type": "text", + "key": "icon", + "label": "Icon Path" + }, { "type": "text", "key": "shortcut", diff --git a/openpype/tests/test_lib_restructuralization.py b/openpype/tests/test_lib_restructuralization.py index c8952e5a1c..669706d470 100644 --- a/openpype/tests/test_lib_restructuralization.py +++ b/openpype/tests/test_lib_restructuralization.py @@ -5,11 +5,9 @@ def test_backward_compatibility(printer): printer("Test if imports still work") try: - from openpype.lib import filter_pyblish_plugins from openpype.lib import execute_hook from openpype.lib import PypeHook - from openpype.lib import get_latest_version from openpype.lib import ApplicationLaunchFailed from openpype.lib import get_ffmpeg_tool_path @@ -18,10 +16,6 @@ def test_backward_compatibility(printer): from openpype.lib import get_version_from_path from openpype.lib import version_up - from openpype.lib import is_latest - from openpype.lib import any_outdated - from openpype.lib import get_asset - from openpype.lib import get_linked_assets from openpype.lib import get_ffprobe_streams from openpype.hosts.fusion.lib import switch_item diff --git a/openpype/tests/test_pyblish_filter.py b/openpype/tests/test_pyblish_filter.py index ea23da26e4..b74784145f 100644 --- a/openpype/tests/test_pyblish_filter.py +++ b/openpype/tests/test_pyblish_filter.py @@ -1,9 +1,9 @@ -from . import lib +import os import pyblish.api import pyblish.util import pyblish.plugin -from openpype.lib import filter_pyblish_plugins -import os +from openpype.pipeline.publish.lib import filter_pyblish_plugins +from . 
import lib def test_pyblish_plugin_filter_modifier(printer, monkeypatch): diff --git a/openpype/tools/attribute_defs/widgets.py b/openpype/tools/attribute_defs/widgets.py index bf61dc3776..18e2e13d06 100644 --- a/openpype/tools/attribute_defs/widgets.py +++ b/openpype/tools/attribute_defs/widgets.py @@ -4,7 +4,7 @@ import copy from qtpy import QtWidgets, QtCore from openpype.lib.attribute_definitions import ( - AbtractAttrDef, + AbstractAttrDef, UnknownDef, HiddenDef, NumberDef, @@ -16,7 +16,11 @@ from openpype.lib.attribute_definitions import ( UISeparatorDef, UILabelDef ) -from openpype.tools.utils import CustomTextComboBox +from openpype.tools.utils import ( + CustomTextComboBox, + FocusSpinBox, + FocusDoubleSpinBox, +) from openpype.widgets.nice_checkbox import NiceCheckbox from .files_widget import FilesWidget @@ -33,9 +37,9 @@ def create_widget_for_attr_def(attr_def, parent=None): def _create_widget_for_attr_def(attr_def, parent=None): - if not isinstance(attr_def, AbtractAttrDef): + if not isinstance(attr_def, AbstractAttrDef): raise TypeError("Unexpected type \"{}\" expected \"{}\"".format( - str(type(attr_def)), AbtractAttrDef + str(type(attr_def)), AbstractAttrDef )) if isinstance(attr_def, NumberDef): @@ -142,6 +146,9 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget): if attr_def.label: label_widget = QtWidgets.QLabel(attr_def.label, self) + tooltip = attr_def.tooltip + if tooltip: + label_widget.setToolTip(tooltip) layout.addWidget( label_widget, row, 0, 1, expand_cols ) @@ -243,10 +250,10 @@ class NumberAttrWidget(_BaseAttrDefWidget): def _ui_init(self): decimals = self.attr_def.decimals if decimals > 0: - input_widget = QtWidgets.QDoubleSpinBox(self) + input_widget = FocusDoubleSpinBox(self) input_widget.setDecimals(decimals) else: - input_widget = QtWidgets.QSpinBox(self) + input_widget = FocusSpinBox(self) if self.attr_def.tooltip: input_widget.setToolTip(self.attr_def.tooltip) diff --git a/openpype/tools/loader/lib.py b/openpype/tools/loader/lib.py index 552dc91a10..d47bc7e07a 100644 --- a/openpype/tools/loader/lib.py +++ b/openpype/tools/loader/lib.py @@ -2,7 +2,7 @@ import inspect from qtpy import QtGui import qtawesome -from openpype.lib.attribute_definitions import AbtractAttrDef +from openpype.lib.attribute_definitions import AbstractAttrDef from openpype.tools.attribute_defs import AttributeDefinitionsDialog from openpype.tools.utils.widgets import ( OptionalAction, @@ -43,7 +43,7 @@ def get_options(action, loader, parent, repre_contexts): if not getattr(action, "optioned", False) or not loader_options: return options - if isinstance(loader_options[0], AbtractAttrDef): + if isinstance(loader_options[0], AbstractAttrDef): qargparse_options = False dialog = AttributeDefinitionsDialog(loader_options, parent) else: diff --git a/openpype/tools/loader/widgets.py b/openpype/tools/loader/widgets.py index c0e68fcc7a..0c5c9391cf 100644 --- a/openpype/tools/loader/widgets.py +++ b/openpype/tools/loader/widgets.py @@ -1480,23 +1480,21 @@ class RepresentationWidget(QtWidgets.QWidget): repre_ids = [] data_by_repre_id = {} selected_side = action_representation.get("selected_side") + site_name = "{}_site_name".format(selected_side) is_sync_loader = tools_lib.is_sync_loader(loader) for item in items: - item_id = item.get("_id") - repre_ids.append(item_id) + repre_id = item["_id"] + repre_ids.append(repre_id) if not is_sync_loader: continue - site_name = "{}_site_name".format(selected_side) data_site_name = item.get(site_name) if not data_site_name: continue - 
data_by_repre_id[item_id] = { - "_id": item_id, - "site_name": data_site_name, - "project_name": self.dbcon.active_project() + data_by_repre_id[repre_id] = { + "site_name": data_site_name } repre_contexts = get_repres_contexts(repre_ids, self.dbcon) @@ -1586,8 +1584,8 @@ def _load_representations_by_loader(loader, repre_contexts, version_name = version_doc.get("name") try: if data_by_repre_id: - _id = repre_context["representation"]["_id"] - data = data_by_repre_id.get(_id) + repre_id = repre_context["representation"]["_id"] + data = data_by_repre_id.get(repre_id) options.update(data) load_with_repre_context( loader, diff --git a/openpype/tools/publisher/constants.py b/openpype/tools/publisher/constants.py index e9fdd4774a..b2bfd7dd5c 100644 --- a/openpype/tools/publisher/constants.py +++ b/openpype/tools/publisher/constants.py @@ -24,6 +24,7 @@ CREATOR_THUMBNAIL_ENABLED_ROLE = QtCore.Qt.UserRole + 5 FAMILY_ROLE = QtCore.Qt.UserRole + 6 GROUP_ROLE = QtCore.Qt.UserRole + 7 CONVERTER_IDENTIFIER_ROLE = QtCore.Qt.UserRole + 8 +CREATOR_SORT_ROLE = QtCore.Qt.UserRole + 9 __all__ = ( @@ -36,6 +37,7 @@ __all__ = ( "IS_GROUP_ROLE", "CREATOR_IDENTIFIER_ROLE", "CREATOR_THUMBNAIL_ENABLED_ROLE", + "CREATOR_SORT_ROLE", "FAMILY_ROLE", "GROUP_ROLE", "CONVERTER_IDENTIFIER_ROLE", diff --git a/openpype/tools/publisher/control.py b/openpype/tools/publisher/control.py index 50a814de5c..023a20ca5e 100644 --- a/openpype/tools/publisher/control.py +++ b/openpype/tools/publisher/control.py @@ -169,6 +169,8 @@ class PublishReport: def __init__(self, controller): self.controller = controller + self._create_discover_result = None + self._convert_discover_result = None self._publish_discover_result = None self._plugin_data = [] self._plugin_data_with_plugin = [] @@ -181,6 +183,10 @@ class PublishReport: def reset(self, context, create_context): """Reset report and clear all data.""" + self._create_discover_result = create_context.creator_discover_result + self._convert_discover_result = ( + create_context.convertor_discover_result + ) self._publish_discover_result = create_context.publish_discover_result self._plugin_data = [] self._plugin_data_with_plugin = [] @@ -293,9 +299,19 @@ class PublishReport: if plugin not in self._stored_plugins: plugins_data.append(self._create_plugin_data_item(plugin)) - crashed_file_paths = {} + reports = [] + if self._create_discover_result is not None: + reports.append(self._create_discover_result) + + if self._convert_discover_result is not None: + reports.append(self._convert_discover_result) + if self._publish_discover_result is not None: - items = self._publish_discover_result.crashed_file_paths.items() + reports.append(self._publish_discover_result) + + crashed_file_paths = {} + for report in reports: + items = report.crashed_file_paths.items() for filepath, exc_info in items: crashed_file_paths[filepath] = "".join( traceback.format_exception(*exc_info) @@ -826,14 +842,14 @@ class CreatorItem: label, group_label, icon, - instance_attributes_defs, description, detailed_description, default_variant, default_variants, create_allow_context_change, create_allow_thumbnail, - pre_create_attributes_defs + show_order, + pre_create_attributes_defs, ): self.identifier = identifier self.creator_type = creator_type @@ -847,12 +863,9 @@ class CreatorItem: self.default_variants = default_variants self.create_allow_context_change = create_allow_context_change self.create_allow_thumbnail = create_allow_thumbnail - self.instance_attributes_defs = instance_attributes_defs + self.show_order = 
show_order self.pre_create_attributes_defs = pre_create_attributes_defs - def get_instance_attr_defs(self): - return self.instance_attributes_defs - def get_group_label(self): return self.group_label @@ -874,6 +887,7 @@ class CreatorItem: pre_create_attr_defs = None create_allow_context_change = None create_allow_thumbnail = None + show_order = creator.order if creator_type is CreatorTypes.artist: description = creator.get_description() detail_description = creator.get_detail_description() @@ -882,6 +896,7 @@ class CreatorItem: pre_create_attr_defs = creator.get_pre_create_attr_defs() create_allow_context_change = creator.create_allow_context_change create_allow_thumbnail = creator.create_allow_thumbnail + show_order = creator.show_order identifier = creator.identifier return cls( @@ -891,26 +906,20 @@ class CreatorItem: creator.label or identifier, creator.get_group_label(), creator.get_icon(), - creator.get_instance_attr_defs(), description, detail_description, default_variant, default_variants, create_allow_context_change, create_allow_thumbnail, - pre_create_attr_defs + show_order, + pre_create_attr_defs, ) def to_data(self): - instance_attributes_defs = None - if self.instance_attributes_defs is not None: - instance_attributes_defs = serialize_attr_defs( - self.instance_attributes_defs - ) - pre_create_attributes_defs = None if self.pre_create_attributes_defs is not None: - instance_attributes_defs = serialize_attr_defs( + pre_create_attributes_defs = serialize_attr_defs( self.pre_create_attributes_defs ) @@ -927,18 +936,12 @@ class CreatorItem: "default_variants": self.default_variants, "create_allow_context_change": self.create_allow_context_change, "create_allow_thumbnail": self.create_allow_thumbnail, - "instance_attributes_defs": instance_attributes_defs, + "show_order": self.show_order, "pre_create_attributes_defs": pre_create_attributes_defs, } @classmethod def from_data(cls, data): - instance_attributes_defs = data["instance_attributes_defs"] - if instance_attributes_defs is not None: - data["instance_attributes_defs"] = deserialize_attr_defs( - instance_attributes_defs - ) - pre_create_attributes_defs = data["pre_create_attributes_defs"] if pre_create_attributes_defs is not None: data["pre_create_attributes_defs"] = deserialize_attr_defs( @@ -1521,9 +1524,6 @@ class BasePublisherController(AbstractPublisherController): def _reset_attributes(self): """Reset most of attributes that can be reset.""" - # Reset creator items - self._creator_items = None - self.publish_is_running = False self.publish_has_validated = False self.publish_has_crashed = False @@ -1589,20 +1589,19 @@ class PublisherController(BasePublisherController): Handle both creation and publishing parts. Args: - dbcon (AvalonMongoDB): Connection to mongo with context. headless (bool): Headless publishing. ATM not implemented or used. 
""" _log = None - def __init__(self, dbcon=None, headless=False): + def __init__(self, headless=False): super(PublisherController, self).__init__() self._host = registered_host() self._headless = headless self._create_context = CreateContext( - self._host, dbcon, headless=headless, reset=False + self._host, headless=headless, reset=False ) self._publish_plugins_proxy = None @@ -1756,7 +1755,7 @@ class PublisherController(BasePublisherController): self._create_context.reset_preparation() # Reset avalon context - self._create_context.reset_avalon_context() + self._create_context.reset_current_context() self._asset_docs_cache.reset() @@ -1779,6 +1778,8 @@ class PublisherController(BasePublisherController): self._resetting_plugins = True self._create_context.reset_plugins() + # Reset creator items + self._creator_items = None self._resetting_plugins = False @@ -1879,12 +1880,12 @@ class PublisherController(BasePublisherController): which should be attribute definitions returned. """ + # NOTE it would be great if attrdefs would have hash method implemented + # so they could be used as keys in dictionary output = [] _attr_defs = {} for instance in instances: - creator_identifier = instance.creator_identifier - creator_item = self.creator_items[creator_identifier] - for attr_def in creator_item.instance_attributes_defs: + for attr_def in instance.creator_attribute_defs: found_idx = None for idx, _attr_def in _attr_defs.items(): if attr_def == _attr_def: @@ -2018,9 +2019,10 @@ class PublisherController(BasePublisherController): success = True try: - self._create_context.create( + self._create_context.create_with_unified_error( creator_identifier, subset_name, instance_data, options ) + except CreatorsOperationFailed as exc: success = False self._emit_event( diff --git a/openpype/tools/publisher/control_qt.py b/openpype/tools/publisher/control_qt.py index 3639c4bb30..132b42f9ec 100644 --- a/openpype/tools/publisher/control_qt.py +++ b/openpype/tools/publisher/control_qt.py @@ -136,10 +136,7 @@ class QtRemotePublishController(BasePublisherController): created_instances = {} for serialized_data in serialized_instances: - item = CreatedInstance.deserialize_on_remote( - serialized_data, - self._creator_items - ) + item = CreatedInstance.deserialize_on_remote(serialized_data) created_instances[item.id] = item self._created_instances = created_instances diff --git a/openpype/tools/publisher/widgets/create_widget.py b/openpype/tools/publisher/widgets/create_widget.py index 07b124f616..ef9c5b98fe 100644 --- a/openpype/tools/publisher/widgets/create_widget.py +++ b/openpype/tools/publisher/widgets/create_widget.py @@ -18,9 +18,10 @@ from .tasks_widget import CreateWidgetTasksWidget from .precreate_widget import PreCreateWidget from ..constants import ( VARIANT_TOOLTIP, - CREATOR_IDENTIFIER_ROLE, FAMILY_ROLE, + CREATOR_IDENTIFIER_ROLE, CREATOR_THUMBNAIL_ENABLED_ROLE, + CREATOR_SORT_ROLE, ) SEPARATORS = ("---separator---", "---") @@ -90,12 +91,19 @@ class CreatorShortDescWidget(QtWidgets.QWidget): self._description_label.setText(description) +class CreatorsProxyModel(QtCore.QSortFilterProxyModel): + def lessThan(self, left, right): + l_show_order = left.data(CREATOR_SORT_ROLE) + r_show_order = right.data(CREATOR_SORT_ROLE) + if l_show_order == r_show_order: + return super(CreatorsProxyModel, self).lessThan(left, right) + return l_show_order < r_show_order + + class CreateWidget(QtWidgets.QWidget): def __init__(self, controller, parent=None): super(CreateWidget, self).__init__(parent) - 
self.setWindowTitle("Create new instance") - self._controller = controller self._asset_name = None @@ -141,7 +149,7 @@ class CreateWidget(QtWidgets.QWidget): creators_view = QtWidgets.QListView(creators_view_widget) creators_model = QtGui.QStandardItemModel() - creators_sort_model = QtCore.QSortFilterProxyModel() + creators_sort_model = CreatorsProxyModel() creators_sort_model.setSourceModel(creators_model) creators_view.setModel(creators_sort_model) @@ -441,28 +449,33 @@ class CreateWidget(QtWidgets.QWidget): # Add new families new_creators = set() - for identifier, creator_item in self._controller.creator_items.items(): + creator_items_by_identifier = self._controller.creator_items + for identifier, creator_item in creator_items_by_identifier.items(): if creator_item.creator_type != "artist": continue # TODO add details about creator new_creators.add(identifier) if identifier in existing_items: + is_new = False item = existing_items[identifier] else: + is_new = True item = QtGui.QStandardItem() item.setFlags( QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable ) - self._creators_model.appendRow(item) item.setData(creator_item.label, QtCore.Qt.DisplayRole) + item.setData(creator_item.show_order, CREATOR_SORT_ROLE) item.setData(identifier, CREATOR_IDENTIFIER_ROLE) item.setData( creator_item.create_allow_thumbnail, CREATOR_THUMBNAIL_ENABLED_ROLE ) item.setData(creator_item.family, FAMILY_ROLE) + if is_new: + self._creators_model.appendRow(item) # Remove families that are no more available for identifier in (old_creators - new_creators): @@ -482,8 +495,9 @@ class CreateWidget(QtWidgets.QWidget): index = indexes[0] identifier = index.data(CREATOR_IDENTIFIER_ROLE) + create_item = creator_items_by_identifier.get(identifier) - self._set_creator_by_identifier(identifier) + self._set_creator(create_item) def _on_plugins_refresh(self): # Trigger refresh only if is visible diff --git a/openpype/tools/publisher/widgets/widgets.py b/openpype/tools/publisher/widgets/widgets.py index 2e8d0ce37c..8da3886419 100644 --- a/openpype/tools/publisher/widgets/widgets.py +++ b/openpype/tools/publisher/widgets/widgets.py @@ -250,21 +250,25 @@ class PublishReportBtn(PublishIconBtn): self._actions = [] def add_action(self, label, identifier): - action = QtWidgets.QAction(label) - action.setData(identifier) - action.triggered.connect( - functools.partial(self._on_action_trigger, action) + self._actions.append( + (label, identifier) ) - self._actions.append(action) - def _on_action_trigger(self, action): - identifier = action.data() + def _on_action_trigger(self, identifier): self.triggered.emit(identifier) def mouseReleaseEvent(self, event): super(PublishReportBtn, self).mouseReleaseEvent(event) menu = QtWidgets.QMenu(self) - menu.addActions(self._actions) + actions = [] + for item in self._actions: + label, identifier = item + action = QtWidgets.QAction(label, menu) + action.triggered.connect( + functools.partial(self._on_action_trigger, identifier) + ) + actions.append(action) + menu.addActions(actions) menu.exec_(event.globalPos()) @@ -1220,7 +1224,8 @@ class GlobalAttrsWidget(QtWidgets.QWidget): asset_task_combinations = [] for instance in instances: - if instance.creator is None: + # NOTE I'm not sure how this can even happen? 
+ if instance.creator_identifier is None: editable = False variants.add(instance.get("variant") or self.unknown_value) diff --git a/openpype/tools/publisher/window.py b/openpype/tools/publisher/window.py index 097e289f32..6f7ffdb8ea 100644 --- a/openpype/tools/publisher/window.py +++ b/openpype/tools/publisher/window.py @@ -366,7 +366,7 @@ class PublisherWindow(QtWidgets.QDialog): def make_sure_is_visible(self): if self._window_is_visible: - self.setWindowState(QtCore.Qt.ActiveWindow) + self.setWindowState(QtCore.Qt.WindowActive) else: self.show() @@ -566,24 +566,24 @@ class PublisherWindow(QtWidgets.QDialog): def _go_to_publish_tab(self): self._set_current_tab("publish") - def _go_to_details_tab(self): - self._set_current_tab("details") - def _go_to_report_tab(self): self._set_current_tab("report") + def _go_to_details_tab(self): + self._set_current_tab("details") + def _is_on_create_tab(self): return self._is_current_tab("create") def _is_on_publish_tab(self): return self._is_current_tab("publish") - def _is_on_details_tab(self): - return self._is_current_tab("details") - def _is_on_report_tab(self): return self._is_current_tab("report") + def _is_on_details_tab(self): + return self._is_current_tab("details") + def _set_publish_overlay_visibility(self, visible): if visible: widget = self._publish_overlay @@ -647,16 +647,10 @@ class PublisherWindow(QtWidgets.QDialog): # otherwise 'create' is used # - this happens only on first show if first_reset: - if self._overview_widget.has_items(): - self._go_to_publish_tab() - else: - self._go_to_create_tab() + self._go_to_create_tab() - elif ( - not self._is_on_create_tab() - and not self._is_on_publish_tab() - ): - # If current tab is not 'Create' or 'Publish' go to 'Publish' + elif self._is_on_report_tab(): + # Go to 'Publish' tab if is on 'Details' tab # - this can happen when publishing started and was reset # at that moment it doesn't make sense to stay at publish # specific tabs. 
diff --git a/openpype/tools/utils/__init__.py b/openpype/tools/utils/__init__.py index d51ebb5744..4292e2d726 100644 --- a/openpype/tools/utils/__init__.py +++ b/openpype/tools/utils/__init__.py @@ -1,4 +1,6 @@ from .widgets import ( + FocusSpinBox, + FocusDoubleSpinBox, CustomTextComboBox, PlaceholderLineEdit, BaseClickableFrame, @@ -34,6 +36,8 @@ from .overlay_messages import ( __all__ = ( + "FocusSpinBox", + "FocusDoubleSpinBox", "CustomTextComboBox", "PlaceholderLineEdit", "BaseClickableFrame", diff --git a/openpype/tools/utils/widgets.py b/openpype/tools/utils/widgets.py index a9d6fa35b2..b416c56797 100644 --- a/openpype/tools/utils/widgets.py +++ b/openpype/tools/utils/widgets.py @@ -8,11 +8,39 @@ from openpype.style import ( get_objected_colors, get_style_image_path ) -from openpype.lib.attribute_definitions import AbtractAttrDef +from openpype.lib.attribute_definitions import AbstractAttrDef log = logging.getLogger(__name__) +class FocusSpinBox(QtWidgets.QSpinBox): + """QSpinBox which allow scroll wheel changes only in active state.""" + + def __init__(self, *args, **kwargs): + super(FocusSpinBox, self).__init__(*args, **kwargs) + self.setFocusPolicy(QtCore.Qt.StrongFocus) + + def wheelEvent(self, event): + if not self.hasFocus(): + event.ignore() + else: + super(FocusSpinBox, self).wheelEvent(event) + + +class FocusDoubleSpinBox(QtWidgets.QDoubleSpinBox): + """QDoubleSpinBox which allow scroll wheel changes only in active state.""" + + def __init__(self, *args, **kwargs): + super(FocusDoubleSpinBox, self).__init__(*args, **kwargs) + self.setFocusPolicy(QtCore.Qt.StrongFocus) + + def wheelEvent(self, event): + if not self.hasFocus(): + event.ignore() + else: + super(FocusDoubleSpinBox, self).wheelEvent(event) + + class CustomTextComboBox(QtWidgets.QComboBox): """Combobox which can have different text showed.""" @@ -406,7 +434,7 @@ class OptionalAction(QtWidgets.QWidgetAction): def set_option_tip(self, options): sep = "\n\n" - if not options or not isinstance(options[0], AbtractAttrDef): + if not options or not isinstance(options[0], AbstractAttrDef): mak = (lambda opt: opt["name"] + " :\n " + opt["help"]) self.option_tip = sep.join(mak(opt) for opt in options) return diff --git a/openpype/tools/workfiles/files_widget.py b/openpype/tools/workfiles/files_widget.py index 765d32b3d5..18be746d49 100644 --- a/openpype/tools/workfiles/files_widget.py +++ b/openpype/tools/workfiles/files_widget.py @@ -621,7 +621,7 @@ class FilesWidget(QtWidgets.QWidget): "caption": "Work Files", "filter": ext_filter } - if qtpy.API in ("pyside", "pyside2"): + if qtpy.API in ("pyside", "pyside2", "pyside6"): kwargs["dir"] = self._workfiles_root else: kwargs["directory"] = self._workfiles_root diff --git a/openpype/version.py b/openpype/version.py index 3941912c6e..6d060656cb 100644 --- a/openpype/version.py +++ b/openpype/version.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- """Package declaring Pype version.""" -__version__ = "3.15.1-nightly.2" +__version__ = "3.15.1-nightly.6" diff --git a/pyproject.toml b/pyproject.toml index 634aeda5ac..a872ed3609 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "OpenPype" -version = "3.15.1-nightly.2" # OpenPype +version = "3.15.0" # OpenPype description = "Open VFX and Animation pipeline with support." 
authors = ["OpenPype Team "] license = "MIT License" diff --git a/tools/ci_tools.py b/tools/ci_tools.py index c8f0cd48b4..750bf8645d 100644 --- a/tools/ci_tools.py +++ b/tools/ci_tools.py @@ -7,16 +7,10 @@ from github import Github import os def get_release_type_github(Log, github_token): - # print(Log) minor_labels = ["Bump Minor"] - # patch_labels = [ - # "type: enhancement", - # "type: bug", - # "type: deprecated", - # "type: Feature"] g = Github(github_token) - repo = g.get_repo("pypeclub/OpenPype") + repo = g.get_repo("ynput/OpenPype") labels = set() for line in Log.splitlines(): @@ -35,12 +29,12 @@ def get_release_type_github(Log, github_token): else: return "patch" - # TODO: if all is working fine, this part can be cleaned up eventually + # TODO: if all is working fine, this part can be cleaned up eventually # if any(label in labels for label in patch_labels): # return "patch" - + return None - + def remove_prefix(text, prefix): return text[text.startswith(prefix) and len(prefix):] @@ -93,12 +87,16 @@ def file_regex_replace(filename, regex, version): f.truncate() -def bump_file_versions(version): +def bump_file_versions(version, nightly=False): filename = "./openpype/version.py" regex = "(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(-((0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(\.(0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(\+([0-9a-zA-Z-]+(\.[0-9a-zA-Z-]+)*))?" file_regex_replace(filename, regex, version) + if nightly: + # skip nightly reversion in pyproject.toml + return + # bump pyproject.toml filename = "pyproject.toml" regex = "version = \"(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(\+((0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(\.(0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(\+([0-9a-zA-Z-]+(\.[0-9a-zA-Z-]+)*))?\" # OpenPype" @@ -196,7 +194,7 @@ def main(): if options.nightly: next_tag_v = calculate_next_nightly(github_token=options.github_token) print(next_tag_v) - bump_file_versions(next_tag_v) + bump_file_versions(next_tag_v, True) if options.finalize: new_release = finalize_prerelease(options.finalize) @@ -222,7 +220,7 @@ def main(): new_prerelease = current_prerelease.bump_prerelease().__str__() print(new_prerelease) bump_file_versions(new_prerelease) - + if options.version: bump_file_versions(options.version) print(f"Injected version {options.version} into the release") diff --git a/website/docs/admin_environment.md b/website/docs/admin_environment.md new file mode 100644 index 0000000000..1eb755b90b --- /dev/null +++ b/website/docs/admin_environment.md @@ -0,0 +1,30 @@ +--- +id: admin_environment +title: Environment +sidebar_label: Environment +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +## OPENPYPE_TMPDIR: + - Custom staging dir directory + - Supports anatomy keys formatting. ex `{root[work]}/{project[name]}/temp` + - supported formatting keys: + - root[work] + - project[name | code] + +## OPENPYPE_DEBUG + - setting logger to debug mode + - example value: "1" (to activate) + +## OPENPYPE_LOG_LEVEL + - stringified numeric value of log level. 
[Here for more info](https://docs.python.org/3/library/logging.html#logging-levels) + - example value: "10" + +## OPENPYPE_MONGO +- If set it takes precedence over the one set in keyring +- for more details on how to use it go [here](admin_use#check-for-mongodb-database-connection) + +## OPENPYPE_USERNAME +- if set it overides system created username diff --git a/website/docs/admin_settings_system.md b/website/docs/admin_settings_system.md index 8aeb281109..d61713ccd5 100644 --- a/website/docs/admin_settings_system.md +++ b/website/docs/admin_settings_system.md @@ -13,18 +13,23 @@ Settings applicable to the full studio. ![general_settings](assets/settings/settings_system_general.png) -**`Studio Name`** - Full name of the studio (can be used as variable on some places) +### Studio Name +Full name of the studio (can be used as variable on some places) -**`Studio Code`** - Studio acronym or a short code (can be used as variable on some places) +### Studio Code +Studio acronym or a short code (can be used as variable on some places) -**`Admin Password`** - After setting admin password, normal user won't have access to OpenPype settings +### Admin Password +After setting admin password, normal user won't have access to OpenPype settings and Project Manager GUI. Please keep in mind that this is a studio wide password and it is meant purely as a simple barrier to prevent artists from accidental setting changes. -**`Environment`** - Globally applied environment variables that will be appended to any OpenPype process in the studio. +### Environment +Globally applied environment variables that will be appended to any OpenPype process in the studio. -**`Disk mapping`** - Platform dependent configuration for mapping of virtual disk(s) on an artist's OpenPype machines before OP starts up. -Uses `subst` command, if configured volume character in `Destination` field already exists, no re-mapping is done for that character(volume). +### Disk mapping +- Platform dependent configuration for mapping of virtual disk(s) on an artist's OpenPype machines before OP starts up. +- Uses `subst` command, if configured volume character in `Destination` field already exists, no re-mapping is done for that character(volume). ### FFmpeg and OpenImageIO tools We bundle FFmpeg tools for all platforms and OpenImageIO tools for Windows and Linux. By default, bundled tools are used, but it is possible to set environment variables `OPENPYPE_FFMPEG_PATHS` and `OPENPYPE_OIIO_PATHS` in system settings environments to look for them in different directory. @@ -171,4 +176,4 @@ In the image before you can see that we set most of the environment variables in In this example MTOA will automatically will the `MAYA_VERSION`(which is set by Maya Application environment) and `MTOA_VERSION` into the `MTOA` variable. We then use the `MTOA` to set all the other variables needed for it to function within Maya. ![tools](assets/settings/tools_01.png) -All of the tools defined in here can then be assigned to projects. You can also change the tools versions on any project level all the way down to individual asset or shot overrides. So if you just need to upgrade you render plugin for a single shot, while not risking the incompatibilities on the rest of the project, it is possible. \ No newline at end of file +All the tools defined in here can then be assigned to projects. You can also change the tools versions on any project level all the way down to individual asset or shot overrides. 
So if you just need to upgrade you render plugin for a single shot, while not risking the incompatibilities on the rest of the project, it is possible. diff --git a/website/docs/artist_getting_started.md b/website/docs/artist_getting_started.md index 2f88a9f238..301a58fa56 100644 --- a/website/docs/artist_getting_started.md +++ b/website/docs/artist_getting_started.md @@ -7,6 +7,7 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; + ## Working in the studio In studio environment you should have OpenPype already installed and deployed, so you can start using it without much setup. Your admin has probably put OpenPype icon on your desktop or even had your computer set up so OpenPype will start automatically. @@ -15,70 +16,66 @@ If this is not the case, please contact your administrator to consult on how to ## Working from home -If you are working from home though, you'll need to install it yourself. You should, however, receive the OpenPype installer files from your studio -admin, supervisor or production, because OpenPype versions and executables might not be compatible between studios. +If you are working from **home** though, you'll **need to install** it yourself. You should, however, receive the OpenPype installer files from your studio +admin, supervisor or production, because OpenPype versions and executables might not be compatible between studios. -To install OpenPype you just need to unzip it anywhere on the disk +Installing OpenPype is possible by Installer or by unzipping downloaded ZIP archive to any drive location. -To use it, you have two options - -**openpype_gui.exe** is the most common for artists. It runs OpenPype GUI in system tray. From there you can run all the available tools. To use any of the features, OpenPype must be running in the tray. - -**openpype_console.exe** in useful for debugging and error reporting. It opens console window where all the necessary information will appear during user's work. +:::tip Using the OpenPype Installer +See the [Installation section](artist_install.md) for more information on how to use the OpenPype Installer +::: - +You can run OpenPype by desktop "OP" icon (if it exists after installing) or by directly executing - +**openpype_gui.exe** located in the OpenPype folder. This executable being suitable **for artists**. -WIP - Windows instructions once installers are finished +or alternatively by - - +**openpype_console.exe** which is more suitable for **TDs/Admin** for debugging and error reporting. This one runs with +opened console window where all the necessary info will appear during user's work session. -WIP - Linux instructions once installers are finished +:::tip Is OpenPype running? +OpenPype runs in the operating system's tray. If you see turquoise OpenPype icon in the tray you can easily tell OpenPype is currently running. +Keep in mind that on Windows this icon might be hidden by default, in which case, the artist can simply drag the icon down to the tray. +::: - - - -WIP - Mac instructions once installers are finished - - - +![Systray](assets/artist_systray.png) ## First Launch -When you first start OpenPype, you will be asked to give it some basic information. +When you first start OpenPype, you will be asked to fill in some basic information. + ### MongoDB -In most cases that will only be your studio MongoDB Address. +In most cases you will only have to supply the MongoDB Address. 
+It's the database URL you should have received from your Studio admin and often will look like this -It is a URL that you should receive from you studio and most often will look like this `mongodb://username:passwword@mongo.mystudiodomain.com:12345` or `mongodb://192.168.100.15:27071`, it really depends on your studio setup. When OpenPype Igniter +`mongodb://username:passwword@mongo.mystudiodomain.com:12345` + + or + + `mongodb://192.168.100.15:27071` + +it really depends on your studio setup. When OpenPype Igniter asks for it, just put it in the corresponding text field and press `install` button. ### OpenPype Version Repository -Sometimes your studio might also ask you to fill in the path to it's version -repository. This is a location where OpenPype will be looking for when checking -if it's up to date and where updates are installed from automatically. +Sometimes your Studio might also ask you to fill in the path to its version +repository. This is a location where OpenPype will search for the latest versions, check +if it's up to date and where updates are installed from automatically. -This pat is usually taken from the database directly, so you shouldn't need it. +This path is usually taken from the database directly, so you shouldn't need it. ## Updates -If you're connected to your studio, OpenPype will check for, and install updates automatically every time you run it. That's why during the first start, it will go through a quick update installation process, even though you might have just installed it. +If you're connected to your Studio, OpenPype will check for, and install updates automatically every time you run it. That's why during the first start it can go through a quick update installation process, even though you might have just installed it. -## Advanced use +## Advanced Usage For more advanced use of OpenPype commands please visit [Admin section](admin_openpype_commands.md). diff --git a/website/docs/artist_hosts_3dsmax.md b/website/docs/artist_hosts_3dsmax.md new file mode 100644 index 0000000000..71ba8785dc --- /dev/null +++ b/website/docs/artist_hosts_3dsmax.md @@ -0,0 +1,125 @@ +--- +id: artist_hosts_3dsmax +title: 3dsmax +sidebar_label: 3dsmax +--- + +:::note Work in progress +This part of documentation is still work in progress. +::: + + + + +## First Steps With OpenPype + +Locate **OpenPype Icon** in the OS tray (if hidden dive in the tray toolbar). + +> If you cannot locate the OpenPype icon ...it is not probably running so check [Getting Started](artist_getting_started.md) first. + +By clicking the icon ```OpenPype Menu``` rolls out. + +Choose ```OpenPype Menu > Launcher``` to open the ```Launcher``` window. + +When opened you can **choose** the **project** to work in from the list. Then choose the particular **asset** you want to work on then choose **task** +and finally **run 3dsmax by its icon** in the tools. + +![Menu OpenPype](assets/3dsmax_tray_OP.png) + +:::note Launcher Content +The list of available projects, assets, tasks and tools will differ according to your Studio and need to be set in advance by supervisor/admin. +::: + +## Running in the 3dsmax + +If 3dsmax has been launched via OP Launcher there should be **OpenPype Menu** visible in 3dsmax **top header** after start. +This is the core functional area for you as a user. Most of your actions will take place here. 
+ +![Menu OpenPype](assets/3dsmax_menu_first_OP.png) + +:::note OpenPype Menu +User should use this menu exclusively for **Opening/Saving** when dealing with work files not standard ```File Menu``` even though user still being able perform file operations via this menu but prefferably just performing quick saves during work session not saving actual workfile versions. +::: + +## Working With Scene Files + +In OpenPype menu first go to ```Work Files``` menu item so **Work Files Window** shows up. + + Here you can perform Save / Load actions as you would normally do with ```File Save ``` and ```File Open``` in the standard 3dsmax ```File Menu``` and navigate to different project components like assets, tasks, workfiles etc. + + +![Menu OpenPype](assets/3dsmax_menu_OP.png) + +You first choose particular asset and assigned task and corresponding workfile you would like to open. + +If not any workfile present simply hit ```Save As``` and keep ```Subversion``` empty and hit ```Ok```. + +![Save As Dialog](assets/3dsmax_SavingFirstFile_OP.png) + +OpenPype correctly names it and add version to the workfile. This basically happens whenever user trigger ```Save As``` action. Resulting into incremental version numbers like + +```workfileName_v001``` + +```workfileName_v002``` + + etc. + +Basically meaning user is free of guessing what is the correct naming and other neccessities to keep everthing in order and managed. + +> Note: user still has also other options for naming like ```Subversion```, ```Artist's Note``` but we won't dive into those now. + +Here you can see resulting work file after ```Save As``` action. + +![Save As Dialog](assets/3dsmax_SavingFirstFile2_OP.png) + +## Understanding Context + +As seen on our example OpenPype created pretty first workfile and named it ```220901_couch_modeling_v001.max``` meaning it sits in the Project ```220901``` being it ```couch``` asset and workfile being ```modeling``` task and obviously ```v001``` telling user its first existing version of this workfile. + +It is good to be aware that whenever you as a user choose ```asset``` and ```task``` you happen to be in so called **context** meaning that all user actions are in relation with particular ```asset```. This could be quickly seen in host application header and ```OpenPype Menu``` and its accompanying tools. + +![Workfile Context](assets/3dsmax_context.png) + +> Whenever you choose different ```asset``` and its ```task``` in **Work Files window** you are basically changing context to the current asset/task you have chosen. + + +This concludes the basics of working with workfiles in 3dsmax using OpenPype and its tools. Following chapters will cover other aspects like creating multiple assets types and their publishing for later usage in the production. + +--- + +## Creating and Publishing Instances + +:::warning Important +Before proceeding further please check [Glossary](artist_concepts.md) and [What Is Publishing?](artist_publish.md) So you have clear idea about terminology. +::: + + +### Intro + +Current OpenPype integration (ver 3.15.0) supports only ```PointCache``` and ```Camera``` families now. + +**Pointcache** family being basically any geometry outputted as Alembic cache (.abc) format + +**Camera** family being 3dsmax Camera object with/without animation outputted as native .max, FBX, Alembic format + + +--- + +:::note Work in progress +This part of documentation is still work in progress. 
+::: + +## ...to be added + + + + diff --git a/website/docs/artist_hosts_maya.md b/website/docs/artist_hosts_maya.md index 5cd8efa153..9fab845e62 100644 --- a/website/docs/artist_hosts_maya.md +++ b/website/docs/artist_hosts_maya.md @@ -308,6 +308,8 @@ Select its root and Go **OpenPype โ†’ Create...** and select **Point Cache**. After that, publishing will create corresponding **abc** files. +When creating the instance, a objectset child `proxy` will be created. Meshes in the `proxy` objectset will be the viewport representation where loading supports proxies. Proxy representations are stored as `resources` of the subset. + Example setup: ![Maya - Point Cache Example](assets/maya-pointcache_setup.png) @@ -315,6 +317,7 @@ Example setup: :::note Publish on farm If your studio has Deadline configured, artists could choose to offload potentially long running export of pointache and publish it to the farm. Only thing that is necessary is to toggle `Farm` property in created pointcache instance to True. +::: ### Loading Point Caches @@ -601,3 +604,20 @@ about customizing review process refer to [admin section](project_settings/setti If you don't move `modelMain` into `reviewMain`, review will be generated but it will be published as separate entity. + + +## Inventory Actions + +### Connect Geometry + +This action will connect geometries between containers. + +#### Usage + +Select 1 container of type `animation` or `pointcache`, then 1+ container of any type. + +#### Details + +The action searches the selected containers for 1 animation container of type `animation` or `pointcache`. This animation container will be connected to the rest of the selected containers. Matching geometries between containers is done by comparing the attribute `cbId`. + +The connection between geometries is done with a live blendshape. diff --git a/website/docs/artist_hosts_maya_arnold.md b/website/docs/artist_hosts_maya_arnold.md new file mode 100644 index 0000000000..b3c02a0894 --- /dev/null +++ b/website/docs/artist_hosts_maya_arnold.md @@ -0,0 +1,30 @@ +--- +id: artist_hosts_maya_arnold +title: Arnold for Maya +sidebar_label: Arnold +--- +## Arnold Scene Source (.ass) +Arnold Scene Source can be published as a single file or a sequence of files, determined by the frame range. + +When creating the instance, two objectsets are created; `content` and `proxy`. Meshes in the `proxy` objectset will be the viewport representation when loading as `standin`. Proxy representations are stored as `resources` of the subset. + +### Arnold Scene Source Proxy Workflow +In order to utilize operators and proxies, the content and proxy nodes need to share the same names (including the shape names). This is done by parenting the content and proxy nodes into separate groups. For example: + +![Arnold Scene Source](assets/maya-arnold_scene_source.png) + +## Standin +Arnold Scene Source `ass` and Alembic `abc` are supported to load as standins. + +### Standin Proxy Workflow +If a subset has a proxy representation, this will be used as display in the viewport. At render time the standin path will be replaced using the recommended string replacement workflow; + +https://help.autodesk.com/view/ARNOL/ENU/?guid=arnold_for_maya_operators_am_Updating_procedural_file_paths_with_string_replace_html + +Since the content and proxy nodes share the same names and hierarchy, any manually shader assignments will be shared. + + +:::note for advanced users +You can stop the proxy swapping by disabling the string replacement operator found in the container. 
+![Arnold Standin](assets/maya-arnold_standin.png) +::: diff --git a/website/docs/artist_hosts_maya_xgen.md b/website/docs/artist_hosts_maya_xgen.md index 8b0174a29f..ec5f2ed921 100644 --- a/website/docs/artist_hosts_maya_xgen.md +++ b/website/docs/artist_hosts_maya_xgen.md @@ -4,26 +4,96 @@ title: Xgen for Maya sidebar_label: Xgen --- -## Working with Xgen in OpenPype +OpenPype supports Xgen classic with the follow workflow. It eases the otherwise cumbersome issues around Xgen's side car files and hidden behaviour inside Maya. The workflow supports publishing, loading and updating of Xgen collections, along with connecting animation from geometry and (guide) curves. -OpenPype support publishing and loading of Xgen interactive grooms. You can publish -them as mayaAscii files with scalps that can be loaded into another maya scene, or as -alembic caches. +## Setup -### Publishing Xgen Grooms +### Settings -To prepare xgen for publishing just select all the descriptions that should be published together and the create Xgen Subset in the scene using - **OpenPype menu** โ†’ **Create**... and select **Xgen Interactive**. Leave Use selection checked. +Go to project settings > `Maya` > enable `Open Workfile Post Initialization`; -For actual publishing of your groom to go **OpenPype โ†’ Publish** and then press โ–ถ to publish. This will export `.ma` file containing your grooms with any geometries they are attached to and also a baked cache in `.abc` format +`project_settings/maya/open_workfile_post_initialization` +This is due to two errors occurring when opening workfile containing referenced xgen nodes on launch of Maya, specifically: -:::tip adding more descriptions -You can add multiple xgen description into the subset you are about to publish, simply by -adding them to the maya set that was created for you. Please make sure that only xgen description nodes are present inside of the set and not the scalp geometry. -::: +- ``Critical``: Duplicate collection errors on launching workfile. This is because Maya first imports Xgen when referencing in external Maya files, then imports Xgen again when the reference edits are applied. +``` +Importing XGen Collections... +# Error: XGen: Failed to find description ball_xgenMain_01_:parent in collection ball_xgenMain_01_:collection. Abort applying delta: P:/PROJECTS/OP01_CG_demo/shots/sh040/work/Lighting/cg_sh040_Lighting_v001__ball_xgenMain_01___collection.xgen # +# Error: XGen: Tried to import a duplicate collection, ball_xgenMain_02_:collection, from file P:/PROJECTS/OP01_CG_demo/shots/sh040/work/Lighting/cg_sh040_Lighting_v001__ball_xgenMain_02___collection.xgen. Aborting import. # +``` +- ``Non-critical``: Errors on opening workfile and failed opening of published xgen. This is because Maya imports Xgen when referencing in external Maya files but the reference edits that ensure the location of the Xgen files are correct, has not been applied yet. +``` +Importing XGen Collections... +# Error: XGen: Failed to open file: P:/PROJECTS/OP01_CG_demo/shots/sh040/work/Lighting/cg_ball_xgenMain_v035__ball_rigMain_01___collection.xgen # +# Error: XGen: Failed to import collection from file P:/PROJECTS/OP01_CG_demo/shots/sh040/work/Lighting/cg_ball_xgenMain_v035__ball_rigMain_01___collection.xgen # +``` -### Loading Xgen +Go to project settings > `Deadline` > `Publish plugins` > `Maya Submit to Deadline` > disable `Use Published scene`; -You can use published xgens by loading them using OpenPype Publisher. You can choose to reference or import xgen. 
We don't have any automatic mesh linking at the moment and it is expected, that groom is published with a scalp, that can then be manually attached to your animated mesh for example. +`project_settings/deadline/publish/MayaSubmitDeadline/use_published` -The alembic representation can be loaded too and it contains the groom converted to curves. Keep in mind that the density of the alembic directly depends on your viewport xgen density at the point of export. +This is due to temporary workaround while fixing rendering with published scenes. + +## Create + +Create an Xgen instance to publish. This needs to contain only **one Xgen collection**. + +`OpenPype > Create... > Xgen` + +You can create multiple Xgen instances if you have multiple collections to publish. + +### Publish + +The publishing process will grab geometry used for Xgen along with any external files used in the collection's descriptions. This creates an isolated Maya file with just the Xgen collection's dependencies, so you can use any nested geometry when creating the Xgen description. An Xgen version will consist of: + +- Maya file (`.ma`) - this contains the geometry and the connections to the Xgen collection and descriptions. +- Xgen file (`.xgen`) - this contains the Xgen collection and description. +- Resource files (`.ptx`, `.xuv`) - this contains Xgen side car files used in the collection and descriptions. + +## Load + +Open the Loader tool, `OpenPype > Loader...`, and navigate to the published Xgen version. On right-click you'll get the option `Reference Xgen (ma)` +When loading an Xgen version the following happens: + +- References in the Maya file. +- Copies the Xgen file (`.xgen`) to the current workspace. +- Modifies the Xgen file copy to load the current workspace first then the published Xgen collection. +- Makes a custom attribute on the Xgen collection, `float_ignore`, which can be seen under the `Expressions` tab of the `Xgen` UI. This is done to initialize the Xgen delta file workflow. +- Setup an Xgen delta file (`.xgd`) to store any workspace changes of the published Xgen version. + +When the loading is done, Xgen collection will be in the Xgen delta file workflow which means any changes done in the Maya workfile will be stored in the current workspace. The published Xgen collection will remain intact, even if the user assigns maps to any attributes or otherwise modifies any attribute. + +### Updating + +When there are changes to the Xgen version, the user will be notified when opening the workfile or publishing. Since the Xgen is referenced, it follows the standard Maya referencing system and overrides. + +For example publishing `xgenMain` version 1 with the attribute `renderer` set to `None`, then version 2 has `renderer` set to `Arnold Renderer`. When updating from version 1 to 2, the `renderer` attribute will be updated to `Arnold Renderer` unless there is a local override. + +### Connect Patches + +When loading in an Xgen version, it does not have any connections to anything in the workfile, so its static in the position it was published in. Use the [Connect Geometry](artist_hosts_maya#connect-geometry) action to connect Xgen to any matching loaded animated geometry. + +### Connect Guides + +Along with patches you can also connect the Xgen guides to an Alembic cache. + +#### Usage + +Select 1 animation container, of family `animation` or `pointcache`, then the Xgen containers to connect to. Right-click > `Actions` > `Connect Xgen`. 
+ +***Note: Only alembic (`.abc`) representations are allowed.*** + +#### Details + +Connecting the guide will make Xgen use the Alembic directly, setting the attributes under `Guide Animation`, so the Alembic needs to contain the same amount of curves as guides in the Xgen. + +The animation container gets connected with the Xgen container, so if the animation container is updated so will the Xgen container's attribute. + +## Rendering + +To render with Xgen, follow the [Rendering With OpenPype](artist_hosts_maya#rendering-with-openpype) guide. + +### Details + +When submitting a workfile with Xgen, all Xgen related files will be collected and published as the workfiles resources. This means the published workfile is no longer referencing the workspace Xgen files. diff --git a/website/docs/assets/3dsmax_SavingFirstFile2_OP.png b/website/docs/assets/3dsmax_SavingFirstFile2_OP.png new file mode 100644 index 0000000000..4066ee0f1a Binary files /dev/null and b/website/docs/assets/3dsmax_SavingFirstFile2_OP.png differ diff --git a/website/docs/assets/3dsmax_SavingFirstFile_OP.png b/website/docs/assets/3dsmax_SavingFirstFile_OP.png new file mode 100644 index 0000000000..c4832ca6bb Binary files /dev/null and b/website/docs/assets/3dsmax_SavingFirstFile_OP.png differ diff --git a/website/docs/assets/3dsmax_context.png b/website/docs/assets/3dsmax_context.png new file mode 100644 index 0000000000..9b84cb2587 Binary files /dev/null and b/website/docs/assets/3dsmax_context.png differ diff --git a/website/docs/assets/3dsmax_menu_OP.png b/website/docs/assets/3dsmax_menu_OP.png new file mode 100644 index 0000000000..bce2f9aac0 Binary files /dev/null and b/website/docs/assets/3dsmax_menu_OP.png differ diff --git a/website/docs/assets/3dsmax_menu_first_OP.png b/website/docs/assets/3dsmax_menu_first_OP.png new file mode 100644 index 0000000000..c3a7b00cbb Binary files /dev/null and b/website/docs/assets/3dsmax_menu_first_OP.png differ diff --git a/website/docs/assets/3dsmax_model_OP.png b/website/docs/assets/3dsmax_model_OP.png new file mode 100644 index 0000000000..293c06642c Binary files /dev/null and b/website/docs/assets/3dsmax_model_OP.png differ diff --git a/website/docs/assets/3dsmax_tray_OP.png b/website/docs/assets/3dsmax_tray_OP.png new file mode 100644 index 0000000000..cfd0b07ef6 Binary files /dev/null and b/website/docs/assets/3dsmax_tray_OP.png differ diff --git a/website/docs/assets/maya-arnold_scene_source.png b/website/docs/assets/maya-arnold_scene_source.png new file mode 100644 index 0000000000..4150b78aac Binary files /dev/null and b/website/docs/assets/maya-arnold_scene_source.png differ diff --git a/website/docs/assets/maya-arnold_standin.png b/website/docs/assets/maya-arnold_standin.png new file mode 100644 index 0000000000..74571a86fa Binary files /dev/null and b/website/docs/assets/maya-arnold_standin.png differ diff --git a/website/docs/assets/maya-pointcache_setup.png b/website/docs/assets/maya-pointcache_setup.png index 8904baa239..b2dc126901 100644 Binary files a/website/docs/assets/maya-pointcache_setup.png and b/website/docs/assets/maya-pointcache_setup.png differ diff --git a/website/sidebars.js b/website/sidebars.js index cc945a019e..93887e00f6 100644 --- a/website/sidebars.js +++ b/website/sidebars.js @@ -44,11 +44,13 @@ module.exports = { "artist_hosts_maya_multiverse", "artist_hosts_maya_yeti", "artist_hosts_maya_xgen", + "artist_hosts_maya_arnold", "artist_hosts_maya_vray", "artist_hosts_maya_redshift", ], }, "artist_hosts_blender", + "artist_hosts_3dsmax", 
"artist_hosts_harmony", "artist_hosts_houdini", "artist_hosts_aftereffects", @@ -85,6 +87,7 @@ module.exports = { type: "category", label: "Configuration", items: [ + "admin_environment", "admin_settings", "admin_settings_system", "admin_settings_project_anatomy", diff --git a/website/yarn.lock b/website/yarn.lock index 9af21c7500..559c58f931 100644 --- a/website/yarn.lock +++ b/website/yarn.lock @@ -4273,9 +4273,9 @@ htmlparser2@^6.1.0: entities "^2.0.0" http-cache-semantics@^4.0.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz#49e91c5cbf36c9b94bcfcd71c23d5249ec74e390" - integrity sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ== + version "4.1.1" + resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz#abe02fcb2985460bf0323be664436ec3476a6d5a" + integrity sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ== http-deceiver@^1.2.7: version "1.2.7" @@ -7180,9 +7180,9 @@ typedarray-to-buffer@^3.1.5: is-typedarray "^1.0.0" ua-parser-js@^0.7.30: - version "0.7.31" - resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.31.tgz#649a656b191dffab4f21d5e053e27ca17cbff5c6" - integrity sha512-qLK/Xe9E2uzmYI3qLeOmI0tEOt+TBBQyUIAh4aAgU05FVYzeZrKUdkAZfBNVGRaHVgV0TDkdEngJSw/SyQchkQ== + version "0.7.33" + resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.33.tgz#1d04acb4ccef9293df6f70f2c3d22f3030d8b532" + integrity sha512-s8ax/CeZdK9R/56Sui0WM6y9OFREJarMRHqLB2EwkovemBxNQ+Bqu8GAsUnVcXKgphb++ghr/B2BZx4mahujPw== unherit@^1.0.4: version "1.1.3"