diff --git a/.github/workflows/build-assets.yml b/.github/workflows/build-assets.yml index 2267ce2185b..322e905fdde 100644 --- a/.github/workflows/build-assets.yml +++ b/.github/workflows/build-assets.yml @@ -23,11 +23,37 @@ jobs: runs-on: ubuntu-22.04 permissions: contents: write + outputs: + mount_platform: ${{ steps.vars.outputs.mount_platform }} + mount_ln: ${{ steps.vars.outputs.mount_ln }} + community_ref: ${{ steps.vars.outputs.community_ref }} + internal_ref: ${{ steps.vars.outputs.internal_ref }} + community_tag: ${{ steps.vars.outputs.community_tag }} + internal_tag: ${{ steps.vars.outputs.internal_tag }} + cmake_options: ${{ steps.vars.outputs.cmake_options }} steps: + - name: Calculate vars + id: vars + run: | + echo 'mount_platform=source="${{ github.workspace }}",target=/hpcc-dev/HPCC-Platform,type=bind,consistency=cached' >> $GITHUB_OUTPUT + echo 'mount_ln=source="${{ github.workspace }}/LN",target=/hpcc-dev/LN,type=bind,consistency=cached' >> $GITHUB_OUTPUT + community_ref=${{ github.ref }} + echo "community_ref=$community_ref" >> $GITHUB_OUTPUT + echo "internal_ref=$(echo $community_ref | sed 's/community/internal/')" >> $GITHUB_OUTPUT + community_tag=$(echo $community_ref | cut -d'/' -f3) + echo "community_tag=$community_tag" >> $GITHUB_OUTPUT + echo "internal_tag=$(echo $community_tag | sed 's/community/internal/')" >> $GITHUB_OUTPUT + echo "cmake_options=-DCMAKE_BUILD_TYPE=RelWithDebInfo -DVCPKG_FILES_DIR=/hpcc-dev -DCPACK_THREADS=0 -DUSE_OPTIONAL=OFF" >> $GITHUB_OUTPUT + + - name: Print vars + run: | + echo "${{ toJSON(steps.vars.outputs) }})" + - name: Release HPCC-Platform uses: ncipollo/release-action@v1.12.0 with: - generateReleaseNotes: true + allowUpdates: true + generateReleaseNotes: false prerelease: ${{ contains(github.ref, '-rc') }} - name: Release LN uses: ncipollo/release-action@v1.12.0 @@ -35,10 +61,11 @@ jobs: owner: ${{ secrets.LNB_ACTOR }} repo: LN token: ${{ secrets.LNB_TOKEN }} - generateReleaseNotes: true + tag: ${{ steps.vars.outputs.internal_tag }} + allowUpdates: true + generateReleaseNotes: false prerelease: ${{ contains(github.ref, '-rc') }} - build-platform: name: Build Platform needs: release @@ -69,12 +96,13 @@ jobs: - name: Calculate vars id: vars run: | - echo 'mount_platform=source="${{ github.workspace }}",target=/hpcc-dev/HPCC-Platform,type=bind,consistency=cached' >> $GITHUB_OUTPUT - echo 'mount_ln=source="${{ github.workspace }}/LN",target=/hpcc-dev/LN,type=bind,consistency=cached' >> $GITHUB_OUTPUT - echo "branch_name=$(echo ${{ github.ref }} | cut -d'/' -f3)" >> $GITHUB_OUTPUT cd vcpkg echo "vcpkg_sha_short=$(git rev-parse --short=8 HEAD)" >> $GITHUB_OUTPUT - echo "cmake_options=-DCMAKE_BUILD_TYPE=RelWithDebInfo -DVCPKG_FILES_DIR=/hpcc-dev -DCPACK_THREADS=0 -DUSE_OPTIONAL=OFF" >> $GITHUB_OUTPUT + + - name: Print vars + run: | + echo "${{ toJSON(needs.release.outputs) }})" + echo "${{ toJSON(steps.vars.outputs) }})" - name: Set up Docker Buildx id: buildx @@ -94,10 +122,10 @@ jobs: context: dockerfiles/vcpkg push: true tags: | - ${{ secrets.DOCKER_USERNAME }}/build-${{ matrix.os }}:${{ steps.vars.outputs.branch_name }} + ${{ secrets.DOCKER_USERNAME }}/build-${{ matrix.os }}:${{ needs.release.outputs.community_tag }} ${{ secrets.DOCKER_USERNAME }}/build-${{ matrix.os }}:latest cache-from: | - ${{ secrets.DOCKER_USERNAME }}/build-${{ matrix.os }}:${{ steps.vars.outputs.branch_name }} + ${{ secrets.DOCKER_USERNAME }}/build-${{ matrix.os }}:${{ needs.release.outputs.community_tag }} ${{ secrets.DOCKER_USERNAME }}/build-${{ matrix.os 
}}:latest build-args: | VCPKG_REF=${{ steps.vars.outputs.vcpkg_sha_short }} @@ -110,11 +138,11 @@ jobs: for plugin in "${plugins[@]}"; do sudo rm -f ./build/CMakeCache.txt sudo rm -rf ./build/CMakeFiles - docker_label=${{ secrets.DOCKER_USERNAME }}/build-${{ matrix.os }}:${{ steps.vars.outputs.branch_name }} - docker run --rm --mount ${{ steps.vars.outputs.mount_platform }} $docker_label "cmake -S /hpcc-dev/HPCC-Platform -B /hpcc-dev/HPCC-Platform/build ${{ steps.vars.outputs.cmake_options }} -D$plugin=ON -DCONTAINERIZED=OFF -DCPACK_STRIP_FILES=OFF" - docker run --rm --mount ${{ steps.vars.outputs.mount_platform }} $docker_label "cmake --build /hpcc-dev/HPCC-Platform/build --parallel $(nproc) --target package" - docker run --rm --mount ${{ steps.vars.outputs.mount_platform }} $docker_label "cmake -S /hpcc-dev/HPCC-Platform -B /hpcc-dev/HPCC-Platform/build ${{ steps.vars.outputs.cmake_options }} -D$plugin=ON -DCONTAINERIZED=OFF -DCPACK_STRIP_FILES=ON" - docker run --rm --mount ${{ steps.vars.outputs.mount_platform }} $docker_label "cmake --build /hpcc-dev/HPCC-Platform/build --parallel $(nproc) --target package" + docker_label=${{ secrets.DOCKER_USERNAME }}/build-${{ matrix.os }}:${{ needs.release.outputs.community_tag }} + docker run --rm --mount ${{ needs.release.outputs.mount_platform }} $docker_label "cmake -S /hpcc-dev/HPCC-Platform -B /hpcc-dev/HPCC-Platform/build ${{ needs.release.outputs.cmake_options }} -D$plugin=ON -DCONTAINERIZED=OFF -DCPACK_STRIP_FILES=OFF" + docker run --rm --mount ${{ needs.release.outputs.mount_platform }} $docker_label "cmake --build /hpcc-dev/HPCC-Platform/build --parallel $(nproc) --target package" + docker run --rm --mount ${{ needs.release.outputs.mount_platform }} $docker_label "cmake -S /hpcc-dev/HPCC-Platform -B /hpcc-dev/HPCC-Platform/build ${{ needs.release.outputs.cmake_options }} -D$plugin=ON -DCONTAINERIZED=OFF -DCPACK_STRIP_FILES=ON" + docker run --rm --mount ${{ needs.release.outputs.mount_platform }} $docker_label "cmake --build /hpcc-dev/HPCC-Platform/build --parallel $(nproc) --target package" done - name: CMake Containerized Packages @@ -122,11 +150,11 @@ jobs: run: | sudo rm -f ./build/CMakeCache.txt sudo rm -rf ./build/CMakeFiles - docker_label=${{ secrets.DOCKER_USERNAME }}/build-${{ matrix.os }}:${{ steps.vars.outputs.branch_name }} - docker run --rm --mount ${{ steps.vars.outputs.mount_platform }} $docker_label "cmake -S /hpcc-dev/HPCC-Platform -B /hpcc-dev/HPCC-Platform/build ${{ steps.vars.outputs.cmake_options }} -DINCLUDE_PLUGINS=ON -DCONTAINERIZED=ON -DSUPPRESS_REMBED=ON -DSUPPRESS_V8EMBED=ON -DSUPPRESS_SPARK=ON -DCPACK_STRIP_FILES=OFF" - docker run --rm --mount ${{ steps.vars.outputs.mount_platform }} $docker_label "cmake --build /hpcc-dev/HPCC-Platform/build --parallel $(nproc) --target package" - docker run --rm --mount ${{ steps.vars.outputs.mount_platform }} $docker_label "cmake -S /hpcc-dev/HPCC-Platform -B /hpcc-dev/HPCC-Platform/build ${{ steps.vars.outputs.cmake_options }} -DINCLUDE_PLUGINS=ON -DCONTAINERIZED=ON -DSUPPRESS_REMBED=ON -DSUPPRESS_V8EMBED=ON -DSUPPRESS_SPARK=ON -DCPACK_STRIP_FILES=ON" - docker run --rm --mount ${{ steps.vars.outputs.mount_platform }} $docker_label "cmake --build /hpcc-dev/HPCC-Platform/build --parallel $(nproc) --target package" + docker_label=${{ secrets.DOCKER_USERNAME }}/build-${{ matrix.os }}:${{ needs.release.outputs.community_tag }} + docker run --rm --mount ${{ needs.release.outputs.mount_platform }} $docker_label "cmake -S /hpcc-dev/HPCC-Platform -B /hpcc-dev/HPCC-Platform/build ${{ 
needs.release.outputs.cmake_options }} -DINCLUDE_PLUGINS=ON -DCONTAINERIZED=ON -DSUPPRESS_REMBED=ON -DSUPPRESS_V8EMBED=ON -DSUPPRESS_SPARK=ON -DCPACK_STRIP_FILES=OFF" + docker run --rm --mount ${{ needs.release.outputs.mount_platform }} $docker_label "cmake --build /hpcc-dev/HPCC-Platform/build --parallel $(nproc) --target package" + docker run --rm --mount ${{ needs.release.outputs.mount_platform }} $docker_label "cmake -S /hpcc-dev/HPCC-Platform -B /hpcc-dev/HPCC-Platform/build ${{ needs.release.outputs.cmake_options }} -DINCLUDE_PLUGINS=ON -DCONTAINERIZED=ON -DSUPPRESS_REMBED=ON -DSUPPRESS_V8EMBED=ON -DSUPPRESS_SPARK=ON -DCPACK_STRIP_FILES=ON" + docker run --rm --mount ${{ needs.release.outputs.mount_platform }} $docker_label "cmake --build /hpcc-dev/HPCC-Platform/build --parallel $(nproc) --target package" - name: Upload Assets uses: ncipollo/release-action@v1.12.0 @@ -140,7 +168,7 @@ jobs: uses: actions/checkout@v3 with: repository: ${{ github.repository_owner }}/LN - ref: ${{ github.ref }} + ref: ${{ needs.release.outputs.internal_ref }} path: ${{ github.workspace }}/LN token: ${{ secrets.LNB_TOKEN }} @@ -149,11 +177,11 @@ jobs: run: | sudo rm -f ./build/CMakeCache.txt sudo rm -rf ./build/CMakeFiles - docker_label=${{ secrets.DOCKER_USERNAME }}/build-${{ matrix.os }}:${{ steps.vars.outputs.branch_name }} - docker run --rm --mount ${{ steps.vars.outputs.mount_platform }} --mount ${{ steps.vars.outputs.mount_ln }} $docker_label "cmake -S /hpcc-dev/LN -B /hpcc-dev/HPCC-Platform/build -DHPCC_SOURCE_DIR=/hpcc-dev/HPCC-Platform ${{ steps.vars.outputs.cmake_options }} -DINCLUDE_PLUGINS=ON -DCONTAINERIZED=OFF -DSUPPRESS_REMBED=ON -DSUPPRESS_V8EMBED=ON -DSUPPRESS_SPARK=ON -DCPACK_STRIP_FILES=OFF" - docker run --rm --mount ${{ steps.vars.outputs.mount_platform }} --mount ${{ steps.vars.outputs.mount_ln }} $docker_label "cmake --build /hpcc-dev/HPCC-Platform/build --parallel $(nproc) --target package" - docker run --rm --mount ${{ steps.vars.outputs.mount_platform }} --mount ${{ steps.vars.outputs.mount_ln }} $docker_label "cmake -S /hpcc-dev/LN -B /hpcc-dev/HPCC-Platform/build -DHPCC_SOURCE_DIR=/hpcc-dev/HPCC-Platform ${{ steps.vars.outputs.cmake_options }} -DINCLUDE_PLUGINS=ON -DCONTAINERIZED=OFF -DSUPPRESS_REMBED=ON -DSUPPRESS_V8EMBED=ON -DSUPPRESS_SPARK=ON -DCPACK_STRIP_FILES=ON" - docker run --rm --mount ${{ steps.vars.outputs.mount_platform }} --mount ${{ steps.vars.outputs.mount_ln }} $docker_label "cmake --build /hpcc-dev/HPCC-Platform/build --parallel $(nproc) --target package" + docker_label=${{ secrets.DOCKER_USERNAME }}/build-${{ matrix.os }}:${{ needs.release.outputs.community_tag }} + docker run --rm --mount ${{ needs.release.outputs.mount_platform }} --mount ${{ needs.release.outputs.mount_ln }} $docker_label "cmake -S /hpcc-dev/LN -B /hpcc-dev/HPCC-Platform/build -DHPCC_SOURCE_DIR=/hpcc-dev/HPCC-Platform ${{ needs.release.outputs.cmake_options }} -DINCLUDE_PLUGINS=ON -DCONTAINERIZED=OFF -DSUPPRESS_REMBED=ON -DSUPPRESS_V8EMBED=ON -DSUPPRESS_SPARK=ON -DCPACK_STRIP_FILES=OFF" + docker run --rm --mount ${{ needs.release.outputs.mount_platform }} --mount ${{ needs.release.outputs.mount_ln }} $docker_label "cmake --build /hpcc-dev/HPCC-Platform/build --parallel $(nproc) --target package" + docker run --rm --mount ${{ needs.release.outputs.mount_platform }} --mount ${{ needs.release.outputs.mount_ln }} $docker_label "cmake -S /hpcc-dev/LN -B /hpcc-dev/HPCC-Platform/build -DHPCC_SOURCE_DIR=/hpcc-dev/HPCC-Platform ${{ needs.release.outputs.cmake_options }} -DINCLUDE_PLUGINS=ON 
-DCONTAINERIZED=OFF -DSUPPRESS_REMBED=ON -DSUPPRESS_V8EMBED=ON -DSUPPRESS_SPARK=ON -DCPACK_STRIP_FILES=ON" + docker run --rm --mount ${{ needs.release.outputs.mount_platform }} --mount ${{ needs.release.outputs.mount_ln }} $docker_label "cmake --build /hpcc-dev/HPCC-Platform/build --parallel $(nproc) --target package" - name: Upload LN Assets if: ${{ matrix.ln }} @@ -201,18 +229,10 @@ jobs: echo ${{ matrix.os }} ${{ matrix.triplet }} echo "Checkout to $Env:GITHUB_WORKSPACE" - - name: Calculate vars - id: vars - shell: "bash" - run: | - community_ref=${{ github.ref }} - echo "community_ref=$community_ref" >> $GITHUB_OUTPUT - echo "internal_ref=$(echo $community_ref | sed 's/community/internal/')" >> $GITHUB_OUTPUT - - name: Print vars shell: "bash" run: | - echo "${{ toJSON(steps.vars.outputs) }})" + echo "${{ toJSON(needs.release.outputs) }})" - name: OSX Dependencies if: ${{ contains(matrix.os, 'macos') }} @@ -268,7 +288,7 @@ jobs: uses: actions/checkout@v3 with: repository: ${{ github.repository_owner }}/LN - ref: ${{ steps.vars.outputs.internal_ref }} + ref: ${{ needs.release.outputs.internal_ref }} path: ${{ github.workspace }}/LN token: ${{ secrets.LNB_TOKEN }} diff --git a/.github/workflows/build-containers-pr.yml b/.github/workflows/build-containers-pr.yml index d28fcf7a552..e8ec908bb27 100644 --- a/.github/workflows/build-containers-pr.yml +++ b/.github/workflows/build-containers-pr.yml @@ -36,7 +36,7 @@ jobs: - name: vars id: vars run: | - echo ::set-output name=base_ver::8.6 + echo ::set-output name=base_ver::2022.11.14-rc5 # echo ::set-output name=container_registry::ghcr.io # echo ::set-output name=cr_user::${{ github.repository_owner }} echo ::set-output name=container_registry::docker.io diff --git a/.github/workflows/build-containers-target-branch.yml b/.github/workflows/build-containers-target-branch.yml index 46e5afc1820..6ed02b9117d 100644 --- a/.github/workflows/build-containers-target-branch.yml +++ b/.github/workflows/build-containers-target-branch.yml @@ -36,7 +36,7 @@ jobs: - name: vars id: vars run: | - echo ::set-output name=base_ver::8.6 + echo ::set-output name=base_ver::2022.11.14-rc5 # echo ::set-output name=container_registry::ghcr.io # echo ::set-output name=cr_user::${{ github.repository_owner }} echo ::set-output name=container_registry::docker.io diff --git a/dali/base/dadfs.cpp b/dali/base/dadfs.cpp index 0717468ec88..e52f6b44cd8 100644 --- a/dali/base/dadfs.cpp +++ b/dali/base/dadfs.cpp @@ -1181,6 +1181,7 @@ protected: friend class CDistributedFile; SecAccessFlags getFilePermissions(const char *lname,IUserDescriptor *user,unsigned auditflags); SecAccessFlags getNodePermissions(const IpAddress &ip,IUserDescriptor *user,unsigned auditflags); SecAccessFlags getFDescPermissions(IFileDescriptor *,IUserDescriptor *user,unsigned auditflags=0); + SecAccessFlags getDropZoneScopePermissions(const char *dropZoneName,const char *dropZonePath,IUserDescriptor *user,unsigned auditflags=0); void setDefaultUser(IUserDescriptor *user); IUserDescriptor* queryDefaultUser(); @@ -11828,6 +11829,15 @@ SecAccessFlags CDistributedFileDirectory::getFDescPermissions(IFileDescriptor *f return retPerms; } +SecAccessFlags CDistributedFileDirectory::getDropZoneScopePermissions(const char *dropZoneName,const char *dropZonePath,IUserDescriptor *user,unsigned auditflags) +{ + CDfsLogicalFileName dlfn; + dlfn.setPlaneExternal(dropZoneName,dropZonePath); + StringBuffer scopes; + dlfn.getScopes(scopes); + return getScopePermissions(scopes,user,auditflags); +} + void 
CDistributedFileDirectory::setDefaultUser(IUserDescriptor *user) { if (user) diff --git a/dali/base/dadfs.hpp b/dali/base/dadfs.hpp index 127ae773e94..3f787293018 100644 --- a/dali/base/dadfs.hpp +++ b/dali/base/dadfs.hpp @@ -646,6 +646,7 @@ interface IDistributedFileDirectory: extends IInterface virtual IUserDescriptor* queryDefaultUser()=0; virtual SecAccessFlags getNodePermissions(const IpAddress &ip,IUserDescriptor *user,unsigned auditflags=0)=0; virtual SecAccessFlags getFDescPermissions(IFileDescriptor *,IUserDescriptor *user,unsigned auditflags=0)=0; + virtual SecAccessFlags getDropZoneScopePermissions(const char *dropZoneName,const char *dropZonePath,IUserDescriptor *user,unsigned auditflags=0)=0; virtual DistributedFileCompareResult fileCompare(const char *lfn1,const char *lfn2,DistributedFileCompareMode mode,StringBuffer &errstr,IUserDescriptor *user)=0; virtual bool filePhysicalVerify(const char *lfn1,IUserDescriptor *user,bool includecrc,StringBuffer &errstr)=0; diff --git a/dali/base/dautils.cpp b/dali/base/dautils.cpp index a9dea004e79..99b43ddd819 100644 --- a/dali/base/dautils.cpp +++ b/dali/base/dautils.cpp @@ -46,18 +46,20 @@ #define SDS_CONNECT_TIMEOUT (1000*60*60*2) // better than infinite #define MIN_REDIRECTION_LOAD_INTERVAL 1000 +static IPropertyTree *getPlaneHostGroup(IPropertyTree *plane) +{ + if (plane->hasProp("@hostGroup")) + return getHostGroup(plane->queryProp("@hostGroup"), true); + else if (plane->hasProp("hosts")) + return LINK(plane); // plane itself holds 'hosts' + return nullptr; +} bool isHostInPlane(IPropertyTree *plane, const char *host, bool ipMatch) { - Owned planeGroup; - if (plane->hasProp("@hostGroup")) - planeGroup.setown(getHostGroup(plane->queryProp("@hostGroup"), true)); - else - { - if (!plane->hasProp("hosts")) - return false; - planeGroup.set(plane); // plane itself holds 'hosts' - } + Owned planeGroup = getPlaneHostGroup(plane); + if (!planeGroup) + return false; Owned hostsIter = planeGroup->getElements("hosts"); SocketEndpoint hostEp; if (ipMatch) @@ -79,12 +81,8 @@ bool isHostInPlane(IPropertyTree *plane, const char *host, bool ipMatch) bool getPlaneHost(StringBuffer &host, IPropertyTree *plane, unsigned which) { - Owned hostGroup; - if (plane->hasProp("@hostGroup")) - hostGroup.setown(getHostGroup(plane->queryProp("@hostGroup"), true)); - else if (plane->hasProp("hosts")) - hostGroup.set(plane); // the plane holds the "hosts" - else + Owned hostGroup = getPlaneHostGroup(plane); + if (!hostGroup) return false; if (which >= hostGroup->getCount("hosts")) @@ -94,6 +92,17 @@ bool getPlaneHost(StringBuffer &host, IPropertyTree *plane, unsigned which) return true; } +void getPlaneHosts(StringArray &hosts, IPropertyTree *plane) +{ + Owned hostGroup = getPlaneHostGroup(plane); + if (hostGroup) + { + Owned hostsIter = hostGroup->getElements("hosts"); + ForEach (*hostsIter) + hosts.append(hostsIter->query().queryProp(nullptr)); + } +} + constexpr const char * lz_plane_path = "storage/planes[@category='lz']"; IPropertyTreeIterator * getDropZonePlanesIterator(const char * name) diff --git a/dali/base/dautils.hpp b/dali/base/dautils.hpp index 75e850b48e4..d3a25edf177 100644 --- a/dali/base/dautils.hpp +++ b/dali/base/dautils.hpp @@ -545,6 +545,7 @@ extern da_decl IPropertyTree * getDropZonePlane(const char * name); extern da_decl IPropertyTree * findDropZonePlane(const char * path, const char * host, bool ipMatch); extern da_decl bool isHostInPlane(IPropertyTree *plane, const char *host, bool ipMatch); extern da_decl bool getPlaneHost(StringBuffer 
&host, IPropertyTree *plane, unsigned which); +extern da_decl void getPlaneHosts(StringArray &hosts, IPropertyTree *plane); extern da_decl void setPageCacheTimeoutMilliSeconds(unsigned timeoutSeconds); extern da_decl void setMaxPageCacheItems(unsigned _maxPageCacheItems); extern da_decl IRemoteConnection* connectXPathOrFile(const char* path, bool safe, StringBuffer& xpath); diff --git a/dockerfiles/buildall-common.sh b/dockerfiles/buildall-common.sh index 42b3f9300ac..8a64695faf6 100755 --- a/dockerfiles/buildall-common.sh +++ b/dockerfiles/buildall-common.sh @@ -20,7 +20,7 @@ # Build script to create and publish Docker containers corresponding to a GitHub tag # This script is normally invoked via GitHub actions, whenever a new tag is pushed -BASE_VER=8.6 # The docker hub label for the platform-build-base image. Changes rarely. +BASE_VER=2022.11.14-rc5 # The docker hub label for the platform-build-base image. Changes rarely. BUILD_TAG=$(git describe --exact-match --tags || true) # The git tag for the images we are building BUILD_LABEL=${BUILD_TAG} # The docker hub label for all other components BUILD_USER=hpcc-systems # The github repo owner diff --git a/dockerfiles/platform-build-base/Dockerfile b/dockerfiles/platform-build-base/Dockerfile index 43a664fe4c8..2ff69fe7284 100644 --- a/dockerfiles/platform-build-base/Dockerfile +++ b/dockerfiles/platform-build-base/Dockerfile @@ -17,6 +17,8 @@ # Build container image containing all 3rd-party tools required to build HPCC platform +# DEPRECATED --- DEPRECATED --- DEPRECATED + FROM ubuntu:20.04 as build ENV DEBIAN_FRONTEND=noninteractive ARG BASE_VER diff --git a/dockerfiles/platform-build/Dockerfile b/dockerfiles/platform-build/Dockerfile index 8aba8d0cf29..945693449f2 100644 --- a/dockerfiles/platform-build/Dockerfile +++ b/dockerfiles/platform-build/Dockerfile @@ -17,7 +17,7 @@ # Base container image that builds all HPCC platform components -ARG BASE_VER=8.12-rc2 +ARG BASE_VER=2022.11.14-rc5 ARG CR_USER=hpccsystems ARG CR_REPO=docker.io ARG CR_CONTAINER_NAME=platform-build-base @@ -38,7 +38,7 @@ RUN apt-get install -y dirmngr gnupg apt-transport-https ca-certificates softwar lsb-release \ jq -RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF +RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF RUN groupadd -g 10001 hpcc RUN useradd -s /bin/bash -r -m -N -c "hpcc runtime User" -u 10000 -g hpcc hpcc diff --git a/dockerfiles/vcpkg/amazonlinux.dockerfile b/dockerfiles/vcpkg/amazonlinux.dockerfile index ee40387a718..29034fa165a 100644 --- a/dockerfiles/vcpkg/amazonlinux.dockerfile +++ b/dockerfiles/vcpkg/amazonlinux.dockerfile @@ -1,5 +1,5 @@ ARG VCPKG_REF=latest -FROM hpccbuilds/vcpkg-amazonlinux:$VCPKG_REF +FROM hpccsystems/platform-build-base-amazonlinux:$VCPKG_REF RUN amazon-linux-extras install java-openjdk11 && yum install -y \ java-11-openjdk-devel \ diff --git a/dockerfiles/vcpkg/build.sh b/dockerfiles/vcpkg/build.sh index 5f2caddc2ef..43303461e90 100755 --- a/dockerfiles/vcpkg/build.sh +++ b/dockerfiles/vcpkg/build.sh @@ -22,25 +22,27 @@ echo "VCPKG_REF: $VCPKG_REF" echo "DOCKER_USERNAME: $DOCKER_USERNAME" echo "DOCKER_PASSWORD: $DOCKER_PASSWORD" -docker login -u $DOCKER_USERNAME -p $DOCKER_PASSWORD +# docker login -u $DOCKER_USERNAME -p $DOCKER_PASSWORD function doBuild() { docker build --progress plain --pull --rm -f "$SCRIPT_DIR/$1.dockerfile" \ -t build-$1:$GITHUB_REF \ -t build-$1:latest \ + --build-arg 
DOCKER_NAMESPACE=$DOCKER_USERNAME \ --build-arg VCPKG_REF=$VCPKG_REF \ "$SCRIPT_DIR/." docker run --rm --mount source="$(pwd)",target=/hpcc-dev/HPCC-Platform,type=bind,consistency=cached build-$1:$GITHUB_REF \ - "cmake -S /hpcc-dev/HPCC-Platform -B /hpcc-dev/HPCC-Platform/build-$1 \${CMAKE_OPTIONS}" - + "cmake -S /hpcc-dev/HPCC-Platform -B /hpcc-dev/HPCC-Platform/build-$1 ${CMAKE_OPTIONS}" docker run --rm --mount source="$(pwd)",target=/hpcc-dev/HPCC-Platform,type=bind,consistency=cached build-$1:$GITHUB_REF \ - "cmake --build \${BUILD_FOLDER} --parallel $(nproc)" + "cmake --build /hpcc-dev/HPCC-Platform/build-$1 --parallel $(nproc)" # docker run -it --mount source="$(pwd)",target=/hpcc-dev/HPCC-Platform,type=bind,consistency=cached build-ubuntu-22.04:latest bash } -CMAKE_OPTIONS="-DCMAKE_BUILD_TYPE=RelWithDebInfo -DVCPKG_FILES_DIR=/hpcc-dev -DCPACK_THREADS=0 -DUSE_OPTIONAL=OFF -DINCLUDE_PLUGINS=ON -DSUPPRESS_REMBED=ON -DSUPPRESS_V8EMBED=ON" +CMAKE_OPTIONS="-DCMAKE_BUILD_TYPE=RelWithDebInfo -DVCPKG_FILES_DIR=/hpcc-dev -DCPACK_THREADS=0 -DUSE_OPTIONAL=OFF -DINCLUDE_PLUGINS=ON -DSUPPRESS_V8EMBED=ON" + +doBuild amazonlinux doBuild ubuntu-22.04 doBuild ubuntu-20.04 doBuild ubuntu-18.04 diff --git a/dockerfiles/vcpkg/centos-7.dockerfile b/dockerfiles/vcpkg/centos-7.dockerfile index b728a366d62..83791b07498 100644 --- a/dockerfiles/vcpkg/centos-7.dockerfile +++ b/dockerfiles/vcpkg/centos-7.dockerfile @@ -1,9 +1,20 @@ ARG VCPKG_REF=latest -FROM hpccbuilds/vcpkg-centos-7:$VCPKG_REF +FROM hpccsystems/platform-build-base-centos-7:$VCPKG_REF RUN yum install -y \ java-11-openjdk-devel \ - python3-devel + python3-devel \ + wget \ + epel-release +RUN yum update -y && yum install -y R-core-devel + +ENV Rcpp_package=Rcpp_0.12.19.tar.gz +ENV RInside_package=RInside_0.2.12.tar.gz + +RUN wget https://cran.r-project.org/src/contrib/Archive/Rcpp/${Rcpp_package} +RUN wget https://cran.r-project.org/src/contrib/Archive/RInside/${RInside_package} +RUN R CMD INSTALL ${Rcpp_package} ${RInside_package} +RUN rm -f ${Rcpp_package} ${RInside_package} WORKDIR /hpcc-dev diff --git a/dockerfiles/vcpkg/centos-8.dockerfile b/dockerfiles/vcpkg/centos-8.dockerfile index d0c75fed4e9..1e22c7c3466 100644 --- a/dockerfiles/vcpkg/centos-8.dockerfile +++ b/dockerfiles/vcpkg/centos-8.dockerfile @@ -1,9 +1,14 @@ ARG VCPKG_REF=latest -FROM hpccbuilds/vcpkg-centos-8:$VCPKG_REF +FROM hpccsystems/platform-build-base-centos-8:$VCPKG_REF RUN yum remove -y java-1.* && yum install -y \ java-11-openjdk-devel \ - python3-devel + python3-devel \ + epel-release +RUN yum install -y \ + R-core-devel \ + R-Rcpp-devel \ + R-RInside-devel WORKDIR /hpcc-dev diff --git a/dockerfiles/vcpkg/ubuntu-18.04.dockerfile b/dockerfiles/vcpkg/ubuntu-18.04.dockerfile index 4a53639bf13..a9695dc1e2f 100644 --- a/dockerfiles/vcpkg/ubuntu-18.04.dockerfile +++ b/dockerfiles/vcpkg/ubuntu-18.04.dockerfile @@ -1,9 +1,13 @@ ARG VCPKG_REF=latest -FROM hpccbuilds/vcpkg-ubuntu-18.04:$VCPKG_REF +FROM hpccsystems/platform-build-base-ubuntu-18.04:$VCPKG_REF RUN apt-get update && apt-get install --no-install-recommends -y \ default-jdk \ - python3-dev + python3-dev \ + r-base \ + r-cran-rcpp \ + r-cran-rinside \ + r-cran-inline WORKDIR /hpcc-dev diff --git a/dockerfiles/vcpkg/ubuntu-20.04.dockerfile b/dockerfiles/vcpkg/ubuntu-20.04.dockerfile index e69381ea94e..7211cccebde 100644 --- a/dockerfiles/vcpkg/ubuntu-20.04.dockerfile +++ b/dockerfiles/vcpkg/ubuntu-20.04.dockerfile @@ -1,9 +1,17 @@ ARG VCPKG_REF=latest -FROM hpccbuilds/vcpkg-ubuntu-20.04:$VCPKG_REF +FROM 
hpccsystems/platform-build-base-ubuntu-20.04:$VCPKG_REF + +ENV RInside_package=RInside_0.2.14.tar.gz RUN apt-get update && apt-get install --no-install-recommends -y \ default-jdk \ - python3-dev + python3-dev \ + wget \ + r-base \ + r-cran-rcpp +RUN wget https://cran.r-project.org/src/contrib/Archive/RInside/${RInside_package} +RUN R CMD INSTALL ${RInside_package} +RUN rm -f ${RInside_package} WORKDIR /hpcc-dev diff --git a/dockerfiles/vcpkg/ubuntu-22.04.dockerfile b/dockerfiles/vcpkg/ubuntu-22.04.dockerfile index c3a0019edb4..1b279204680 100644 --- a/dockerfiles/vcpkg/ubuntu-22.04.dockerfile +++ b/dockerfiles/vcpkg/ubuntu-22.04.dockerfile @@ -1,9 +1,13 @@ ARG VCPKG_REF=latest -FROM hpccbuilds/vcpkg-ubuntu-22.04:$VCPKG_REF +FROM hpccsystems/platform-build-base-ubuntu-22.04:$VCPKG_REF RUN apt-get update && apt-get install --no-install-recommends -y \ default-jdk \ - python3-dev + python3-dev \ + r-base \ + r-cran-rcpp \ + r-cran-rinside \ + r-cran-inline WORKDIR /hpcc-dev diff --git a/dockerfiles/vcpkg/ubuntu-22.10.dockerfile b/dockerfiles/vcpkg/ubuntu-22.10.dockerfile index b558655ffd6..4d9bacc56c6 100644 --- a/dockerfiles/vcpkg/ubuntu-22.10.dockerfile +++ b/dockerfiles/vcpkg/ubuntu-22.10.dockerfile @@ -1,5 +1,5 @@ ARG VCPKG_REF=latest -FROM hpccbuilds/vcpkg-ubuntu-22.10:$VCPKG_REF +FROM hpccsystems/platform-build-base-ubuntu-22.10:$VCPKG_REF RUN apt-get update && apt-get install --no-install-recommends -y \ default-jdk \ diff --git a/esp/scm/ws_access.ecm b/esp/scm/ws_access.ecm index 0a801a8deb5..da34ed9dd5d 100644 --- a/esp/scm/ws_access.ecm +++ b/esp/scm/ws_access.ecm @@ -959,6 +959,21 @@ ESPresponse AccountPermissionsResponse [min_ver("1.03")] ESParray GroupPermissions; }; +ESPrequest AccountPermissionsV2Request +{ + string ResourceName; + string AccountName; + bool IsGroup; + bool IncludeGroup(false); +}; + +ESPresponse AccountPermissionsV2Response +{ + ESParray BasednNames; + ESParray Permissions; + ESParray GroupPermissions; +}; + ESPrequest [nil_remove] FilePermissionRequest { string FileName; @@ -991,7 +1006,7 @@ ESPresponse [nil_remove] UserAccountExportResponse [http_content("application/octet-stream")] binary Result; }; -ESPservice [version("1.16"), auth_feature("NONE"), exceptions_inline("./smc_xslt/exceptions.xslt")] ws_access +ESPservice [version("1.17"), auth_feature("NONE"), exceptions_inline("./smc_xslt/exceptions.xslt")] ws_access { ESPmethod [client_xslt("/esp/xslt/access_users.xslt")] Users(UserRequest, UserResponse); ESPmethod [client_xslt("/esp/xslt/access_useredit.xslt")] UserEdit(UserEditRequest, UserEditResponse); @@ -1034,6 +1049,7 @@ ESPservice [version("1.16"), auth_feature("NONE"), exceptions_inline("./smc_xslt ESPmethod [depr_ver("1.14"), client_xslt("/esp/xslt/access_permissionaddinput.xslt")] PermissionAddInput(PermissionAddRequest, PermissionAddResponse); ESPmethod [client_xslt("/esp/xslt/access_permissionchange.xslt")] PermissionAction(PermissionActionRequest, PermissionActionResponse); ESPmethod [client_xslt("/esp/xslt/access_accountpermissions.xslt")] AccountPermissions(AccountPermissionsRequest, AccountPermissionsResponse); + ESPmethod [min_ver("1.17")] AccountPermissionsV2(AccountPermissionsV2Request, AccountPermissionsV2Response); ESPmethod [client_xslt("/esp/xslt/access_filepermission.xslt")] FilePermission(FilePermissionRequest, FilePermissionResponse); ESPmethod [depr_ver("1.14"), client_xslt("/esp/xslt/access_permissionresetinput.xslt")] PermissionsResetInput(PermissionsResetInputRequest, PermissionsResetInputResponse); ESPmethod 
[client_xslt("/esp/xslt/access_permissionsreset.xslt")] PermissionsReset(PermissionsResetRequest, PermissionsResetResponse); diff --git a/esp/scm/ws_fs.ecm b/esp/scm/ws_fs.ecm index 5b1d9bd7645..6b4c9911482 100644 --- a/esp/scm/ws_fs.ecm +++ b/esp/scm/ws_fs.ecm @@ -534,6 +534,7 @@ ESPresponse [exceptions_inline] DFUWUFileResponse ESPrequest FileListRequest { + [min_ver("1.24")] string DropZoneName; string Netaddr; string Path; string Mask; @@ -696,7 +697,7 @@ ESPresponse [exceptions_inline, nil_remove] GetDFUServerQueuesResponse ESPservice [ auth_feature("DEFERRED"), - version("1.23"), + version("1.24"), exceptions_inline("./smc_xslt/exceptions.xslt")] FileSpray { ESPmethod EchoDateTime(EchoDateTime, EchoDateTimeResponse); diff --git a/esp/services/ws_access/ws_accessService.cpp b/esp/services/ws_access/ws_accessService.cpp index 32d9959a24a..7d3ad43b66d 100644 --- a/esp/services/ws_access/ws_accessService.cpp +++ b/esp/services/ws_access/ws_accessService.cpp @@ -37,6 +37,22 @@ #define MAX_RESOURCES_DISPLAY 3000 static const long MAXXLSTRANSFER = 5000000; +SecResourceType str2RType(const char* str) +{ + if (isEmptyString(str)) + return RT_DEFAULT; + else if (strieq(str, "module")) + return RT_MODULE; + else if (strieq(str, "service")) + return RT_SERVICE; + else if (strieq(str, "file")) + return RT_FILE_SCOPE; + else if (strieq(str, "workunit")) + return RT_WORKUNIT_SCOPE; + else + return RT_DEFAULT; +} + void Cws_accessEx::checkUser(IEspContext& context, CLdapSecManager* secmgr, const char* rtype, const char* rtitle, unsigned int SecAccessFlags) { if (secmgr == nullptr) @@ -4009,6 +4025,462 @@ bool Cws_accessEx::onAccountPermissions(IEspContext &context, IEspAccountPermiss return true; } +//List permissions for a given account in a given BaseDN resource or all BaseDN resources. +//Revised based on onAccountPermissions() which lists permissions for a given account in all BaseDN resources. +bool Cws_accessEx::onAccountPermissionsV2(IEspContext &context, IEspAccountPermissionsV2Request &req, + IEspAccountPermissionsV2Response &resp) +{ + class CAccountsInResource : public CInterface + { + StringAttr resourceName; + StringArray accountNames; + public: + CAccountsInResource(const char *_resourceName) : resourceName(_resourceName) {} + + inline StringArray &getAccountNames() { return accountNames; }; + inline void addUniqueAccountName(const char *name) { accountNames.appendUniq(name); }; + inline bool findAccountName(const char *name) { return accountNames.find(name) != NotFound; } + }; + + class CAccountsInBaseDN : public CInterface + { + StringAttr baseDNName; + CIArrayOf accountsInResources; + public: + CAccountsInBaseDN(const char *_baseDNName) : baseDNName(_baseDNName) {}; + + inline const char *getBaseDNName() { return baseDNName.get(); }; + inline CIArrayOf &getAccountsInResources() { return accountsInResources; }; + }; + + class CAccountPermissionsHelper : public CSimpleInterface + { + IEspContext *context = nullptr; + CLdapSecManager *secMGR = nullptr; + + StringBuffer accountNameReq; + StringAttr baseDNNameReq; + bool isGroupAccountReq = false; + bool includeGroup = false; + + StringArray groupsAccountBelongsTo; + StringAttr moduleBaseDN; //Used by appendAccountPermissionsForCodeGenResource() + CIArrayOf accountsInBaseDNs; //Used by setBaseDNNamesForMissingPermissions(). 
+ bool hasAuthUsersPerm = false; //May change in appendAccountPermission() + bool hasEveryonePerm = false; //May change in appendAccountPermission() + Owned authUsersGroupPermission, everyOneGroupPermission; + IArrayOf resourcesInOneBaseDN; + + bool getResourcePermissions(const char *baseDN, SecResourceType rType, + const char *resourceName, IArrayOf &permissions) + { + bool success = true; + try + { + secMGR->getPermissionsArray(baseDN, rType, resourceName, permissions); + } + catch(IException *e) //exception may be thrown when no permission for the resource + { + e->Release(); + success = false; + } + return success; + } + void readAccountPermissionsInOneBaseDN(IArrayOf &allBaseDNs, + IEspDnStruct &curBaseDN, IArrayOf &accountPermissions, + IArrayOf &groupAccountPermissions) + { + const char *baseDNName = curBaseDN.getName(); + const char *baseDN = curBaseDN.getBasedn(); + const char *rTypeStr = curBaseDN.getRtype(); + SecResourceType rType = str2RType(rTypeStr); + Owned accountsInBaseDN = new CAccountsInBaseDN(baseDNName); + + //Read the resources for the BaseDN Resource. + if (secMGR->getResources(rType, baseDN, resourcesInOneBaseDN)) + { + ForEachItemIn(i, resourcesInOneBaseDN) + { + ISecResource &r = resourcesInOneBaseDN.item(i); + const char *resourceName = r.getName(); + if (isEmptyString(resourceName)) + continue; + + //Use the same code as in onAccountPermissions() to skip some RT_MODULE resources. + //The permission codegenerator.cpp is saved as a service permission (not a module permission) + //when it is added for a user. + if ((rType == RT_MODULE) && (strieq(resourceName, "codegenerator.cpp") || strnicmp(resourceName, "repository", 10))) + continue; + + IArrayOf permissions; + if (getResourcePermissions(baseDN, rType, resourceName, permissions)) //get the permissions for this resource using secMGR->getPermissionsArray() + { + checkAndAppendAccountPermissions(baseDNName, resourceName, permissions, accountPermissions, groupAccountPermissions); + appendAccountsInResources(resourceName, permissions, accountsInBaseDN->getAccountsInResources()); + } + } + }//If failed, log? + + if (rType == RT_WORKUNIT_SCOPE) + appendAccountPermissionsForWUScopeResource(baseDNName, baseDN, accountPermissions, groupAccountPermissions); + else if ((rType == RT_SERVICE) && !moduleBaseDN.isEmpty()) + appendAccountPermissionsForCodeGenResource(baseDNName, moduleBaseDN, accountPermissions, groupAccountPermissions); + + resourcesInOneBaseDN.kill(); //Clean it for possible next BaseDN. + accountsInBaseDNs.append(*accountsInBaseDN.getClear()); + } + void checkAndAppendAccountPermissions(const char *baseDNName, const char *resourceName, + IArrayOf &permissions, IArrayOf &accountPermissions, + IArrayOf &groupAccountPermissions) + { + ForEachItemIn(i, permissions) + { + CPermission &perm = permissions.item(i); + if (doesPermissionAccountMatchThisAccount(perm)) + { //The account in the perm matches with this account. The match means: 1. both accounts + //have the same account name; or 2. this account belongs to a group and the name of the + //group account is the same as the account in the perm. Create an IEspAccountPermission + //using the resourceName and the perm and add it to the permission group where the + //permission belongs to (accountPermissions, authUsersPermissions, etc). 
+ Owned newPermission = createNewAccountPermission(baseDNName, resourceName, perm); + appendAccountPermission(newPermission, perm, accountPermissions, groupAccountPermissions); + } + } + } + bool doesPermissionAccountMatchThisAccount(CPermission &perm) + { + int accountType = perm.getAccount_type(); + if (isGroupAccountReq && accountType == USER_ACT) + return false; //The account in the perm is not a group account. + + const char *actName = perm.getAccount_name(); + if (isEmptyString(actName)) + return false; + + //If the accountType matches with isGroupAccountReq, validate the actName. + if ((!isGroupAccountReq && (accountType == USER_ACT)) || (isGroupAccountReq && (accountType == GROUP_ACT))) + return streq(actName, accountNameReq); //The actName must match with the accountNameReq. + + //Now, there is only one possibility left: isGroupAccountReq = false and accountType = GROUP_ACT. + //isGroupAccountReq = false: the AccountPermissionsForResource call is for an individual account. + //accountType = GROUP_ACT: the perm is for a group account; actName is the group name. + //We need to check whether the individual is a member of this group. + return groupsAccountBelongsTo.find(actName) != NotFound; + } + IEspAccountPermission *createNewAccountPermission(const char *baseDNName, + const char *resourceName, CPermission &perm) + { + //Use the same code as in onAccountPermissions(). + Owned permission = createAccountPermission(); + permission->setBasednName(baseDNName); + permission->setResourceName(resourceName); + + int allows = perm.getAllows(); + int denies = perm.getDenies(); + if((allows & NewSecAccess_Access) == NewSecAccess_Access) + permission->setAllow_access(true); + if((allows & NewSecAccess_Read) == NewSecAccess_Read) + permission->setAllow_read(true); + if((allows & NewSecAccess_Write) == NewSecAccess_Write) + permission->setAllow_write(true); + if((allows & NewSecAccess_Full) == NewSecAccess_Full) + permission->setAllow_full(true); + if((denies & NewSecAccess_Access) == NewSecAccess_Access) + permission->setDeny_access(true); + if((denies & NewSecAccess_Read) == NewSecAccess_Read) + permission->setDeny_read(true); + if((denies & NewSecAccess_Write) == NewSecAccess_Write) + permission->setDeny_write(true); + if((denies & NewSecAccess_Full) == NewSecAccess_Full) + permission->setDeny_full(true); + return permission.getClear(); + } + void appendAccountPermission(IEspAccountPermission *permissionToBeAppended, + CPermission &perm, IArrayOf &accountPermissions, + IArrayOf &groupAccountPermissions) + { + //Use similar logic as in onAccountPermissions(). + //Append the Account Permission (permissionToBeAppended) to accountPermissions, groupAccountPermissions, + //authUsersPermissions, or everyonePermissions. + const char *actName = perm.getAccount_name(); + int accountType = perm.getAccount_type(); + if ((!isGroupAccountReq && accountType == USER_ACT) || (isGroupAccountReq && accountType == GROUP_ACT)) + { + //Append the Account Permission to accountPermissions if: a. the requested account is not a group account + //and this perm is not for a group account; or b. the requested account is a group account and this perm is + //for a group account + accountPermissions.append(*LINK(permissionToBeAppended)); + return; + } + + if (streq(actName, "Authenticated Users")) + { + //Append the Account Permission to authUsersPermissions if this perm is for Authenticated Users. 
+ IArrayOf& authUsersPermissions = authUsersGroupPermission->getPermissions(); + authUsersPermissions.append(*LINK(permissionToBeAppended)); + hasAuthUsersPerm = true; + return; + } + + if (streq(actName, "everyone")) + { + //Append the Account Permission to everyonePermissions if this perm is for everyone. + IArrayOf& everyonePermissions = everyOneGroupPermission->getPermissions(); + everyonePermissions.append(*LINK(permissionToBeAppended)); + hasEveryonePerm = true; + return; + } + + ForEachItemIn(i, groupAccountPermissions) + { + IEspGroupAccountPermission &groupPermission = groupAccountPermissions.item(i); + if (!streq(actName, groupPermission.getGroupName())) + continue; + + //This perm is for a group account which is already in the groupPermission. + //Append the Account Permission into the groupPermission. + IArrayOf &permissions = groupPermission.getPermissions(); + permissions.append(*LINK(permissionToBeAppended)); + return; + } + + //This perm is for a group account which is not in the groupAccountPermissions yet. + //Create a groupPermission. Append the Account Permission into the groupPermission. + //Append the groupPermission to the groupAccountPermissions. + Owned groupPermission = createGroupAccountPermissionEx(actName); + IArrayOf &permissions = groupPermission->getPermissions(); + permissions.append(*LINK(permissionToBeAppended)); + groupAccountPermissions.append(*groupPermission.getLink()); + } + IEspGroupAccountPermission *createGroupAccountPermissionEx(const char *accountName) + { + Owned groupPermission = createGroupAccountPermission(); + groupPermission->setGroupName(accountName); + return groupPermission.getClear(); + } + void appendAccountPermissionsForWUScopeResource(const char *baseDNName, const char *baseDN, + IArrayOf &accountPermissions, + IArrayOf &groupAccountPermissions) + { + //Use the same code as in onAccountPermissions() to find out the deftBaseDN and deftName. + StringBuffer deftBaseDN, deftName; + const char *comma = strchr(baseDN, ','); + const char *eqsign = strchr(baseDN, '='); + if (eqsign != nullptr) + { + if(comma == nullptr) + deftName.append(eqsign + 1); + else + { + deftName.append(comma - eqsign - 1, eqsign + 1); + deftBaseDN.append(comma + 1); + } + } + + //Based on the code in LdapUtils::normalizeDn(), the deftBaseDN can be empty. + if (deftName.isEmpty()) + return; + + IArrayOf permissions; + if (getResourcePermissions(deftBaseDN, RT_WORKUNIT_SCOPE, deftName, permissions)) + checkAndAppendAccountPermissions(baseDNName, deftName, permissions, accountPermissions, groupAccountPermissions); + } + void getModuleBaseDN(IArrayOf &allBaseDNs, StringAttr &moduleBaseDN) + { + //Use the same code as in onAccountPermissions() to find out the moduleBaseDN. 
+ ForEachItemIn(i, allBaseDNs) + { + IEspDnStruct &dn = allBaseDNs.item(i); + const char *aName = dn.getName(); + const char *aBaseDN = dn.getBasedn(); + const char *aRType = dn.getRtype(); + const char *aRtitle = dn.getRtitle(); + if (!isEmptyString(aName) && !isEmptyString(aBaseDN) && !isEmptyString(aRtitle) && + !isEmptyString(aRType) && strieq(aRType, "module")) + { + moduleBaseDN.set(aBaseDN); + break; + } + } + } + void appendAccountPermissionsForCodeGenResource(const char *baseDNName, const char *moduleBaseDN, + IArrayOf &accountPermissions, IArrayOf &groupAccountPermissions) + { + IArrayOf permissions; + if (getResourcePermissions(moduleBaseDN, RT_SERVICE, "codegenerator.cpp", permissions)) + checkAndAppendAccountPermissions(baseDNName, "codegenerator.cpp", permissions, accountPermissions, groupAccountPermissions); + } + //Collect the names of the accounts which have permissions in the resources of a BaseDN. + void appendAccountsInResources(const char *resourceName, IArrayOf &permissions, + CIArrayOf &accountsInResources) + { + Owned accountsInResource = new CAccountsInResource(resourceName); + + ForEachItemIn(i, permissions) + { + CPermission &perm = permissions.item(i); + const char *accountName = perm.getAccount_name(); + int accountType = perm.getAccount_type(); + if (isEmptyString(accountName)) + continue; + + StringBuffer accountNameEx; + if (GROUP_ACT == accountType) + accountNameEx.append("G|"); + accountNameEx.append(accountName); + accountsInResource->addUniqueAccountName(accountNameEx); + } + accountsInResources.append(*accountsInResource.getClear()); + } + //Similar to onAccountPermissions(): + //For the account stored in the accountNameReq and related group accounts, loop + //through every resources in every BaseDNs. For each BaseDN, if the account is + //not set for one of its resources, add the BaseDN name to a BaseDN list of this + //account. A caller may use the list to enable the Add Permision functions for + //the BaseDN. + void setBaseDNNamesForMissingPermissions(IEspAccountPermissionsV2Response &resp, + IArrayOf &groupAccountPermissions) + { + StringArray missingPermissionBasednNames; + getBaseDNNamesForAccountMissingPermissions(accountNameReq, isGroupAccountReq, missingPermissionBasednNames); + if (missingPermissionBasednNames.length() > 0) + resp.setBasednNames(missingPermissionBasednNames); + + ForEachItemIn(i, groupAccountPermissions) + { + IEspGroupAccountPermission &groupPermission = groupAccountPermissions.item(i); + + StringArray basednNames; + getBaseDNNamesForAccountMissingPermissions(groupPermission.getGroupName(), 1, basednNames); + if (basednNames.length() > 0) + groupPermission.setBasednNames(basednNames); + } + } + //For the account stored in the accountName, loop through every resources in every BaseDNs. + //For each BaseDN, if the account is not in one of its resources, add the BaseDN name to the basednNames. + void getBaseDNNamesForAccountMissingPermissions(const char *accountName, bool isGroup, + StringArray &basednNames) + { + StringBuffer accountNameEx; + if (isGroup) + accountNameEx.append("G|"); + accountNameEx.append(accountName); + + //There may be multiple accounts already in each BaseDN. + ForEachItemIn(i, accountsInBaseDNs) + { //for accounts in one BaseDN: + CAccountsInBaseDN &accountsInBaseDN = accountsInBaseDNs.item(i); + //One BaseDN may have multiple resources. 
+ CIArrayOf &accountsInResources = accountsInBaseDN.getAccountsInResources(); + ForEachItemIn(k, accountsInResources) + { //for accounts in one resource winthin BaseDN: + CAccountsInResource &accountsInResource = accountsInResources.item(k); + if (!accountsInResource.findAccountName(accountNameEx)) + { + //Not find the account in this resource. Add the BaseDN name to the basednNames. + basednNames.append(accountsInBaseDN.getBaseDNName()); + break; + } + } + } + } + + public: + CAccountPermissionsHelper(IEspContext *ctx, CLdapSecManager *secmgr) : context(ctx), secMGR(secmgr) { } + + void readReq(IEspAccountPermissionsV2Request &req, const char *accountReq, const char *userID) + { + baseDNNameReq.set(req.getResourceName()); + + isGroupAccountReq = req.getIsGroup(); + if (!isEmptyString(accountReq)) + accountNameReq.set(accountReq); + else + {//send back the permissions for the current user. + accountNameReq.set(userID); + isGroupAccountReq = false; + } + + includeGroup = req.getIncludeGroup(); + if (!isGroupAccountReq && includeGroup) + secMGR->getGroups(accountNameReq, groupsAccountBelongsTo); + groupsAccountBelongsTo.append("Authenticated Users"); + groupsAccountBelongsTo.append("everyone"); + } + + void getAccountPermissions(IArrayOf &allBaseDNs, IEspAccountPermissionsV2Response &resp) + { + //accountPermissions: the permissions for the requested account (accountNameReq). The account + //could be a group account or a personal account. + //groupAccountPermissions: the permissions for group accounts which are not in the accountPermissions, + //the authUsersPermissions and the everyonePermissions. + IArrayOf accountPermissions; + IArrayOf groupAccountPermissions; + + //"Authenticated Users" and "Everyone" are default user groups. Create the permission containers for those default groups. + //The permission containers for other groups are created in appendAccountPermission() when needed. + authUsersGroupPermission.setown(createGroupAccountPermissionEx("Authenticated Users")); + everyOneGroupPermission.setown(createGroupAccountPermissionEx("Everyone")); + + getModuleBaseDN(allBaseDNs, moduleBaseDN); + ForEachItemIn(i, allBaseDNs) + { + IEspDnStruct& curBaseDN = allBaseDNs.item(i); + if (baseDNNameReq.isEmpty()) //Get account permissions for all BaseDNs. 
+ readAccountPermissionsInOneBaseDN(allBaseDNs, curBaseDN, accountPermissions, groupAccountPermissions); + else if (strieq(curBaseDN.getName(), baseDNNameReq.get())) + { + readAccountPermissionsInOneBaseDN(allBaseDNs, curBaseDN, accountPermissions, groupAccountPermissions); + break; + } + } + + if (hasAuthUsersPerm) + groupAccountPermissions.append(*authUsersGroupPermission.getLink()); + + if (hasEveryonePerm) + groupAccountPermissions.append(*everyOneGroupPermission.getLink()); + + setBaseDNNamesForMissingPermissions(resp, groupAccountPermissions); + + if (groupAccountPermissions.length() > 0) + resp.setGroupPermissions(groupAccountPermissions); + + if (accountPermissions.length() > 0) + resp.setPermissions(accountPermissions); + } + }; + + try + { + CLdapSecManager *secMGR = queryLDAPSecurityManager(context); + if (!secMGR) + throw makeStringException(ECLWATCH_INVALID_SEC_MANAGER, MSG_SEC_MANAGER_IS_NULL); + + //Check user and access + StringBuffer userID; + context.getUserID(userID); + if (userID.isEmpty()) + throw makeStringException(ECLWATCH_INVALID_INPUT, "Could not get user ID."); + + const char *accountName = req.getAccountName(); + if (!isEmptyString(accountName) && !streq(accountName, userID.str())) + checkUser(context, secMGR); + + //Make sure BaseDN settings loaded + setBasedns(context); + + CAccountPermissionsHelper helper(&context, secMGR); + helper.readReq(req, accountName, userID); + helper.getAccountPermissions(m_basedns, resp); + } + catch(IException *e) + { + FORWARDEXCEPTION(context, e, ECLWATCH_INTERNAL_ERROR); + } + return true; +} + bool Cws_accessEx::onFilePermission(IEspContext &context, IEspFilePermissionRequest &req, IEspFilePermissionResponse &resp) { try diff --git a/esp/services/ws_access/ws_accessService.hpp b/esp/services/ws_access/ws_accessService.hpp index f67f826218c..7909259d10f 100644 --- a/esp/services/ws_access/ws_accessService.hpp +++ b/esp/services/ws_access/ws_accessService.hpp @@ -92,6 +92,7 @@ class Cws_accessEx : public Cws_access const char* getPasswordExpiration(ISecUser *usr, StringBuffer &passwordExpiration); void checkUser(IEspContext &context, CLdapSecManager *ldapSecMgr, const char *rtype = nullptr, const char *rtitle = nullptr, unsigned int SecAccessFlags = SecAccess_Full); CLdapSecManager* queryLDAPSecurityManagerAndCheckUser(IEspContext &context, const char *rtype = nullptr, const char *rtitle = nullptr, unsigned int SecAccessFlags = SecAccess_Full); + void createResourceArrayForResources(const char *baseDN, SecResourceType rType, IArrayOf &resources, IArrayOf &resourceArray); public: IMPLEMENT_IINTERFACE; @@ -144,6 +145,7 @@ class Cws_accessEx : public Cws_access virtual bool onUserSudoersInput(IEspContext &context, IEspUserSudoersInputRequest &req, IEspUserSudoersInputResponse &resp); virtual bool onUserSudoers(IEspContext &context, IEspUserSudoersRequest &req, IEspUserSudoersResponse &resp); virtual bool onAccountPermissions(IEspContext &context, IEspAccountPermissionsRequest &req, IEspAccountPermissionsResponse &resp); + virtual bool onAccountPermissionsV2(IEspContext &context, IEspAccountPermissionsV2Request &req, IEspAccountPermissionsV2Response &resp); virtual bool onFilePermission(IEspContext &context, IEspFilePermissionRequest &req, IEspFilePermissionResponse &resp); virtual bool onPermissionsResetInput(IEspContext &context, IEspPermissionsResetInputRequest &req, IEspPermissionsResetInputResponse &resp); virtual bool onPermissionsReset(IEspContext &context, IEspPermissionsResetRequest &req, IEspPermissionsResetResponse &resp); 
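For reference, the new AccountPermissionsV2 method (ws_access interface 1.17, added above) takes an optional ResourceName so a caller can restrict the query to a single BaseDN instead of walking every BaseDN the way AccountPermissions does; omitting AccountName returns the permissions of the calling user. A minimal invocation sketch, assuming the usual ESP conventions (ECL Watch on port 8010, interface version selected via the ver_ query parameter) and hypothetical host, credentials and BaseDN name:

    # Illustrative only -- the host, credentials and the "FileScope" BaseDN name are placeholders.
    # Lists the permissions of user jdoe in one BaseDN, including entries for the groups
    # jdoe belongs to (IncludeGroup=1).
    curl -u jdoe:password \
      "http://eclwatch.example.com:8010/ws_access/AccountPermissionsV2?ver_=1.17&AccountName=jdoe&IsGroup=0&IncludeGroup=1&ResourceName=FileScope"

Leaving ResourceName empty reverts to reporting across all BaseDNs, and the BasednNames array in the response lists the BaseDNs that still contain resources with no permission set for the account (intended, per the code comments, to drive the "add permission" actions in the UI).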
diff --git a/esp/services/ws_fs/ws_fsBinding.cpp b/esp/services/ws_fs/ws_fsBinding.cpp index e4de73d1344..013bf543d51 100644 --- a/esp/services/ws_fs/ws_fsBinding.cpp +++ b/esp/services/ws_fs/ws_fsBinding.cpp @@ -393,12 +393,17 @@ int CFileSpraySoapBindingEx::downloadFile(IEspContext &context, CHttpRequest* re if (!context.validateFeatureAccess(FILE_SPRAY_URL, SecAccess_Full, false)) throw MakeStringException(ECLWATCH_FILE_SPRAY_ACCESS_DENIED, "Failed to download file. Permission denied."); - StringBuffer netAddressStr, osStr, pathStr, nameStr; + StringBuffer netAddressStr, osStr, pathStr, nameStr, dropZoneName; request->getParameter("NetAddress", netAddressStr); request->getParameter("OS", osStr); request->getParameter("Path", pathStr); request->getParameter("Name", nameStr); + request->getParameter("DropZoneName", dropZoneName); + SecAccessFlags permission = getDropZoneScopePermissions(context, dropZoneName, pathStr, netAddressStr); + if (permission < SecAccess_Read) + throw makeStringExceptionV(ECLWATCH_INVALID_INPUT, "Access DropZone Scope %s %s %s not allowed for user %s (permission:%s). Read Access Required.", + dropZoneName.str(), netAddressStr.str(), pathStr.str(), context.queryUserId(), getSecAccessFlagName(permission)); #if 0 StringArray files; IProperties* params = request->queryParameters(); @@ -479,11 +484,16 @@ int CFileSpraySoapBindingEx::downloadFile(IEspContext &context, CHttpRequest* re int CFileSpraySoapBindingEx::onStartUpload(IEspContext& ctx, CHttpRequest* request, CHttpResponse* response, const char* serv, const char* method) { - StringBuffer netAddress, path; + StringBuffer netAddress, path, dropZoneName; request->getParameter("NetAddress", netAddress); request->getParameter("Path", path); + request->getParameter("DropZoneName", dropZoneName); if (!validateDropZonePath(nullptr, netAddress, path)) //The path should be the absolute path for the dropzone. throw makeStringExceptionV(ECLWATCH_INVALID_INPUT, "Invalid Landing Zone path %s", path.str()); + SecAccessFlags permission = getDropZoneScopePermissions(ctx, dropZoneName, path, netAddress); + if (permission < SecAccess_Full) + throw makeStringExceptionV(ECLWATCH_INVALID_INPUT, "Access DropZone Scope %s %s %s not allowed for user %s (permission:%s). 
Full Access Required.", + dropZoneName.str(), netAddress.str(), path.str(), ctx.queryUserId(), getSecAccessFlagName(permission)); return EspHttpBinding::onStartUpload(ctx, request, response, serv, method); } diff --git a/esp/services/ws_fs/ws_fsService.cpp b/esp/services/ws_fs/ws_fsService.cpp index 97c61dab6f4..53a9fc7d8fe 100644 --- a/esp/services/ws_fs/ws_fsService.cpp +++ b/esp/services/ws_fs/ws_fsService.cpp @@ -2338,100 +2338,6 @@ void CFileSprayEx::getDropZoneInfoByDestPlane(double clientVersion, const char* getDropZoneHost(destPlane, dropZone, hostip); } -void CFileSprayEx::getDropZoneInfoByIP(double clientVersion, const char* ip, const char* destFileIn, StringBuffer& destFileOut, StringBuffer& umask) -{ -#ifndef _CONTAINERIZED - if (destFileIn && *destFileIn) - destFileOut.set(destFileIn); - - if (!ip || !*ip) - throw MakeStringExceptionDirect(ECLWATCH_INVALID_IP, "Network address must be specified for a drop zone!"); - - Owned factory = getEnvironmentFactory(true); - Owned constEnv = factory->openEnvironment(); - - StringBuffer destFile; - if (isAbsolutePath(destFileIn)) - { - destFile.set(destFileIn); - Owned dropZone = constEnv->getDropZoneByAddressPath(ip, destFile.str()); - if (!dropZone) - { - if (constEnv->isDropZoneRestrictionEnabled()) - throw MakeStringException(ECLWATCH_DROP_ZONE_NOT_FOUND, "No drop zone configured for '%s' and '%s'. Check your system drop zone configuration.", ip, destFile.str()); - else - { - LOG(MCdebugInfo, unknownJob, "No drop zone configured for '%s' and '%s'. Check your system drop zone configuration.", ip, destFile.str()); - return; - } - } - - - SCMStringBuffer directory, maskBuf; - dropZone->getDirectory(directory); - destFileOut.set(destFile.str()); - dropZone->getUMask(maskBuf); - if (maskBuf.length()) - umask.set(maskBuf.str()); - - return; - } - - Owned dropZoneItr = constEnv->getDropZoneIteratorByAddress(ip); - if (dropZoneItr->count() < 1) - { - if (constEnv->isDropZoneRestrictionEnabled()) - throw MakeStringException(ECLWATCH_DROP_ZONE_NOT_FOUND, "Drop zone not found for network address '%s'. Check your system drop zone configuration.", ip); - else - { - LOG(MCdebugInfo, unknownJob, "Drop zone not found for network address '%s'. Check your system drop zone configuration.", ip); - return; - } - } - - bool dzFound = false; - ForEach(*dropZoneItr) - { - IConstDropZoneInfo& dropZoneInfo = dropZoneItr->query(); - - SCMStringBuffer dropZoneDirectory, dropZoneUMask; - dropZoneInfo.getDirectory(dropZoneDirectory); - dropZoneInfo.getUMask(dropZoneUMask); - if (!dropZoneDirectory.length()) - continue; - - if (!dzFound) - { - dzFound = true; - destFileOut.set(dropZoneDirectory.str()); - addPathSepChar(destFileOut); - destFileOut.append(destFileIn); - if (dropZoneUMask.length()) - umask.set(dropZoneUMask.str()); - } - else - { - if (constEnv->isDropZoneRestrictionEnabled()) - throw MakeStringException(ECLWATCH_INVALID_INPUT, "> 1 drop zones found for network address '%s'.", ip); - else - { - LOG(MCdebugInfo, unknownJob, "> 1 drop zones found for network address '%s'.", ip); - return; - } - } - } - if (!dzFound) - { - if (constEnv->isDropZoneRestrictionEnabled()) - throw MakeStringException(ECLWATCH_DROP_ZONE_NOT_FOUND, "No valid drop zone found for network address '%s'. Check your system drop zone configuration.", ip); - else - LOG(MCdebugInfo, unknownJob, "No valid drop zone found for network address '%s'. 
Check your system drop zone configuration.", ip); - } -#else - throw makeStringException(-1, "Internal error: CFileSprayEx::getDropZoneInfoByIP should not be called in containerized environment"); -#endif -} - static StringBuffer & expandLogicalAsPhysical(StringBuffer & target, const char * name, const char * separator) { const char * cur = name; @@ -2472,30 +2378,17 @@ bool CFileSprayEx::onDespray(IEspContext &context, IEspDespray &req, IEspDespray MemoryBuffer& dstxml = (MemoryBuffer&)req.getDstxml(); if(dstxml.length() == 0) { -#ifdef _CONTAINERIZED if (isEmptyString(destPlane)) destPlane = req.getDestGroup(); // allow eclwatch to continue providing storage plane as 'destgroup' field if (isEmptyString(destPlane)) { if (destip.isEmpty()) - throw MakeStringException(ECLWATCH_INVALID_INPUT, "Neither destination storage plane or destination IP specified."); - Owned planesIter = getDropZonePlanesIterator(); - ForEach(*planesIter) - { - IPropertyTree &lzPlane = planesIter->query(); - if (isHostInPlane(&lzPlane, destip, true)) - { - destPlane = lzPlane.queryProp("@name"); - break; - } - } - if (isEmptyString(destPlane)) - throw makeStringException(ECLWATCH_INVALID_INPUT, "Destination IP does not match a hosts based storage plane."); + throw makeStringException(ECLWATCH_INVALID_INPUT, "Neither destination storage plane or destination IP specified."); + Owned plane = findDropZonePlane(destPath, destip, true); + if (!plane) + throw makeStringExceptionV(ECLWATCH_INVALID_INPUT, "DropZone Plane not found for host %s path %s.", destip.str(), destPath.str()); + destPlane = plane->queryProp("@name"); } -#else - if (isEmptyString(destPlane) && destip.isEmpty()) - throw MakeStringException(ECLWATCH_INVALID_INPUT, "Destination network IP/storage plane not specified."); -#endif //If the destination filename is not provided, calculate a relative filename from the logical filename if(!destfile || !*destfile) @@ -2526,10 +2419,7 @@ bool CFileSprayEx::onDespray(IEspContext &context, IEspDespray &req, IEspDespray if(dstxml.length() == 0) { StringBuffer destfileWithPath, umask; - if (!isEmptyString(destPlane)) - getDropZoneInfoByDestPlane(version, destPlane, destfile, destfileWithPath, umask, destip); - else - getDropZoneInfoByIP(version, destip, destfile, destfileWithPath, umask); + getDropZoneInfoByDestPlane(version, destPlane, destfile, destfileWithPath, umask, destip); RemoteFilename rfn; SocketEndpoint ep(destip.str()); @@ -2922,9 +2812,10 @@ bool CFileSprayEx::onFileList(IEspContext &context, IEspFileListRequest &req, IE throw MakeStringException(ECLWATCH_INVALID_INPUT, "Path not specified."); double version = context.getClientVersion(); + const char* dropZoneName = req.getDropZoneName(); const char* netaddr = req.getNetaddr(); - if (!netaddr || !*netaddr) - throw MakeStringException(ECLWATCH_INVALID_INPUT, "Network address not specified."); + if (isEmptyString(dropZoneName) && isEmptyString(netaddr)) + throw makeStringException(ECLWATCH_INVALID_INPUT, "DropZoneName or Netaddr must be specified."); const char* fileNameMask = req.getMask(); bool directoryOnly = req.getDirectoryOnly(); PROGLOG("FileList: Netaddr %s, Path %s", netaddr, path); @@ -2946,55 +2837,36 @@ bool CFileSprayEx::onFileList(IEspContext &context, IEspFileListRequest &req, IE throw MakeStringException(ECLWATCH_ACCESS_TO_FILE_DENIED, "Only cfg or log file allowed."); } - if (!validateDropZonePath(nullptr, netaddr, sPath) && !validateConfigurationDirectory(nullptr, "log", nullptr, nullptr, sPath)) //The path should be the absolute path for 
the dropzone or log file. - throw makeStringExceptionV(ECLWATCH_INVALID_INPUT, "Invalid file path %s", sPath.str()); - - RemoteFilename rfn; - SocketEndpoint ep; -#ifdef MACHINE_IP - ep.set(MACHINE_IP); -#else - ep.set(netaddr); - if (ep.isNull()) - throw MakeStringException(ECLWATCH_INVALID_INPUT, "FileList: cannot resolve network IP from %s.", netaddr); -#endif - rfn.setPath(ep, sPath.str()); - Owned f = createIFile(rfn); - if (f->isDirectory()!=fileBool::foundYes) - throw MakeStringException(ECLWATCH_INVALID_DIRECTORY, "%s is not a directory.", path); + if (isEmptyString(dropZoneName)) + dropZoneName = findDropZonePlaneName(sPath, netaddr); + SecAccessFlags permission = getDropZoneScopePermissions(context, dropZoneName, sPath, nullptr); + if (permission < SecAccess_Read) + throw makeStringExceptionV(ECLWATCH_INVALID_INPUT, "Access DropZone Scope %s %s not allowed for user %s (permission:%s). Read Access Required.", + dropZoneName, sPath.str(), context.queryUserId(), getSecAccessFlagName(permission)); - IArrayOf files; - Owned di = f->directoryFiles(NULL, false, true); - if(di.get() != NULL) + StringArray hosts; + if (isEmptyString(netaddr)) { - ForEach(*di) - { - StringBuffer fname; - di->getName(fname); - - if (fname.length() == 0 || (directoryOnly && !di->isDir()) || (!di->isDir() && !isEmptyString(fileNameMask) && !WildMatch(fname.str(), fileNameMask, true))) - continue; + Owned dropZone = getDropZonePlane(dropZoneName); + if (!dropZone) + throw makeStringExceptionV(ECLWATCH_INVALID_INPUT, "Unknown landing zone: %s", dropZoneName); + getPlaneHosts(hosts, dropZone); + if (!hosts.ordinality()) + hosts.append("localhost"); + } + else + hosts.append(netaddr); - Owned onefile = createPhysicalFileStruct(); - - onefile->setName(fname.str()); - onefile->setIsDir(di->isDir()); - onefile->setFilesize(di->getFileSize()); - CDateTime modtime; - StringBuffer timestr; - di->getModifiedTime(modtime); - unsigned y,m,d,h,min,sec,nsec; - modtime.getDate(y,m,d,true); - modtime.getTime(h,min,sec,nsec,true); - timestr.appendf("%04d-%02d-%02d %02d:%02d:%02d", y,m,d,h,min,sec); - onefile->setModifiedtime(timestr.str()); - files.append(*onefile.getLink()); - } + IArrayOf& files = resp.getFiles(); + ForEachItemIn(i, hosts) + { + const char* host = hosts.item(i); + if (validateDropZonePath(nullptr, host, sPath)) + getDropZoneFiles(context, dropZoneName, host, sPath, fileNameMask, directoryOnly, files); } sPath.replace('\\', '/');//XSLT cannot handle backslashes resp.setPath(sPath); - resp.setFiles(files); resp.setNetaddr(netaddr); if (osStr && *osStr) { @@ -3049,22 +2921,15 @@ bool CFileSprayEx::checkDropZoneIPAndPath(double clientVersion, const char* drop return false; } -void CFileSprayEx::addDropZoneFile(IEspContext& context, IDirectoryIterator* di, const char* name, const char pathSep, const char* server, IArrayOf& files) +void CFileSprayEx::addDropZoneFile(IEspContext& context, IDirectoryIterator* di, const char* name, const char* path, const char* server, IArrayOf& files) { - Owned aFile = createPhysicalFileStruct(); + double version = context.getClientVersion(); - const char* pName = strrchr(name, pathSep); - if (!pName) - aFile->setName(name); - else - { - StringBuffer sPath; - sPath.append(pName - name, name); - aFile->setPath(sPath.str()); + Owned aFile = createPhysicalFileStruct(); - pName++; //skip the PathSepChar - aFile->setName(pName); - } + aFile->setName(name); + if (!isEmptyString(path)) + aFile->setPath(path); aFile->setIsDir(di->isDir()); CDateTime modtime; @@ -3076,34 +2941,50 @@ void 
CFileSprayEx::addDropZoneFile(IEspContext& context, IDirectoryIterator* di, timestr.appendf("%04d-%02d-%02d %02d:%02d:%02d", y,m,d,h,min,sec); aFile->setModifiedtime(timestr.str()); aFile->setFilesize(di->getFileSize()); - aFile->setServer(server); + if (version >= 1.23) + aFile->setServer(server); files.append(*aFile.getLink()); } -void CFileSprayEx::searchDropZoneFiles(IEspContext& context, const char* server, const char* dir, const char* nameFilter, IArrayOf& files, unsigned& filesFound) +bool CFileSprayEx::searchDropZoneFiles(IEspContext& context, const char* dropZoneName, const char* server, + const char* dir, const char* relDir, const char* nameFilter, IArrayOf& files, unsigned& filesFound) { + if (getDropZoneScopePermissions(context, dropZoneName, dir, server) < SecAccess_Read) + return false; + RemoteFilename rfn; SocketEndpoint ep(server); rfn.setPath(ep, dir); Owned f = createIFile(rfn); if(f->isDirectory()!=fileBool::foundYes) - throw MakeStringException(ECLWATCH_INVALID_DIRECTORY, "%s is not a directory.", dir); + throw makeStringExceptionV(ECLWATCH_INVALID_DIRECTORY, "%s is not a directory.", dir); - const char pathSep = getPathSepChar(dir); - Owned di = f->directoryFiles(nameFilter, true, true); + Owned di = f->directoryFiles(nullptr, false, true); ForEach(*di) { StringBuffer fname; di->getName(fname); - if (!fname.length()) + + if (di->isDir()) + { + StringBuffer fullPath(dir), relPath(relDir); + addPathSepChar(fullPath).append(fname); + if (!relPath.isEmpty()) + addPathSepChar(relPath); + relPath.append(fname); + if (!searchDropZoneFiles(context, dropZoneName, server, fullPath, relPath, nameFilter, files, filesFound)) + continue; + } + if (!isEmptyString(nameFilter) && !WildMatch(fname, nameFilter, false)) continue; + addDropZoneFile(context, di, fname.str(), relDir, server, files); + filesFound++; if (filesFound > dropZoneFileSearchMaxFiles) break; - - addDropZoneFile(context, di, fname.str(), pathSep, server, files); } + return true; } bool CFileSprayEx::onDropZoneFileSearch(IEspContext &context, IEspDropZoneFileSearchRequest &req, IEspDropZoneFileSearchResponse &resp) @@ -3137,7 +3018,7 @@ bool CFileSprayEx::onDropZoneFileSearch(IEspContext &context, IEspDropZoneFileSe double version = context.getClientVersion(); bool serverFound = false; unsigned filesFound = 0; - IArrayOf files; + IArrayOf &files = resp.getFiles(); bool isIPAddressReq = isIPAddress(dropZoneServerReq); IArrayOf allTpDropZones; CTpWrapper tpWrapper; @@ -3157,7 +3038,7 @@ bool CFileSprayEx::onDropZoneFileSearch(IEspContext &context, IEspDropZoneFileSe IConstTpMachine& tpMachine = tpMachines.item(ii); if (isEmptyString(dropZoneServerReq) || matchNetAddressRequest(dropZoneServerReq, isIPAddressReq, tpMachine)) { - searchDropZoneFiles(context, tpMachine.getNetaddress(), dropZone.getPath(), nameFilter, files, filesFound); + searchDropZoneFiles(context, dropZoneName, tpMachine.getNetaddress(), dropZone.getPath(), nullptr, nameFilter, files, filesFound); serverFound = true; } } @@ -3170,7 +3051,6 @@ bool CFileSprayEx::onDropZoneFileSearch(IEspContext &context, IEspDropZoneFileSe VStringBuffer msg("More than %u files are found. 
Only %u files are returned.", dropZoneFileSearchMaxFiles, dropZoneFileSearchMaxFiles); resp.setWarning(msg.str()); } - resp.setFiles(files); } catch(IException* e) { @@ -3271,62 +3151,33 @@ bool CFileSprayEx::onOpenSave(IEspContext &context, IEspOpenSaveRequest &req, IE return true; } -bool CFileSprayEx::getDropZoneFiles(IEspContext &context, const char* dropZone, const char* netaddr, const char* path, - IEspDropZoneFilesRequest &req, IEspDropZoneFilesResponse &resp) +void CFileSprayEx::getDropZoneFiles(IEspContext &context, const char *dropZoneName, const char *host, const char *path, const char *fileNameMask, bool directoryOnly, IArrayOf &files) { - if (!checkDropZoneIPAndPath(context.getClientVersion(), dropZone, netaddr, path)) - throw MakeStringException(ECLWATCH_DROP_ZONE_NOT_FOUND, "Dropzone is not found in the environment settings."); - - bool directoryOnly = req.getDirectoryOnly(); - + SocketEndpoint ep(host); RemoteFilename rfn; - SocketEndpoint ep; -#ifdef MACHINE_IP - ep.set(MACHINE_IP); -#else - ep.set(netaddr); - if (ep.isNull()) - throw MakeStringException(ECLWATCH_INVALID_INPUT, "CFileSprayEx::getDropZoneFiles: cannot resolve network IP from %s.", netaddr); -#endif - rfn.setPath(ep, path); Owned f = createIFile(rfn); - if(f->isDirectory()!=fileBool::foundYes) - throw MakeStringException(ECLWATCH_INVALID_DIRECTORY, "%s is not a directory.", path); + if (f->isDirectory()!=fileBool::foundYes) + throw makeStringExceptionV(ECLWATCH_INVALID_DIRECTORY, "%s is not a directory.", path); - IArrayOf files; - Owned di = f->directoryFiles(NULL, false, true); - if(di.get() != NULL) + Owned di = f->directoryFiles(nullptr, false, true); + ForEach(*di) { - ForEach(*di) - { - StringBuffer fname; - di->getName(fname); + StringBuffer fileName; + di->getName(fileName); - if (fname.length() == 0 || (directoryOnly && !di->isDir())) - continue; - - Owned onefile = createPhysicalFileStruct(); + if ((directoryOnly && !di->isDir()) || (!di->isDir() && !isEmptyString(fileNameMask) && !WildMatch(fileName.str(), fileNameMask, true))) + continue; - onefile->setName(fname.str()); - onefile->setIsDir(di->isDir()); - onefile->setFilesize(di->getFileSize()); - CDateTime modtime; - StringBuffer timestr; - di->getModifiedTime(modtime); - unsigned y,m,d,h,min,sec,nsec; - modtime.getDate(y,m,d,true); - modtime.getTime(h,min,sec,nsec,true); - timestr.appendf("%04d-%02d-%02d %02d:%02d:%02d", y,m,d,h,min,sec); - onefile->setModifiedtime(timestr.str()); - onefile->setServer(netaddr); - files.append(*onefile.getLink()); + if (di->isDir()) + { + VStringBuffer fullPath("%s%s", path, fileName.str()); + if (getDropZoneScopePermissions(context, dropZoneName, fullPath, nullptr) < SecAccess_Read) + continue; } - } - - resp.setFiles(files); - return true; + addDropZoneFile(context, di, fileName, path, host, files); + } } void CFileSprayEx::getServersInDropZone(const char *dropZoneName, IArrayOf &dropZoneList, bool isECLWatchVisibleOnly, StringArray &serverList) @@ -3426,8 +3277,19 @@ bool CFileSprayEx::onDropZoneFiles(IEspContext &context, IEspDropZoneFilesReques } addPathSepChar(directoryStr); + if (isEmptyString(dzName)) + dzName = findDropZonePlaneName(directoryStr, netAddress); + if (getDropZoneScopePermissions(context, dzName, directoryStr, nullptr) < SecAccess_Read) + return false; + + bool directoryOnly = req.getDirectoryOnly(); + IArrayOf &files = resp.getFiles(); if (!isEmptyString(netAddress)) - getDropZoneFiles(context, dzName, netAddress, directoryStr, req, resp); + { + if 
(!checkDropZoneIPAndPath(context.getClientVersion(), dzName, netAddress, directoryStr)) + throw makeStringException(ECLWATCH_DROP_ZONE_NOT_FOUND, "Dropzone is not found in the environment settings."); + getDropZoneFiles(context, dzName, netAddress, directoryStr, nullptr, directoryOnly, files); + } else { //Find out all DropZone servers inside the DropZone. @@ -3437,7 +3299,11 @@ bool CFileSprayEx::onDropZoneFiles(IEspContext &context, IEspDropZoneFilesReques return true; ForEachItemIn(itr, servers) - getDropZoneFiles(context, dzName, servers.item(itr), directoryStr, req, resp); + { + const char* host = servers.item(itr); + if (checkDropZoneIPAndPath(context.getClientVersion(), dzName, host, directoryStr)) + getDropZoneFiles(context, dzName, host, directoryStr, nullptr, directoryOnly, files); + } } resp.setDropZoneName(dzName); @@ -3481,6 +3347,8 @@ bool CFileSprayEx::onDeleteDropZoneFiles(IEspContext &context, IEspDeleteDropZon if (!checkDropZoneIPAndPath(version, dzName, netAddress, path.str())) throw MakeStringException(ECLWATCH_DROP_ZONE_NOT_FOUND, "Dropzone is not found in the environment settings."); + checkDropZoneFileScopeAccess(context, dzName, netAddress, path, files, SecAccess_Full); + RemoteFilename rfn; SocketEndpoint ep(netAddress); if (ep.isNull()) @@ -3544,6 +3412,76 @@ bool CFileSprayEx::onDeleteDropZoneFiles(IEspContext &context, IEspDeleteDropZon return true; } +void CFileSprayEx::checkDropZoneFileScopeAccess(IEspContext &context, const char *dropZoneName, const char *netAddress, + const char *dropZonePath, const StringArray &dropZoneFiles, SecAccessFlags accessReq) +{ + const char *accessReqName = getSecAccessFlagName(accessReq); + if (isEmptyString(dropZoneName)) + dropZoneName = findDropZonePlaneName(dropZonePath, netAddress); + SecAccessFlags permission = getDropZoneScopePermissions(context, dropZoneName, dropZonePath, nullptr); + if (permission < accessReq) + throw makeStringExceptionV(ECLWATCH_INVALID_INPUT, "Access DropZone Scope %s %s not allowed for user %s (permission:%s). %s Permission Required.", + dropZoneName, dropZonePath, context.queryUserId(), getSecAccessFlagName(permission), accessReqName); + + RemoteFilename rfn; + SocketEndpoint ep(netAddress); + rfn.setIp(ep); + + StringBuffer errorMessage; + MapStringTo uniquePath; + const char pathSep = getPathSepChar(dropZonePath); + ForEachItemIn(i, dropZoneFiles) + { + const char *fileNameWithPath = dropZoneFiles.item(i); + if (isEmptyString(fileNameWithPath)) + continue; + + StringBuffer fileToDelete(dropZonePath); + addPathSepChar(fileToDelete).append(fileNameWithPath); + + StringBuffer pathToCheck; + rfn.setRemotePath(fileToDelete.str()); + Owned rFile = createIFile(rfn); + if (rFile->isDirectory() == fileBool::foundYes) + pathToCheck.append(fileNameWithPath); + else + { + splitDirTail(fileNameWithPath, pathToCheck); + if (pathToCheck.isEmpty()) + continue; + } + + //a subfolder or a file under a subfolder. Check whether accessing the subfolder is allowed.
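+ //The uniquePath map caches the per-directory result (true=allowed, false=denied) so each subfolder's
+ //scope permission is only looked up once; files under a subfolder already recorded as denied are
+ //appended straight to the error report without another permission call.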
+ bool *found = uniquePath.getValue(pathToCheck.str()); + if (found) + { + if (!*found) //found a path denied + errorMessage.append("; ").append(fileNameWithPath); + continue; + } + + StringBuffer fullPath(dropZonePath); + addPathSepChar(fullPath).append(pathToCheck); + SecAccessFlags permission = getDropZoneScopePermissions(context, dropZoneName, fullPath, nullptr); + if (permission < accessReq) + { + uniquePath.setValue(pathToCheck.str(), false); //add a path denied + if (errorMessage.isEmpty()) + errorMessage.setf("User %s (permission:%s): failed to access the DropZone Scopes for the following file(s). %s Permission Required. %s", + context.queryUserId(), getSecAccessFlagName(permission), accessReqName, fileNameWithPath); + else + errorMessage.append("; ").append(fileNameWithPath); + } + else + { + uniquePath.setValue(pathToCheck.str(), true); //add a path allowed + } + } + + if (!errorMessage.isEmpty()) + throw makeStringException(ECLWATCH_INVALID_INPUT, errorMessage.str()); +} + void CFileSprayEx::appendGroupNode(IArrayOf& groupNodes, const char* nodeName, const char* clusterType, bool replicateOutputs) { diff --git a/esp/services/ws_fs/ws_fsService.hpp b/esp/services/ws_fs/ws_fsService.hpp index 91ff5af82e6..7fdd01e0f83 100644 --- a/esp/services/ws_fs/ws_fsService.hpp +++ b/esp/services/ws_fs/ws_fsService.hpp @@ -143,19 +143,20 @@ class CFileSprayEx : public CFileSpray void getInfoFromSasha(IEspContext &context, const char *sashaServer, const char* wuid, IEspDFUWorkunit *info); bool getArchivedWUInfo(IEspContext &context, IEspGetDFUWorkunit &req, IEspGetDFUWorkunitResponse &resp); bool GetArchivedDFUWorkunits(IEspContext &context, IEspGetDFUWorkunits &req, IEspGetDFUWorkunitsResponse &resp); - bool getDropZoneFiles(IEspContext &context, const char* dropZone, const char* netaddr, const char* path, IEspDropZoneFilesRequest &req, IEspDropZoneFilesResponse &resp); + void getDropZoneFiles(IEspContext &context, const char *dropZoneName, const char *host, const char *path, const char *fileNameMask, bool directoryOnly, IArrayOf &files); bool ParseLogicalPath(const char * pLogicalPath, StringBuffer &title); bool ParseLogicalPath(const char * pLogicalPath, const char *group, const char* cluster, StringBuffer &folder, StringBuffer &title, StringBuffer &defaultFolder, StringBuffer &defaultReplicateFolder); StringBuffer& getAcceptLanguage(IEspContext& context, StringBuffer& acceptLanguage); void appendGroupNode(IArrayOf& groupNodes, const char* nodeName, const char* clusterType, bool replicateOutputs); bool getOneDFUWorkunit(IEspContext& context, const char* wuid, IEspGetDFUWorkunitsResponse& resp); - void getDropZoneInfoByIP(double clientVersion, const char* destIP, const char* destFile, StringBuffer& path, StringBuffer& mask); void getDropZoneInfoByDestPlane(double clientVersion, const char* destGroup, const char* destFileIn, StringBuffer& destFileOut, StringBuffer& umask, StringBuffer & hostip); bool checkDropZoneIPAndPath(double clientVersion, const char* dropZone, const char* netAddr, const char* path); - void addDropZoneFile(IEspContext& context, IDirectoryIterator* di, const char* name, const char pathSep, const char* server, IArrayOf&files); - void searchDropZoneFiles(IEspContext& context, const char* server, const char* dir, const char* nameFilter, IArrayOf& files, unsigned& filesFound); + void addDropZoneFile(IEspContext& context, IDirectoryIterator* di, const char* name, const char* path, const char* server, IArrayOf&files); + bool searchDropZoneFiles(IEspContext& context, const char* 
dropZone, const char* server, const char* dir, const char* relDir, const char* nameFilter, IArrayOf& files, unsigned& filesFound); void setDFUServerQueueReq(const char* dfuServerQueue, IDFUWorkUnit* wu); void setUserAuth(IEspContext &context, IDFUWorkUnit* wu); + void checkDropZoneFileScopeAccess(IEspContext &context, const char *dropZoneName, const char *netAddress, + const char *dropZonePath, const StringArray &dropZoneFiles, SecAccessFlags accessReq); }; #endif //_ESPWIZ_FileSpray_HPP__ diff --git a/esp/smc/SMCLib/TpCommon.cpp b/esp/smc/SMCLib/TpCommon.cpp index 08ac870cc49..f379227d272 100644 --- a/esp/smc/SMCLib/TpCommon.cpp +++ b/esp/smc/SMCLib/TpCommon.cpp @@ -156,3 +156,22 @@ extern TPWRAPPER_API bool validateDropZonePath(const char* dropZoneName, const c return false; } +extern TPWRAPPER_API const char* findDropZonePlaneName(const char* dropZonePath, const char* dropZoneHost) +{ + Owned plane = findDropZonePlane(dropZonePath, dropZoneHost, true); + if (!plane) + throw makeStringExceptionV(ECLWATCH_INVALID_INPUT, "findDropZonePlaneName(): DropZone not found for Host %s and Path %s.", dropZoneHost, dropZonePath); + return plane->queryProp("@name"); +} + +extern TPWRAPPER_API SecAccessFlags getDropZoneScopePermissions(IEspContext& context, const char* dropZoneName, const char* dropZonePath, const char* dropZoneHost) +{ + if (isEmptyString(dropZonePath)) + throw makeStringException(ECLWATCH_INVALID_CLUSTER_NAME, "getDropZoneScopePermissions(): DropZone path must be specified."); + if (isEmptyString(dropZoneName)) + dropZoneName = findDropZonePlaneName(dropZonePath, dropZoneHost); + + Owned userDesc = createUserDescriptor(); + userDesc->set(context.queryUserId(), context.queryPassword(), context.querySignature()); + return queryDistributedFileDirectory().getDropZoneScopePermissions(dropZoneName, dropZonePath, userDesc); +} diff --git a/esp/smc/SMCLib/TpWrapper.hpp b/esp/smc/SMCLib/TpWrapper.hpp index 29b6f28eebe..116a282b1e8 100644 --- a/esp/smc/SMCLib/TpWrapper.hpp +++ b/esp/smc/SMCLib/TpWrapper.hpp @@ -229,6 +229,8 @@ extern TPWRAPPER_API bool validateDataPlaneName(const char *remoteDali, const ch extern TPWRAPPER_API bool matchNetAddressRequest(const char* netAddressReg, bool ipReq, IConstTpMachine& tpMachine); extern TPWRAPPER_API bool validateDropZonePath(const char* dropZoneName, const char* netAddr, const char* pathToCheck); +extern TPWRAPPER_API const char* findDropZonePlaneName(const char* dropZonePath, const char* dropZoneHost); +extern TPWRAPPER_API SecAccessFlags getDropZoneScopePermissions(IEspContext& context, const char * dropZoneName, const char * dropZonePath, const char * dropZoneHost); #endif //_ESPWIZ_TpWrapper_HPP__ diff --git a/esp/src/package-lock.json b/esp/src/package-lock.json index 263a1a121d3..b2afd8f9d98 100644 --- a/esp/src/package-lock.json +++ b/esp/src/package-lock.json @@ -5264,13 +5264,10 @@ "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=" }, "node_modules/json5": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.0.tgz", - "integrity": "sha512-f+8cldu7X/y7RAJurMEJmdoKXGB/X550w2Nr3tTbezL6RwEE/iMcm+tZnXeoZtKuOq6ft8+CqzEkrIgx1fPoQA==", + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", "dev": true, - "dependencies": { - "minimist": "^1.2.5" - }, "bin": { "json5": "lib/cli.js" }, @@ -6061,12 +6058,6 @@ "node": "*" } }, - "node_modules/minimist": { - "version": "1.2.6", - 
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz", - "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==", - "dev": true - }, "node_modules/minipass": { "version": "3.1.6", "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.1.6.tgz", @@ -12962,13 +12953,10 @@ "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=" }, "json5": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.0.tgz", - "integrity": "sha512-f+8cldu7X/y7RAJurMEJmdoKXGB/X550w2Nr3tTbezL6RwEE/iMcm+tZnXeoZtKuOq6ft8+CqzEkrIgx1fPoQA==", - "dev": true, - "requires": { - "minimist": "^1.2.5" - } + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true }, "jsonparse": { "version": "1.3.1", @@ -13599,12 +13587,6 @@ "brace-expansion": "^1.1.7" } }, - "minimist": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz", - "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==", - "dev": true - }, "minipass": { "version": "3.1.6", "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.1.6.tgz", diff --git a/esp/src/src-react/components/PackageMapParts.tsx b/esp/src/src-react/components/PackageMapParts.tsx index 921ace82339..111d712b261 100644 --- a/esp/src/src-react/components/PackageMapParts.tsx +++ b/esp/src/src-react/components/PackageMapParts.tsx @@ -1,15 +1,12 @@ import * as React from "react"; -import { CommandBar, ContextualMenuItemType, ICommandBarItemProps, MessageBar, MessageBarType } from "@fluentui/react"; -import { useConst } from "@fluentui/react-hooks"; +import { CommandBar, ContextualMenuItemType, ICommandBarItemProps, Link } from "@fluentui/react"; import { scopedLogger } from "@hpcc-js/util"; import { SizeMe } from "react-sizeme"; import * as parser from "dojox/xml/parser"; -import * as Observable from "dojo/store/Observable"; -import { Memory } from "src/store/Memory"; import * as WsPackageMaps from "src/WsPackageMaps"; import nlsHPCC from "src/nlsHPCC"; import { useConfirm } from "../hooks/confirm"; -import { useGrid } from "../hooks/grid"; +import { useFluentGrid } from "../hooks/grid"; import { pushUrl } from "../util/history"; import { ShortVerticalDivider } from "./Common"; import { AddPackageMapPart } from "./forms/AddPackageMapPart"; @@ -33,14 +30,12 @@ export const PackageMapParts: React.FunctionComponent = ({ const [_package, setPackage] = React.useState(undefined); const [showAddPartForm, setShowAddPartForm] = React.useState(false); const [uiState, setUIState] = React.useState({ ...defaultUIState }); - - const [showError, setShowError] = React.useState(false); - const [errorMessage, setErrorMessage] = React.useState(""); + const [data, setData] = React.useState([]); // Grid --- - const store = useConst(new Observable(new Memory("Part"))); - const { Grid, selection, refreshTable, copyButtons } = useGrid({ - store, + const { Grid, selection, copyButtons } = useFluentGrid({ + data, + primaryID: "Part", sort: { attribute: "Part", descending: false }, filename: "packageMapParts", columns: { @@ -48,44 +43,60 @@ export const PackageMapParts: React.FunctionComponent = ({ Part: { label: nlsHPCC.Parts, formatter: React.useCallback(function (part, row) { - return `${part}`; + return {part}; }, [name]) }, } }); + const refreshData = 
React.useCallback(() => { + WsPackageMaps.getPackageMapById({ packageMap: name }) + .then(({ GetPackageMapByIdResponse }) => { + const xml = parser.parse(GetPackageMapByIdResponse?.Info); + const parts = [...xml.getElementsByTagName("Part")].map(part => { + return { + Part: part.attributes[0].nodeValue + }; + }); + setData(parts); + }) + .catch(err => logger.error(err)) + ; + }, [name]); + + React.useEffect(() => { + refreshData(); + }, [refreshData]); + const [DeleteConfirm, setShowDeleteConfirm] = useConfirm({ title: nlsHPCC.Delete, message: nlsHPCC.YouAreAboutToDeleteThisPart, onSubmit: React.useCallback(() => { + const requests = []; selection.forEach((item, idx) => { - WsPackageMaps.RemovePartFromPackageMap({ - request: { - PackageMap: name.split("::")[1], - Target: _package?.Target, - PartName: item.Part - } - }) - .then(({ RemovePartFromPackageMapResponse, Exceptions }) => { - if (RemovePartFromPackageMapResponse?.status?.Code === 0) { - store.remove(item.Part); - refreshTable(); - } else if (Exceptions?.Exception.length > 0) { - setShowError(true); - setErrorMessage(Exceptions?.Exception[0].Message); + requests.push( + WsPackageMaps.RemovePartFromPackageMap({ + request: { + PackageMap: name.split("::")[1], + Target: _package?.Target, + PartName: item.Part } }) + ); + Promise + .all(requests) + .then(() => refreshData()) .catch(err => logger.error(err)) ; }); - }, [_package?.Target, name, refreshTable, selection, store]) + }, [_package?.Target, name, refreshData, selection]) }); // Command Bar --- const buttons = React.useMemo((): ICommandBarItemProps[] => [ { key: "refresh", text: nlsHPCC.Refresh, iconProps: { iconName: "Refresh" }, - onClick: () => refreshTable() + onClick: () => refreshData() }, { key: "divider_1", itemType: ContextualMenuItemType.Divider, onRender: () => }, { @@ -109,23 +120,7 @@ export const PackageMapParts: React.FunctionComponent = ({ } } }, - ], [name, refreshTable, selection, setShowDeleteConfirm, uiState.hasSelection]); - - React.useEffect(() => { - WsPackageMaps.getPackageMapById({ packageMap: name }) - .then(({ GetPackageMapByIdResponse }) => { - const xml = parser.parse(GetPackageMapByIdResponse?.Info); - const parts = [...xml.getElementsByTagName("Part")].map(part => { - return { - Part: part.attributes[0].nodeValue - }; - }); - store.setData(parts); - refreshTable(); - }) - .catch(err => logger.error(err)) - ; - }, [store, name, refreshTable]); + ], [name, refreshData, selection, setShowDeleteConfirm, uiState.hasSelection]); React.useEffect(() => { WsPackageMaps.PackageMapQuery({}) @@ -148,11 +143,6 @@ export const PackageMapParts: React.FunctionComponent = ({ }, [selection]); return <> - {showError && - setShowError(false)} dismissButtonAriaLabel="Close"> - {errorMessage} - - } {({ size }) => } @@ -162,8 +152,8 @@ export const PackageMapParts: React.FunctionComponent = ({ /> } ; diff --git a/esp/src/src-react/components/forms/AddPackageMapPart.tsx b/esp/src/src-react/components/forms/AddPackageMapPart.tsx index 7fdc1813044..cae4d80a7c1 100644 --- a/esp/src/src-react/components/forms/AddPackageMapPart.tsx +++ b/esp/src/src-react/components/forms/AddPackageMapPart.tsx @@ -37,19 +37,17 @@ const defaultValues: AddPackageMapPartValues = { interface AddPackageMapPartProps { showForm: boolean; setShowForm: (_: boolean) => void; - store: any; packageMap: string; target: string; - refreshTable: (_: boolean) => void; + refreshData: () => void; } export const AddPackageMapPart: React.FunctionComponent = ({ showForm, setShowForm, - store, packageMap, target, - 
refreshTable, + refreshData, }) => { const { handleSubmit, control, reset } = useForm({ defaultValues }); @@ -66,8 +64,7 @@ export const AddPackageMapPart: React.FunctionComponent .then(({ AddPartToPackageMapResponse, Exceptions }) => { if (AddPartToPackageMapResponse?.status?.Code === 0) { closeForm(); - store.add({ Part: data.PartName }); - refreshTable(true); + if (refreshData) refreshData(); reset(defaultValues); } else if (Exceptions) { closeForm(); @@ -81,7 +78,7 @@ export const AddPackageMapPart: React.FunctionComponent logger.error(err); } )(); - }, [closeForm, handleSubmit, packageMap, refreshTable, reset, store, target]); + }, [closeForm, handleSubmit, packageMap, refreshData, reset, target]); return diff --git a/esp/src/src/nls/es/hpcc.ts b/esp/src/src/nls/es/hpcc.ts index 714d8c9b260..a541d9e0500 100644 --- a/esp/src/src/nls/es/hpcc.ts +++ b/esp/src/src/nls/es/hpcc.ts @@ -2,6 +2,7 @@ export = { Abort: "Aborte", AbortedBy: "Abortado por", AbortedTime: "Abortado a las", + AbortSelectedWorkunits: "¿Cancelar unidad(es) de trabajo seleccionada(s)? Su ID de inicio de sesión se registrará para esta acción dentro de la(s) WU(s).", About: "Acerca", AboutGraphControl: "Acerca del controlador gráfico", AboutHPCCSystems: "Acerca de HPCC Systems®", @@ -111,6 +112,7 @@ export = { Columns: "Columnas", Command: "Comando", Comment: "Comentario", + CompileCost: "Costo de compilación", Compiled: "Compilado", Compiling: "Compilando", Completed: "Completado", @@ -233,6 +235,7 @@ export = { Downloads: "Descargas", DownloadSelectionAsCSV: "Descargar la selección como CSV", DownloadToCSV: "Bajar en formato CSV", + DownloadToCSVNonFlatWarning: "Tenga en cuenta: la descarga de archivos que contienen conjuntos de datos anidados como datos separados por comas puede no tener el formato esperado", DropZone: "Zona de carga", DueToInctivity: "Se desconectará de todas las sesiones de ECL Watch en 3 minutos debido a inactividad.", Duration: "Duración", @@ -640,6 +643,7 @@ export = { PleaseSelectAUserToAdd: "Por favor escoja el usario para agregar", Plugins: "Complementos", Pods: "Pods", + PodsAccessError: "No se puede recuperar la lista de pods", Port: "Puerto", Prefix: "Prefijo", PrefixPlaceholder: "filename{:length}, filesize{:[B|L][1-8]}", @@ -875,6 +879,7 @@ export = { ThorProcess: "Proceso de Thor", ThreadID: "Identificación de subproceso", Time: "Tiempo", + Timeline: "Cronología", TimeMaxTotalExecuteMinutes: "Maximo tiempo total de ejecucion en minutos", TimeMeanTotalExecuteMinutes: "Total tiempo total de ejecucion en minutos", TimeMinTotalExecuteMinutes: "Minomo tiempo total de ejecucion en minutos", diff --git a/esp/src/src/nls/hpcc.ts b/esp/src/src/nls/hpcc.ts index 5ed0df6bb38..2e8769a9e39 100644 --- a/esp/src/src/nls/hpcc.ts +++ b/esp/src/src/nls/hpcc.ts @@ -354,7 +354,7 @@ export = { GetLogicalFilePart: "Logical File Part", GetProtectedList: "Protected List", GetValue: "Value", - GetPart: "Part", + GetPart: "Get Part", GetSoftwareInformation: "Get Software Information", Graph: "Graph", Graphs: "Graphs", diff --git a/esp/src/src/nls/zh/hpcc.ts b/esp/src/src/nls/zh/hpcc.ts index 38104b9d3be..52003b9d493 100644 --- a/esp/src/src/nls/zh/hpcc.ts +++ b/esp/src/src/nls/zh/hpcc.ts @@ -2,6 +2,7 @@ Abort: "终止", AbortedBy: "终止者", AbortedTime: "终止时间", + AbortSelectedWorkunits: "中止所选择的工作单元?你的登录代号和操作会记录在工作单元里。", About: "本系统简介", AboutGraphControl: "图形控制器简介", AboutHPCCSystems: "HPCC Systems®简介", @@ -111,6 +112,7 @@ Columns: "列", Command: "指令", Comment: "注释", + CompileCost: "编译费用", Compiled: "已编译", Compiling: "编译", 
Completed: "完成", @@ -233,6 +235,7 @@ Downloads: "下载", DownloadSelectionAsCSV: "下载CSV格式", DownloadToCSV: "下载成CSV", + DownloadToCSVNonFlatWarning: "注意:把含有嵌套格式的数据文件下载为逗号分隔文件可能产生意想不到的格式。", DropZone: "文件停放区", DueToInctivity: "如果三分钟之内没有活动,您将退出所有ECL Watch进程。", Duration: "时间段", @@ -640,6 +643,7 @@ PleaseSelectAUserToAdd: "请选择要添加的用户", Plugins: "插件", Pods: "Pods", + PodsAccessError: "无法获取运行Pod表", Port: "端口", Prefix: "前缀", PrefixPlaceholder: "文件名{:长度}, 文件大小{:[B|L][1-8]}", @@ -874,6 +878,7 @@ ThorProcess: "Thor 进程", ThreadID: "线程编号", Time: "时间", + Timeline: "时间点", TimeMaxTotalExecuteMinutes: "总运行时间最大值(分钟)", TimeMeanTotalExecuteMinutes: "总运行时间均值(分钟)", TimeMinTotalExecuteMinutes: "总运行时间最小值(分钟)", diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index ed62f3433c3..e71000a7993 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -586,6 +586,9 @@ vaults: kind: {{ $vault.kind }} {{- if $vault.namespace }} namespace: {{ $vault.namespace }} + {{- end }} + {{- if (hasKey $vault "verify_server") }} + verify_server: {{ $vault.verify_server }} {{- end }} url: {{ $vault.url }} {{- if index $vault "client-secret" }} @@ -1651,11 +1654,16 @@ remote client certificates. Adding the following to ESP (Roxie support to be added later) remoteClients: - name: myRemoteClient + organization: myorg #optional + secretTemplate: #optional add annotations to generated secret for tools like kubed config-syncer + annotations: + kubed.appscode.com/sync: "hpcc=testns" #sync certificate to matching namespaces + Will generate certificates that can be deployed to the remote client. Will cause ESP to require client certificates when a socket connects. Will create a TLS based access control list which ESP will check to make sure a connections client certificate is enabled. -Pass in root, client (name), organization (optional), instance (myeclwatch), component (eclwatch), visibility +Pass in root, client (name), organization (optional), instance (myeclwatch), component (eclwatch), visibility, secretTemplate (optional) */}} {{- define "hpcc.addClientCertificate" }} {{- if (.root.Values.certificates | default dict).enabled -}} @@ -1676,6 +1684,7 @@ Pass in root, client (name), organization (optional), instance (myeclwatch), com {{- $component := .component -}} {{- $client := .client -}} {{- $organization := .organization -}} + {{- $secretTemplate := .secretTemplate -}} {{- if not $externalCert -}} {{- $_ := fail (printf "Remote certificate defined for non external facing service %s - %s." $component $instance) -}} {{- end }} @@ -1688,6 +1697,10 @@ metadata: spec: # Secret names are always required. 
secretName: client-{{ $issuerKeyName }}-{{ $component }}-{{ $instance }}-{{ $client }}-tls + {{- if $secretTemplate }} + secretTemplate: +{{ toYaml $secretTemplate | indent 4 }} + {{- end }} duration: 2160h # 90d renewBefore: 360h # 15d subject: diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index cd57e19851f..95b58448ed0 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -205,7 +205,7 @@ kind: ConfigMap {{- $instance := .name -}} {{- $visibility := .service.visibility -}} {{- range $remoteClient := .remoteClients }} - {{ include "hpcc.addClientCertificate" (dict "root" $ "client" $remoteClient.name "organization" $remoteClient.organization "instance" $instance "component" $application "visibility" $visibility) }} + {{ include "hpcc.addClientCertificate" (dict "root" $ "client" $remoteClient.name "organization" $remoteClient.organization "instance" $instance "component" $application "visibility" $visibility "secretTemplate" $remoteClient.secretTemplate) }} {{- end }} {{- end }} {{- end }} diff --git a/helm/hpcc/values.schema.json b/helm/hpcc/values.schema.json index 9ed322c10a4..328dcd233bd 100644 --- a/helm/hpcc/values.schema.json +++ b/helm/hpcc/values.schema.json @@ -819,6 +819,10 @@ "namespace": { "description": "the namespace to use when authenticating with, and accessing the vault", "type": "string" + }, + "verify_server": { + "description": "optional relax server verification for trouble shooting", + "type": "boolean" } }, "required": [ "name", "url" ], @@ -2848,6 +2852,24 @@ "name": { "type": "string", "description": "Remote client name" + }, + "organization": { + "type": "string", + "description": "Remote client organization" + }, + "secretTemplate": { + "type": "object", + "description": "cert-manager secretTemplate for this remoteClient secret", + "properties": { + "annotations": { + "type": "object", + "additionalProperties": { "type": "string" } + }, + "labels": { + "type": "object", + "additionalProperties": { "type": "string" } + } + } } } } diff --git a/helm/hpcc/values.yaml b/helm/hpcc/values.yaml index 029a65792bd..57f01d1aea1 100644 --- a/helm/hpcc/values.yaml +++ b/helm/hpcc/values.yaml @@ -566,8 +566,12 @@ esp: # Add remote clients to generated client certificates and make the ESP require that one of the generated certificates is provided by a client in order to connect # When setting up remote clients make sure that certificates.issuers.remote.enabled is set to true. # remoteClients: -# - name: myclient -# organization: mycompany +# - name: petfoodApplicationProd +# organization: petfoodDept +# secretTemplate: +# annotations: +# kubed.appscode.com/sync: "hpccenv=petfoodAppProd" # use kubed config-syncer to replicate certificate to namespace with matching annotation (also supports syncing with separate aks clusters) + service: ## port can be used to change the local port used by the pod. 
If omitted, the default port (8880) is used port: 8888 @@ -651,8 +655,6 @@ esp: application: sql2ecl auth: none replicas: 1 -# remoteClients: -# - name: sqlclient111 service: visibility: local servicePort: 8510 diff --git a/roxie/ccd/ccdqueue.cpp b/roxie/ccd/ccdqueue.cpp index 0614c2b800b..9aa28e6601c 100644 --- a/roxie/ccd/ccdqueue.cpp +++ b/roxie/ccd/ccdqueue.cpp @@ -1988,6 +1988,31 @@ class RoxieReceiverBase : implements IRoxieOutputQueueManager, public CInterface #pragma warning ( disable: 4355 ) #endif +static void throwPacketTooLarge(IRoxieQueryPacket *x, unsigned maxPacketSize) +{ + StringBuffer t; + unsigned traceLength = x->getTraceLength(); + if (traceLength) + { + const byte *traceInfo = x->queryTraceInfo(); + unsigned char loggingFlags = *traceInfo; + if (loggingFlags & LOGGING_FLAGSPRESENT) // should always be true.... but this flag is handy to avoid flags byte ever being NULL + { + traceInfo++; + traceLength--; + if (loggingFlags & LOGGING_TRACELEVELSET) + { + traceInfo++; + traceLength--; + } + t.append(traceLength, (const char *) traceInfo); + } + } + throw MakeStringException(ROXIE_PACKET_ERROR, "Maximum packet length %d exceeded sending packet %s (context length %u, continuation length %u, smart step length %u, trace length %u, total length %u", + maxPacketSize, t.str(), + x->getContextLength(), x->getContinuationLength(), x->getSmartStepInfoLength(), x->getTraceLength(), x->queryHeader().packetlength); +} + class RoxieThrottledPacketSender : public Thread { TokenBucket &bucket; @@ -2097,10 +2122,7 @@ class RoxieThrottledPacketSender : public Thread break; } if (length > maxPacketSize) - { - StringBuffer s; - throw MakeStringException(ROXIE_PACKET_ERROR, "Maximum packet length %d exceeded sending packet %s", maxPacketSize, header.toString(s).str()); - } + throwPacketTooLarge(x, maxPacketSize); enqueue(x); } @@ -2435,10 +2457,7 @@ class RoxieSocketQueueManager : public RoxieReceiverBase break; } if (length > maxPacketSize) - { - StringBuffer s; - throw MakeStringException(ROXIE_PACKET_ERROR, "Maximum packet length %d exceeded sending packet %s", maxPacketSize, header.toString(s).str()); - } + throwPacketTooLarge(x, maxPacketSize); x->noteTimeSent(); Owned serialized = x->serialize(); if (!channelWrite(serialized->queryHeader(), true)) diff --git a/system/httplib/httplib.h b/system/httplib/httplib.h index 2c85d63f5ca..50a68f1bc99 100644 --- a/system/httplib/httplib.h +++ b/system/httplib/httplib.h @@ -236,6 +236,9 @@ inline const unsigned char *ASN1_STRING_get0_data(const ASN1_STRING *asn1) { #include #endif +#include "platform.h" +#include "jlog.hpp" + /* * Declaration */ @@ -4632,7 +4635,11 @@ inline bool ClientImpl::send(const Request &req, Response &res) { } if (!is_alive) { - if (!create_and_connect_socket(socket_)) { return false; } + if (!create_and_connect_socket(socket_)) + { + OERRLOG("HTTPLIB Error create_and_connect_socket failed"); + return false; + } #ifdef CPPHTTPLIB_OPENSSL_SUPPORT // TODO: refactoring @@ -4645,7 +4652,11 @@ inline bool ClientImpl::send(const Request &req, Response &res) { } } - if (!scli.initialize_ssl(socket_)) { return false; } + if (!scli.initialize_ssl(socket_)) + { + OERRLOG("HTTPLIB Error initialize_ssl failed"); + return false; + } } #endif } @@ -4660,7 +4671,11 @@ inline bool ClientImpl::send(const Request &req, Response &res) { if (close_connection || !ret) { stop_core(); } if (!ret) { - if (error_ == Error::Success) { error_ = Error::Unknown; } + if (error_ == Error::Success) + { + OERRLOG("HTTPLIB process_socket unknown error"); 
+ error_ = Error::Unknown; + } } return ret; @@ -5870,6 +5885,7 @@ inline bool SSLClient::initialize_ssl(Socket &socket) { [&](SSL *ssl) { if (server_certificate_verification_) { if (!load_certs()) { + OERRLOG("HTTPLIB Error loading ssl certs"); error_ = Error::SSLLoadingCerts; return false; } @@ -5877,6 +5893,7 @@ inline bool SSLClient::initialize_ssl(Socket &socket) { } if (SSL_connect(ssl) != 1) { + OERRLOG("HTTPLIB Error connecting ssl"); error_ = Error::SSLConnection; return false; } @@ -5885,6 +5902,7 @@ inline bool SSLClient::initialize_ssl(Socket &socket) { verify_result_ = SSL_get_verify_result(ssl); if (verify_result_ != X509_V_OK) { + OERRLOG("HTTPLIB Error verifying server certificate SSL_get_verify_result %ld", verify_result_); error_ = Error::SSLServerVerification; return false; } @@ -5892,11 +5910,13 @@ inline bool SSLClient::initialize_ssl(Socket &socket) { auto server_cert = SSL_get_peer_certificate(ssl); if (server_cert == nullptr) { + OERRLOG("HTTPLIB Error getting server certificate SSL_get_peer_certificate"); error_ = Error::SSLServerVerification; return false; } if (!verify_host(server_cert)) { + OERRLOG("HTTPLIB Error self verifying server certificate verify_host"); X509_free(server_cert); error_ = Error::SSLServerVerification; return false; diff --git a/system/jlib/jsecrets.cpp b/system/jlib/jsecrets.cpp index a6b63eb0d25..89ac8dc735b 100644 --- a/system/jlib/jsecrets.cpp +++ b/system/jlib/jsecrets.cpp @@ -256,6 +256,7 @@ class CVault StringBuffer clientToken; time_t clientTokenExpiration = 0; bool clientTokenRenewable = false; + bool verify_server = true; public: CVault(IPropertyTree *vault) @@ -263,14 +264,24 @@ class CVault cache.setown(createPTree()); StringBuffer url; replaceEnvVariables(url, vault->queryProp("@url"), false); + PROGLOG("vault url %s", url.str()); if (url.length()) splitUrlSchemeHostPort(url.str(), username, password, schemeHostPort, path); + + if (username.length() || password.length()) + WARNLOG("vault: unexpected use of basic auth in url, user=%s", username.str()); + name.set(vault->queryProp("@name")); kind = getSecretType(vault->queryProp("@kind")); vaultNamespace.set(vault->queryProp("@namespace")); if (vaultNamespace.length()) + { addPathSepChar(vaultNamespace, '/'); + PROGLOG("vault: namespace %s", vaultNamespace.str()); + } + verify_server = vault->getPropBool("@verify_server", true); + PROGLOG("Vault: httplib verify_server=%s", boolToStr(verify_server)); //set up vault client auth [appRole, clientToken (aka "token from the sky"), or kubernetes auth] appRoleId.set(vault->queryProp("@appRoleId")); @@ -336,7 +347,9 @@ class CVault void processClientTokenResponse(httplib::Result &res) { if (!res) - vaultAuthError("missing login response"); + vaultAuthErrorV("missing login response, error %d", res.error()); + if (res.error()!=0) + OERRLOG("JSECRETS login calling HTTPLIB POST returned error %d", res.error()); if (res->status != 200) vaultAuthErrorV("[%d](%d) - response: %s", res->status, res.error(), res->body.c_str()); const char *json = res->body.c_str(); @@ -363,6 +376,7 @@ class CVault { if (clientTokenExpiration==0) return false; + double remaining = difftime(clientTokenExpiration, time(nullptr)); if (remaining <= 0) { @@ -391,6 +405,8 @@ class CVault std::string json; json.append("{\"jwt\": \"").append(login_token.str()).append("\", \"role\": \"").append(k8sAuthRole.str()).append("\"}"); httplib::Client cli(schemeHostPort.str()); + cli.enable_server_certificate_verification(verify_server); + if (username.length() && password.length()) 
cli.set_basic_auth(username, password); httplib::Headers headers; @@ -418,12 +434,16 @@ class CVault std::string json; json.append("{\"role_id\": \"").append(appRoleId).append("\", \"secret_id\": \"").append(appRoleSecretId).append("\"}"); + httplib::Client cli(schemeHostPort.str()); + cli.enable_server_certificate_verification(verify_server); + if (username.length() && password.length()) cli.set_basic_auth(username, password); httplib::Headers headers; if (vaultNamespace.length()) headers.emplace("X-Vault-Namespace", vaultNamespace.str()); + httplib::Result res = cli.Post("/v1/auth/approle/login", headers, json, "application/json"); processClientTokenResponse(res); } @@ -480,6 +500,8 @@ class CVault checkAuthentication(permissionDenied); httplib::Client cli(schemeHostPort.str()); + cli.enable_server_certificate_verification(verify_server); + if (username.length() && password.length()) cli.set_basic_auth(username.str(), password.str()); @@ -490,6 +512,7 @@ class CVault headers.emplace("X-Vault-Namespace", vaultNamespace.str()); httplib::Result res = cli.Get(location, headers); + if (res) { if (res->status == 200) @@ -504,15 +527,15 @@ class CVault //try again forcing relogin, but only once. Just in case the token was invalidated but hasn't passed expiration time (for example max usage count exceeded). if (permissionDenied==false) return requestSecretAtLocation(rkind, content, location, secret, version, true); - OERRLOG("Vault %s permission denied accessing secret (check namespace=%s?) %s.%s [%d](%d) - response: %s", name.str(), vaultNamespace.str(), secret, version ? version : "", res->status, res.error(), res->body.c_str()); + OERRLOG("Vault %s permission denied accessing secret (check namespace=%s?) %s.%s location %s [%d](%d) - response: %s", name.str(), vaultNamespace.str(), secret, version ? version : "", location ? location : "null", res->status, res.error(), res->body.c_str()); } else { - OERRLOG("Vault %s error accessing secret %s.%s [%d](%d) - response: %s", name.str(), secret, version ? version : "", res->status, res.error(), res->body.c_str()); + OERRLOG("Vault %s error accessing secret %s.%s location %s [%d](%d) - response: %s", name.str(), secret, version ? version : "", location ? location : "null", res->status, res.error(), res->body.c_str()); } } else - OERRLOG("Error: Vault %s http error (%d) accessing secret %s.%s", name.str(), res.error(), secret, version ? version : ""); + OERRLOG("Error: Vault %s http error (%d) accessing secret %s.%s location %s", name.str(), res.error(), secret, version ? version : "", location ? 
location : "null"); return false; } bool requestSecret(CVaultKind &rkind, StringBuffer &content, const char *secret, const char *version) diff --git a/tools/esdlcomp/CMakeLists.txt b/tools/esdlcomp/CMakeLists.txt index bf855bec51a..a38fda7ba4c 100644 --- a/tools/esdlcomp/CMakeLists.txt +++ b/tools/esdlcomp/CMakeLists.txt @@ -52,6 +52,7 @@ if (CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_CLANG) ADD_DEFINITIONS( -O0 ) set_source_files_properties(${CMAKE_CURRENT_BINARY_DIR}/esdllex.cpp PROPERTIES COMPILE_FLAGS "-Wno-sign-compare -Wno-unused-function -Wno-unneeded-internal-declaration") set_source_files_properties(esdlcomp.cpp PROPERTIES COMPILE_FLAGS "-Wno-unused-function") + set_source_files_properties(esdlgram.cpp PROPERTIES COMPILE_FLAGS "-Wno-free-nonheap-object") endif () HPCC_ADD_LIBRARY ( esdlcomp SHARED ${SRCS} ) diff --git a/tools/hidl/CMakeLists.txt b/tools/hidl/CMakeLists.txt index 8843803e13e..07e52b208ea 100644 --- a/tools/hidl/CMakeLists.txt +++ b/tools/hidl/CMakeLists.txt @@ -51,6 +51,11 @@ include_directories ( set_source_files_properties (${CMAKE_CURRENT_BINARY_DIR}/hidlgram.cpp PROPERTIES COMPILE_FLAGS -fno-strict-aliasing) SET (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${STRICT_CXX_FLAGS}") + +if (CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_CLANG) + set_source_files_properties(hidlgram.cpp PROPERTIES COMPILE_FLAGS "-Wno-free-nonheap-object") +endif () + ADD_DEFINITIONS( -D_CONSOLE ) HPCC_ADD_EXECUTABLE ( hidl ${SRCS} ) #install ( TARGETS hidl RUNTIME DESTINATION ${EXEC_DIR} ) diff --git a/tools/roxie/extract-roxie-timings.py b/tools/roxie/extract-roxie-timings.py index d1c2b314e5b..1841abb4011 100755 --- a/tools/roxie/extract-roxie-timings.py +++ b/tools/roxie/extract-roxie-timings.py @@ -85,10 +85,12 @@ def printRow(curRow): parser.add_argument("--all", "-a", help="Combine all services into a single result", action='store_true') parser.add_argument("--nosummary", "-n", help="Avoid including a summary", action='store_true') parser.add_argument("--summaryonly", "-s", help="Only generate a summary", action='store_true') + parser.add_argument("--ignorecase", "-i", help="Use case-insensitive query names", action='store_true') args = parser.parse_args() combineServices = args.all suppressDetails = args.summaryonly - reportSummary = not args.nosummary or args.summayonly + reportSummary = not args.nosummary or args.summaryonly + ignoreQueryCase = args.ignorecase csv.field_size_limit(0x100000) with open(args.filename, encoding='latin1') as csv_file: @@ -104,6 +106,9 @@ def printRow(curRow): mapping = rowText.split(); serviceName = completeMatch.group(1) + if ignoreQueryCase: + serviceName = serviceName.lower() + idMatch = idPattern.search(mapping[0]) if idMatch: if combineServices: diff --git a/tools/tagging/gorc.sh b/tools/tagging/gorc.sh index b7330672349..62e13e19ba6 100755 --- a/tools/tagging/gorc.sh +++ b/tools/tagging/gorc.sh @@ -46,5 +46,5 @@ read -n 1 -s for f in $all ; do cd $gitroot/$f echo "Process $f" - $hpccdir/cmake_modules/go_rc.sh $* + $hpccdir/cmake_modules/go_rc.sh done diff --git a/vcpkg b/vcpkg index 348818aeb27..d6209d9f6c8 160000 --- a/vcpkg +++ b/vcpkg @@ -1 +1 @@ -Subproject commit 348818aeb27f6204e3abb185c11094c773f0066b +Subproject commit d6209d9f6c80a9bbbf195f3bf3fbdce72462887e diff --git a/vcpkg.json b/vcpkg.json index 0a64fdb9430..80a1a90c3b8 100644 --- a/vcpkg.json +++ b/vcpkg.json @@ -1,7 +1,7 @@ { "$schema": "https://raw.githubusercontent.com/microsoft/vcpkg/master/scripts/vcpkg.schema.json", "name": "hpcc-platform", - "version": "8.8.0", + "version":
"8.12.0", "dependencies": [ "apr", "apr-util", @@ -100,4 +100,4 @@ }, "zlib" ] -} \ No newline at end of file +}