From d2f9d51b0fb89d1cc78e31e49d5a197199a4e784 Mon Sep 17 00:00:00 2001 From: Qiu Jian Date: Sun, 24 Nov 2024 07:40:29 +0800 Subject: [PATCH] fix: slow update tc --- go.mod | 12 +- go.sum | 25 +- pkg/agent/server/flowman.go | 64 +- pkg/agent/server/guest.go | 13 +- pkg/agent/server/hostlocal.go | 8 +- pkg/agent/server/tcman.go | 19 +- pkg/agent/server/watch.go | 48 +- pkg/agent/utils/flowset.go | 30 +- pkg/agent/utils/flowsource.go | 2 +- .../github.com/fsnotify/fsnotify/.cirrus.yml | 14 + .../fsnotify/fsnotify/.editorconfig | 12 - .../fsnotify/fsnotify/.gitattributes | 1 - .../github.com/fsnotify/fsnotify/.gitignore | 14 +- vendor/github.com/fsnotify/fsnotify/AUTHORS | 62 -- .../github.com/fsnotify/fsnotify/CHANGELOG.md | 220 +++++- .../fsnotify/fsnotify/CONTRIBUTING.md | 162 +++- vendor/github.com/fsnotify/fsnotify/LICENSE | 47 +- vendor/github.com/fsnotify/fsnotify/README.md | 228 ++++-- .../fsnotify/fsnotify/backend_fen.go | 484 ++++++++++++ .../fsnotify/fsnotify/backend_inotify.go | 658 ++++++++++++++++ .../fsnotify/fsnotify/backend_kqueue.go | 733 ++++++++++++++++++ .../fsnotify/fsnotify/backend_other.go | 23 + .../fsnotify/fsnotify/backend_windows.go | 682 ++++++++++++++++ vendor/github.com/fsnotify/fsnotify/fen.go | 38 - .../github.com/fsnotify/fsnotify/fsnotify.go | 495 +++++++++++- .../fsnotify/fsnotify/fsnotify_unsupported.go | 36 - .../github.com/fsnotify/fsnotify/inotify.go | 351 --------- .../fsnotify/fsnotify/inotify_poller.go | 187 ----- .../fsnotify/fsnotify/internal/darwin.go | 39 + .../fsnotify/internal/debug_darwin.go | 57 ++ .../fsnotify/internal/debug_dragonfly.go | 33 + .../fsnotify/internal/debug_freebsd.go | 42 + .../fsnotify/internal/debug_kqueue.go | 32 + .../fsnotify/fsnotify/internal/debug_linux.go | 56 ++ .../fsnotify/internal/debug_netbsd.go | 25 + .../fsnotify/internal/debug_openbsd.go | 28 + .../fsnotify/internal/debug_solaris.go | 45 ++ .../fsnotify/internal/debug_windows.go | 40 + .../fsnotify/fsnotify/internal/freebsd.go | 31 + .../fsnotify/fsnotify/internal/internal.go | 2 + .../fsnotify/fsnotify/internal/unix.go | 31 + .../fsnotify/fsnotify/internal/unix2.go | 7 + .../fsnotify/fsnotify/internal/windows.go | 41 + vendor/github.com/fsnotify/fsnotify/kqueue.go | 535 ------------- .../fsnotify/fsnotify/open_mode_bsd.go | 12 - .../fsnotify/fsnotify/open_mode_darwin.go | 13 - .../fsnotify/fsnotify/system_bsd.go | 7 + .../fsnotify/fsnotify/system_darwin.go | 8 + .../github.com/fsnotify/fsnotify/windows.go | 586 -------------- vendor/modules.txt | 15 +- .../pkg/apis/compute/cloudaccount_const.go | 4 +- .../cloudmux/pkg/apis/compute/guest_const.go | 2 + .../x/cloudmux/pkg/apis/compute/host_const.go | 2 + .../pkg/apis/compute/storage_const.go | 11 + .../x/cloudmux/pkg/cloudprovider/instance.go | 7 + .../pkg/cloudprovider/mount_target.go | 5 + .../x/cloudmux/pkg/cloudprovider/resources.go | 15 + .../x/cloudmux/pkg/multicloud/esxi/host.go | 19 + .../pkg/multicloud/esxi/storagecache.go | 1 + .../x/cloudmux/pkg/multicloud/host_base.go | 6 + .../cloudmux/pkg/multicloud/instance_base.go | 4 + .../x/cloudmux/pkg/multicloud/nas_base.go | 9 + vendor/yunion.io/x/jsonutils/jsonutils.go | 33 +- .../x/onecloud/pkg/apis/compute/api.go | 7 + .../onecloud/pkg/apis/compute/cloudaccount.go | 8 +- .../pkg/apis/compute/cloudaccount_const.go | 13 +- .../pkg/apis/compute/cloudprovider_quota.go | 2 - .../x/onecloud/pkg/apis/compute/container.go | 36 +- .../x/onecloud/pkg/apis/compute/disk.go | 28 + .../x/onecloud/pkg/apis/compute/disk_const.go | 1 + 
.../x/onecloud/pkg/apis/compute/filesystem.go | 11 +- .../onecloud/pkg/apis/compute/guest_const.go | 4 + .../x/onecloud/pkg/apis/compute/guests.go | 27 +- .../x/onecloud/pkg/apis/compute/host.go | 6 +- .../x/onecloud/pkg/apis/compute/host_const.go | 5 + .../pkg/apis/compute/isolated_device.go | 1 + .../onecloud/pkg/apis/compute/loadbalancer.go | 6 + .../x/onecloud/pkg/apis/compute/secgroup.go | 5 - .../x/onecloud/pkg/apis/compute/snapshot.go | 3 + .../x/onecloud/pkg/apis/compute/storage.go | 10 +- .../pkg/apis/compute/storage_const.go | 13 + .../pkg/apis/compute/zz_generated.model.go | 51 +- vendor/yunion.io/x/onecloud/pkg/apis/const.go | 8 + .../x/onecloud/pkg/apis/container.go | 57 +- .../x/onecloud/pkg/apis/host/container.go | 32 +- .../x/onecloud/pkg/apis/identity/aksk.go | 1 + .../x/onecloud/pkg/apis/identity/consts.go | 1 + .../onecloud/pkg/apis/identity/credential.go | 12 + .../x/onecloud/pkg/apis/monitor/alert.go | 38 +- .../pkg/apis/monitor/alertdashboard.go | 11 +- .../x/onecloud/pkg/apis/monitor/alertjoint.go | 28 + .../pkg/apis/monitor/alertnotification.go | 11 +- .../onecloud/pkg/apis/monitor/alertrecord.go | 1 + .../x/onecloud/pkg/apis/monitor/template.go | 8 +- .../pkg/apis/monitor/unifiedmonitor_query.go | 23 +- .../pkg/apis/monitor/zz_generated.model.go | 15 +- .../x/onecloud/pkg/apis/scheduler/api.go | 15 +- .../pkg/cloudcommon/cmdline/parser.go | 17 + .../x/onecloud/pkg/cloudcommon/consts/db.go | 10 + .../pkg/cloudcommon/db/opslog_const.go | 6 + .../pkg/cloudcommon/db/sharablebase.go | 10 +- .../pkg/cloudcommon/db/standalone_anon.go | 29 +- .../onecloud/pkg/cloudcommon/db/statusbase.go | 9 +- .../pkg/cloudcommon/db/taskman/noop_task.go | 50 ++ .../pkg/cloudcommon/db/taskman/subtasks.go | 1 + .../pkg/cloudcommon/db/taskman/tasks.go | 171 ++-- .../pkg/cloudcommon/options/options.go | 8 +- .../x/onecloud/pkg/compute/models/app.go | 2 + .../pkg/compute/models/app_environment.go | 2 + .../pkg/compute/models/backup_storage.go | 2 + .../pkg/compute/models/baremetalagents.go | 2 + .../pkg/compute/models/capabilities.go | 15 +- .../pkg/compute/models/cloudaccounts.go | 20 +- .../pkg/compute/models/cloudimages.go | 2 + .../compute/models/cloudprovider_quotas.go | 1 + .../compute/models/cloudprovidercapacities.go | 1 + .../onecloud/pkg/compute/models/cloudsync.go | 121 ++- .../onecloud/pkg/compute/models/containers.go | 317 +++++++- .../pkg/compute/models/dbinstance_accounts.go | 2 + .../pkg/compute/models/dbinstance_backups.go | 2 + .../compute/models/dbinstance_databases.go | 2 + .../compute/models/dbinstance_parameters.go | 2 + .../compute/models/dbinstance_privileges.go | 2 + .../pkg/compute/models/dbinstance_skus.go | 2 + .../pkg/compute/models/dbinstances.go | 2 + .../pkg/compute/models/diskbackups.go | 2 + .../x/onecloud/pkg/compute/models/disks.go | 18 +- .../onecloud/pkg/compute/models/dnsrecords.go | 2 + .../x/onecloud/pkg/compute/models/dnsvpcs.go | 2 + .../compute/models/elasticcache_accounts.go | 3 +- .../pkg/compute/models/elasticcache_acls.go | 3 +- .../compute/models/elasticcache_backups.go | 3 +- .../compute/models/elasticcache_parameters.go | 3 +- .../pkg/compute/models/elasticcache_skus.go | 2 + .../onecloud/pkg/compute/models/elasticips.go | 2 + .../pkg/compute/models/external_projects.go | 2 + .../onecloud/pkg/compute/models/filesystem.go | 65 +- .../onecloud/pkg/compute/models/globalvpcs.go | 2 + .../x/onecloud/pkg/compute/models/groups.go | 2 + .../pkg/compute/models/guest_actions.go | 109 ++- .../pkg/compute/models/guest_queries.go | 1 + 
.../x/onecloud/pkg/compute/models/guests.go | 60 +- .../x/onecloud/pkg/compute/models/hosts.go | 199 ++++- .../onecloud/pkg/compute/models/hostwires.go | 2 +- .../pkg/compute/models/instance_backup.go | 2 + .../pkg/compute/models/isolated_devices.go | 475 ++++++++++-- .../pkg/compute/models/kube_clusters.go | 2 + .../pkg/compute/models/kube_node_pools.go | 2 + .../onecloud/pkg/compute/models/kube_nodes.go | 2 + .../pkg/compute/models/loadbalanceracls.go | 2 + .../pkg/compute/models/loadbalanceragents.go | 2 + .../models/loadbalancerbackendgroups.go | 2 + .../compute/models/loadbalancerbackends.go | 2 + .../models/loadbalancercertificates.go | 2 + .../compute/models/loadbalancerclusters.go | 2 + .../models/loadbalancerlistenerrules.go | 2 + .../compute/models/loadbalancerlisteners.go | 2 + .../pkg/compute/models/loadbalancers.go | 60 ++ .../x/onecloud/pkg/compute/models/mongodb.go | 2 + .../onecloud/pkg/compute/models/natdtable.go | 2 + .../pkg/compute/models/natgateways.go | 2 + .../onecloud/pkg/compute/models/natstable.go | 2 + .../pkg/compute/models/net_tap_flows.go | 2 + .../pkg/compute/models/net_tap_services.go | 2 + .../pkg/compute/models/netinterfaces.go | 2 +- .../pkg/compute/models/networkaddresses.go | 2 + .../pkg/compute/models/networkinterfaces.go | 2 + .../x/onecloud/pkg/compute/models/networks.go | 4 +- .../onecloud/pkg/compute/models/pod_driver.go | 5 +- .../x/onecloud/pkg/compute/models/purge.go | 20 + .../pkg/compute/models/regiondrivers.go | 1 + .../pkg/compute/models/resource_syncstatus.go | 3 +- .../pkg/compute/models/scaling_activity.go | 2 + .../pkg/compute/models/scaling_group.go | 2 + .../pkg/compute/models/scaling_policy.go | 2 + .../pkg/compute/models/secgrouprules.go | 2 + .../onecloud/pkg/compute/models/secgroups.go | 2 + .../pkg/compute/models/server_skus.go | 2 + .../pkg/compute/models/service_catalog.go | 2 + .../pkg/compute/models/snapshotpolicy.go | 2 + .../onecloud/pkg/compute/models/snapshots.go | 5 + .../x/onecloud/pkg/compute/models/specs.go | 15 +- .../pkg/compute/models/sslcertificate.go | 2 + .../pkg/compute/models/storageresource.go | 11 + .../x/onecloud/pkg/compute/models/storages.go | 105 ++- .../x/onecloud/pkg/compute/models/vpcs.go | 10 +- .../pkg/compute/models/waf_regexsets.go | 2 + .../x/onecloud/pkg/compute/options/options.go | 2 +- .../pkg/hostman/guestman/desc/desc.go | 3 +- .../pkg/hostman/hostutils/hostutils.go | 4 +- .../isolated_device/isolated_device.go | 108 ++- .../x/onecloud/pkg/hostman/options/options.go | 18 +- .../pkg/hostman/system_service/telegraf.go | 149 +++- .../mcclient/modules/compute/mod_metadatas.go | 22 +- .../modules/identity/mod_credentials.go | 15 + .../pkg/mcclient/modules/monitor/alert.go | 4 +- .../pkg/mcclient/modules/monitor/helper.go | 19 +- .../modules/scheduler/mod_scheduler.go | 13 + .../x/onecloud/pkg/mcclient/options/base.go | 4 +- .../pkg/mcclient/options/webconsole.go | 5 + .../x/onecloud/pkg/monitor/tsdb/models.go | 4 +- .../pkg/monitor/tsdb/query_endpoint.go | 2 + .../onecloud/pkg/util/fileutils2/fileutils.go | 65 +- .../x/onecloud/pkg/util/logclient/consts.go | 3 + .../x/onecloud/pkg/util/pod/cgroup.go | 6 + .../yunion.io/x/onecloud/pkg/util/pod/pod.go | 144 +++- .../onecloud/pkg/util/pod/securitycontext.go | 57 ++ .../x/onecloud/pkg/util/sysutils/nics.go | 12 +- 208 files changed, 7083 insertions(+), 2613 deletions(-) create mode 100644 vendor/github.com/fsnotify/fsnotify/.cirrus.yml delete mode 100644 vendor/github.com/fsnotify/fsnotify/.editorconfig delete mode 100644 
vendor/github.com/fsnotify/fsnotify/.gitattributes delete mode 100644 vendor/github.com/fsnotify/fsnotify/AUTHORS create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_fen.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_inotify.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_kqueue.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_other.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_windows.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/fen.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/inotify.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/inotify_poller.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/darwin.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/freebsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/internal.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/unix.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/unix2.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/windows.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/kqueue.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go create mode 100644 vendor/github.com/fsnotify/fsnotify/system_bsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/system_darwin.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/windows.go create mode 100644 vendor/yunion.io/x/onecloud/pkg/apis/monitor/alertjoint.go create mode 100644 vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/taskman/noop_task.go create mode 100644 vendor/yunion.io/x/onecloud/pkg/util/pod/securitycontext.go diff --git a/go.mod b/go.mod index d8492826..f5d6f2af 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.21 require ( github.com/coreos/go-iptables v0.6.0 github.com/digitalocean/go-openvswitch v0.0.20240920 - github.com/fsnotify/fsnotify v1.5.4 + github.com/fsnotify/fsnotify v1.8.0 github.com/golang/protobuf v1.5.3 github.com/mitchellh/go-homedir v1.1.0 github.com/pkg/errors v0.9.1 @@ -17,10 +17,10 @@ require ( golang.org/x/net v0.21.0 google.golang.org/grpc v1.62.0 google.golang.org/protobuf v1.32.0 - yunion.io/x/jsonutils v1.0.1-0.20240203102553-4096f103b401 + yunion.io/x/jsonutils v1.0.1-0.20240930100528-1671a2d0d22f yunion.io/x/log v1.0.1-0.20240305175729-7cf2d6cd5a91 - yunion.io/x/onecloud v0.0.0-20241009134947-8e0507d0029f - yunion.io/x/pkg v1.10.1-0.20240905110705-77c46e716318 + yunion.io/x/onecloud v0.0.0-20241217113931-4132f245acc4 + yunion.io/x/pkg v1.10.2 ) require ( @@ -119,8 +119,8 @@ require ( moul.io/http2curl/v2 v2.3.0 // 
indirect sigs.k8s.io/structured-merge-diff/v4 v4.0.1 // indirect sigs.k8s.io/yaml v1.2.0 // indirect - yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20240929084351-30a36ccf2201 // indirect - yunion.io/x/executor v0.0.0-20230705125604-c5ac3141db32 // indirect + yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20241216075633-a8665686cf63 // indirect + yunion.io/x/executor v0.0.0-20241205080005-48f5b1212256 // indirect yunion.io/x/s3cli v0.0.0-20190917004522-13ac36d8687e // indirect yunion.io/x/sqlchemy v1.1.3-0.20240926163039-d41512b264e1 // indirect yunion.io/x/structarg v0.0.0-20231017124457-df4d5009457c // indirect diff --git a/go.sum b/go.sum index 72575004..b126b999 100644 --- a/go.sum +++ b/go.sum @@ -100,8 +100,8 @@ github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYF github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/gernest/wow v0.1.0/go.mod h1:dEPabJRi5BneI1Nev1VWo0ZlcTWibHWp43qxKms4elY= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= @@ -605,7 +605,6 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -783,25 +782,25 @@ sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20240929084351-30a36ccf2201 h1:Qe+41cwNdSangAUPCEMV4YQWB8Twdusn1hZfdciK3Ro= -yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20240929084351-30a36ccf2201/go.mod h1:rj/pb3DitJlQaQD8UW1oxx/KD+PzDZqoywzqRJaFE9A= -yunion.io/x/executor v0.0.0-20230705125604-c5ac3141db32 h1:v7POYkQwo1XzOxBoIoRVr/k0V9Y5JyjpshlIFa9raug= -yunion.io/x/executor v0.0.0-20230705125604-c5ac3141db32/go.mod h1:Uxuou9WQIeJXNpy7t2fPLL0BYLvLiMvGQwY7Qc6aSws= +yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20241216075633-a8665686cf63 h1:j5XE/WcpbSolZfeQueXmtxiVVxdQWC2Qr9RtNnzEwR0= +yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20241216075633-a8665686cf63/go.mod 
h1:rj/pb3DitJlQaQD8UW1oxx/KD+PzDZqoywzqRJaFE9A= +yunion.io/x/executor v0.0.0-20241205080005-48f5b1212256 h1:kLKQ6zbgPDQflRwoHFAjxNChcbhXIFgsUVLkJwiXu/8= +yunion.io/x/executor v0.0.0-20241205080005-48f5b1212256/go.mod h1:Uxuou9WQIeJXNpy7t2fPLL0BYLvLiMvGQwY7Qc6aSws= yunion.io/x/go-openvswitch v0.0.20240920 h1:imjK9g2EZ3IzBdNZ3UEYobJJYw6bQCvUvgoTOIyis1o= yunion.io/x/go-openvswitch v0.0.20240920/go.mod h1:OAtI/pEmN/EvxlkixiYp2nMQQEtEqzHcpWeE2AW2Bb8= yunion.io/x/jsonutils v0.0.0-20190625054549-a964e1e8a051/go.mod h1:4N0/RVzsYL3kH3WE/H1BjUQdFiWu50JGCFQuuy+Z634= -yunion.io/x/jsonutils v1.0.1-0.20240203102553-4096f103b401 h1:4l6ELFSQ0MBVInscZ8/yOtSWF0cwH5BT1ATN6dCtAqc= -yunion.io/x/jsonutils v1.0.1-0.20240203102553-4096f103b401/go.mod h1:VK4Z93dgiKgAijcSqbMKmGaBMJuHulR16Hz4K015ZPo= +yunion.io/x/jsonutils v1.0.1-0.20240930100528-1671a2d0d22f h1:N7V0uLqX9mED1HEPeIKcYpkWglnHL5hyn4ENX+VBeqM= +yunion.io/x/jsonutils v1.0.1-0.20240930100528-1671a2d0d22f/go.mod h1:VK4Z93dgiKgAijcSqbMKmGaBMJuHulR16Hz4K015ZPo= yunion.io/x/log v0.0.0-20190514041436-04ce53b17c6b/go.mod h1:+gauLs73omeJAPlsXcevLsJLKixV+sR/E7WSYTSx1fE= yunion.io/x/log v0.0.0-20190629062853-9f6483a7103d/go.mod h1:LC6f/4FozL0iaAbnFt2eDX9jlsyo3WiOUPm03d7+U4U= yunion.io/x/log v1.0.1-0.20240305175729-7cf2d6cd5a91 h1:inY5o3LDa/zgsIZuPN0HmpzKIsu/lLgsBmMttuDPGj4= yunion.io/x/log v1.0.1-0.20240305175729-7cf2d6cd5a91/go.mod h1:LC6f/4FozL0iaAbnFt2eDX9jlsyo3WiOUPm03d7+U4U= -yunion.io/x/onecloud v0.0.0-20241009134947-8e0507d0029f h1:wTat6DgcTgPW362zaviuWr3novfUHw3KwldfhRUfdko= -yunion.io/x/onecloud v0.0.0-20241009134947-8e0507d0029f/go.mod h1:NxDO0eScgEYp5NJPV8BWURe2/copiNBFqDIBAEBoFZw= +yunion.io/x/onecloud v0.0.0-20241217113931-4132f245acc4 h1:y7VBqX89ZAZmIjYdAb6hvOZhoBdsNOzzPwUARl7wccU= +yunion.io/x/onecloud v0.0.0-20241217113931-4132f245acc4/go.mod h1:bMRZe2muTlMeYiX7yHJwz5pTZtGMHeQK/BIrT4IdXRY= yunion.io/x/pkg v0.0.0-20190620104149-945c25821dbf/go.mod h1:t6rEGG2sQ4J7DhFxSZVOTjNd0YO/KlfWQyK1W4tog+E= yunion.io/x/pkg v0.0.0-20190628082551-f4033ba2ea30/go.mod h1:t6rEGG2sQ4J7DhFxSZVOTjNd0YO/KlfWQyK1W4tog+E= -yunion.io/x/pkg v1.10.1-0.20240905110705-77c46e716318 h1:Fm7I8ypXHxeObY4u/VUGz78NsambemzTZ9fECyGKNi8= -yunion.io/x/pkg v1.10.1-0.20240905110705-77c46e716318/go.mod h1:0Bwxqd9MA3ACi119/l02FprY/o9gHahmYC2bsSbnVpM= +yunion.io/x/pkg v1.10.2 h1:oZhedvlvDsebIWcOvrOMJQ31SxzjxxqUXUBYseBjh7w= +yunion.io/x/pkg v1.10.2/go.mod h1:0Bwxqd9MA3ACi119/l02FprY/o9gHahmYC2bsSbnVpM= yunion.io/x/s3cli v0.0.0-20190917004522-13ac36d8687e h1:v+EzIadodSwkdZ/7bremd7J8J50Cise/HCylsOJngmo= yunion.io/x/s3cli v0.0.0-20190917004522-13ac36d8687e/go.mod h1:0iFKpOs1y4lbCxeOmq3Xx/0AcQoewVPwj62eRluioEo= yunion.io/x/sqlchemy v1.1.3-0.20240926163039-d41512b264e1 h1:HWPqY1I5JSmM6Sks6FyK9hnq/MjL7FDghM6M8DXHob0= diff --git a/pkg/agent/server/flowman.go b/pkg/agent/server/flowman.go index 6c6d63f0..b873aeee 100644 --- a/pkg/agent/server/flowman.go +++ b/pkg/agent/server/flowman.go @@ -15,9 +15,7 @@ package server import ( - "bytes" "context" - "fmt" "reflect" "sync" "sync/atomic" @@ -99,11 +97,26 @@ var ( } ) +func (fm *FlowMan) mergeFlows() *utils.FlowSet { + merge := utils.NewFlowSet() + for _, fs := range fm.flowSets { + merge.Merge(fs) + } + return merge +} + func (fm *FlowMan) doCheck() { + log.Infof("flowman %s: do check waitCount %d", fm.bridge, fm.waitCount) if atomic.LoadInt32(&fm.waitCount) != 0 { return } - defer log.Infof("flowman %s: check done", fm.bridge) + start := time.Now() + + defer func() { + log.Infof("flowman %s: check done %f", fm.bridge, time.Since(start).Seconds()) + }() 
+ + log.Infof("flowman %s: start check", fm.bridge) var err error // fs0: current flows fs0, err := fm.doDumpFlows(excludeOvsTables) @@ -111,34 +124,14 @@ func (fm *FlowMan) doCheck() { log.Errorf("FlowMan doCheck doDumpFlows fail %s", err) return } - fsAdd := utils.NewFlowSet() - fsDel := utils.NewFlowSet() - // flows1: flows to install - // flows1 := []*ovs.Flow{} - for _, fs1 := range fm.flowSets { - for _, f1 := range fs1.Flows() { - // flows1 = append(flows1, f1) - if !fs0.Contains(f1) { - fsAdd.Add(f1) - } - } - } - for _, f0 := range fs0.Flows() { - found := false - for _, fs1 := range fm.flowSets { - if fs1.Contains(f0) { - found = true - break - } - } - if !found { - fsDel.Add(f0) - } - } - flowsAdd := fsAdd.Flows() - flowsDel := fsDel.Flows() + log.Infof("flowman %s: %d flows in table", fm.bridge, fs0.Len()) + + merged := fm.mergeFlows() + log.Infof("flowman %s: %d flows in table and %d flows in memory", fm.bridge, fs0.Len(), merged.Len()) + flowsAdd, flowsDel := fs0.Diff(merged) fm.doCommitChange(flowsAdd, flowsDel) - if len(flowsAdd) > 0 || len(flowsDel) > 0 { + + /*if len(flowsAdd) > 0 || len(flowsDel) > 0 { buf := &bytes.Buffer{} buf.WriteString(fmt.Sprintf("flowman %s: commit:\n", fm.bridge)) //fm.bufWriteFlows(buf, "000-flow", fs0.Flows()) @@ -146,17 +139,18 @@ func (fm *FlowMan) doCheck() { fm.bufWriteFlows(buf, "add-flow", flowsAdd) fm.bufWriteFlows(buf, "del-flow", flowsDel) log.Infof("%s", buf.String()) - } + }*/ } -func (fm *FlowMan) bufWriteFlows(buf *bytes.Buffer, prefix string, flows []*ovs.Flow) { +/*func (fm *FlowMan) bufWriteFlows(buf *bytes.Buffer, prefix string, flows []*ovs.Flow) { for i, f := range flows { txt, _ := f.MarshalText() buf.WriteString(fmt.Sprintf("%s:%2d: %s\n", prefix, i, txt)) } -} +}*/ func (fm *FlowMan) doCommitChange(flowsAdd, flowsDel []*ovs.Flow) error { + log.Infof("FlowMan %s doCommitChange flowsAdd %d flowsDel %d", fm.bridge, len(flowsAdd), len(flowsDel)) ofCli := ovs.New(ovs.Strict(), ovs.Debug(false)).OpenFlow err := ofCli.AddFlowBundle(fm.bridge, func(tx *ovs.FlowTransaction) error { mfs := make([]*ovs.MatchFlow, len(flowsDel)) @@ -291,6 +285,7 @@ func (fm *FlowMan) DelFlow(ctx context.Context, of *ovs.Flow) { } func (fm *FlowMan) SyncFlows(ctx context.Context) { + log.Debugf("flowman %s: SyncFlows", fm.bridge) cmd := &flowManCmd{ Type: flowManCmdSyncFlows, } @@ -298,6 +293,7 @@ func (fm *FlowMan) SyncFlows(ctx context.Context) { } func (fm *FlowMan) updateFlows(ctx context.Context, who string, ofs []*ovs.Flow) { + log.Debugf("flowman %s: updateFlows %s", fm.bridge, who) { v := ctx.Value("waitData") // The caller is responsible for coordinating access @@ -324,7 +320,7 @@ func (fm *FlowMan) updateFlows(ctx context.Context, who string, ofs []*ovs.Flow) fm.sendCmd(ctx, cmd) } -func (fm *FlowMan) waitDecr(ctx context.Context, n int32) { +func (fm *FlowMan) waitDecr(n int32) { atomic.AddInt32(&fm.waitCount, -n) } diff --git a/pkg/agent/server/guest.go b/pkg/agent/server/guest.go index e2d025e8..b7cfbd64 100644 --- a/pkg/agent/server/guest.go +++ b/pkg/agent/server/guest.go @@ -192,7 +192,7 @@ func (g *Guest) clearClassicFlows(ctx context.Context) { g.clearPending() } -func (g *Guest) updateTc(ctx context.Context) { +func (g *Guest) updateTc(ctx context.Context, sync bool) { if g.watcher.tcMan == nil { return } @@ -201,7 +201,7 @@ func (g *Guest) updateTc(ctx context.Context) { d := nic.TcData() data = append(data, d) } - g.watcher.tcMan.AddIfaces(ctx, g.Who(), data) + g.watcher.tcMan.AddIfaces(ctx, g.Who(), data, sync) } func (g *Guest) 
clearTc(ctx context.Context) { @@ -238,13 +238,18 @@ func (g *Guest) clearOvn(ctx context.Context) { ovnMdMan.SetGuestNICs(ctx, g.Id, nil) } -func (g *Guest) UpdateSettings(ctx context.Context) { +func (g *Guest) UpdateSettings(ctx context.Context, sync bool) { + start := time.Now() err := g.refresh(ctx) + log.Infof("guest UpdateSettings refresh %f", time.Since(start).Seconds()) switch err { case nil: g.updateClassicFlows(ctx) - g.updateTc(ctx) + log.Infof("guest UpdateSettings updateClassicFlows %f", time.Since(start).Seconds()) + g.updateTc(ctx, sync) + log.Infof("guest UpdateSettings updateTc %f", time.Since(start).Seconds()) g.updateOvn(ctx) + log.Infof("guest UpdateSettings updateOvn %f", time.Since(start).Seconds()) if g.HostId != "" { g.watcher.agent.HostId(g.HostId) } diff --git a/pkg/agent/server/hostlocal.go b/pkg/agent/server/hostlocal.go index 32537e08..4623beec 100644 --- a/pkg/agent/server/hostlocal.go +++ b/pkg/agent/server/hostlocal.go @@ -69,7 +69,7 @@ func (hl *HostLocal) updateFlows(ctx context.Context) { } } -func (hl *HostLocal) updateTc(ctx context.Context) { +func (hl *HostLocal) updateTc(ctx context.Context, sync bool) { if hl.watcher.tcMan == nil { return } @@ -81,10 +81,10 @@ func (hl *HostLocal) updateTc(ctx context.Context) { } data = append(data, td) } - hl.watcher.tcMan.AddIfaces(ctx, "hostlocal", data) + hl.watcher.tcMan.AddIfaces(ctx, "hostlocal", data, sync) } -func (hl *HostLocal) UpdateSettings(ctx context.Context) { +func (hl *HostLocal) UpdateSettings(ctx context.Context, sync bool) { hl.updateFlows(ctx) - hl.updateTc(ctx) + hl.updateTc(ctx, sync) } diff --git a/pkg/agent/server/tcman.go b/pkg/agent/server/tcman.go index b5ed84a2..150a6344 100644 --- a/pkg/agent/server/tcman.go +++ b/pkg/agent/server/tcman.go @@ -105,12 +105,15 @@ type TcManCmdType int const ( TcManCmdAdd = iota TcManCmdDel + TcManCmdSync ) type TcManCmd struct { typ TcManCmdType who string section *TcManSection + // whether the command is executed synchronously + sync bool } // TODO @@ -216,9 +219,13 @@ func (tm *TcMan) doCmd(ctx context.Context, cmd *TcManCmd) { } else { section.Update(cmd.section) } - tm.doCheckSection(ctx, section) + if cmd.sync { + tm.doCheckSection(ctx, section) + } case TcManCmdDel: delete(tm.book, cmd.who) + case TcManCmdSync: + tm.doIdleCheck(ctx) } } @@ -231,7 +238,7 @@ func (tm *TcMan) sendCmd(ctx context.Context, cmd *TcManCmd) { } } -func (tm *TcMan) AddIfaces(ctx context.Context, who string, data []*utils.TcData) { +func (tm *TcMan) AddIfaces(ctx context.Context, who string, data []*utils.TcData, sync bool) { section := &TcManSection{ pages: map[string]*TcManPage{}, } @@ -253,6 +260,7 @@ func (tm *TcMan) AddIfaces(ctx context.Context, who string, data []*utils.TcData typ: TcManCmdAdd, who: who, section: section, + sync: sync, } tm.sendCmd(ctx, cmd) } @@ -264,3 +272,10 @@ func (tm *TcMan) ClearIfaces(ctx context.Context, who string) { } tm.sendCmd(ctx, cmd) } + +func (tm *TcMan) SyncAll(ctx context.Context) { + cmd := &TcManCmd{ + typ: TcManCmdSync, + } + tm.sendCmd(ctx, cmd) +} diff --git a/pkg/agent/server/watch.go b/pkg/agent/server/watch.go index 530434e1..60a390a5 100644 --- a/pkg/agent/server/watch.go +++ b/pkg/agent/server/watch.go @@ -17,10 +17,12 @@ package server import ( "context" "fmt" - "io/ioutil" + "os" "path" "path/filepath" + "reflect" "regexp" + "runtime" "sync" "time" @@ -36,10 +38,10 @@ import ( var REGEX_UUID = regexp.MustCompile(`^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$`) -type pendingGuest struct { +/*type 
pendingGuest struct { guest *utils.Guest firstSeen time.Time -} +}*/ type wCmd int @@ -127,7 +129,7 @@ func (w *watchEvent) String() string { func (w *serversWatcher) scan(ctx context.Context) { serversPath := w.hostConfig.ServersPath - fis, err := ioutil.ReadDir(serversPath) + fis, err := os.ReadDir(serversPath) if err != nil { log.Errorf("scan servers path %s failed: %s", serversPath, err) return @@ -138,12 +140,16 @@ func (w *serversWatcher) scan(ctx context.Context) { } id := fi.Name() if REGEX_UUID.MatchString(id) { + guestStart := time.Now() + log.Infof("scan guest %s", id) path := path.Join(serversPath, id) g, err := w.addGuestWatch(id, path) if err != nil { - log.Errorf("watch guest failed during scan: %s: %s", path, err) + log.Errorf("inotify: watch guest failed during scan: %s: %s", path, err) } - g.UpdateSettings(ctx) + log.Infof("end of scan guest %s addGuestWatch: %f", id, time.Since(guestStart).Seconds()) + g.UpdateSettings(ctx, false) + log.Infof("end of scan guest %s: %f", id, time.Since(guestStart).Seconds()) } } } @@ -165,14 +171,23 @@ func (w *serversWatcher) addGuestWatch(id, path string) (*Guest, error) { return g, err } +func GetFunctionName(i interface{}) string { + return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() +} + func (w *serversWatcher) withWait(ctx context.Context, f func(context.Context)) { waitData := map[string]*FlowManWaitData{} ctx = context.WithValue(ctx, "waitData", waitData) + start := time.Now() + funcName := GetFunctionName(f) + log.Debugf("[serversWatcher] start wait %s context ....", funcName) f(ctx) + log.Debugf("[serversWatcher] end wait %s context %f....", funcName, time.Since(start).Seconds()) for _, wd := range waitData { - wd.FlowMan.waitDecr(ctx, wd.Count) + wd.FlowMan.waitDecr(wd.Count) wd.FlowMan.SyncFlows(ctx) } + w.tcMan.SyncAll(ctx) } func (w *serversWatcher) hasRecentPending() bool { @@ -198,7 +213,7 @@ func (w *serversWatcher) Start(ctx context.Context, agent *AgentServer) { // start watcher before scan w.watcher, err = fsnotify.NewWatcher() if err != nil { - log.Errorf("creating inotify watcher failed: %s", err) + log.Errorf("creating fsnotify watcher failed: %s", err) return } defer w.watcher.Close() @@ -225,8 +240,9 @@ func (w *serversWatcher) Start(ctx context.Context, agent *AgentServer) { // init scan w.hostLocal = NewHostLocal(w) w.withWait(ctx, func(ctx context.Context) { - w.hostLocal.UpdateSettings(ctx) + w.hostLocal.UpdateSettings(ctx, false) w.scan(ctx) + log.Infof("serversWatcher.Start: finished initial guest scan") }) refreshTicker := time.NewTicker(WatcherRefreshRate) @@ -244,11 +260,12 @@ func (w *serversWatcher) Start(ctx context.Context, agent *AgentServer) { log.Errorf("fsnotity.watch.Events error") goto out } + log.Infof("received inotify event") wev := w.watchEvent(&ev) if wev == nil { - log.Debugf("inotify event ignored: %s", ev) + log.Debugf("ignore inotify event: %s", ev) } else { - log.Debugf("to handle inotify event %s %s", ev, wev) + log.Debugf("handle inotify event %s %s", ev, wev) guestId := wev.guestId guestPath := wev.guestPath switch wev.evType { @@ -258,7 +275,7 @@ func (w *serversWatcher) Start(ctx context.Context, agent *AgentServer) { if err != nil { log.Errorf("watch guest failed: %s: %s", guestPath, err) } - g.UpdateSettings(ctx) + g.UpdateSettings(ctx, true) case watchEventTypeDelServerDir: if g, ok := w.guests[guestId]; ok { // this is needed for containers } 
log.Infof("guest path deleted: %s", guestPath) case watchEventTypeUpdServer: + log.Infof("watchEventTypeUpdServer %s", guestId) if g, ok := w.guests[guestId]; ok { - g.UpdateSettings(ctx) + g.UpdateSettings(ctx, true) } else { log.Warningf("unexpected guest update event: %s", guestPath) } @@ -286,14 +304,14 @@ func (w *serversWatcher) Start(ctx context.Context, agent *AgentServer) { w.withWait(ctx, func(ctx context.Context) { for _, g := range w.guests { if g.IsPending() { - g.UpdateSettings(ctx) + g.UpdateSettings(ctx, false) } } }) case <-refreshTicker.C: log.Infof("watcher refresh time ;)") w.withWait(ctx, func(ctx context.Context) { - w.hostLocal.UpdateSettings(ctx) + w.hostLocal.UpdateSettings(ctx, false) w.scan(ctx) // for _, g := range w.guests { // g.UpdateSettings(ctx) diff --git a/pkg/agent/utils/flowset.go b/pkg/agent/utils/flowset.go index 0017af9a..bfa2557b 100644 --- a/pkg/agent/utils/flowset.go +++ b/pkg/agent/utils/flowset.go @@ -15,12 +15,23 @@ package utils import ( + "sort" "strings" "github.com/digitalocean/go-openvswitch/ovs" + "yunion.io/x/pkg/errors" ) +type sortedFlows []*ovs.Flow + +func (a sortedFlows) Len() int { return len(a) } +func (a sortedFlows) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a sortedFlows) Less(i, j int) bool { + result := CompareOVSFlow(a[i], a[j]) + return result < 0 +} + type FlowSet struct { flows []*ovs.Flow } @@ -30,11 +41,8 @@ func NewFlowSet() *FlowSet { } func NewFlowSetFromList(flows []*ovs.Flow) *FlowSet { - fs := NewFlowSet() - for _, of := range flows { - fs.Add(of) - } - return fs + sort.Sort(sortedFlows(flows)) + return &FlowSet{flows: flows} } func (fs *FlowSet) findFlowIndex(f *ovs.Flow) (int, bool) { @@ -58,6 +66,10 @@ func (fs *FlowSet) Flows() []*ovs.Flow { return fs.flows } +func (fs *FlowSet) Len() int { + return len(fs.flows) +} + func (fs *FlowSet) Add(f *ovs.Flow) bool { i, find := fs.findFlowIndex(f) if !find { @@ -96,15 +108,15 @@ func (fs *FlowSet) DumpFlows() (string, error) { return buf.String(), nil } -/*func (fs *FlowSet) Merge(fs1 *FlowSet) { +func (fs *FlowSet) Merge(fs1 *FlowSet) { for _, f := range fs1.flows { fs.Add(f) } -}*/ +} // Diff return dels,adds that are needed to make the current set has the same // elements as with fs1 -/*func (fs0 *FlowSet) Diff(fs1 *FlowSet) (flowsAdd, flowsDel []*ovs.Flow) { +func (fs0 *FlowSet) Diff(fs1 *FlowSet) (flowsAdd, flowsDel []*ovs.Flow) { flowsAdd = []*ovs.Flow{} flowsDel = []*ovs.Flow{} @@ -130,4 +142,4 @@ func (fs *FlowSet) DumpFlows() (string, error) { flowsAdd = append(flowsAdd, fs1.flows[j:]...) 
} return -}*/ +} diff --git a/pkg/agent/utils/flowsource.go b/pkg/agent/utils/flowsource.go index 6a9dcd68..2ffb009d 100644 --- a/pkg/agent/utils/flowsource.go +++ b/pkg/agent/utils/flowsource.go @@ -38,7 +38,7 @@ type FlowSource interface { func F(table, priority int, matches, actions string) *ovs.Flow { txt := fmt.Sprintf("table=%d,priority=%d,%s,actions=%s", table, priority, matches, actions) - log.Debugln(txt) + // log.Debugln(txt) of := &ovs.Flow{} err := of.UnmarshalText([]byte(txt)) if err != nil { diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml new file mode 100644 index 00000000..f4e7dbf3 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -0,0 +1,14 @@ +freebsd_task: + name: 'FreeBSD' + freebsd_instance: + image_family: freebsd-14-1 + install_script: + - pkg update -f + - pkg install -y go + test_script: + # run tests as user "cirrus" instead of root + - pw useradd cirrus -m + - chown -R cirrus:cirrus . + - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./... diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig deleted file mode 100644 index fad89585..00000000 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*.go] -indent_style = tab -indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] -indent_style = space -indent_size = 2 -insert_final_newline = true -trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001b..00000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 4cd0cbaf..daea9dd6 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -1,6 +1,10 @@ -# Setup a Global .gitignore for OS and editor generated files: -# https://help.github.com/articles/ignoring-files -# git config --global core.excludesfile ~/.gitignore_global +# go test -c output +*.test +*.test.exe -.vagrant -*.sublime-project +# Output of go build ./cmd/fsnotify +/fsnotify +/fsnotify.exe + +/test/kqueue +/test/a.out diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS deleted file mode 100644 index 6cbabe5e..00000000 --- a/vendor/github.com/fsnotify/fsnotify/AUTHORS +++ /dev/null @@ -1,62 +0,0 @@ -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# You can update this list using the following command: -# -# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS - -# Please keep the list sorted. 
- -Aaron L -Adrien Bustany -Alexey Kazakov -Amit Krishnan -Anmol Sethi -Bjørn Erik Pedersen -Brian Goff -Bruno Bigras -Caleb Spare -Case Nelson -Chris Howey -Christoffer Buchholz -Daniel Wagner-Hall -Dave Cheney -Eric Lin -Evan Phoenix -Francisco Souza -Gautam Dey -Hari haran -Ichinose Shogo -Johannes Ebke -John C Barstow -Kelvin Fo -Ken-ichirou MATSUZAWA -Matt Layher -Matthias Stone -Nathan Youngman -Nickolai Zeldovich -Oliver Bristow -Patrick -Paul Hammond -Pawel Knap -Pieter Droogendijk -Pratik Shinde -Pursuit92 -Riku Voipio -Rob Figueiredo -Rodrigo Chiossi -Slawek Ligus -Soge Zhang -Tiffany Jernigan -Tilak Sharma -Tobias Klauser -Tom Payne -Travis Cline -Tudor Golubenco -Vahe Khachikyan -Yukang -bronze1man -debrando -henrikedwards -铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index cc01c08f..fa854785 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,11 +1,199 @@ # Changelog -All notable changes to this project will be documented in this file. +1.8.0 2024-10-31 +---------------- -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +### Additions -## [Unreleased] +- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619]) + +### Changes and fixes + +- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610]) + +- kqueue: ignore events with Ident=0 ([#590]) + +- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617]) + +- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625]) + +- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620]) + +- inotify: fix panic when calling Remove() in a goroutine ([#650]) + +- fen: allow watching subdirectories of watched directories ([#621]) + +[#590]: https://github.com/fsnotify/fsnotify/pull/590 +[#610]: https://github.com/fsnotify/fsnotify/pull/610 +[#617]: https://github.com/fsnotify/fsnotify/pull/617 +[#619]: https://github.com/fsnotify/fsnotify/pull/619 +[#620]: https://github.com/fsnotify/fsnotify/pull/620 +[#621]: https://github.com/fsnotify/fsnotify/pull/621 +[#625]: https://github.com/fsnotify/fsnotify/pull/625 +[#650]: https://github.com/fsnotify/fsnotify/pull/650 + +1.7.0 - 2023-10-22 +------------------ +This version of fsnotify needs Go 1.17. + +### Additions + +- illumos: add FEN backend to support illumos and Solaris. ([#371]) + +- all: add `NewBufferedWatcher()` to use a buffered channel, which can be useful + in cases where you can't control the kernel buffer and receive a large number + of events in bursts. ([#550], [#572]) + +- all: add `AddWith()`, which is identical to `Add()` but allows passing + options. ([#521]) + +- windows: allow setting the ReadDirectoryChangesW() buffer size with + `fsnotify.WithBufferSize()`; the default of 64K is the highest value that + works on all platforms and is enough for most purposes, but in some cases a + highest buffer is needed. ([#521]) + +### Changes and fixes + +- inotify: remove watcher if a watched path is renamed ([#518]) + + After a rename the reported name wasn't updated, or even an empty string. + Inotify doesn't provide any good facilities to update it, so just remove the + watcher. This is already how it worked on kqueue and FEN. + + On Windows this does work, and remains working. 
+ +- windows: don't listen for file attribute changes ([#520]) + + File attribute changes are sent as `FILE_ACTION_MODIFIED` by the Windows API, + with no way to see if they're a file write or attribute change, so would show + up as a fsnotify.Write event. This is never useful, and could result in many + spurious Write events. + +- windows: return `ErrEventOverflow` if the buffer is full ([#525]) + + Before it would merely return "short read", making it hard to detect this + error. + +- kqueue: make sure events for all files are delivered properly when removing a + watched directory ([#526]) + + Previously they would get sent with `""` (empty string) or `"."` as the path + name. + +- kqueue: don't emit spurious Create events for symbolic links ([#524]) + + The link would get resolved but kqueue would "forget" it already saw the link + itself, resulting on a Create for every Write event for the directory. + +- all: return `ErrClosed` on `Add()` when the watcher is closed ([#516]) + +- other: add `Watcher.Errors` and `Watcher.Events` to the no-op `Watcher` in + `backend_other.go`, making it easier to use on unsupported platforms such as + WASM, AIX, etc. ([#528]) + +- other: use the `backend_other.go` no-op if the `appengine` build tag is set; + Google AppEngine forbids usage of the unsafe package so the inotify backend + won't compile there. + +[#371]: https://github.com/fsnotify/fsnotify/pull/371 +[#516]: https://github.com/fsnotify/fsnotify/pull/516 +[#518]: https://github.com/fsnotify/fsnotify/pull/518 +[#520]: https://github.com/fsnotify/fsnotify/pull/520 +[#521]: https://github.com/fsnotify/fsnotify/pull/521 +[#524]: https://github.com/fsnotify/fsnotify/pull/524 +[#525]: https://github.com/fsnotify/fsnotify/pull/525 +[#526]: https://github.com/fsnotify/fsnotify/pull/526 +[#528]: https://github.com/fsnotify/fsnotify/pull/528 +[#537]: https://github.com/fsnotify/fsnotify/pull/537 +[#550]: https://github.com/fsnotify/fsnotify/pull/550 +[#572]: https://github.com/fsnotify/fsnotify/pull/572 + +1.6.0 - 2022-10-13 +------------------ +This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1, +but not documented). It also increases the minimum Linux version to 2.6.32. + +### Additions + +- all: add `Event.Has()` and `Op.Has()` ([#477]) + + This makes checking events a lot easier; for example: + + if event.Op&Write == Write && !(event.Op&Remove == Remove) { + } + + Becomes: + + if event.Has(Write) && !event.Has(Remove) { + } + +- all: add cmd/fsnotify ([#463]) + + A command-line utility for testing and some examples. + +### Changes and fixes + +- inotify: don't ignore events for files that don't exist ([#260], [#470]) + + Previously the inotify watcher would call `os.Lstat()` to check if a file + still exists before emitting events. + + This was inconsistent with other platforms and resulted in inconsistent event + reporting (e.g. when a file is quickly removed and re-created), and generally + a source of confusion. It was added in 2013 to fix a memory leak that no + longer exists. + +- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's + not watched ([#460]) + +- inotify: replace epoll() with non-blocking inotify ([#434]) + + Non-blocking inotify was not generally available at the time this library was + written in 2014, but now it is. As a result, the minimum Linux version is + bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster. 
+ +- kqueue: don't check for events every 100ms ([#480]) + + The watcher would wake up every 100ms, even when there was nothing to do. Now + it waits until there is something to do. + +- macos: retry opening files on EINTR ([#475]) + +- kqueue: skip unreadable files ([#479]) + + kqueue requires a file descriptor for every file in a directory; this would + fail if a file was unreadable by the current user. Now these files are simply + skipped. + +- windows: fix renaming a watched directory if the parent is also watched ([#370]) + +- windows: increase buffer size from 4K to 64K ([#485]) + +- windows: close file handle on Remove() ([#288]) + +- kqueue: put pathname in the error if watching a file fails ([#471]) + +- inotify, windows: calling Close() more than once could race ([#465]) + +- kqueue: improve Close() performance ([#233]) + +- all: various documentation additions and clarifications. + +[#233]: https://github.com/fsnotify/fsnotify/pull/233 +[#260]: https://github.com/fsnotify/fsnotify/pull/260 +[#288]: https://github.com/fsnotify/fsnotify/pull/288 +[#370]: https://github.com/fsnotify/fsnotify/pull/370 +[#434]: https://github.com/fsnotify/fsnotify/pull/434 +[#460]: https://github.com/fsnotify/fsnotify/pull/460 +[#463]: https://github.com/fsnotify/fsnotify/pull/463 +[#465]: https://github.com/fsnotify/fsnotify/pull/465 +[#470]: https://github.com/fsnotify/fsnotify/pull/470 +[#471]: https://github.com/fsnotify/fsnotify/pull/471 +[#475]: https://github.com/fsnotify/fsnotify/pull/475 +[#477]: https://github.com/fsnotify/fsnotify/pull/477 +[#479]: https://github.com/fsnotify/fsnotify/pull/479 +[#480]: https://github.com/fsnotify/fsnotify/pull/480 +[#485]: https://github.com/fsnotify/fsnotify/pull/485 ## [1.5.4] - 2022-04-25 @@ -40,6 +228,30 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 [#385](https://github.com/fsnotify/fsnotify/pull/385) * Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325) +## [1.4.9] - 2020-03-11 + +* Move example usage to the readme #329. This may resolve #328. 
+ +## [1.4.8] - 2020-03-10 + +* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216) +* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265) +* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266) +* CI: Less verbosity (@nathany #267) +* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267) +* Tests: Check if channels are closed in the example (@alexeykazakov #244) +* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284) +* CI: Add windows to travis matrix (@cpuguy83 #284) +* Docs: Remover appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93) +* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219) +* Linux: open files with close-on-exec (@linxiulei #273) +* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 ) +* Project: Add go.mod (@nathany #309) +* Project: Revise editor config (@nathany #309) +* Project: Update copyright for 2019 (@nathany #309) +* CI: Drop go1.8 from CI matrix (@nathany #309) +* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e ) + ## [1.4.7] - 2018-01-09 * BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index 8a642563..e4ac2a2f 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,60 +1,144 @@ -# Contributing +Thank you for your interest in contributing to fsnotify! We try to review and +merge PRs in a reasonable timeframe, but please be aware that: -## Issues +- To avoid "wasted" work, please discuss changes on the issue tracker first. You + can just send PRs, but they may end up being rejected for one reason or the + other. -* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). -* Please indicate the platform you are using fsnotify on. -* A code example to reproduce the problem is appreciated. +- fsnotify is a cross-platform library, and changes must work reasonably well on + all supported platforms. -## Pull Requests +- Changes will need to be compatible; old code should still compile, and the + runtime behaviour can't change in ways that are likely to lead to problems for + users. -### Contributor License Agreement +Testing +------- +Just `go test ./...` runs all the tests; the CI runs this on all supported +platforms. Testing different platforms locally can be done with something like +[goon] or [Vagrant], but this isn't super-easy to set up at the moment. -fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). +Use the `-short` flag to make the "stress test" run faster. -Please indicate that you have signed the CLA in your pull request. 
+Writing new tests +----------------- +Scripts in the testdata directory allow creating test cases in a "shell-like" +syntax. The basic format is: -### How fsnotify is Developed + script -* Development is done on feature branches. -* Tests are run on BSD, Linux, macOS and Windows. -* Pull requests are reviewed and [applied to master][am] using [hub][]. - * Maintainers may modify or squash commits rather than asking contributors to. -* To issue a new release, the maintainers will: - * Update the CHANGELOG - * Tag a version, which will become available through gopkg.in. - -### How to Fork + Output: + desired output -For smooth sailing, always use the original import path. Installing with `go get` makes this easy. +For example: -1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Ensure everything works and the tests pass (see below) -4. Commit your changes (`git commit -am 'Add some feature'`) + # Create a new empty file with some data. + watch / + echo data >/file -Contribute upstream: + Output: + create /file + write /file -1. Fork fsnotify on GitHub -2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) -3. Push to the branch (`git push fork my-new-feature`) -4. Create a new Pull Request on GitHub +Just create a new file to add a new test; select which tests to run with +`-run TestScript/[path]`. -This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). +script +------ +The script is a "shell-like" script: -### Testing + cmd arg arg -fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. +Comments are supported with `#`: -Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. + # Comment + cmd arg arg # Comment -### Maintainers +All operations are done in a temp directory; a path like "/foo" is rewritten to +"/tmp/TestFoo/foo". -Help maintaining fsnotify is welcome. To be a maintainer: +Arguments can be quoted with `"` or `'`; there are no escapes and they're +functionally identical right now, but this may change in the future, so best to +assume shell-like rules. -* Submit a pull request and sign the CLA as above. -* You must be able to run the test suite on Mac, Windows, Linux and BSD. + touch "/file with spaces" -All code changes should be internal pull requests. +End-of-line escapes with `\` are not supported. -Releases are tagged using [Semantic Versioning](http://semver.org/). +### Supported commands + + watch path [ops] # Watch the path, reporting events for it. Nothing is + # watched by default. Optionally a list of ops can be + # given, as with AddWith(path, WithOps(...)). + unwatch path # Stop watching the path. + watchlist n # Assert watchlist length. + + stop # Stop running the script; for debugging. + debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in + parallel by default, so -parallel=1 is probably a good + idea). + + touch path + mkdir [-p] dir + ln -s target link # Only ln -s supported. + mkfifo path + mknod dev path + mv src dst + rm [-r] path + chmod mode path # Octal only + sleep time-in-ms + + cat path # Read path (does nothing with the data; just reads it). + echo str >>path # Append "str" to "path". + echo str >path # Truncate "path" and write "str". 
+ + require reason # Skip the test if "reason" is true; "skip" and + skip reason # "require" behave identical; it supports both for + # readability. Possible reasons are: + # + # always Always skip this test. + # symlink Symlinks are supported (requires admin + # permissions on Windows). + # mkfifo Platform doesn't support FIFO named sockets. + # mknod Platform doesn't support device nodes. + + +output +------ +After `Output:` the desired output is given; this is indented by convention, but +that's not required. + +The format of that is: + + # Comment + event path # Comment + + system: + event path + system2: + event path + +Every event is one line, and any whitespace between the event and path are +ignored. The path can optionally be surrounded in ". Anything after a "#" is +ignored. + +Platform-specific tests can be added after GOOS; for example: + + watch / + touch /file + + Output: + # Tested if nothing else matches + create /file + + # Windows-specific test. + windows: + write /file + +You can specify multiple platforms with a comma (e.g. "windows, linux:"). +"kqueue" is a shortcut for all kqueue systems (BSD, macOS). + + +[goon]: https://github.com/arp242/goon +[Vagrant]: https://www.vagrantup.com/ +[integration_test.go]: /integration_test.go diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE index e180c8fb..fb03ade7 100644 --- a/vendor/github.com/fsnotify/fsnotify/LICENSE +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -1,28 +1,25 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. +Copyright © 2012 The Go Authors. All rights reserved. +Copyright © fsnotify Authors. All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. +* Neither the name of Google Inc. nor the names of its contributors may be used + to endorse or promote products derived from this software without specific + prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index 0731c5ef..e480733d 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -1,120 +1,184 @@ -# File system notifications for Go +fsnotify is a Go library to provide cross-platform filesystem notifications on +Windows, Linux, macOS, BSD, and illumos. -[![Go Reference](https://pkg.go.dev/badge/github.com/fsnotify/fsnotify.svg)](https://pkg.go.dev/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) [![Maintainers Wanted](https://img.shields.io/badge/maintainers-wanted-red.svg)](https://github.com/fsnotify/fsnotify/issues/413) +Go 1.17 or newer is required; the full documentation is at +https://pkg.go.dev/github.com/fsnotify/fsnotify -fsnotify utilizes [`golang.org/x/sys`](https://pkg.go.dev/golang.org/x/sys) rather than [`syscall`](https://pkg.go.dev/syscall) from the standard library. +--- -Cross platform: Windows, Linux, BSD and macOS. 
+Platform support: -| Adapter | OS | Status | -| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| inotify | Linux 2.6.27 or later, Android\* | Supported | -| kqueue | BSD, macOS, iOS\* | Supported | -| ReadDirectoryChangesW | Windows | Supported | -| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | -| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | -| fanotify | Linux 2.6.37+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | -| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | -| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | +| Backend | OS | Status | +| :-------------------- | :--------- | :------------------------------------------------------------------------ | +| inotify | Linux | Supported | +| kqueue | BSD, macOS | Supported | +| ReadDirectoryChangesW | Windows | Supported | +| FEN | illumos | Supported | +| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) | +| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment | +| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] | +| USN Journals | Windows | [Needs support in x/sys/windows][usn] | +| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) | -\* Android and iOS are untested. +Linux and illumos should include Android and Solaris, but these are currently +untested. -Please see [the documentation](https://pkg.go.dev/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. +[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120 +[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847 +[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129 -## API stability - -fsnotify is a fork of [howeyc/fsnotify](https://github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). - -All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). - -## Usage +Usage +----- +A basic example: ```go package main import ( - "log" + "log" - "github.com/fsnotify/fsnotify" + "github.com/fsnotify/fsnotify" ) func main() { - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() - - done := make(chan bool) - go func() { - for { - select { - case event, ok := <-watcher.Events: - if !ok { - return - } - log.Println("event:", event) - if event.Op&fsnotify.Write == fsnotify.Write { - log.Println("modified file:", event.Name) - } - case err, ok := <-watcher.Errors: - if !ok { - return - } - log.Println("error:", err) - } - } - }() - - err = watcher.Add("/tmp/foo") - if err != nil { - log.Fatal(err) - } - <-done + // Create new watcher. + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + // Start listening for events. + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + log.Println("event:", event) + if event.Has(fsnotify.Write) { + log.Println("modified file:", event.Name) + } + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Println("error:", err) + } + } + }() + + // Add a path. 
+	err = watcher.Add("/tmp")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Block main goroutine forever.
+	<-make(chan struct{})
 }
 ```
 
-## Contributing
+Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be
+run with:
+
+    % go run ./cmd/fsnotify
 
-Please refer to [CONTRIBUTING][] before opening an issue or pull request.
+Further detailed documentation can be found in godoc:
+https://pkg.go.dev/github.com/fsnotify/fsnotify
 
-## FAQ
+FAQ
+---
+### Will a file still be watched when it's moved to another directory?
+No, not unless you are watching the location it was moved to.
 
-**When a file is moved to another directory is it still being watched?**
+### Are subdirectories watched?
+No, you must add watches for any directory you want to watch (a recursive
+watcher is on the roadmap: [#18]).
 
-No (it shouldn't be, unless you are watching where it was moved to).
+[#18]: https://github.com/fsnotify/fsnotify/issues/18
 
-**When I watch a directory, are all subdirectories watched as well?**
+### Do I have to watch the Error and Event channels in a goroutine?
+Yes. You can read both channels in the same goroutine using `select` (you don't
+need a separate goroutine for both channels; see the example).
 
-No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
+### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
+fsnotify requires support from the underlying OS to work. The current NFS and
+SMB protocols do not provide network-level support for file notifications, and
+neither do the /proc and /sys virtual filesystems.
 
-**Do I have to watch the Error and Event channels in a separate goroutine?**
+This could be fixed with a polling watcher ([#9]), but it's not yet implemented.
 
-As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
+[#9]: https://github.com/fsnotify/fsnotify/issues/9
 
-**Why am I receiving multiple events for the same file on OS X?**
+### Why do I get many Chmod events?
+Some programs may generate a lot of attribute changes; for example Spotlight on
+macOS, anti-virus programs, backup applications, and some others are known to do
+this. As a rule, it's typically best to ignore Chmod events. They're often not
+useful, and tend to cause problems.
 
-Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
+Spotlight indexing on macOS can result in multiple events (see [#15]). A
+temporary workaround is to add your folder(s) to the *Spotlight Privacy
+settings* until we have a native FSEvents implementation (see [#11]).
 
-**How many files can be watched at once?**
+[#11]: https://github.com/fsnotify/fsnotify/issues/11
+[#15]: https://github.com/fsnotify/fsnotify/issues/15
 
-There are OS-specific limits as to how many watches can be created:
-* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
-* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
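+For example, a minimal sketch of an event loop that skips Chmod-only events
+(reusing the `watcher` from the usage example above; adapt as needed):
+
+```go
+for {
+	select {
+	case event, ok := <-watcher.Events:
+		if !ok {
+			return
+		}
+		if event.Op == fsnotify.Chmod {
+			// Attribute-only change; usually noise, so skip it.
+			continue
+		}
+		log.Println("event:", event)
+	case err, ok := <-watcher.Errors:
+		if !ok {
+			return
+		}
+		log.Println("error:", err)
+	}
+}
+```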
+### Watching a file doesn't work well
+Watching individual files (rather than directories) is generally not recommended
+as many programs (especially editors) update files atomically: they write to a
+temporary file which is then moved to the destination, overwriting the original
+(or some variant thereof). The watcher on the original file is now lost, as that
+no longer exists.
 
-**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?**
+The upshot of this is that a power failure or crash won't leave a half-written
+file.
 
-fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications.
+Watch the parent directory and use `Event.Name` to filter out files you're not
+interested in. There is an example of this in `cmd/fsnotify/file.go`.
 
-[#62]: https://github.com/howeyc/fsnotify/issues/62
-[#18]: https://github.com/fsnotify/fsnotify/issues/18
-[#11]: https://github.com/fsnotify/fsnotify/issues/11
-[#7]: https://github.com/howeyc/fsnotify/issues/7
+Platform-specific notes
+-----------------------
+### Linux
+When a file is removed, a REMOVE event won't be emitted until all file
+descriptors are closed; it will emit a CHMOD instead:
+
+    fp := os.Open("file")
+    os.Remove("file")        // CHMOD
+    fp.Close()               // REMOVE
+
+This is the event that inotify sends, so not much can be changed about this.
+
+The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for
+the number of watches per user, and `fs.inotify.max_user_instances` specifies
+the maximum number of inotify instances per user. Every Watcher you create is an
+"instance", and every path you add is a "watch".
+
+These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and
+`/proc/sys/fs/inotify/max_user_instances`.
+
+To increase them you can use `sysctl` or write the values to the proc files:
+
+    # The default values on Linux 5.18
+    sysctl fs.inotify.max_user_watches=124983
+    sysctl fs.inotify.max_user_instances=128
+
+To make the changes persist on reboot, edit `/etc/sysctl.conf` or
+`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your
+distro's documentation):
 
-[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
+    fs.inotify.max_user_watches=124983
+    fs.inotify.max_user_instances=128
 
-## Related Projects
+Reaching the limit will result in a "no space left on device" or "too many open
+files" error.
 
-* [notify](https://github.com/rjeczalik/notify)
-* [fsevents](https://github.com/fsnotify/fsevents)
+### kqueue (macOS, all BSD systems)
+kqueue requires opening a file descriptor for every file that's being watched;
+so if you're watching a directory with five files then that's six file
+descriptors. You will run into your system's "max open files" limit faster on
+these platforms.
+
+The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to
+control the maximum number of open files.
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go
new file mode 100644
index 00000000..c349c326
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go
@@ -0,0 +1,484 @@
+//go:build solaris
+
+// FEN backend for illumos (supported) and Solaris (untested, but should work).
+//
+// See port_create(3c) etc. for docs.
https://www.illumos.org/man/3C/port_create + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "sync" + "time" + + "github.com/fsnotify/fsnotify/internal" + "golang.org/x/sys/unix" +) + +type fen struct { + Events chan Event + Errors chan error + + mu sync.Mutex + port *unix.EventPort + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + dirs map[string]Op // Explicitly watched directories + watches map[string]Op // Explicitly watched non-directories +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + w := &fen{ + Events: ev, + Errors: errs, + dirs: make(map[string]Op), + watches: make(map[string]Op), + done: make(chan struct{}), + } + + var err error + w.port, err = unix.NewEventPort() + if err != nil { + return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err) + } + + go w.readEvents() + return w, nil +} + +// sendEvent attempts to send an event to the user, returning true if the event +// was put in the channel successfully and false if the watcher has been closed. +func (w *fen) sendEvent(name string, op Op) (sent bool) { + select { + case <-w.done: + return false + case w.Events <- Event{Name: name, Op: op}: + return true + } +} + +// sendError attempts to send an error to the user, returning true if the error +// was put in the channel successfully and false if the watcher has been closed. +func (w *fen) sendError(err error) (sent bool) { + if err == nil { + return true + } + select { + case <-w.done: + return false + case w.Errors <- err: + return true + } +} + +func (w *fen) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +func (w *fen) Close() error { + // Take the lock used by associateFile to prevent lingering events from + // being processed after the close + w.mu.Lock() + defer w.mu.Unlock() + if w.isClosed() { + return nil + } + close(w.done) + return w.port.Close() +} + +func (w *fen) Add(name string) error { return w.AddWith(name) } + +func (w *fen) AddWith(name string, opts ...addOpt) error { + if w.isClosed() { + return ErrClosed + } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } + + // Currently we resolve symlinks that were explicitly requested to be + // watched. Otherwise we would use LStat here. + stat, err := os.Stat(name) + if err != nil { + return err + } + + // Associate all files in the directory. + if stat.IsDir() { + err := w.handleDirectory(name, stat, true, w.associateFile) + if err != nil { + return err + } + + w.mu.Lock() + w.dirs[name] = with.op + w.mu.Unlock() + return nil + } + + err = w.associateFile(name, stat, true) + if err != nil { + return err + } + + w.mu.Lock() + w.watches[name] = with.op + w.mu.Unlock() + return nil +} + +func (w *fen) Remove(name string) error { + if w.isClosed() { + return nil + } + if !w.port.PathIsWatched(name) { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + // The user has expressed an intent. Immediately remove this name from + // whichever watch list it might be in. If it's not in there the delete + // doesn't cause harm. 
+	w.mu.Lock()
+	delete(w.watches, name)
+	delete(w.dirs, name)
+	w.mu.Unlock()
+
+	stat, err := os.Stat(name)
+	if err != nil {
+		return err
+	}
+
+	// Remove associations for every file in the directory.
+	if stat.IsDir() {
+		err := w.handleDirectory(name, stat, false, w.dissociateFile)
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+
+	err = w.port.DissociatePath(name)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// readEvents contains the main loop that runs in a goroutine watching for events.
+func (w *fen) readEvents() {
+	// If this function returns, the watcher has been closed and we can close
+	// these channels.
+	defer func() {
+		close(w.Errors)
+		close(w.Events)
+	}()
+
+	pevents := make([]unix.PortEvent, 8)
+	for {
+		count, err := w.port.Get(pevents, 1, nil)
+		if err != nil && err != unix.ETIME {
+			// Interrupted system call (count should be 0); ignore and continue.
+			if errors.Is(err, unix.EINTR) && count == 0 {
+				continue
+			}
+			// Get failed because we called w.Close().
+			if errors.Is(err, unix.EBADF) && w.isClosed() {
+				return
+			}
+			// There was an error not caused by calling w.Close().
+			if !w.sendError(err) {
+				return
+			}
+		}
+
+		p := pevents[:count]
+		for _, pevent := range p {
+			if pevent.Source != unix.PORT_SOURCE_FILE {
+				// Event from unexpected source received; should never happen.
+				if !w.sendError(errors.New("Event from unexpected source received")) {
+					return
+				}
+				continue
+			}
+
+			if debug {
+				internal.Debug(pevent.Path, pevent.Events)
+			}
+
+			err = w.handleEvent(&pevent)
+			if !w.sendError(err) {
+				return
+			}
+		}
+	}
+}
+
+func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
+	files, err := os.ReadDir(path)
+	if err != nil {
+		return err
+	}
+
+	// Handle all children of the directory.
+	for _, entry := range files {
+		finfo, err := entry.Info()
+		if err != nil {
+			return err
+		}
+		err = handler(filepath.Join(path, finfo.Name()), finfo, false)
+		if err != nil {
+			return err
+		}
+	}
+
+	// And finally handle the directory itself.
+	return handler(path, stat, follow)
+}
+
+// handleEvent might need to emit more than one fsnotify event if the events
+// bitmap matches more than one event type (e.g. the file was both modified and
+// had the attributes changed between when the association was created and when
+// the event was returned).
+func (w *fen) handleEvent(event *unix.PortEvent) error {
+	var (
+		events     = event.Events
+		path       = event.Path
+		fmode      = event.Cookie.(os.FileMode)
+		reRegister = true
+	)
+
+	w.mu.Lock()
+	_, watchedDir := w.dirs[path]
+	_, watchedPath := w.watches[path]
+	w.mu.Unlock()
+	isWatched := watchedDir || watchedPath
+
+	if events&unix.FILE_DELETE != 0 {
+		if !w.sendEvent(path, Remove) {
+			return nil
+		}
+		reRegister = false
+	}
+	if events&unix.FILE_RENAME_FROM != 0 {
+		if !w.sendEvent(path, Rename) {
+			return nil
+		}
+		// Don't keep watching the new file name.
+		reRegister = false
+	}
+	if events&unix.FILE_RENAME_TO != 0 {
+		// We don't report a Rename event for this case, because Rename events
+		// are interpreted as referring to the _old_ name of the file, and in
+		// this case the event would refer to the new name of the file. This
+		// type of rename event is not supported by fsnotify.
+
+		// inotify reports a Remove event in this case, so we simulate this
+		// here.
+		if !w.sendEvent(path, Remove) {
+			return nil
+		}
+		// Don't keep watching the file that was removed.
+		reRegister = false
+	}
+
+	// The file is gone, nothing left to do.
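+	// (Event port associations are one-shot: the kernel drops the association
+	// once the event has fired, so at this point only our own bookkeeping
+	// needs cleaning up.)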
+	if !reRegister {
+		if watchedDir {
+			w.mu.Lock()
+			delete(w.dirs, path)
+			w.mu.Unlock()
+		}
+		if watchedPath {
+			w.mu.Lock()
+			delete(w.watches, path)
+			w.mu.Unlock()
+		}
+		return nil
+	}
+
+	// If we didn't get a deletion, the file still exists and we're going to
+	// have to watch it again. Stat it now so that we can compare permissions
+	// and have what we need to continue watching the file.
+
+	stat, err := os.Lstat(path)
+	if err != nil {
+		// This is unexpected, but we should still emit an event. This happens
+		// most often on "rm -r" of a subdirectory inside a watched directory.
+		// We get a modify event of something happening inside, but by the time
+		// we get here, the subdirectory is already gone. Clearly we were
+		// watching this path but now it is gone. Let's tell the user that it
+		// was removed.
+		if !w.sendEvent(path, Remove) {
+			return nil
+		}
+		// Suppress extra write events on removed directories; they are not
+		// informative and can be confusing.
+		return nil
+	}
+
+	// Resolve symlinks that were explicitly watched, as we would have at Add()
+	// time. This helps suppress spurious Chmod events on watched symlinks.
+	if isWatched {
+		stat, err = os.Stat(path)
+		if err != nil {
+			// The symlink still exists, but the target is gone. Report the
+			// Remove similar to above.
+			if !w.sendEvent(path, Remove) {
+				return nil
+			}
+			// Don't return the error.
+		}
+	}
+
+	if events&unix.FILE_MODIFIED != 0 {
+		if fmode.IsDir() && watchedDir {
+			if err := w.updateDirectory(path); err != nil {
+				return err
+			}
+		} else {
+			if !w.sendEvent(path, Write) {
+				return nil
+			}
+		}
+	}
+	if events&unix.FILE_ATTRIB != 0 && stat != nil {
+		// Only send Chmod if perms changed.
+		if stat.Mode().Perm() != fmode.Perm() {
+			if !w.sendEvent(path, Chmod) {
+				return nil
+			}
+		}
+	}
+
+	if stat != nil {
+		// If we get here, it means we've hit an event above that requires us
+		// to continue watching the file or directory.
+		return w.associateFile(path, stat, isWatched)
+	}
+	return nil
+}
+
+func (w *fen) updateDirectory(path string) error {
+	// The directory was modified, so we must find unwatched entities and watch
+	// them. If something was removed from the directory, nothing will happen,
+	// as everything else should still be watched.
+	files, err := os.ReadDir(path)
+	if err != nil {
+		return err
+	}
+
+	for _, entry := range files {
+		path := filepath.Join(path, entry.Name())
+		if w.port.PathIsWatched(path) {
+			continue
+		}
+
+		finfo, err := entry.Info()
+		if err != nil {
+			return err
+		}
+		err = w.associateFile(path, finfo, false)
+		if !w.sendError(err) {
+			return nil
+		}
+		if !w.sendEvent(path, Create) {
+			return nil
+		}
+	}
+	return nil
+}
+
+func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error {
+	if w.isClosed() {
+		return ErrClosed
+	}
+	// This is primarily protecting the call to AssociatePath, but it is
+	// important and intentional that the call to PathIsWatched is also
+	// protected by this mutex. Without this mutex, AssociatePath has been seen
+	// to error out that the path is already associated.
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	if w.port.PathIsWatched(path) {
+		// Remove the old association in favor of this one. If we get ENOENT,
+		// then while the x/sys/unix wrapper still thought that this path was
+		// associated, the underlying event port did not. This call will have
+		// cleared up that discrepancy. The most likely cause is that the event
+		// has fired but we haven't processed it yet.
+ err := w.port.DissociatePath(path) + if err != nil && !errors.Is(err, unix.ENOENT) { + return err + } + } + + var events int + if !follow { + // Watch symlinks themselves rather than their targets unless this entry + // is explicitly watched. + events |= unix.FILE_NOFOLLOW + } + if true { // TODO: implement withOps() + events |= unix.FILE_MODIFIED + } + if true { + events |= unix.FILE_ATTRIB + } + return w.port.AssociatePath(path, stat, events, stat.Mode()) +} + +func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { + if !w.port.PathIsWatched(path) { + return nil + } + return w.port.DissociatePath(path) +} + +func (w *fen) WatchList() []string { + if w.isClosed() { + return nil + } + + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)+len(w.dirs)) + for pathname := range w.dirs { + entries = append(entries, pathname) + } + for pathname := range w.watches { + entries = append(entries, pathname) + } + + return entries +} + +func (w *fen) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go new file mode 100644 index 00000000..36c31169 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -0,0 +1,658 @@ +//go:build linux && !appengine + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "strings" + "sync" + "time" + "unsafe" + + "github.com/fsnotify/fsnotify/internal" + "golang.org/x/sys/unix" +) + +type inotify struct { + Events chan Event + Errors chan error + + // Store fd here as os.File.Read() will no longer return on close after + // calling Fd(). See: https://github.com/golang/go/issues/26439 + fd int + inotifyFile *os.File + watches *watches + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneMu sync.Mutex + doneResp chan struct{} // Channel to respond to Close + + // Store rename cookies in an array, with the index wrapping to 0. Almost + // all of the time what we get is a MOVED_FROM to set the cookie and the + // next event inotify sends will be MOVED_TO to read it. However, this is + // not guaranteed – as described in inotify(7) – and we may get other events + // between the two MOVED_* events (including other MOVED_* ones). + // + // A second issue is that moving a file outside the watched directory will + // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to + // read and delete it. So just storing it in a map would slowly leak memory. + // + // Doing it like this gives us a simple fast LRU-cache that won't allocate. + // Ten items should be more than enough for our purpose, and a loop over + // such a short array is faster than a map access anyway (not that it hugely + // matters since we're talking about hundreds of ns at the most, but still). + cookies [10]koekje + cookieIndex uint8 + cookiesMu sync.Mutex +} + +type ( + watches struct { + mu sync.RWMutex + wd map[uint32]*watch // wd → watch + path map[string]uint32 // pathname → wd + } + watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) + path string // Watch path. + recurse bool // Recursion with ./...? 
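+		// recurse is set when the watch was added with the "path/..." syntax;
+		// see recursivePath() and AddWith().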
+ } + koekje struct { + cookie uint32 + path string + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[uint32]*watch), + path: make(map[string]uint32), + } +} + +func (w *watches) len() int { + w.mu.RLock() + defer w.mu.RUnlock() + return len(w.wd) +} + +func (w *watches) add(ww *watch) { + w.mu.Lock() + defer w.mu.Unlock() + w.wd[ww.wd] = ww + w.path[ww.path] = ww.wd +} + +func (w *watches) remove(wd uint32) { + w.mu.Lock() + defer w.mu.Unlock() + watch := w.wd[wd] // Could have had Remove() called. See #616. + if watch == nil { + return + } + delete(w.path, watch.path) + delete(w.wd, wd) +} + +func (w *watches) removePath(path string) ([]uint32, error) { + w.mu.Lock() + defer w.mu.Unlock() + + path, recurse := recursivePath(path) + wd, ok := w.path[path] + if !ok { + return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path) + } + + watch := w.wd[wd] + if recurse && !watch.recurse { + return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path) + } + + delete(w.path, path) + delete(w.wd, wd) + if !watch.recurse { + return []uint32{wd}, nil + } + + wds := make([]uint32, 0, 8) + wds = append(wds, wd) + for p, rwd := range w.path { + if filepath.HasPrefix(p, path) { + delete(w.path, p) + delete(w.wd, rwd) + wds = append(wds, rwd) + } + } + return wds, nil +} + +func (w *watches) byPath(path string) *watch { + w.mu.RLock() + defer w.mu.RUnlock() + return w.wd[w.path[path]] +} + +func (w *watches) byWd(wd uint32) *watch { + w.mu.RLock() + defer w.mu.RUnlock() + return w.wd[wd] +} + +func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error { + w.mu.Lock() + defer w.mu.Unlock() + + var existing *watch + wd, ok := w.path[path] + if ok { + existing = w.wd[wd] + } + + upd, err := f(existing) + if err != nil { + return err + } + if upd != nil { + w.wd[upd.wd] = upd + w.path[upd.path] = upd.wd + + if upd.wd != wd { + delete(w.wd, wd) + } + } + + return nil +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + // Need to set nonblocking mode for SetDeadline to work, otherwise blocking + // I/O operations won't terminate on close. + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) + if fd == -1 { + return nil, errno + } + + w := &inotify{ + Events: ev, + Errors: errs, + fd: fd, + inotifyFile: os.NewFile(uintptr(fd), ""), + watches: newWatches(), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *inotify) sendEvent(e Event) bool { + select { + case <-w.done: + return false + case w.Events <- e: + return true + } +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *inotify) sendError(err error) bool { + if err == nil { + return true + } + select { + case <-w.done: + return false + case w.Errors <- err: + return true + } +} + +func (w *inotify) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +func (w *inotify) Close() error { + w.doneMu.Lock() + if w.isClosed() { + w.doneMu.Unlock() + return nil + } + close(w.done) + w.doneMu.Unlock() + + // Causes any blocking reads to return with an error, provided the file + // still supports deadline operations. 
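+	// (The inotify fd was opened with IN_NONBLOCK in newBufferedBackend; that
+	// is what lets this Close unblock the reader goroutine's pending Read.)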
+ err := w.inotifyFile.Close() + if err != nil { + return err + } + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +func (w *inotify) Add(name string) error { return w.AddWith(name) } + +func (w *inotify) AddWith(path string, opts ...addOpt) error { + if w.isClosed() { + return ErrClosed + } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), path) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } + + path, recurse := recursivePath(path) + if recurse { + return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + if root == path { + return fmt.Errorf("fsnotify: not a directory: %q", path) + } + return nil + } + + // Send a Create event when adding new directory from a recursive + // watch; this is for "mkdir -p one/two/three". Usually all those + // directories will be created before we can set up watchers on the + // subdirectories, so only "one" would be sent as a Create event and + // not "one/two" and "one/two/three" (inotifywait -r has the same + // problem). + if with.sendCreate && root != path { + w.sendEvent(Event{Name: root, Op: Create}) + } + + return w.add(root, with, true) + }) + } + + return w.add(path, with, false) +} + +func (w *inotify) add(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) +} + +func (w *inotify) register(path string, flags uint32, recurse bool) error { + return w.watches.updatePath(path, func(existing *watch) (*watch, error) { + if existing != nil { + flags |= existing.flags | unix.IN_MASK_ADD + } + + wd, err := unix.InotifyAddWatch(w.fd, path, flags) + if wd == -1 { + return nil, err + } + + if existing == nil { + return &watch{ + wd: uint32(wd), + path: path, + flags: flags, + recurse: recurse, + }, nil + } + + existing.wd = uint32(wd) + existing.flags = flags + return existing, nil + }) +} + +func (w *inotify) Remove(name string) error { + if w.isClosed() { + return nil + } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + return w.remove(filepath.Clean(name)) +} + +func (w *inotify) remove(name string) error { + wds, err := w.watches.removePath(name) + if err != nil { + return err + } + + for _, wd := range wds { + _, err := unix.InotifyRmWatch(w.fd, wd) + if err != nil { + // TODO: Perhaps it's not helpful to return an error here in every + // case; the only two possible errors are: + // + // EBADF, which happens when w.fd is not a valid file descriptor of + // any kind. + // + // EINVAL, which is when fd is not an inotify descriptor or wd is + // not a valid watch descriptor. 
Watch descriptors are invalidated
+			// when they are removed explicitly or implicitly; explicitly by
+			// inotify_rm_watch, implicitly when the file they are watching is
+			// deleted.
+			return err
+		}
+	}
+	return nil
+}
+
+func (w *inotify) WatchList() []string {
+	if w.isClosed() {
+		return nil
+	}
+
+	entries := make([]string, 0, w.watches.len())
+	w.watches.mu.RLock()
+	for pathname := range w.watches.path {
+		entries = append(entries, pathname)
+	}
+	w.watches.mu.RUnlock()
+
+	return entries
+}
+
+// readEvents reads from the inotify file descriptor, converts the
+// received events into Event objects and sends them via the Events channel.
+func (w *inotify) readEvents() {
+	defer func() {
+		close(w.doneResp)
+		close(w.Errors)
+		close(w.Events)
+	}()
+
+	var (
+		buf   [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
+		errno error                                // Syscall errno
+	)
+	for {
+		// See if we have been closed.
+		if w.isClosed() {
+			return
+		}
+
+		n, err := w.inotifyFile.Read(buf[:])
+		switch {
+		case errors.Unwrap(err) == os.ErrClosed:
+			return
+		case err != nil:
+			if !w.sendError(err) {
+				return
+			}
+			continue
+		}
+
+		if n < unix.SizeofInotifyEvent {
+			var err error
+			if n == 0 {
+				err = io.EOF // If EOF is received. This should really never happen.
+			} else if n < 0 {
+				err = errno // If an error occurred while reading.
+			} else {
+				err = errors.New("notify: short read in readEvents()") // Read was too short.
+			}
+			if !w.sendError(err) {
+				return
+			}
+			continue
+		}
+
+		// We don't know how many events we just read into the buffer.
+		// While the offset points to at least one whole event...
+		var offset uint32
+		for offset <= uint32(n-unix.SizeofInotifyEvent) {
+			var (
+				// Point "raw" to the event in the buffer.
+				raw     = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+				mask    = uint32(raw.Mask)
+				nameLen = uint32(raw.Len)
+				// Move to the next event in the buffer.
+				next = func() { offset += unix.SizeofInotifyEvent + nameLen }
+			)
+
+			if mask&unix.IN_Q_OVERFLOW != 0 {
+				if !w.sendError(ErrEventOverflow) {
+					return
+				}
+			}
+
+			/// If the event happened to the watched directory or the watched
+			/// file, the kernel doesn't append the filename to the event, but
+			/// we would like to always fill the "Name" field with a valid
+			/// filename. We retrieve the path of the watch from the "paths"
+			/// map.
+			watch := w.watches.byWd(uint32(raw.Wd))
+			/// Can be nil if Remove() was called in another goroutine for this
+			/// path in between reading the events from the kernel and reading
+			/// the internal state. Not much we can do about it, so just skip.
+			/// See #616.
+			if watch == nil {
+				next()
+				continue
+			}
+
+			name := watch.path
+			if nameLen > 0 {
+				/// Point "bytes" at the first byte of the filename.
+				bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
+				/// The filename is padded with NULL bytes. TrimRight() gets rid of those.
+				name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+			}
+
+			if debug {
+				internal.Debug(name, raw.Mask, raw.Cookie)
+			}
+
+			if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0
+				next()
+				continue
+			}
+
+			// inotify will automatically remove the watch on deletes; just need
+			// to clean our state here.
+			if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+				w.watches.remove(watch.wd)
+			}
+
+			// We can't really update the state when a watched path is moved;
+			// only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove
+			// the watch.
+ if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { + next() // Do nothing + continue + } + + err := w.remove(watch.path) + if err != nil && !errors.Is(err, ErrNonExistentWatch) { + if !w.sendError(err) { + return + } + } + } + + /// Skip if we're watching both this path and the parent; the parent + /// will already send a delete so no need to do it twice. + if mask&unix.IN_DELETE_SELF != 0 { + if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok { + next() + continue + } + } + + ev := w.newEvent(name, mask, raw.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. + if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return + } + + // This was a directory rename, so we need to update all + // the children. + // + // TODO: this is of course pretty slow; we should use a + // better data structure for storing all of this, e.g. store + // children in the watch. I have some code for this in my + // kqueue refactor we can use in the future. For now I'm + // okay with this as it's not publicly available. + // Correctness first, performance second. + if ev.renamedFrom != "" { + w.watches.mu.Lock() + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue + } + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww + } + } + w.watches.mu.Unlock() + } + } + } + + /// Send the events that are not ignored on the events channel + if !w.sendEvent(ev) { + return + } + next() + } + } +} + +func (w *inotify) isRecursive(path string) bool { + ww := w.watches.byPath(path) + if ww == nil { // path could be a file, so also check the Dir. + ww = w.watches.byPath(filepath.Dir(path)) + } + return ww != nil && ww.recurse +} + +func (w *inotify) newEvent(name string, mask, cookie uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_OPEN == unix.IN_OPEN { + e.Op |= xUnportableOpen + } + if mask&unix.IN_ACCESS == unix.IN_ACCESS { + e.Op |= xUnportableRead + } + if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE { + e.Op |= xUnportableCloseWrite + } + if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE { + e.Op |= xUnportableCloseRead + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + + if cookie != 0 { + if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + w.cookiesMu.Lock() + w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name} + w.cookieIndex++ + if w.cookieIndex > 9 { + w.cookieIndex = 0 + } + w.cookiesMu.Unlock() + } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + w.cookiesMu.Lock() + var prev string + for _, c := range w.cookies { + if c.cookie == cookie { + prev = c.path + break + } + } + w.cookiesMu.Unlock() + e.renamedFrom = prev + } + } + return e +} + +func (w *inotify) xSupports(op Op) bool { + return true // Supports everything. 
+} + +func (w *inotify) state() { + w.watches.mu.Lock() + defer w.watches.mu.Unlock() + for wd, ww := range w.watches.wd { + fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go new file mode 100644 index 00000000..d8de5ab7 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -0,0 +1,733 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "sync" + "time" + + "github.com/fsnotify/fsnotify/internal" + "golang.org/x/sys/unix" +) + +type kqueue struct { + Events chan Event + Errors chan error + + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing kq. + watches *watches + done chan struct{} + doneMu sync.Mutex +} + +type ( + watches struct { + mu sync.RWMutex + wd map[int]watch // wd → watch + path map[string]int // pathname → wd + byDir map[string]map[int]struct{} // dirname(path) → wd + seen map[string]struct{} // Keep track of if we know this file exists. + byUser map[string]struct{} // Watches added with Watcher.Add() + } + watch struct { + wd int + name string + linkName string // In case of links; name is the target, and this is the link. + isDir bool + dirFlags uint32 + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[int]watch), + path: make(map[string]int), + byDir: make(map[string]map[int]struct{}), + seen: make(map[string]struct{}), + byUser: make(map[string]struct{}), + } +} + +func (w *watches) listPaths(userOnly bool) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + if userOnly { + l := make([]string, 0, len(w.byUser)) + for p := range w.byUser { + l = append(l, p) + } + return l + } + + l := make([]string, 0, len(w.path)) + for p := range w.path { + l = append(l, p) + } + return l +} + +func (w *watches) watchesInDir(path string) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + l := make([]string, 0, 4) + for fd := range w.byDir[path] { + info := w.wd[fd] + if _, ok := w.byUser[info.name]; !ok { + l = append(l, info.name) + } + } + return l +} + +// Mark path as added by the user. 
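+// Only these paths are returned by WatchList(); watches that are set up
+// internally for the contents of a watched directory are excluded (see
+// listPaths).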
+func (w *watches) addUserWatch(path string) { + w.mu.Lock() + defer w.mu.Unlock() + w.byUser[path] = struct{}{} +} + +func (w *watches) addLink(path string, fd int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.seen[path] = struct{}{} +} + +func (w *watches) add(path, linkPath string, fd int, isDir bool) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir} + + parent := filepath.Dir(path) + byDir, ok := w.byDir[parent] + if !ok { + byDir = make(map[int]struct{}, 1) + w.byDir[parent] = byDir + } + byDir[fd] = struct{}{} +} + +func (w *watches) byWd(fd int) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[fd] + return info, ok +} + +func (w *watches) byPath(path string) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[w.path[path]] + return info, ok +} + +func (w *watches) updateDirFlags(path string, flags uint32) { + w.mu.Lock() + defer w.mu.Unlock() + + fd := w.path[path] + info := w.wd[fd] + info.dirFlags = flags + w.wd[fd] = info +} + +func (w *watches) remove(fd int, path string) bool { + w.mu.Lock() + defer w.mu.Unlock() + + isDir := w.wd[fd].isDir + delete(w.path, path) + delete(w.byUser, path) + + parent := filepath.Dir(path) + delete(w.byDir[parent], fd) + + if len(w.byDir[parent]) == 0 { + delete(w.byDir, parent) + } + + delete(w.wd, fd) + delete(w.seen, path) + return isDir +} + +func (w *watches) markSeen(path string, exists bool) { + w.mu.Lock() + defer w.mu.Unlock() + if exists { + w.seen[path] = struct{}{} + } else { + delete(w.seen, path) + } +} + +func (w *watches) seenBefore(path string) bool { + w.mu.RLock() + defer w.mu.RUnlock() + _, ok := w.seen[path] + return ok +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + kq, closepipe, err := newKqueue() + if err != nil { + return nil, err + } + + w := &kqueue{ + Events: ev, + Errors: errs, + kq: kq, + closepipe: closepipe, + done: make(chan struct{}), + watches: newWatches(), + } + + go w.readEvents() + return w, nil +} + +// newKqueue creates a new kernel event queue and returns a descriptor. +// +// This registers a new event on closepipe, which will trigger an event when +// it's closed. This way we can use kevent() without timeout/polling; without +// the closepipe, it would block forever and we wouldn't be able to stop it at +// all. +func newKqueue() (kq int, closepipe [2]int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, closepipe, err + } + + // Register the close pipe. + err = unix.Pipe(closepipe[:]) + if err != nil { + unix.Close(kq) + return kq, closepipe, err + } + unix.CloseOnExec(closepipe[0]) + unix.CloseOnExec(closepipe[1]) + + // Register changes to listen on the closepipe. + changes := make([]unix.Kevent_t, 1) + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ, + unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT) + + ok, err := unix.Kevent(kq, changes, nil, nil) + if ok == -1 { + unix.Close(kq) + unix.Close(closepipe[0]) + unix.Close(closepipe[1]) + return kq, closepipe, err + } + return kq, closepipe, nil +} + +// Returns true if the event was sent, or false if watcher is closed. 
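+// Selecting on w.done as well means a send can never block forever once the
+// watcher has been closed.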
+func (w *kqueue) sendEvent(e Event) bool { + select { + case <-w.done: + return false + case w.Events <- e: + return true + } +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *kqueue) sendError(err error) bool { + if err == nil { + return true + } + select { + case <-w.done: + return false + case w.Errors <- err: + return true + } +} + +func (w *kqueue) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +func (w *kqueue) Close() error { + w.doneMu.Lock() + if w.isClosed() { + w.doneMu.Unlock() + return nil + } + close(w.done) + w.doneMu.Unlock() + + pathsToRemove := w.watches.listPaths(false) + for _, name := range pathsToRemove { + w.Remove(name) + } + + // Send "quit" message to the reader goroutine. + unix.Close(w.closepipe[1]) + return nil +} + +func (w *kqueue) Add(name string) error { return w.AddWith(name) } + +func (w *kqueue) AddWith(name string, opts ...addOpt) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } + + _, err := w.addWatch(name, noteAllEvents) + if err != nil { + return err + } + w.watches.addUserWatch(name) + return nil +} + +func (w *kqueue) Remove(name string) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + return w.remove(name, true) +} + +func (w *kqueue) remove(name string, unwatchFiles bool) error { + if w.isClosed() { + return nil + } + + name = filepath.Clean(name) + info, ok := w.watches.byPath(name) + if !ok { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + err := w.register([]int{info.wd}, unix.EV_DELETE, 0) + if err != nil { + return err + } + + unix.Close(info.wd) + + isDir := w.watches.remove(info.wd, name) + + // Find all watched paths that are in this directory that are not external. + if unwatchFiles && isDir { + pathsToRemove := w.watches.watchesInDir(name) + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error to + // the user, as that will just confuse them with an error about a + // path they did not explicitly watch themselves. + w.Remove(name) + } + } + return nil +} + +func (w *kqueue) WatchList() []string { + if w.isClosed() { + return nil + } + return w.watches.listPaths(true) +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// addWatch adds name to the watched file set; the flags are interpreted as +// described in kevent(2). +// +// Returns the real path to the file which was added, with symlinks resolved. +func (w *kqueue) addWatch(name string, flags uint32) (string, error) { + if w.isClosed() { + return "", ErrClosed + } + + name = filepath.Clean(name) + + info, alreadyWatching := w.watches.byPath(name) + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets or named pipes. + if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { + return "", nil + } + + // Follow symlinks. 
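+		// (Resolve the target and remember the link's own name in linkName,
+		// so events on the target are reported under the path the user
+		// actually added; see newEvent.)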
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + link, err := os.Readlink(name) + if err != nil { + // Return nil because Linux can add unresolvable symlinks to the + // watch list without problems, so maintain consistency with + // that. There will be no file events for broken symlinks. + // TODO: more specific check; returns os.PathError; ENOENT? + return "", nil + } + + _, alreadyWatching = w.watches.byPath(link) + if alreadyWatching { + // Add to watches so we don't get spurious Create events later + // on when we diff the directories. + w.watches.addLink(name, 0) + return link, nil + } + + info.linkName = name + name = link + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + // Retry on EINTR; open() can return EINTR in practice on macOS. + // See #354, and Go issues 11180 and 39237. + for { + info.wd, err = unix.Open(name, openMode, 0) + if err == nil { + break + } + if errors.Is(err, unix.EINTR) { + continue + } + + return "", err + } + + info.isDir = fi.IsDir() + } + + err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + if err != nil { + unix.Close(info.wd) + return "", err + } + + if !alreadyWatching { + w.watches.add(name, info.linkName, info.wd, info.isDir) + } + + // Watch the directory if it has not been watched before, or if it was + // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + if info.isDir { + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) + w.watches.updateDirFlags(name, flags) + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *kqueue) readEvents() { + defer func() { + close(w.Events) + close(w.Errors) + _ = unix.Close(w.kq) + unix.Close(w.closepipe[0]) + }() + + eventBuffer := make([]unix.Kevent_t, 10) + for { + kevents, err := w.read(eventBuffer) + // EINTR is okay, the syscall was interrupted before timeout expired. + if err != nil && err != unix.EINTR { + if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { + return + } + } + + for _, kevent := range kevents { + var ( + wd = int(kevent.Ident) + mask = uint32(kevent.Fflags) + ) + + // Shut down the loop when the pipe is closed, but only after all + // other events have been processed. + if wd == w.closepipe[0] { + return + } + + path, ok := w.watches.byWd(wd) + if debug { + internal.Debug(path.name, &kevent) + } + + // On macOS it seems that sometimes an event with Ident=0 is + // delivered, and no other flags/information beyond that, even + // though we never saw such a file descriptor. For example in + // TestWatchSymlink/277 (usually at the end, but sometimes sooner): + // + // fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent) + // unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // + // The first is a normal event, the second with Ident 0. No error + // flag, no data, no ... nothing. + // + // I read a bit through bsd/kern_event.c from the xnu source, but I + // don't really see an obvious location where this is triggered – + // this doesn't seem intentional, but idk... + // + // Technically fd 0 is a valid descriptor, so only skip it if + // there's no path, and if we're on macOS. 
+			if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" {
+				continue
+			}
+
+			event := w.newEvent(path.name, path.linkName, mask)
+
+			if event.Has(Rename) || event.Has(Remove) {
+				w.remove(event.Name, false)
+				w.watches.markSeen(event.Name, false)
+			}
+
+			if path.isDir && event.Has(Write) && !event.Has(Remove) {
+				w.dirChange(event.Name)
+			} else if !w.sendEvent(event) {
+				return
+			}
+
+			if event.Has(Remove) {
+				// Look for a file that may have overwritten this; for example,
+				// mv f1 f2 will delete f2, then create f2.
+				if path.isDir {
+					fileDir := filepath.Clean(event.Name)
+					_, found := w.watches.byPath(fileDir)
+					if found {
+						// TODO: this branch is never triggered in any test.
+						// Added in d6220df (2012).
+						// isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111
+						//
+						// I don't really get how this can be triggered either.
+						// And it wasn't triggered in the patch that added it,
+						// either.
+						//
+						// Original also had a comment:
+						//   make sure the directory exists before we watch for
+						//   changes. When we do a recursive watch and perform
+						//   rm -rf, the parent directory might have gone
+						//   missing, ignore the missing directory and let the
+						//   upcoming delete event remove the watch from the
+						//   parent directory.
+						err := w.dirChange(fileDir)
+						if !w.sendError(err) {
+							return
+						}
+					}
+				} else {
+					path := filepath.Clean(event.Name)
+					if fi, err := os.Lstat(path); err == nil {
+						err := w.sendCreateIfNew(path, fi)
+						if !w.sendError(err) {
+							return
+						}
+					}
+				}
+			}
+		}
+	}
+}
+
+// newEvent returns a platform-independent Event based on kqueue Fflags.
+func (w *kqueue) newEvent(name, linkName string, mask uint32) Event {
+	e := Event{Name: name}
+	if linkName != "" {
+		// If the user watched "/path/link" then emit events as "/path/link"
+		// rather than "/path/target".
+		e.Name = linkName
+	}
+
+	if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
+		e.Op |= Remove
+	}
+	if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
+		e.Op |= Write
+	}
+	if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
+		e.Op |= Rename
+	}
+	if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
+		e.Op |= Chmod
+	}
+	// No point sending a write and delete event at the same time: if it's gone,
+	// then it's gone.
+	if e.Op.Has(Write) && e.Op.Has(Remove) {
+		e.Op &^= Write
+	}
+	return e
+}
+
+// watchDirectoryFiles to mimic inotify when adding a watch on a directory.
+func (w *kqueue) watchDirectoryFiles(dirPath string) error {
+	files, err := os.ReadDir(dirPath)
+	if err != nil {
+		return err
+	}
+
+	for _, f := range files {
+		path := filepath.Join(dirPath, f.Name())
+
+		fi, err := f.Info()
+		if err != nil {
+			return fmt.Errorf("%q: %w", path, err)
+		}
+
+		cleanPath, err := w.internalWatch(path, fi)
+		if err != nil {
+			// No permission to read the file; that's not a problem: just skip.
+			// But do mark it as seen to prevent it from being picked up
+			// as a "new" file later (it still shows up in the directory
+			// listing).
+			switch {
+			case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM):
+				cleanPath = filepath.Clean(path)
+			default:
+				return fmt.Errorf("%q: %w", path, err)
+			}
+		}
+
+		w.watches.markSeen(cleanPath, true)
+	}
+
+	return nil
+}
+
+// Search the directory for new files and send an event for them.
+//
+// This functionality is to have the BSD watcher match inotify, which sends
+// a create event for files created in a watched directory.
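+// Files that were already seen are skipped, so only genuinely new files
+// produce a Create event; see sendCreateIfNew and the seen map.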
+func (w *kqueue) dirChange(dir string) error { + files, err := os.ReadDir(dir) + if err != nil { + // Directory no longer exists: we can ignore this safely. kqueue will + // still give us the correct events. + if errors.Is(err, os.ErrNotExist) { + return nil + } + return fmt.Errorf("fsnotify.dirChange: %w", err) + } + + for _, f := range files { + fi, err := f.Info() + if err != nil { + return fmt.Errorf("fsnotify.dirChange: %w", err) + } + + err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) + if err != nil { + // Don't need to send an error if this file isn't readable. + if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { + return nil + } + return fmt.Errorf("fsnotify.dirChange: %w", err) + } + } + return nil +} + +// Send a create event if the file isn't already being tracked, and start +// watching this file. +func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error { + if !w.watches.seenBefore(path) { + if !w.sendEvent(Event{Name: path, Op: Create}) { + return nil + } + } + + // Like watchDirectoryFiles, but without doing another ReadDir. + path, err := w.internalWatch(path, fi) + if err != nil { + return err + } + w.watches.markSeen(path, true) + return nil +} + +func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { + if fi.IsDir() { + // mimic Linux providing delete events for subdirectories, but preserve + // the flags used if currently watching subdirectory + info, _ := w.watches.byPath(name) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// Register events with the queue. +func (w *kqueue) register(fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + for i, fd := range fds { + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // Register the events. + success, err := unix.Kevent(w.kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(w.kq, nil, events, nil) + if err != nil { + return nil, err + } + return events[0:n], nil +} + +func (w *kqueue) xSupports(op Op) bool { + if runtime.GOOS == "freebsd" { + //return true // Supports everything. 
+ } + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go new file mode 100644 index 00000000..5eb5dbc6 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -0,0 +1,23 @@ +//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) + +package fsnotify + +import "errors" + +type other struct { + Events chan Event + Errors chan error +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return nil, errors.New("fsnotify not supported on the current platform") +} +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + return newBackend(ev, errs) +} +func (w *other) Close() error { return nil } +func (w *other) WatchList() []string { return nil } +func (w *other) Add(name string) error { return nil } +func (w *other) AddWith(name string, opts ...addOpt) error { return nil } +func (w *other) Remove(name string) error { return nil } +func (w *other) xSupports(op Op) bool { return false } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go new file mode 100644 index 00000000..c54a6308 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -0,0 +1,682 @@ +//go:build windows + +// Windows backend based on ReadDirectoryChangesW() +// +// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "sync" + "time" + "unsafe" + + "github.com/fsnotify/fsnotify/internal" + "golang.org/x/sys/windows" +) + +type readDirChangesW struct { + Events chan Event + Errors chan error + + port windows.Handle // Handle to completion port + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error + + mu sync.Mutex // Protects access to watches, closed + watches watchMap // Map of watches (key: i-number) + closed bool // Set to true when Close() is first called +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(50, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) + if err != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", err) + } + w := &readDirChangesW{ + Events: ev, + Errors: errs, + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +func (w *readDirChangesW) isClosed() bool { + w.mu.Lock() + defer w.mu.Unlock() + return w.closed +} + +func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool { + if mask == 0 { + return false + } + + event := w.newEvent(name, uint32(mask)) + event.renamedFrom = renamedFrom + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +// Returns true if the error was sent, or false if watcher is closed. 
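
sendEvent above has to choose between delivering on Events and honoring a pending Close, and it re-queues the quit request after winning the race so that Close still receives its reply. Below is a reduced, hypothetical sketch of that send-or-shut-down pattern, simplified in one respect: it reports false when shutdown wins, whereas the vendored sendEvent reports true whenever the mask is non-zero.

package main

import "fmt"

type watcher struct {
	events chan string
	quit   chan chan<- error
}

// send delivers ev unless a shutdown request is pending. The quit channel
// carries the reply channel for Close, so after winning the race we must
// put the request back for the reader loop to answer.
func (w *watcher) send(ev string) bool {
	select {
	case ch := <-w.quit:
		w.quit <- ch // re-queue the pending Close request
		return false
	case w.events <- ev:
		return true
	}
}

func main() {
	w := &watcher{
		events: make(chan string, 1),
		quit:   make(chan chan<- error, 1),
	}
	fmt.Println(w.send("hello")) // true: delivered
}

The matching error-delivery path, sendError, follows.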
+func (w *readDirChangesW) sendError(err error) bool { + if err == nil { + return true + } + select { + case w.Errors <- err: + return true + case <-w.quit: + return false + } +} + +func (w *readDirChangesW) Close() error { + if w.isClosed() { + return nil + } + + w.mu.Lock() + w.closed = true + w.mu.Unlock() + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) } + +func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error { + if w.isClosed() { + return ErrClosed + } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } + if with.bufsize < 4096 { + return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") + } + + in := &input{ + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + bufsize: with.bufsize, + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +func (w *readDirChangesW) Remove(name string) error { + if w.isClosed() { + return nil + } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } + + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +func (w *readDirChangesW) WatchList() []string { + if w.isClosed() { + return nil + } + + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for _, entry := range w.watches { + for _, watchEntry := range entry { + for name := range watchEntry.names { + entries = append(entries, filepath.Join(watchEntry.path, name)) + } + // the directory itself is being watched + if watchEntry.mask != 0 { + entries = append(entries, watchEntry.path) + } + } + } + + return entries +} + +// These options are from the old golang.org/x/exp/winfsnotify, where you could +// add various options to the watch. This has long since been removed. +// +// The "sys" in the name is misleading as they're not part of any "system". 
+// +// This should all be removed at some point, and just use windows.FILE_NOTIFY_* +const ( + sysFSALLEVENTS = 0xfff + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + sysFSIGNORED = 0x8000 +) + +func (w *readDirChangesW) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + bufsize int + reply chan error +} + +type inode struct { + handle windows.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov windows.Overlapped + ino *inode // i-number + recurse bool // Recursive watch? + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf []byte // buffer, allocated later +} + +type ( + indexMap map[uint64]*watch + watchMap map[uint32]indexMap +) + +func (w *readDirChangesW) wakeupReader() error { + err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if err != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", err) + } + return nil +} + +func (w *readDirChangesW) getDir(pathname string) (dir string, err error) { + attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) + if err != nil { + return "", os.NewSyscallError("GetFileAttributes", err) + } + if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func (w *readDirChangesW) getIno(path string) (ino *inode, err error) { + h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), + windows.FILE_LIST_DIRECTORY, + windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, + nil, windows.OPEN_EXISTING, + windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0) + if err != nil { + return nil, os.NewSyscallError("CreateFile", err) + } + + var fi windows.ByHandleFileInformation + err = windows.GetFileInformationByHandle(h, &fi) + if err != nil { + windows.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", err) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. +func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. 
+func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error { + pathname, recurse := recursivePath(pathname) + + dir, err := w.getDir(pathname) + if err != nil { + return err + } + + ino, err := w.getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + _, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0) + if err != nil { + windows.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", err) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + recurse: recurse, + buf: make([]byte, bufsize), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + windows.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + + err = w.startRead(watchEntry) + if err != nil { + return err + } + + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *readDirChangesW) remWatch(pathname string) error { + pathname, recurse := recursivePath(pathname) + + dir, err := w.getDir(pathname) + if err != nil { + return err + } + ino, err := w.getIno(dir) + if err != nil { + return err + } + + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + + if recurse && !watch.recurse { + return fmt.Errorf("can't use \\... with non-recursive watch %q", pathname) + } + + err = windows.CloseHandle(ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CloseHandle", err)) + } + if watch == nil { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) + } + if pathname == dir { + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *readDirChangesW) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. +func (w *readDirChangesW) startRead(watch *watch) error { + err := windows.CancelIo(watch.ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CancelIo", err)) + w.deleteWatch(watch) + } + mask := w.toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= w.toWindowsFlags(m) + } + if mask == 0 { + err := windows.CloseHandle(watch.ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CloseHandle", err)) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + + // We need to pass the array, rather than the slice. 
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf)) + rdErr := windows.ReadDirectoryChanges(watch.ino.handle, + (*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len), + watch.recurse, mask, nil, &watch.ov, 0) + if rdErr != nil { + err := os.NewSyscallError("ReadDirectoryChanges", rdErr) + if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. +func (w *readDirChangesW) readEvents() { + var ( + n uint32 + key uintptr + ov *windows.Overlapped + ) + runtime.LockOSThread() + + for { + // This error is handled after the watch == nil check below. + qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE) + + watch := (*watch)(unsafe.Pointer(ov)) + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + + err := windows.CloseHandle(w.port) + if err != nil { + err = os.NewSyscallError("CloseHandle", err) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch qErr { + case nil: + // No error + case windows.ERROR_MORE_DATA: + if watch == nil { + w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")) + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. + n = uint32(unsafe.Sizeof(watch.buf)) + } + case windows.ERROR_ACCESS_DENIED: + // Watched directory was probably removed + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) + w.deleteWatch(watch) + w.startRead(watch) + continue + case windows.ERROR_OPERATION_ABORTED: + // CancelIo was called on this handle + continue + default: + w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr)) + continue + } + + var offset uint32 + for { + if n == 0 { + w.sendError(ErrEventOverflow) + break + } + + // Point "raw" to the event in the buffer + raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) + + // Create a buf that is the size of the path name + size := int(raw.FileNameLength / 2) + var buf []uint16 + // TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973 + sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) + sh.Len = size + sh.Cap = size + name := windows.UTF16ToString(buf) + fullname := filepath.Join(watch.path, name) + + if debug { + internal.Debug(fullname, raw.Action) + } + + var mask uint64 + switch raw.Action { + case windows.FILE_ACTION_REMOVED: + mask = sysFSDELETESELF + case windows.FILE_ACTION_MODIFIED: + mask = sysFSMODIFY + case windows.FILE_ACTION_RENAMED_OLD_NAME: + watch.rename = name + case windows.FILE_ACTION_RENAMED_NEW_NAME: + // Update saved path of all sub-watches. 
+ old := filepath.Join(watch.path, watch.rename) + w.mu.Lock() + for _, watchMap := range w.watches { + for _, ww := range watchMap { + if strings.HasPrefix(ww.path, old) { + ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old)) + } + } + } + w.mu.Unlock() + + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(fullname, "", watch.names[name]&mask) + } + if raw.Action == windows.FILE_ACTION_REMOVED { + w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + + if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action)) + } else { + w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action)) + } + + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask) + } + + // Move to the next event in the buffer + if raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! + if offset >= n { + //lint:ignore ST1005 Windows should be capitalized + w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed")) + break + } + } + + if err := w.startRead(watch); err != nil { + w.sendError(err) + } + } +} + +func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSMODIFY != 0 { + m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 { + switch action { + case windows.FILE_ACTION_ADDED: + return sysFSCREATE + case windows.FILE_ACTION_REMOVED: + return sysFSDELETE + case windows.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case windows.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case windows.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} + +func (w *readDirChangesW) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go deleted file mode 100644 index b3ac3d8f..00000000 --- a/vendor/github.com/fsnotify/fsnotify/fen.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build solaris -// +build solaris - -package fsnotify - -import ( - "errors" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). 
-func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 0f4ee52e..0760efe9 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -1,69 +1,494 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !plan9 -// +build !plan9 - -// Package fsnotify provides a platform-independent interface for file system notifications. +// Package fsnotify provides a cross-platform interface for file system +// notifications. +// +// Currently supported systems: +// +// - Linux via inotify +// - BSD, macOS via kqueue +// - Windows via ReadDirectoryChangesW +// - illumos via FEN +// +// # FSNOTIFY_DEBUG +// +// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to +// stderr. This can be useful to track down some problems, especially in cases +// where fsnotify is used as an indirect dependency. +// +// Every event will be printed as soon as there's something useful to print, +// with as little processing from fsnotify. +// +// Example output: +// +// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1" package fsnotify import ( - "bytes" "errors" "fmt" + "os" + "path/filepath" + "strings" ) -// Event represents a single file system notification. +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. 
You will run into your system's "max open files" limit faster on
+// these platforms.
+//
+// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
+// control the maximum number of open files, as well as /etc/login.conf on BSD
+// systems.
+//
+// # Windows notes
+//
+// Paths can be added as "C:\\path\\to\\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
+//
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all files, sometimes it will send no
+// events, and often only for some files.
+//
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
+type Watcher struct {
+	b backend
+
+	// Events sends the filesystem change events.
+	//
+	// fsnotify can send the following events; a "path" here can refer to a
+	// file, directory, symbolic link, or special file like a FIFO.
+	//
+	//   fsnotify.Create    A new path was created; this may be followed by one
+	//                      or more Write events if data also gets written to a
+	//                      file.
+	//
+	//   fsnotify.Remove    A path was removed.
+	//
+	//   fsnotify.Rename    A path was renamed. A rename is always sent with the
+	//                      old path as Event.Name, and a Create event will be
+	//                      sent with the new name. Renames are only sent for
+	//                      paths that are currently watched; e.g. moving an
+	//                      unmonitored file into a monitored directory will
+	//                      show up as just a Create. Similarly, renaming a file
+	//                      to outside a monitored directory will show up as
+	//                      only a Rename.
+	//
+	//   fsnotify.Write     A file or named pipe was written to. A Truncate will
+	//                      also trigger a Write. A single "write action"
+	//                      initiated by the user may show up as one or multiple
+	//                      writes, depending on when the system syncs things to
+	//                      disk. For example when compiling a large Go program
+	//                      you may get hundreds of Write events, and you may
+	//                      want to wait until you've stopped receiving them
+	//                      (see the dedup example in cmd/fsnotify).
+	//
+	//                      Some systems may send a Write event for directories
+	//                      when the directory content changes.
+	//
+	//   fsnotify.Chmod     Attributes were changed. On Linux this is also sent
+	//                      when a file is removed (or more accurately, when a
+	//                      link to an inode is removed). On kqueue it's sent
+	//                      when a file is truncated. On Windows it's never
+	//                      sent.
+	Events chan Event
+
+	// Errors sends any errors.
+	Errors chan error
+}
+
+// Event represents a file system notification.
+type Event struct {
+	// Path to the file or directory.
+	//
+	// Paths are relative to the input; for example with Add("dir") the Name
+	// will be set to "dir/file" if you create that file, but if you use
+	// Add("/path/to/dir") it will be "/path/to/dir/file".
+	Name string
+
+	// File operation that triggered the event.
+	//
+	// This is a bitmask and some systems may send multiple operations at once.
+	// Use the Event.Has() method instead of comparing with ==.
+	Op Op
+
+	// Create events will have this set to the old path if it's a rename. This
+	// only works when both the source and destination are watched. It's not
+	// reliable when watching individual files, only directories.
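
The Write-burst caveat in the Events documentation above is usually handled by debouncing. The following is a small sketch in the spirit of the dedup example the docs mention; the 100ms quiet period and the watched directory are arbitrary choices.

package main

import (
	"log"
	"time"

	"github.com/fsnotify/fsnotify"
)

// debounce coalesces bursts of Write events per path: fn runs only after
// the path has been quiet for the duration d.
func debounce(w *fsnotify.Watcher, d time.Duration, fn func(string)) {
	timers := map[string]*time.Timer{}
	for ev := range w.Events {
		if !ev.Has(fsnotify.Write) {
			continue
		}
		name := ev.Name
		if t, ok := timers[name]; ok {
			t.Reset(d) // still busy: push the deadline out again
			continue
		}
		timers[name] = time.AfterFunc(d, func() { fn(name) })
	}
}

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	if err := w.Add("."); err != nil {
		log.Fatal(err)
	}
	go debounce(w, 100*time.Millisecond, func(p string) { log.Println("settled:", p) })
	time.Sleep(10 * time.Second)
}

The Event struct's rename bookkeeping continues below.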
+ // + // For example "mv /tmp/file /tmp/rename" will emit: + // + // Event{Op: Rename, Name: "/tmp/file"} + // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"} + renamedFrom string } // Op describes a set of file operations. type Op uint32 -// These are the generalized file operations that can trigger a notification. +// The operations fsnotify can trigger; see the documentation on [Watcher] for a +// full description, and check them with [Event.Has]. const ( + // A new pathname was created. Create Op = 1 << iota + + // The pathname was written to; this does *not* mean the write has finished, + // and a write can be followed by more writes. Write + + // The path was removed; any watches on it will be removed. Some "remove" + // operations may trigger a Rename if the file is actually moved (for + // example "remove to trash" is often a rename). Remove + + // The path was renamed to something else; any watches on it will be + // removed. Rename + + // File attributes were changed. + // + // It's generally not recommended to take action on this event, as it may + // get triggered very frequently by some software. For example, Spotlight + // indexing on macOS, anti-virus software, backup software, etc. Chmod + + // File descriptor was opened. + // + // Only works on Linux and FreeBSD. + xUnportableOpen + + // File was read from. + // + // Only works on Linux and FreeBSD. + xUnportableRead + + // File opened for writing was closed. + // + // Only works on Linux and FreeBSD. + // + // The advantage of using this over Write is that it's more reliable than + // waiting for Write events to stop. It's also faster (if you're not + // listening to Write events): copying a file of a few GB can easily + // generate tens of thousands of Write events in a short span of time. + xUnportableCloseWrite + + // File opened for reading was closed. + // + // Only works on Linux and FreeBSD. + xUnportableCloseRead ) -func (op Op) String() string { - // Use a buffer for efficient string concatenation - var buffer bytes.Buffer +var ( + // ErrNonExistentWatch is used when Remove() is called on a path that's not + // added. + ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") + + // ErrClosed is used when trying to operate on a closed Watcher. + ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrEventOverflow is reported from the Errors channel when there are too + // many events: + // + // - inotify: inotify returns IN_Q_OVERFLOW – because there are too + // many queued events (the fs.inotify.max_queued_events + // sysctl can be used to increase this). + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") + + // ErrUnsupported is returned by AddWith() when WithOps() specified an + // Unportable event that's not supported on this platform. + xErrUnsupported = errors.New("fsnotify: not supported with this backend") +) + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBackend(ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. +// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. 
due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) {
+	ev, errs := make(chan Event), make(chan error)
+	b, err := newBufferedBackend(sz, ev, errs)
+	if err != nil {
+		return nil, err
+	}
+	return &Watcher{b: b, Events: ev, Errors: errs}, nil
+}
+
+// Add starts monitoring the path for changes.
+//
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
+//
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
+//
+// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
+// filesystems (/proc, /sys, etc.) generally don't work.
+//
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
+// # Watching directories
+//
+// All files in a directory are monitored, including new files that are created
+// after the watcher is started. Subdirectories are not watched (i.e. it's
+// non-recursive).
+//
+// # Watching files
+//
+// Watching individual files (rather than directories) is generally not
+// recommended as many programs (especially editors) update files atomically:
+// they write to a temporary file which is then moved to the destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
+//
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(path string) error { return w.b.Add(path) }
+
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+//   - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+//     other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) }
+
+// Remove stops monitoring the path for changes.
+//
+// Directories are always removed non-recursively. For example, if you added
+// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
+//
+// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) Remove(path string) error { return w.b.Remove(path) }
+
+// Close removes all watches and closes the Events channel.
+func (w *Watcher) Close() error { return w.b.Close() }
-
+// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
+// yet removed).
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) WatchList() []string { return w.b.WatchList() }
+
+// Supports reports if all the listed operations are supported by this platform.
+//
+// Create, Write, Remove, Rename, and Chmod are always supported. It can only
+// return false for an Op starting with Unportable.
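
Taken together, Add, the Events/Errors channels, and Close make up the whole public surface described above. A minimal consumer looks like this (watching /tmp is just an example):

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok { // channel was closed by Close()
				return
			}
			// Op is a bitmask: test with Has, not with ==.
			if ev.Has(fsnotify.Create) {
				log.Println("created:", ev.Name)
			}
			log.Println("event:", ev)
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}

The xSupports method described just above follows.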
+func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) }
+
+func (o Op) String() string {
+	var b strings.Builder
+	if o.Has(Create) {
+		b.WriteString("|CREATE")
+	}
+	if o.Has(Remove) {
+		b.WriteString("|REMOVE")
+	}
+	if o.Has(Write) {
+		b.WriteString("|WRITE")
 	}
-	if op&Remove == Remove {
-		buffer.WriteString("|REMOVE")
+	if o.Has(xUnportableOpen) {
+		b.WriteString("|OPEN")
 	}
-	if op&Write == Write {
-		buffer.WriteString("|WRITE")
+	if o.Has(xUnportableRead) {
+		b.WriteString("|READ")
 	}
-	if op&Rename == Rename {
-		buffer.WriteString("|RENAME")
+	if o.Has(xUnportableCloseWrite) {
+		b.WriteString("|CLOSE_WRITE")
 	}
-	if op&Chmod == Chmod {
-		buffer.WriteString("|CHMOD")
+	if o.Has(xUnportableCloseRead) {
+		b.WriteString("|CLOSE_READ")
 	}
-	if buffer.Len() == 0 {
-		return ""
+	if o.Has(Rename) {
+		b.WriteString("|RENAME")
 	}
-	return buffer.String()[1:] // Strip leading pipe
+	if o.Has(Chmod) {
+		b.WriteString("|CHMOD")
+	}
+	if b.Len() == 0 {
+		return "[no events]"
+	}
+	return b.String()[1:]
 }
 
-// String returns a string representation of the event in the form
-// "file: REMOVE|WRITE|..."
+// Has reports if this operation includes the given operation.
+func (o Op) Has(h Op) bool { return o&h != 0 }
+
+// Has reports if this event includes the given operation.
+func (e Event) Has(op Op) bool { return e.Op.Has(op) }
+
+// String returns a string representation of the event with its path.
 func (e Event) String() string {
-	return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
+	if e.renamedFrom != "" {
+		return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom)
+	}
+	return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
 }
 
-// Common errors that can be reported by a watcher
-var (
-	ErrEventOverflow = errors.New("fsnotify queue overflow")
+type (
+	backend interface {
+		Add(string) error
+		AddWith(string, ...addOpt) error
+		Remove(string) error
+		WatchList() []string
+		Close() error
+		xSupports(Op) bool
+	}
+	addOpt   func(opt *withOpts)
+	withOpts struct {
+		bufsize    int
+		op         Op
+		noFollow   bool
+		sendCreate bool
+	}
+)
+
+var debug = func() bool {
+	// Check for exactly "1" (rather than mere existence) so we can add
+	// options/flags in the future. I don't know if we ever want that, but it's
+	// nice to leave the option open.
+	return os.Getenv("FSNOTIFY_DEBUG") == "1"
+}()
+
+var defaultOpts = withOpts{
+	bufsize: 65536, // 64K
+	op:      Create | Write | Remove | Rename | Chmod,
+}
+
+func getOptions(opts ...addOpt) withOpts {
+	with := defaultOpts
+	for _, o := range opts {
+		if o != nil {
+			o(&with)
+		}
+	}
+	return with
+}
+
+// WithBufferSize sets the [ReadDirectoryChangesW] buffer size.
+//
+// This only has effect on Windows systems, and is a no-op for other backends.
+//
+// The default value is 64K (65536 bytes) which is the highest value that works
+// on all filesystems and should be enough for most applications, but if you
+// have a large burst of events it may not be enough. You can increase it if
+// you're hitting "queue or buffer overflow" errors ([ErrEventOverflow]).
+//
+// [ReadDirectoryChangesW]: https://learn.microsoft.com/en-gb/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
+func WithBufferSize(bytes int) addOpt {
+	return func(opt *withOpts) { opt.bufsize = bytes }
+}
+
+// WithOps sets which operations to listen for. The default is [Create],
+// [Write], [Remove], [Rename], and [Chmod].
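
WithBufferSize, defined above, is the option most callers will reach for. Passing it through AddWith looks like this; the path and the 256 KB figure are arbitrary examples:

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// On Windows this enlarges the ReadDirectoryChangesW buffer for this
	// watch; on every other platform the option is accepted and ignored.
	if err := w.AddWith(`C:\some\busy\dir`, fsnotify.WithBufferSize(256*1024)); err != nil {
		log.Fatal(err)
	}
}

The rest of the withOps documentation continues below.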
+// +// Excluding operations you're not interested in can save quite a bit of CPU +// time; in some use cases there may be hundreds of thousands of useless Write +// or Chmod operations per second. +// +// This can also be used to add unportable operations not supported by all +// platforms; unportable operations all start with "Unportable": +// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and +// [UnportableCloseRead]. +// +// AddWith returns an error when using an unportable operation that's not +// supported. Use [Watcher.Support] to check for support. +func withOps(op Op) addOpt { + return func(opt *withOpts) { opt.op = op } +} + +// WithNoFollow disables following symlinks, so the symlinks themselves are +// watched. +func withNoFollow() addOpt { + return func(opt *withOpts) { opt.noFollow = true } +} + +// "Internal" option for recursive watches on inotify. +func withCreate() addOpt { + return func(opt *withOpts) { opt.sendCreate = true } +} + +var enableRecurse = false + +// Check if this path is recursive (ends with "/..." or "\..."), and return the +// path with the /... stripped. +func recursivePath(path string) (string, bool) { + path = filepath.Clean(path) + if !enableRecurse { // Only enabled in tests for now. + return path, false + } + if filepath.Base(path) == "..." { + return filepath.Dir(path), true + } + return path, false +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go b/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go deleted file mode 100644 index 59688559..00000000 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows -// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -package fsnotify - -import ( - "fmt" - "runtime" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct{} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS) -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go deleted file mode 100644 index a6d0e0ec..00000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. 
-type Watcher struct { - Events chan Event - Errors chan error - mu sync.Mutex // Map access - fd int - poller *fdPoller - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneResp chan struct{} // Channel to respond to Close -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - // Create inotify fd - fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) - if fd == -1 { - return nil, errno - } - // Create epoll - poller, err := newFdPoller(fd) - if err != nil { - unix.Close(fd) - return nil, err - } - w := &Watcher{ - fd: fd, - poller: poller, - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - // Send 'close' signal to goroutine, and set the Watcher to closed. - close(w.done) - - // Wake up goroutine - w.poller.wake() - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) - if w.isClosed() { - return errors.New("inotify instance already closed") - } - - const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - - var flags uint32 = agnosticEvents - - w.mu.Lock() - defer w.mu.Unlock() - watchEntry := w.watches[name] - if watchEntry != nil { - flags |= watchEntry.flags | unix.IN_MASK_ADD - } - wd, errno := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } - - if watchEntry == nil { - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - } else { - watchEntry.wd = uint32(wd) - watchEntry.flags = flags - } - - return nil -} - -// Remove stops watching the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. - w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] - - // Remove it from inotify. - if !ok { - return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) - } - - // We successfully removed the watch if InotifyRmWatch doesn't return an - // error, we need to clean up our internal state to ensure it matches - // inotify's kernel state. - delete(w.paths, int(watch.wd)) - delete(w.watches, name) - - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously - // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE - // so that EINVAL means that the wd is being rm_watch()ed or its file removed - // by another thread and we have not received IN_IGNORE event. - success, errno := unix.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case. 
- // the only two possible errors are: - // EBADF, which happens when w.fd is not a valid file descriptor of any kind. - // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. - // Watch descriptors are invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. - return errno - } - - return nil -} - -// WatchList returns the directories and files that are being monitered. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for pathname := range w.watches { - entries = append(entries, pathname) - } - - return entries -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - n int // Number of bytes read with read() - errno error // Syscall errno - ok bool // For poller.wait - ) - - defer close(w.doneResp) - defer close(w.Errors) - defer close(w.Events) - defer unix.Close(w.fd) - defer w.poller.close() - - for { - // See if we have been closed. - if w.isClosed() { - return - } - - ok, errno = w.poller.wait() - if errno != nil { - select { - case w.Errors <- errno: - case <-w.done: - return - } - continue - } - - if !ok { - continue - } - - n, errno = unix.Read(w.fd, buf[:]) - // If a signal interrupted execution, see if we've been asked to close, and try again. - // http://man7.org/linux/man-pages/man7/signal.7.html : - // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" - if errno == unix.EINTR { - continue - } - - // unix.Read might have been woken up by Close. If so, we're done. - if w.isClosed() { - return - } - - if n < unix.SizeofInotifyEvent { - var err error - if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF - } else if n < 0 { - // If an error occurred while reading. - err = errno - } else { - // Read was too short. - err = errors.New("notify: short read in readEvents()") - } - select { - case w.Errors <- err: - case <-w.done: - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-unix.SizeofInotifyEvent) { - // Point "raw" to the event in the buffer - raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - - mask := uint32(raw.Mask) - nameLen := uint32(raw.Len) - - if mask&unix.IN_Q_OVERFLOW != 0 { - select { - case w.Errors <- ErrEventOverflow: - case <-w.done: - return - } - } - - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - w.mu.Lock() - name, ok := w.paths[int(raw.Wd)] - // IN_DELETE_SELF occurs when the file/directory being watched is removed. - // This is a sign to clean up the maps, otherwise we are no longer in sync - // with the inotify kernel state which has already deleted the watch - // automatically. 
- if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - delete(w.paths, int(raw.Wd)) - delete(w.watches, name) - } - w.mu.Unlock() - - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if !event.ignoreLinux(mask) { - select { - case w.Events <- event: - case <-w.done: - return - } - } - - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen - } - } -} - -// Certain types of events can be "ignored" and not sent over the Events -// channel. Such as events marked ignore by the kernel, or MODIFY events -// against files that do not exist. -func (e *Event) ignoreLinux(mask uint32) bool { - // Ignore anything the inotify API says to ignore - if mask&unix.IN_IGNORED == unix.IN_IGNORED { - return true - } - - // If the event is not a DELETE or RENAME, the file must exist. - // Otherwise the event is ignored. - // *Note*: this was put in place because it was seen that a MODIFY - // event was sent after the DELETE. This ignores that MODIFY and - // assumes a DELETE will come or has come if the file doesn't exist. - if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { - _, statErr := os.Lstat(e.Name) - return os.IsNotExist(statErr) - } - return false -} - -// newEvent returns an platform-independent Event based on an inotify mask. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - e.Op |= Create - } - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { - e.Op |= Remove - } - if mask&unix.IN_MODIFY == unix.IN_MODIFY { - e.Op |= Write - } - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go deleted file mode 100644 index b572a37c..00000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package fsnotify - -import ( - "errors" - - "golang.org/x/sys/unix" -) - -type fdPoller struct { - fd int // File descriptor (as returned by the inotify_init() syscall) - epfd int // Epoll file descriptor - pipe [2]int // Pipe for waking up -} - -func emptyPoller(fd int) *fdPoller { - poller := new(fdPoller) - poller.fd = fd - poller.epfd = -1 - poller.pipe[0] = -1 - poller.pipe[1] = -1 - return poller -} - -// Create a new inotify poller. -// This creates an inotify handler, and an epoll handler. -func newFdPoller(fd int) (*fdPoller, error) { - var errno error - poller := emptyPoller(fd) - defer func() { - if errno != nil { - poller.close() - } - }() - - // Create epoll fd - poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) - if poller.epfd == -1 { - return nil, errno - } - // Create pipe; pipe[0] is the read end, pipe[1] the write end. 
- errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) - if errno != nil { - return nil, errno - } - - // Register inotify fd with epoll - event := unix.EpollEvent{ - Fd: int32(poller.fd), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) - if errno != nil { - return nil, errno - } - - // Register pipe fd with epoll - event = unix.EpollEvent{ - Fd: int32(poller.pipe[0]), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) - if errno != nil { - return nil, errno - } - - return poller, nil -} - -// Wait using epoll. -// Returns true if something is ready to be read, -// false if there is not. -func (poller *fdPoller) wait() (bool, error) { - // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. - // I don't know whether epoll_wait returns the number of events returned, - // or the total number of events ready. - // I decided to catch both by making the buffer one larger than the maximum. - events := make([]unix.EpollEvent, 7) - for { - n, errno := unix.EpollWait(poller.epfd, events, -1) - if n == -1 { - if errno == unix.EINTR { - continue - } - return false, errno - } - if n == 0 { - // If there are no events, try again. - continue - } - if n > 6 { - // This should never happen. More events were returned than should be possible. - return false, errors.New("epoll_wait returned more events than I know what to do with") - } - ready := events[:n] - epollhup := false - epollerr := false - epollin := false - for _, event := range ready { - if event.Fd == int32(poller.fd) { - if event.Events&unix.EPOLLHUP != 0 { - // This should not happen, but if it does, treat it as a wakeup. - epollhup = true - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the file descriptor, we should pretend - // something is ready to read, and let unix.Read pick up the error. - epollerr = true - } - if event.Events&unix.EPOLLIN != 0 { - // There is data to read. - epollin = true - } - } - if event.Fd == int32(poller.pipe[0]) { - if event.Events&unix.EPOLLHUP != 0 { - // Write pipe descriptor was closed, by us. This means we're closing down the - // watcher, and we should wake up. - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the pipe file descriptor. - // This is an absolute mystery, and should never ever happen. - return false, errors.New("Error on the pipe descriptor.") - } - if event.Events&unix.EPOLLIN != 0 { - // This is a regular wakeup, so we have to clear the buffer. - err := poller.clearWake() - if err != nil { - return false, err - } - } - } - } - - if epollhup || epollerr || epollin { - return true, nil - } - return false, nil - } -} - -// Close the write end of the poller. -func (poller *fdPoller) wake() error { - buf := make([]byte, 1) - n, errno := unix.Write(poller.pipe[1], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is full, poller will wake. - return nil - } - return errno - } - return nil -} - -func (poller *fdPoller) clearWake() error { - // You have to be woken up a LOT in order to get to 100! - buf := make([]byte, 100) - n, errno := unix.Read(poller.pipe[0], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is empty, someone else cleared our wake. - return nil - } - return errno - } - return nil -} - -// Close all poller file descriptors, but not the one passed to it. 
-func (poller *fdPoller) close() { - if poller.pipe[1] != -1 { - unix.Close(poller.pipe[1]) - } - if poller.pipe[0] != -1 { - unix.Close(poller.pipe[0]) - } - if poller.epfd != -1 { - unix.Close(poller.epfd) - } -} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go new file mode 100644 index 00000000..b0eab100 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -0,0 +1,39 @@ +//go:build darwin + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ +func SetRlimit() { + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = l.Cur + + if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } + + if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go new file mode 100644 index 00000000..928319fb --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go @@ -0,0 +1,57 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CRITICAL", unix.NOTE_CRITICAL}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS}, + {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR}, + {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL}, + {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL}, + {"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK}, + {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY}, + {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK}, + {"NOTE_LEEWAY", unix.NOTE_LEEWAY}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MACHTIME", unix.NOTE_MACHTIME}, + {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME}, + {"NOTE_NONE", unix.NOTE_NONE}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OOB", unix.NOTE_OOB}, + //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!) 
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_REAP", unix.NOTE_REAP}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_SIGNAL", unix.NOTE_SIGNAL}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR}, + {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE}, + {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go new file mode 100644 index 00000000..3186b0c3 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go @@ -0,0 +1,33 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_OOB", unix.NOTE_OOB}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go new file mode 100644 index 00000000..f69fdb93 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go @@ -0,0 +1,42 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSTIME", unix.NOTE_ABSTIME}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CLOSE", unix.NOTE_CLOSE}, + {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MSECONDS", unix.NOTE_MSECONDS}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OPEN", unix.NOTE_OPEN}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_READ", unix.NOTE_READ}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_WRITE", 
unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go new file mode 100644 index 00000000..607e683b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go @@ -0,0 +1,32 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin + +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, kevent *unix.Kevent_t) { + mask := uint32(kevent.Fflags) + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go new file mode 100644 index 00000000..35c734be --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go @@ -0,0 +1,56 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask, cookie uint32) { + names := []struct { + n string + m uint32 + }{ + {"IN_ACCESS", unix.IN_ACCESS}, + {"IN_ATTRIB", unix.IN_ATTRIB}, + {"IN_CLOSE", unix.IN_CLOSE}, + {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE}, + {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE}, + {"IN_CREATE", unix.IN_CREATE}, + {"IN_DELETE", unix.IN_DELETE}, + {"IN_DELETE_SELF", unix.IN_DELETE_SELF}, + {"IN_IGNORED", unix.IN_IGNORED}, + {"IN_ISDIR", unix.IN_ISDIR}, + {"IN_MODIFY", unix.IN_MODIFY}, + {"IN_MOVE", unix.IN_MOVE}, + {"IN_MOVED_FROM", unix.IN_MOVED_FROM}, + {"IN_MOVED_TO", unix.IN_MOVED_TO}, + {"IN_MOVE_SELF", unix.IN_MOVE_SELF}, + {"IN_OPEN", unix.IN_OPEN}, + {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW}, + {"IN_UNMOUNT", unix.IN_UNMOUNT}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + var c string + if cookie > 0 { + c = fmt.Sprintf("(cookie: %d) ", cookie) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go new file mode 100644 index 00000000..e5b3b6f6 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go @@ -0,0 +1,25 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go new file mode 100644 index 00000000..1dd455bc --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go @@ -0,0 +1,28 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386? + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EOF", unix.NOTE_EOF}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go new file mode 100644 index 00000000..f1b2e73b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go @@ -0,0 +1,45 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask int32) { + names := []struct { + n string + m int32 + }{ + {"FILE_ACCESS", unix.FILE_ACCESS}, + {"FILE_MODIFIED", unix.FILE_MODIFIED}, + {"FILE_ATTRIB", unix.FILE_ATTRIB}, + {"FILE_TRUNC", unix.FILE_TRUNC}, + {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW}, + {"FILE_DELETE", unix.FILE_DELETE}, + {"FILE_RENAME_TO", unix.FILE_RENAME_TO}, + {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM}, + {"UNMOUNTED", unix.UNMOUNTED}, + {"MOUNTEDOVER", unix.MOUNTEDOVER}, + {"FILE_EXCEPTION", unix.FILE_EXCEPTION}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go new file mode 100644 index 00000000..52bf4ce5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go @@ -0,0 +1,40 @@ +package internal + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/sys/windows" +) + +func Debug(name string, mask uint32) { + names := []struct { + n string + m uint32 + }{ + {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED}, + {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED}, + {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED}, + {"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME}, + {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name)) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go new file mode 100644 index 00000000..547df1df --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -0,0 +1,31 @@ +//go:build freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/vendor/github.com/fsnotify/fsnotify/internal/internal.go new file mode 100644 index 00000000..7daa45e1 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/internal.go @@ -0,0 +1,2 @@ +// Package internal contains some helpers. +package internal diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go new file mode 100644 index 00000000..30976ce9 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -0,0 +1,31 @@ +//go:build !windows && !darwin && !freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go new file mode 100644 index 00000000..37dfeddc --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go @@ -0,0 +1,7 @@ +//go:build !windows + +package internal + +func HasPrivilegesForSymlink() bool { + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go new file mode 100644 index 00000000..a72c6495 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -0,0 +1,41 @@ +//go:build windows + +package internal + +import ( + "errors" + + "golang.org/x/sys/windows" +) + +// Just a dummy. 
+var ( + SyscallEACCES = errors.New("dummy") + UnixEACCES = errors.New("dummy") +) + +func SetRlimit() {} +func Maxfiles() uint64 { return 1<<64 - 1 } +func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") } +func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") } + +func HasPrivilegesForSymlink() bool { + var sid *windows.SID + err := windows.AllocateAndInitializeSid( + &windows.SECURITY_NT_AUTHORITY, + 2, + windows.SECURITY_BUILTIN_DOMAIN_RID, + windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, + &sid) + if err != nil { + return false + } + defer windows.FreeSid(sid) + token := windows.Token(0) + member, err := token.IsMember(sid) + if err != nil { + return false + } + return member || token.IsElevated() +} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go deleted file mode 100644 index 6fb8d853..00000000 --- a/vendor/github.com/fsnotify/fsnotify/kqueue.go +++ /dev/null @@ -1,535 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -package fsnotify - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "time" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - - kq int // File descriptor (as returned by the kqueue() syscall). - - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Map of watched file descriptors (key: path). - externalWatches map[string]bool // Map of watches added by user of the library. - dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. - paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. - fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - kq, err := kqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - watches: make(map[string]int), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]bool), - externalWatches: make(map[string]bool), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - - // copy paths to remove while locked - var pathsToRemove = make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() - // unlock before calling Remove, which also locks - - for _, name := range pathsToRemove { - w.Remove(name) - } - - // send a "quit" message to the reader goroutine - close(w.done) - - return nil -} - -// Add starts watching the named file or directory (non-recursively). 
-func (w *Watcher) Add(name string) error { - w.mu.Lock() - w.externalWatches[name] = true - w.mu.Unlock() - _, err := w.addWatch(name, noteAllEvents) - return err -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - w.mu.Lock() - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) - } - - const registerRemove = unix.EV_DELETE - if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { - return err - } - - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. - if isDir { - var pathsToRemove []string - w.mu.Lock() - for _, path := range w.paths { - wdir, _ := filepath.Split(path.name) - if filepath.Clean(wdir) == name { - if !w.externalWatches[path.name] { - pathsToRemove = append(pathsToRemove, path.name) - } - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error - // to the user, as that will just confuse them with an error about - // a path they did not explicitly watch themselves. - w.Remove(name) - } - } - - return nil -} - -// WatchList returns the directories and files that are being monitered. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for pathname := range w.watches { - entries = append(entries, pathname) - } - - return entries -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME - -// keventWaitTime to block on each read from kevent -var keventWaitTime = durationToTimespec(100 * time.Millisecond) - -// addWatch adds name to the watched file set. -// The flags are interpreted as described in kevent(2). -// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - // Make ./name and name equivalent - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return "", errors.New("kevent instance already closed") - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return "", err - } - - // Don't watch sockets. - if fi.Mode()&os.ModeSocket == os.ModeSocket { - return "", nil - } - - // Don't watch named pipes. - if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { - return "", nil - } - - // Follow Symlinks - // Unfortunately, Linux can add bogus symlinks to watch list without - // issue, and Windows can't do symlinks period (AFAIK). To maintain - // consistency, we will act like everything is fine. There will simply - // be no file events for broken symlinks. - // Hence the returns of nil on errors. 
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - name, err = filepath.EvalSymlinks(name) - if err != nil { - return "", nil - } - - w.mu.Lock() - _, alreadyWatching = w.watches[name] - w.mu.Unlock() - - if alreadyWatching { - return name, nil - } - - fi, err = os.Lstat(name) - if err != nil { - return "", nil - } - } - - watchfd, err = unix.Open(name, openMode, 0700) - if watchfd == -1 { - return "", err - } - - isDir = fi.IsDir() - } - - const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE - if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { - unix.Close(watchfd) - return "", err - } - - if !alreadyWatching { - w.mu.Lock() - w.watches[name] = watchfd - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, - // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - - watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return "", err - } - } - } - return name, nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - eventBuffer := make([]unix.Kevent_t, 10) - -loop: - for { - // See if there is a message on the "done" channel - select { - case <-w.done: - break loop - default: - } - - // Get new events - kevents, err := read(w.kq, eventBuffer, &keventWaitTime) - // EINTR is okay, the syscall was interrupted before timeout expired. - if err != nil && err != unix.EINTR { - select { - case w.Errors <- err: - case <-w.done: - break loop - } - continue - } - - // Flush the events we received to the Events channel - for len(kevents) > 0 { - kevent := &kevents[0] - watchfd := int(kevent.Ident) - mask := uint32(kevent.Fflags) - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - event := newEvent(path.name, mask) - - if path.isDir && !(event.Op&Remove == Remove) { - // Double check to make sure the directory exists. This can happen when - // we do a rm -fr on a recursively watched folders and we receive a - // modification event first but the folder has been deleted and later - // receive the delete event - if _, err := os.Lstat(event.Name); os.IsNotExist(err) { - // mark is as delete event - event.Op |= Remove - } - } - - if event.Op&Rename == Rename || event.Op&Remove == Remove { - w.Remove(event.Name) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - // Send the event on the Events channel. - select { - case w.Events <- event: - case <-w.done: - break loop - } - } - - if event.Op&Remove == Remove { - // Look for a file that may have overwritten this. - // For example, mv f1 f2 will delete f2, then create f2. - if path.isDir { - fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - // make sure the directory exists before we watch for changes. 
When we - // do a recursive watch and perform rm -fr, the parent directory might - // have gone missing, ignore the missing directory and let the - // upcoming delete event remove the watch from the parent directory. - if _, err := os.Lstat(fileDir); err == nil { - w.sendDirectoryChangeEvents(fileDir) - } - } - } else { - filePath := filepath.Clean(event.Name) - if fileInfo, err := os.Lstat(filePath); err == nil { - w.sendFileCreatedEventIfNew(filePath, fileInfo) - } - } - } - - // Move to next event - kevents = kevents[1:] - } - } - - // cleanup - err := unix.Close(w.kq) - if err != nil { - // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. - select { - case w.Errors <- err: - default: - } - } - close(w.Events) - close(w.Errors) -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { - e.Op |= Remove - } - if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { - e.Op |= Write - } - if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { - e.Op |= Rename - } - if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { - e.Op |= Chmod - } - return e -} - -func newCreateEvent(name string) Event { - return Event{Name: name, Op: Create} -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - return err - } - - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } - - return nil -} - -// sendDirectoryEvents searches the directory for newly created files -// and sends them over the event channel. This functionality is to have -// the BSD version of fsnotify match Linux inotify which provides a -// create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - select { - case w.Errors <- err: - case <-w.done: - return - } - } - - // Search for new files - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - err := w.sendFileCreatedEventIfNew(filePath, fileInfo) - - if err != nil { - return - } - } -} - -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - // Send create event - select { - case w.Events <- newCreateEvent(filePath): - case <-w.done: - return - } - } - - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - - return nil -} - -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// kqueue creates a new kernel event queue and returns a descriptor. -func kqueue() (kq int, err error) { - kq, err = unix.Kqueue() - if kq == -1 { - return kq, err - } - return kq, nil -} - -// register events with the queue -func register(kq int, fds []int, flags int, fflags uint32) error { - changes := make([]unix.Kevent_t, len(fds)) - - for i, fd := range fds { - // SetKevent converts int to the platform-specific types: - unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // register the events - success, err := unix.Kevent(kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -// A timeout of nil blocks indefinitely, while 0 polls the queue. -func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { - n, err := unix.Kevent(kq, nil, events, timeout) - if err != nil { - return nil, err - } - return events[0:n], nil -} - -// durationToTimespec prepares a timeout value -func durationToTimespec(d time.Duration) unix.Timespec { - return unix.NsecToTimespec(d.Nanoseconds()) -} diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go deleted file mode 100644 index 36cc3845..00000000 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly - -package fsnotify - -import "golang.org/x/sys/unix" - -const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go deleted file mode 100644 index 98cd8476..00000000 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build darwin -// +build darwin - -package fsnotify - -import "golang.org/x/sys/unix" - -// note: this constant is not defined on BSD -const openMode = unix.O_EVTONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go new file mode 100644 index 00000000..f65e8fe3 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -0,0 +1,7 @@ +//go:build freebsd || openbsd || netbsd || dragonfly + +package fsnotify + +import "golang.org/x/sys/unix" + +const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go new file mode 100644 index 00000000..a29fc7aa --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -0,0 +1,8 @@ +//go:build darwin + +package fsnotify + +import "golang.org/x/sys/unix" + +// note: this constant is not defined on BSD +const openMode = unix.O_EVTONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go deleted file mode 100644 index 02ce7deb..00000000 --- a/vendor/github.com/fsnotify/fsnotify/windows.go +++ /dev/null @@ -1,586 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build windows -// +build windows - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "reflect" - "runtime" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - isClosed bool // Set to true when Close() is first called - mu sync.Mutex // Map access - port syscall.Handle // Handle to completion port - watches watchMap // Map of watches (key: i-number) - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) - if e != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", e) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, 50), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed { - return nil - } - w.isClosed = true - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - if w.isClosed { - return errors.New("watcher already closed") - } - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error { - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// WatchList returns the directories and files that are being monitered. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for _, entry := range w.watches { - for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) - } - } - - return entries -} - -const ( - // Options for AddWatch - sysFSONESHOT = 0x80000000 - sysFSONLYDIR = 0x1000000 - - // Events - sysFSACCESS = 0x1 - sysFSALLEVENTS = 0xfff - sysFSATTRIB = 0x4 - sysFSCLOSE = 0x18 - sysFSCREATE = 0x100 - sysFSDELETE = 0x200 - sysFSDELETESELF = 0x400 - sysFSMODIFY = 0x2 - sysFSMOVE = 0xc0 - sysFSMOVEDFROM = 0x40 - sysFSMOVEDTO = 0x80 - sysFSMOVESELF = 0x800 - - // Special events - sysFSIGNORED = 0x8000 - sysFSQOVERFLOW = 0x4000 -) - -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { - e.Op |= Create - } - if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { - e.Op |= Remove - } - if mask&sysFSMODIFY == sysFSMODIFY { - e.Op |= Write - } - if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { - e.Op |= Rename - } - if mask&sysFSATTRIB == sysFSATTRIB { - e.Op |= Chmod - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - reply chan error -} - -type inode struct { - handle syscall.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov syscall.Overlapped - ino *inode // i-number - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf [4096]byte -} - -type indexMap map[uint64]*watch -type watchMap map[uint32]indexMap - -func (w *Watcher) wakeupReader() error { - e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if e != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", e) - } - return nil -} - -func getDir(pathname string) (dir string, err error) { - attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) - if e != nil { - return "", os.NewSyscallError("GetFileAttributes", e) - } - if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = filepath.Clean(dir) - } - return -} - -func getIno(path string) (ino *inode, err error) { - h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), - syscall.FILE_LIST_DIRECTORY, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) - if e != nil { - return nil, os.NewSyscallError("CreateFile", e) - } - var fi syscall.ByHandleFileInformation - if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { - syscall.CloseHandle(h) - return nil, os.NewSyscallError("GetFileInformationByHandle", e) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), 
- } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. -func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - if flags&sysFSONLYDIR != 0 && pathname != dir { - return nil - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { - syscall.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", e) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - syscall.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - if err = w.startRead(watchEntry); err != nil { - return err - } - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - if watch == nil { - return fmt.Errorf("can't remove non-existent watch for: %s", pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. 
-func (w *Watcher) startRead(watch *watch) error { - if e := syscall.CancelIo(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CancelIo", e) - w.deleteWatch(watch) - } - mask := toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= toWindowsFlags(m) - } - if mask == 0 { - if e := syscall.CloseHandle(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CloseHandle", e) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) - if e != nil { - err := os.NewSyscallError("ReadDirectoryChanges", e) - if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *Watcher) readEvents() { - var ( - n, key uint32 - ov *syscall.Overlapped - ) - runtime.LockOSThread() - - for { - e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) - watch := (*watch)(unsafe.Pointer(ov)) - - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - var err error - if e := syscall.CloseHandle(w.port); e != nil { - err = os.NewSyscallError("CloseHandle", e) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags)) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch e { - case syscall.ERROR_MORE_DATA: - if watch == nil { - w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. 
- n = uint32(unsafe.Sizeof(watch.buf)) - } - case syscall.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case syscall.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) - continue - case nil: - } - - var offset uint32 - for { - if n == 0 { - w.Events <- newEvent("", sysFSQOVERFLOW) - w.Errors <- errors.New("short read in readEvents()") - break - } - - // Point "raw" to the event in the buffer - raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - // TODO: Consider using unsafe.Slice that is available from go1.17 - // https://stackoverflow.com/questions/51187973/how-to-create-an-array-or-a-slice-from-an-array-unsafe-pointer-in-golang - // instead of using a fixed syscall.MAX_PATH buf, we create a buf that is the size of the path name - size := int(raw.FileNameLength / 2) - var buf []uint16 - sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) - sh.Len = size - sh.Cap = size - name := syscall.UTF16ToString(buf) - fullname := filepath.Join(watch.path, name) - - var mask uint64 - switch raw.Action { - case syscall.FILE_ACTION_REMOVED: - mask = sysFSDELETESELF - case syscall.FILE_ACTION_MODIFIED: - mask = sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sysFSMOVESELF - } - } - - sendNameEvent := func() { - if w.sendEvent(fullname, watch.names[name]&mask) { - if watch.names[name]&sysFSONESHOT != 0 { - delete(watch.names, name) - } - } - } - if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() - } - if raw.Action == syscall.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() - } - - // Move to the next event in the buffer - if raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! 
- if offset >= n { - w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") - break - } - } - - if err := w.startRead(watch); err != nil { - w.Errors <- err - } - } -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - event := newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -func toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sysFSACCESS != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS - } - if mask&sysFSMODIFY != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&sysFSATTRIB != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES - } - if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func toFSnotifyFlags(action uint32) uint64 { - switch action { - case syscall.FILE_ACTION_ADDED: - return sysFSCREATE - case syscall.FILE_ACTION_REMOVED: - return sysFSDELETE - case syscall.FILE_ACTION_MODIFIED: - return sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - return sysFSMOVEDFROM - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - return sysFSMOVEDTO - } - return 0 -} diff --git a/vendor/modules.txt b/vendor/modules.txt index e679de9a..d2612a23 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -53,9 +53,10 @@ github.com/docker/spdystream/spdy # github.com/fatih/color v1.13.0 ## explicit; go 1.13 github.com/fatih/color -# github.com/fsnotify/fsnotify v1.5.4 -## explicit; go 1.16 +# github.com/fsnotify/fsnotify v1.8.0 +## explicit; go 1.17 github.com/fsnotify/fsnotify +github.com/fsnotify/fsnotify/internal # github.com/ghodss/yaml v1.0.0 ## explicit github.com/ghodss/yaml @@ -718,7 +719,7 @@ sigs.k8s.io/structured-merge-diff/v4/value # sigs.k8s.io/yaml v1.2.0 ## explicit; go 1.12 sigs.k8s.io/yaml -# yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20240929084351-30a36ccf2201 +# yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20241216075633-a8665686cf63 ## explicit; go 1.21 yunion.io/x/cloudmux/pkg/apis yunion.io/x/cloudmux/pkg/apis/billing @@ -728,18 +729,18 @@ yunion.io/x/cloudmux/pkg/cloudprovider yunion.io/x/cloudmux/pkg/multicloud yunion.io/x/cloudmux/pkg/multicloud/esxi yunion.io/x/cloudmux/pkg/multicloud/esxi/vcenter -# yunion.io/x/executor v0.0.0-20230705125604-c5ac3141db32 +# yunion.io/x/executor v0.0.0-20241205080005-48f5b1212256 ## explicit; go 1.12 yunion.io/x/executor/apis yunion.io/x/executor/client -# yunion.io/x/jsonutils v1.0.1-0.20240203102553-4096f103b401 +# yunion.io/x/jsonutils v1.0.1-0.20240930100528-1671a2d0d22f ## explicit; go 1.18 yunion.io/x/jsonutils # yunion.io/x/log v1.0.1-0.20240305175729-7cf2d6cd5a91 ## explicit; go 1.12 yunion.io/x/log yunion.io/x/log/hooks -# yunion.io/x/onecloud v0.0.0-20241009134947-8e0507d0029f +# yunion.io/x/onecloud v0.0.0-20241217113931-4132f245acc4 ## explicit; go 1.21 yunion.io/x/onecloud/locales yunion.io/x/onecloud/pkg/apihelper @@ -871,7 +872,7 @@ yunion.io/x/onecloud/pkg/util/tagutils yunion.io/x/onecloud/pkg/util/yunionmeta yunion.io/x/onecloud/pkg/vpcagent/models yunion.io/x/onecloud/pkg/vpcagent/ovn/mac -# yunion.io/x/pkg v1.10.1-0.20240905110705-77c46e716318 +# yunion.io/x/pkg v1.10.2 ## explicit; go 1.18 yunion.io/x/pkg/appctx yunion.io/x/pkg/errors diff --git a/vendor/yunion.io/x/cloudmux/pkg/apis/compute/cloudaccount_const.go b/vendor/yunion.io/x/cloudmux/pkg/apis/compute/cloudaccount_const.go index 6ff14ebf..81b4bb3e 100644 
--- a/vendor/yunion.io/x/cloudmux/pkg/apis/compute/cloudaccount_const.go
+++ b/vendor/yunion.io/x/cloudmux/pkg/apis/compute/cloudaccount_const.go
@@ -47,6 +47,8 @@ const (
 CLOUD_PROVIDER_VOLCENGINE = "VolcEngine"
 CLOUD_PROVIDER_ORACLE = "OracleCloud"
 CLOUD_PROVIDER_SANGFOR = "SangFor"
+ CLOUD_PROVIDER_ZETTAKIT = "ZettaKit"
+ CLOUD_PROVIDER_UIS = "UIS"

 CLOUD_PROVIDER_GENERICS3 = "S3"
 CLOUD_PROVIDER_CEPH = "Ceph"
@@ -59,8 +61,6 @@ const (
 CLOUD_PROVIDER_HEALTH_ARREARS = "arrears" // 远端处于欠费状态
 CLOUD_PROVIDER_HEALTH_UNKNOWN = "unknown" // 未知状态,查询失败
 CLOUD_PROVIDER_HEALTH_NO_PERMISSION = "no permission" // 没有权限获取账单信息
-
- ZSTACK_BRAND_DSTACK = "DStack"
 )

 const (
diff --git a/vendor/yunion.io/x/cloudmux/pkg/apis/compute/guest_const.go b/vendor/yunion.io/x/cloudmux/pkg/apis/compute/guest_const.go
index 26d36e13..92db4b3b 100644
--- a/vendor/yunion.io/x/cloudmux/pkg/apis/compute/guest_const.go
+++ b/vendor/yunion.io/x/cloudmux/pkg/apis/compute/guest_const.go
@@ -80,6 +80,8 @@ const (
 HYPERVISOR_VOLCENGINE = "volcengine"
 HYPERVISOR_ORACLE = "oracle"
 HYPERVISOR_SANGFOR = "sangfor"
+ HYPERVISOR_ZETTAKIT = "zettakit"
+ HYPERVISOR_UIS = "uis"
 )

 const (
diff --git a/vendor/yunion.io/x/cloudmux/pkg/apis/compute/host_const.go b/vendor/yunion.io/x/cloudmux/pkg/apis/compute/host_const.go
index 4af5e7e3..fb320206 100644
--- a/vendor/yunion.io/x/cloudmux/pkg/apis/compute/host_const.go
+++ b/vendor/yunion.io/x/cloudmux/pkg/apis/compute/host_const.go
@@ -49,6 +49,8 @@ const (
 HOST_TYPE_VOLCENGINE = "volcengine"
 HOST_TYPE_ORACLE = "oracle"
 HOST_TYPE_SANGFOR = "sangfor"
+ HOST_TYPE_ZETTAKIT = "zettakit"
+ HOST_TYPE_UIS = "uis"

 // # possible status
 HOST_ONLINE = "online"
diff --git a/vendor/yunion.io/x/cloudmux/pkg/apis/compute/storage_const.go b/vendor/yunion.io/x/cloudmux/pkg/apis/compute/storage_const.go
index cf032f77..3a8f1351 100644
--- a/vendor/yunion.io/x/cloudmux/pkg/apis/compute/storage_const.go
+++ b/vendor/yunion.io/x/cloudmux/pkg/apis/compute/storage_const.go
@@ -115,6 +115,17 @@ const (
 STORAGE_FULL = "full"
 STORAGE_SYSTEM_FULL = "system_full"
+
+ // baidu storage type
+ STORAGE_BAIDU_SSD = "ssd" // 通用型SSD
+ STORAGE_BAIDU_PREMIUM_SSD = "premium_ssd" // 高性能云磁盘
+ STORAGE_BAIDU_HDD = "hdd" // 通用型HDD
+ STORAGE_BAIDU_ENHANCED_SSD_PL1 = "enhanced_ssd_pl1" // 增强型SSD_PL1
+ STORAGE_BAIDU_ENHANCED_SSD_PL2 = "enhanced_ssd_pl2" // 增强型SSD_PL2
+ STORAGE_BAIDU_ENHANCED_SSD_PL3 = "enhanced_ssd_pl3" // 增强型SSD_PL3
+
+ // ZettaKit
+ STORAGE_ZETTAKIT_NORMAL = "normal"
 )

 const (
diff --git a/vendor/yunion.io/x/cloudmux/pkg/cloudprovider/instance.go b/vendor/yunion.io/x/cloudmux/pkg/cloudprovider/instance.go
index 368ece4a..5966f76b 100644
--- a/vendor/yunion.io/x/cloudmux/pkg/cloudprovider/instance.go
+++ b/vendor/yunion.io/x/cloudmux/pkg/cloudprovider/instance.go
@@ -171,6 +171,13 @@ type SManagedVMCreateConfig struct {
 UserDataType string
 WindowsUserDataType string
 IsWindowsUserDataTypeNeedEncode bool
+
+ IsolateDevices []SIsolateDevice
+}
+
+type SIsolateDevice struct {
+ Id string
+ Name string
 }

 type SManagedVMChangeConfig struct {
diff --git a/vendor/yunion.io/x/cloudmux/pkg/cloudprovider/mount_target.go b/vendor/yunion.io/x/cloudmux/pkg/cloudprovider/mount_target.go
index 9c6007b4..245ac80e 100644
--- a/vendor/yunion.io/x/cloudmux/pkg/cloudprovider/mount_target.go
+++ b/vendor/yunion.io/x/cloudmux/pkg/cloudprovider/mount_target.go
@@ -21,3 +21,8 @@ type SMountTargetCreateOptions struct {
 NetworkId string
 FileSystemId string
 }
+
+type SFileSystemSetQuotaInput struct {
+ MaxFiles int64
+ MaxGb int64
+}
diff --git
a/vendor/yunion.io/x/cloudmux/pkg/cloudprovider/resources.go b/vendor/yunion.io/x/cloudmux/pkg/cloudprovider/resources.go index ea47aaa7..4616516e 100644 --- a/vendor/yunion.io/x/cloudmux/pkg/cloudprovider/resources.go +++ b/vendor/yunion.io/x/cloudmux/pkg/cloudprovider/resources.go @@ -318,6 +318,18 @@ type ICloudHost interface { GetSchedtags() ([]string, error) GetOvnVersion() string // just for cloudpods host + + GetIsolateDevices() ([]IsolateDevice, error) +} + +type IsolateDevice interface { + GetName() string + GetGlobalId() string + GetModel() string + GetAddr() string + GetDevType() string + GetNumaNode() int8 + GetVendorDeviceId() string } type ICloudVM interface { @@ -400,6 +412,7 @@ type ICloudVM interface { AllocatePublicIpAddress() (string, error) GetPowerStates() string + GetIsolateDeviceIds() ([]string, error) } type ICloudNic interface { @@ -1402,6 +1415,8 @@ type ICloudFileSystem interface { GetMountTargets() ([]ICloudMountTarget, error) CreateMountTarget(opts *SMountTargetCreateOptions) (ICloudMountTarget, error) + SetQuota(input *SFileSystemSetQuotaInput) error + Delete() error } diff --git a/vendor/yunion.io/x/cloudmux/pkg/multicloud/esxi/host.go b/vendor/yunion.io/x/cloudmux/pkg/multicloud/esxi/host.go index f39ce2dc..967eed29 100644 --- a/vendor/yunion.io/x/cloudmux/pkg/multicloud/esxi/host.go +++ b/vendor/yunion.io/x/cloudmux/pkg/multicloud/esxi/host.go @@ -778,6 +778,7 @@ type SCreateVMParam struct { Nics []jsonutils.JSONObject ResourcePool string InstanceSnapshotInfo SEsxiInstanceSnapshotInfo + EnableEsxiSwap bool } type SEsxiInstanceSnapshotInfo struct { @@ -1080,7 +1081,17 @@ func (host *SHost) DoCreateVM(ctx context.Context, ds *SDatastore, params SCreat CpuHotAddEnabled: &True, CpuHotRemoveEnabled: &True, MemoryHotAddEnabled: &True, + + ExtraConfig: []types.BaseOptionValue{}, + } + + if !params.EnableEsxiSwap { + spec.ExtraConfig = append(spec.ExtraConfig, &types.OptionValue{ + Key: "sched.swap.vmxSwapEnabled", + Value: "FALSE", + }) } + spec.Files = &types.VirtualMachineFileInfo{ VmPathName: datastorePath, } @@ -1319,6 +1330,14 @@ func (host *SHost) CloneVM(ctx context.Context, from *SVirtualMachine, snapshot CpuHotAddEnabled: &True, CpuHotRemoveEnabled: &True, MemoryHotAddEnabled: &True, + + ExtraConfig: []types.BaseOptionValue{}, + } + if !params.EnableEsxiSwap { + spec.ExtraConfig = append(spec.ExtraConfig, &types.OptionValue{ + Key: "sched.swap.vmxSwapEnabled", + Value: "FALSE", + }) } cloneSpec.Config = &spec task, err := ovm.Clone(ctx, folders.VmFolder, name, *cloneSpec) diff --git a/vendor/yunion.io/x/cloudmux/pkg/multicloud/esxi/storagecache.go b/vendor/yunion.io/x/cloudmux/pkg/multicloud/esxi/storagecache.go index 86fc19a0..9e0536b8 100644 --- a/vendor/yunion.io/x/cloudmux/pkg/multicloud/esxi/storagecache.go +++ b/vendor/yunion.io/x/cloudmux/pkg/multicloud/esxi/storagecache.go @@ -42,6 +42,7 @@ type SDatastoreImageCache struct { type EsxiOptions struct { ReasonableCIDREsxi string `help:"Reasonable CIDR in esxi, such as '10.0.0.0/8'" defautl:""` TemplateNameRegex string `help:"Regex of template name"` + EnableEsxiSwap bool `help:"Enable esxi vm swap" default:"false"` } var tempalteNameRegex *regexp.Regexp diff --git a/vendor/yunion.io/x/cloudmux/pkg/multicloud/host_base.go b/vendor/yunion.io/x/cloudmux/pkg/multicloud/host_base.go index c13c6fdf..ceef8cb8 100644 --- a/vendor/yunion.io/x/cloudmux/pkg/multicloud/host_base.go +++ b/vendor/yunion.io/x/cloudmux/pkg/multicloud/host_base.go @@ -16,7 +16,9 @@ package multicloud import ( 
"yunion.io/x/cloudmux/pkg/apis" + "yunion.io/x/cloudmux/pkg/cloudprovider" "yunion.io/x/jsonutils" + "yunion.io/x/pkg/errors" ) type SHostBase struct { @@ -55,3 +57,7 @@ func (host *SHostBase) GetStorageDriver() string { func (host *SHostBase) GetStorageInfo() jsonutils.JSONObject { return nil } + +func (host *SHostBase) GetIsolateDevices() ([]cloudprovider.IsolateDevice, error) { + return nil, errors.Wrapf(cloudprovider.ErrNotImplemented, "GetIsolateDevices") +} diff --git a/vendor/yunion.io/x/cloudmux/pkg/multicloud/instance_base.go b/vendor/yunion.io/x/cloudmux/pkg/multicloud/instance_base.go index 570cedd2..412edee1 100644 --- a/vendor/yunion.io/x/cloudmux/pkg/multicloud/instance_base.go +++ b/vendor/yunion.io/x/cloudmux/pkg/multicloud/instance_base.go @@ -94,3 +94,7 @@ func (ins *SInstanceBase) GetPowerStates() string { func (instance *SInstanceBase) GetError() error { return nil } + +func (instance *SInstanceBase) GetIsolateDeviceIds() ([]string, error) { + return nil, errors.Wrapf(cloudprovider.ErrNotImplemented, "GetIsolateDeviceIds") +} diff --git a/vendor/yunion.io/x/cloudmux/pkg/multicloud/nas_base.go b/vendor/yunion.io/x/cloudmux/pkg/multicloud/nas_base.go index 8d8b2b94..50dbb160 100644 --- a/vendor/yunion.io/x/cloudmux/pkg/multicloud/nas_base.go +++ b/vendor/yunion.io/x/cloudmux/pkg/multicloud/nas_base.go @@ -14,7 +14,16 @@ package multicloud +import ( + "yunion.io/x/cloudmux/pkg/cloudprovider" + "yunion.io/x/pkg/errors" +) + type SNasBase struct { SVirtualResourceBase SBillingBase } + +func (self *SNasBase) SetQuota(input *cloudprovider.SFileSystemSetQuotaInput) error { + return errors.Wrapf(cloudprovider.ErrNotImplemented, "SetQuota") +} diff --git a/vendor/yunion.io/x/jsonutils/jsonutils.go b/vendor/yunion.io/x/jsonutils/jsonutils.go index ae51a908..ab0d6060 100644 --- a/vendor/yunion.io/x/jsonutils/jsonutils.go +++ b/vendor/yunion.io/x/jsonutils/jsonutils.go @@ -27,6 +27,7 @@ import ( "yunion.io/x/pkg/sortedmap" ) +// swagger:type object type JSONObject interface { gotypes.ISerializable @@ -68,6 +69,7 @@ var ( JSONFalse = &JSONBool{data: false} ) +// swagger:type object type JSONDict struct { JSONValue data sortedmap.SSortedMap @@ -294,26 +296,25 @@ func (s *sJsonParseSession) parseJSONValue(str []byte, offset int) (JSONObject, // https://www.ietf.org/rfc/rfc4627.txt // -// string = quotation-mark *char quotation-mark +// string = quotation-mark *char quotation-mark // -// char = unescaped / -// escape ( -// %x22 / ; " quotation mark U+0022 -// %x5C / ; \ reverse solidus U+005C -// %x2F / ; / solidus U+002F -// %x62 / ; b backspace U+0008 -// %x66 / ; f form feed U+000C -// %x6E / ; n line feed U+000A -// %x72 / ; r carriage return U+000D -// %x74 / ; t tab U+0009 -// %x75 4HEXDIG ) ; uXXXX U+XXXX +// char = unescaped / +// escape ( +// %x22 / ; " quotation mark U+0022 +// %x5C / ; \ reverse solidus U+005C +// %x2F / ; / solidus U+002F +// %x62 / ; b backspace U+0008 +// %x66 / ; f form feed U+000C +// %x6E / ; n line feed U+000A +// %x72 / ; r carriage return U+000D +// %x74 / ; t tab U+0009 +// %x75 4HEXDIG ) ; uXXXX U+XXXX // -// escape = %x5C ; \ +// escape = %x5C ; \ // -// quotation-mark = %x22 ; " -// -// unescaped = %x20-21 / %x23-5B / %x5D-10FFFF +// quotation-mark = %x22 ; " // +// unescaped = %x20-21 / %x23-5B / %x5D-10FFFF func escapeJsonChar(sb *strings.Builder, ch byte) { switch ch { case '"': diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/compute/api.go b/vendor/yunion.io/x/onecloud/pkg/apis/compute/api.go index dfc36200..82f35a7b 100644 --- 
a/vendor/yunion.io/x/onecloud/pkg/apis/compute/api.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/compute/api.go @@ -175,6 +175,9 @@ type DiskConfig struct { // requried: false Fs string `json:"fs"` + // 文件系统特性 + FsFeatures *DiskFsFeatures `json:"fs_features"` + // 磁盘存储格式 // enum: ["qcow2", "raw", "docker", "iso", "vmdk", "vmdkflatver1", "vmdkflatver2", "vmdkflat", "vmdksparse", "vmdksparsever1", "vmdksparsever2", "vmdksepsparse", "vhd"] // requried: false @@ -471,6 +474,10 @@ type ServerCreateInput struct { // default: 1 CpuSockets int `json:"cpu_sockets"` + // 额外分配 cpu 数量 + // required: false + ExtraCpuCount int `json:"extra_cpu_count"` + // 用户自定义启动脚本 // 支持 #cloud-config yaml 格式及shell脚本 // 支持特殊user data平台: Aliyun, Qcloud, Azure, Apsara, Ucloud diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/compute/cloudaccount.go b/vendor/yunion.io/x/onecloud/pkg/apis/compute/cloudaccount.go index 927cf91e..b1525860 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/compute/cloudaccount.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/compute/cloudaccount.go @@ -72,12 +72,6 @@ type CloudenvResourceListInput struct { // Deprecated Provider []string `json:"provider" yunion-deprecated-by:"providers"` - // 列出指定云平台品牌的资源,一般来说brand和provider相同,除了以上支持的provider之外,还支持以下band - // - // | Brand | Provider | 说明 | - // |----------|----------|------------| - // | DStack | ZStack | 滴滴云私有云 | - // Brands []string `json:"brands"` // swagger:ignore // Deprecated @@ -169,7 +163,7 @@ type CloudaccountCreateInput struct { // | Huawei | Huawei | // | OpenStack | OpenStack | // | Ucloud | Ucloud | - // | ZStack | ZStack, DStack | + // | ZStack | ZStack | // | Google | Google | // | Ctyun | Ctyun | Brand string `json:"brand"` diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/compute/cloudaccount_const.go b/vendor/yunion.io/x/onecloud/pkg/apis/compute/cloudaccount_const.go index 17726345..78fef730 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/compute/cloudaccount_const.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/compute/cloudaccount_const.go @@ -72,6 +72,8 @@ const ( CLOUD_PROVIDER_QINGCLOUD = compute.CLOUD_PROVIDER_QINGCLOUD CLOUD_PROVIDER_ORACLE = compute.CLOUD_PROVIDER_ORACLE CLOUD_PROVIDER_SANGFOR = compute.CLOUD_PROVIDER_SANGFOR + CLOUD_PROVIDER_ZETTAKIT = compute.CLOUD_PROVIDER_ZETTAKIT + CLOUD_PROVIDER_UIS = compute.CLOUD_PROVIDER_UIS CLOUD_PROVIDER_GENERICS3 = compute.CLOUD_PROVIDER_GENERICS3 CLOUD_PROVIDER_CEPH = compute.CLOUD_PROVIDER_CEPH @@ -85,7 +87,6 @@ const ( CLOUD_PROVIDER_HEALTH_UNKNOWN = compute.CLOUD_PROVIDER_HEALTH_UNKNOWN // 未知状态,查询失败 CLOUD_PROVIDER_HEALTH_NO_PERMISSION = compute.CLOUD_PROVIDER_HEALTH_NO_PERMISSION // 没有权限获取账单信息 - ZSTACK_BRAND_DSTACK = compute.ZSTACK_BRAND_DSTACK ONECLOUD_BRAND_ONECLOUD = "OneCloud" CLOUD_ACCOUNT_WIRE_LEVEL_VCENTER = "vcenter" @@ -119,7 +120,7 @@ var ( PRIVATE_CLOUD_PROVIDERS = []string{CLOUD_PROVIDER_ZSTACK, CLOUD_PROVIDER_OPENSTACK, CLOUD_PROVIDER_APSARA, CLOUD_PROVIDER_HCSO, CLOUD_PROVIDER_HCS, CLOUD_PROVIDER_HCSOP, CLOUD_PROVIDER_INCLOUD_SPHERE, CLOUD_PROVIDER_PROXMOX, CLOUD_PROVIDER_REMOTEFILE, - CLOUD_PROVIDER_H3C, CLOUD_PROVIDER_SANGFOR, + CLOUD_PROVIDER_H3C, CLOUD_PROVIDER_SANGFOR, CLOUD_PROVIDER_ZSTACK, CLOUD_PROVIDER_UIS, } PUBLIC_CLOUD_PROVIDERS = []string{ CLOUD_PROVIDER_ALIYUN, @@ -173,6 +174,8 @@ var ( CLOUD_PROVIDER_QINGCLOUD, CLOUD_PROVIDER_ORACLE, CLOUD_PROVIDER_SANGFOR, + CLOUD_PROVIDER_ZETTAKIT, + CLOUD_PROVIDER_UIS, } CLOUD_PROVIDER_HOST_TYPE_MAP = map[string][]string{ @@ -274,6 +277,12 @@ var ( CLOUD_PROVIDER_SANGFOR: { HOST_TYPE_SANGFOR, }, + 
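The fs_features field added to DiskConfig above nests per-filesystem options; so far only ext4's case-insensitive lookup is modeled. A sketch of the wire format implied by the json tags, using the vendored packages; the printed JSON is abridged (zero-valued fields elided):

package main

import (
	"fmt"

	"yunion.io/x/jsonutils"

	"yunion.io/x/onecloud/pkg/apis/compute"
)

func main() {
	cfg := compute.DiskConfig{
		Fs: "ext4",
		FsFeatures: &compute.DiskFsFeatures{
			Ext4: &compute.DiskFsExt4Features{CaseInsensitive: true},
		},
	}
	// Expected shape: {"fs":"ext4","fs_features":{"ext4":{"case_insensitive":true}}}
	fmt.Println(jsonutils.Marshal(cfg))
}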
CLOUD_PROVIDER_ZETTAKIT: { + HOST_TYPE_ZETTAKIT, + }, + CLOUD_PROVIDER_UIS: { + HOST_TYPE_UIS, + }, } ) diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/compute/cloudprovider_quota.go b/vendor/yunion.io/x/onecloud/pkg/apis/compute/cloudprovider_quota.go index 0441b5e0..3d92c3b1 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/compute/cloudprovider_quota.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/compute/cloudprovider_quota.go @@ -39,6 +39,4 @@ type CloudproviderQuotaDetails struct { apis.StandaloneResourceDetails CloudregionResourceInfo ManagedResourceInfo - - SCloudproviderQuota } diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/compute/container.go b/vendor/yunion.io/x/onecloud/pkg/apis/compute/container.go index 4111765c..e2aa5dd4 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/compute/container.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/compute/container.go @@ -81,11 +81,23 @@ const ( // for health check CONTAINER_STATUS_PROBING = "probing" CONTAINER_STATUS_PROBE_FAILED = "probe_failed" + CONTAINER_STATUS_NET_FAILED = "net_failed" + // post overlay + CONTAINER_STATUS_ADD_POST_OVERLY = "adding_post_overly" + CONTAINER_STATUS_ADD_POST_OVERLY_FAILED = "add_post_overly_failed" + CONTAINER_STATUS_REMOVE_POST_OVERLY = "removing_post_overly" + CONTAINER_STATUS_REMOVE_POST_OVERLY_FAILED = "remove_post_overly_failed" ) var ( - ContainerRunningStatus = sets.NewString(CONTAINER_STATUS_RUNNING, CONTAINER_STATUS_PROBING) - ContainerExitedStatus = sets.NewString(CONTAINER_STATUS_EXITED, CONTAINER_STATUS_CRASH_LOOP_BACK_OFF) + ContainerRunningStatus = sets.NewString( + CONTAINER_STATUS_RUNNING, + CONTAINER_STATUS_PROBING, + CONTAINER_STATUS_PROBE_FAILED, + CONTAINER_STATUS_NET_FAILED, + ) + ContainerNoFailedRunningStatus = sets.NewString(CONTAINER_STATUS_RUNNING, CONTAINER_STATUS_PROBING) + ContainerExitedStatus = sets.NewString(CONTAINER_STATUS_EXITED, CONTAINER_STATUS_CRASH_LOOP_BACK_OFF) ) const ( @@ -131,7 +143,8 @@ type ContainerListInput struct { } type ContainerStopInput struct { - Timeout int `json:"timeout"` + Timeout int `json:"timeout"` + Force bool `json:"force"` } type ContainerSyncStatusResponse struct { @@ -244,3 +257,20 @@ type ContainerPerformStatusInput struct { StartedAt *time.Time `json:"started_at"` LastFinishedAt *time.Time `json:"last_finished_at"` } + +type ContainerResourcesSetInput struct { + apis.ContainerResources + DisableLimitCheck bool `json:"disable_limit_check"` +} + +type ContainerVolumeMountAddPostOverlayInput struct { + Index int `json:"index"` + PostOverlay []*apis.ContainerVolumeMountDiskPostOverlay `json:"post_overlay"` +} + +type ContainerVolumeMountRemovePostOverlayInput struct { + Index int `json:"index"` + PostOverlay []*apis.ContainerVolumeMountDiskPostOverlay `json:"post_overlay"` + UseLazy bool `json:"use_lazy"` + ClearLayers bool `json:"clear_layers"` +} diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/compute/disk.go b/vendor/yunion.io/x/onecloud/pkg/apis/compute/disk.go index 55064aef..19bad37e 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/compute/disk.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/compute/disk.go @@ -15,10 +15,12 @@ package compute import ( + "reflect" "time" "yunion.io/x/cloudmux/pkg/multicloud/esxi/vcenter" "yunion.io/x/jsonutils" + "yunion.io/x/pkg/gotypes" "yunion.io/x/pkg/util/fileutils" "yunion.io/x/onecloud/pkg/apis" @@ -26,6 +28,12 @@ import ( "yunion.io/x/onecloud/pkg/httperrors" ) +func init() { + gotypes.RegisterSerializable(reflect.TypeOf(new(DiskFsFeatures)), func() gotypes.ISerializable { + return 
new(DiskFsFeatures) + }) +} + type DiskCreateInput struct { apis.VirtualResourceCreateInput apis.EncryptedResourceCreateInput @@ -286,6 +294,7 @@ type DiskAllocateInput struct { ImageId string ImageFormat string FsFormat string + FsFeatures *DiskFsFeatures Rebuild bool BackingDiskId string SnapshotId string @@ -340,3 +349,22 @@ type DiskRebuildInput struct { BackupId *string `json:"backup_id,allowempty"` TemplateId *string `json:"template_id,allowempty"` } + +type DiskFsExt4Features struct { + CaseInsensitive bool `json:"case_insensitive"` +} + +type DiskFsFeatures struct { + Ext4 *DiskFsExt4Features `json:"ext4"` +} + +func (d *DiskFsFeatures) String() string { + return jsonutils.Marshal(d).String() +} + +func (d *DiskFsFeatures) IsZero() bool { + if reflect.DeepEqual(*d, DiskFsFeatures{}) { + return true + } + return false +} diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/compute/disk_const.go b/vendor/yunion.io/x/onecloud/pkg/apis/compute/disk_const.go index 49a14b08..684b6097 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/compute/disk_const.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/compute/disk_const.go @@ -19,6 +19,7 @@ import "yunion.io/x/cloudmux/pkg/apis/compute" const ( DISK_INIT = compute.DISK_INIT DISK_REBUILD = compute.DISK_REBUILD + DISK_REBUILD_FAILED = "rebuild_failed" DISK_ALLOC_FAILED = compute.DISK_ALLOC_FAILED DISK_STARTALLOC = "start_alloc" DISK_BACKUP_STARTALLOC = compute.DISK_BACKUP_STARTALLOC diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/compute/filesystem.go b/vendor/yunion.io/x/onecloud/pkg/apis/compute/filesystem.go index 982a0b4a..7cb29f98 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/compute/filesystem.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/compute/filesystem.go @@ -44,7 +44,7 @@ const ( ) type FileSystemListInput struct { - apis.VirtualResourceListInput + apis.SharableVirtualResourceListInput apis.ExternalizedResourceBaseListInput ManagedResourceListInput @@ -52,7 +52,7 @@ type FileSystemListInput struct { } type FileSystemCreateInput struct { - apis.VirtualResourceCreateInput + apis.SharableVirtualResourceCreateInput // 协议类型 // enum: ["NFS", "SMB", "CPFS"] Protocol string `json:"protocol"` @@ -101,7 +101,7 @@ type FileSystemSyncstatusInput struct { } type FileSystemDetails struct { - apis.VirtualResourceDetails + apis.SharableVirtualResourceDetails ManagedResourceInfo CloudregionResourceInfo @@ -114,3 +114,8 @@ type FileSystemRemoteUpdateInput struct { // 是否覆盖替换所有标签 ReplaceTags *bool `json:"replace_tags" help:"replace all remote tags"` } + +type FileSystemSetQuotaInput struct { + MaxGb *int64 + MaxFiles *int64 +} diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/compute/guest_const.go b/vendor/yunion.io/x/onecloud/pkg/apis/compute/guest_const.go index ce373f23..5e41c9cb 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/compute/guest_const.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/compute/guest_const.go @@ -220,6 +220,8 @@ const ( HYPERVISOR_QINGCLOUD = compute.HYPERVISOR_QINGCLOUD HYPERVISOR_ORACLE = compute.HYPERVISOR_ORACLE HYPERVISOR_SANGFOR = compute.HYPERVISOR_SANGFOR + HYPERVISOR_ZETTAKIT = compute.HYPERVISOR_ZETTAKIT + HYPERVISOR_UIS = compute.HYPERVISOR_UIS // HYPERVISOR_DEFAULT = HYPERVISOR_KVM HYPERVISOR_DEFAULT = HYPERVISOR_KVM @@ -297,6 +299,8 @@ var HYPERVISORS = []string{ HYPERVISOR_QINGCLOUD, HYPERVISOR_ORACLE, HYPERVISOR_SANGFOR, + HYPERVISOR_ZETTAKIT, + HYPERVISOR_UIS, } const ( diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/compute/guests.go b/vendor/yunion.io/x/onecloud/pkg/apis/compute/guests.go index 
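The String, IsZero and init-registered factory above satisfy the gotypes.ISerializable contract, so generic deserializers (such as the DB layer) can rebuild the pointer field from its stored string form. A round-trip sketch with illustrative values:

package main

import (
	"fmt"

	"yunion.io/x/jsonutils"

	"yunion.io/x/onecloud/pkg/apis/compute"
)

func main() {
	f := &compute.DiskFsFeatures{Ext4: &compute.DiskFsExt4Features{CaseInsensitive: true}}
	stored := f.String() // e.g. {"ext4":{"case_insensitive":true}}

	obj, err := jsonutils.ParseString(stored)
	if err != nil {
		panic(err)
	}
	restored := new(compute.DiskFsFeatures) // what the registered factory returns
	if err := obj.Unmarshal(restored); err != nil {
		panic(err)
	}
	fmt.Println(restored.IsZero()) // false; an all-nil DiskFsFeatures reports true
}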
26e497de..094687a5 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/compute/guests.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/compute/guests.go @@ -818,6 +818,8 @@ type ServerChangeConfigInput struct { // cpu大小 VcpuCount *int `json:"vcpu_count"` + // 任务分配CPU大小 + ExtraCpuCount *int `json:"extra_cpu_count"` // 内存大小, 1024M, 1G VmemSize string `json:"vmem_size"` @@ -932,6 +934,7 @@ type GuestJsonDesc struct { InstanceSnapshotId string `json:"instance_snapshot_id"` InstanceId string `json:"instance_id"` } `json:"instance_snapshot_info"` + EnableEsxiSwap bool `json:"enable_esxi_swap"` EncryptKeyId string `json:"encrypt_key_id,omitempty"` @@ -952,7 +955,8 @@ type SCpuNumaPin struct { SizeMB *int `json:"size_mb"` NodeId int `json:"node_id"` - VcpuPin []SVCpuPin `json:"vcpu_pin"` + VcpuPin []SVCpuPin `json:"vcpu_pin"` + ExtraCpuCount int `json:"extra_cpu_count"` } type ServerSetBootIndexInput struct { @@ -1308,10 +1312,11 @@ type ServerChangeBandwidthInput struct { } type ServerChangeConfigSpecs struct { - CpuSockets int `json:"cpu_sockets"` - VcpuCount int `json:"vcpu_count"` - VmemSize int `json:"vmem_size"` - InstanceType string `json:"instance_type"` + CpuSockets int `json:"cpu_sockets"` + VcpuCount int `json:"vcpu_count"` + ExtraCpuCount int `json:"extra_cpu_count"` + VmemSize int `json:"vmem_size"` + InstanceType string `json:"instance_type"` } type DiskResizeSpec struct { @@ -1352,6 +1357,18 @@ func (conf ServerChangeConfigSettings) AddedCpu() int { return addCpu } +func (conf ServerChangeConfigSettings) ExtraCpuChanged() bool { + return conf.ExtraCpuCount != conf.Old.ExtraCpuCount +} + +func (conf ServerChangeConfigSettings) AddedExtraCpu() int { + addCpu := conf.ExtraCpuCount - conf.Old.ExtraCpuCount + if addCpu < 0 { + addCpu = 0 + } + return addCpu +} + func (conf ServerChangeConfigSettings) MemChanged() bool { return conf.VmemSize != conf.Old.VmemSize } diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/compute/host.go b/vendor/yunion.io/x/onecloud/pkg/apis/compute/host.go index 01e3fe88..76bf44b6 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/compute/host.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/compute/host.go @@ -230,7 +230,8 @@ type HostDetails struct { // reserved resource for isolated device ReservedResourceForGpu *IsolatedDeviceReservedResourceInput `json:"reserved_resource_for_gpu"` // isolated device count - IsolatedDeviceCount int + IsolatedDeviceCount int + IsolatedDeviceTypeCount map[string]int // host init warnning SysWarn string `json:"sys_warn"` @@ -551,7 +552,8 @@ type SHostPingInput struct { type HostReserveCpusInput struct { Cpus string Mems string - DisableSchedLoadBalance *bool `json:"disable_sched_load_balance"` + DisableSchedLoadBalance *bool `json:"disable_sched_load_balance"` + ProcessesPrefix []string `json:"processes_prefix"` } type HostAutoMigrateInput struct { diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/compute/host_const.go b/vendor/yunion.io/x/onecloud/pkg/apis/compute/host_const.go index b6c007bf..96749632 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/compute/host_const.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/compute/host_const.go @@ -57,6 +57,8 @@ const ( HOST_TYPE_QINGCLOUD = compute.HOST_TYPE_QINGCLOUD HOST_TYPE_ORACLE = compute.HOST_TYPE_ORACLE HOST_TYPE_SANGFOR = compute.HOST_TYPE_SANGFOR + HOST_TYPE_ZETTAKIT = compute.HOST_TYPE_ZETTAKIT + HOST_TYPE_UIS = compute.HOST_TYPE_UIS HOST_TYPE_DEFAULT = HOST_TYPE_HYPERVISOR @@ -155,6 +157,8 @@ var HOST_TYPES = []string{ HOST_TYPE_QINGCLOUD, HOST_TYPE_ORACLE, HOST_TYPE_SANGFOR, + 
HOST_TYPE_ZETTAKIT, + HOST_TYPE_UIS, } var ALL_NIC_TYPES = []compute.TNicType{NIC_TYPE_IPMI, NIC_TYPE_ADMIN, NIC_TYPE_NORMAL} @@ -185,4 +189,5 @@ const ( const ( HOSTMETA_RESERVED_CPUS_INFO = "reserved_cpus_info" + HOSTMETA_RESERVED_CPUS_RATE = "reserved_cpus_rate" ) diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/compute/isolated_device.go b/vendor/yunion.io/x/onecloud/pkg/apis/compute/isolated_device.go index c613764d..7bda1f90 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/compute/isolated_device.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/compute/isolated_device.go @@ -37,6 +37,7 @@ type IsolateDeviceDetails struct { type IsolatedDeviceListInput struct { apis.StandaloneResourceListInput + apis.ExternalizedResourceBaseListInput apis.DomainizedResourceListInput HostFilterListInput diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/compute/loadbalancer.go b/vendor/yunion.io/x/onecloud/pkg/apis/compute/loadbalancer.go index 51346047..9e6bfb29 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/compute/loadbalancer.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/compute/loadbalancer.go @@ -96,6 +96,12 @@ type LoadbalancerDetails struct { // 关联安全组列表 Secgroups []SimpleSecurityGroup `json:"secgroups"` + LoadbalancerUsage +} + +type LoadbalancerUsage struct { + BackendGroupCount int `json:"backend_group_count"` + ListenerCount int `json:"listener_count"` } type SimpleSecurityGroup struct { diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/compute/secgroup.go b/vendor/yunion.io/x/onecloud/pkg/apis/compute/secgroup.go index 0a8ab156..91adfff4 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/compute/secgroup.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/compute/secgroup.go @@ -312,11 +312,6 @@ type GuestsecgroupDetails struct { Secgroup string `json:"secgroup"` } -//type SElasticcachesecgroup struct { -// SElasticcacheJointsBase -// SSecurityGroupResourceBase -//} - type ElasticcachesecgroupDetails struct { ElasticcacheJointResourceDetails diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/compute/snapshot.go b/vendor/yunion.io/x/onecloud/pkg/apis/compute/snapshot.go index ebfe4dfa..360866bf 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/compute/snapshot.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/compute/snapshot.go @@ -74,6 +74,9 @@ type SnapshotListInput struct { // list server snapshots ServerId string `json:"server_id"` + // 未关联任何磁盘 + Unused bool `json:"unused"` + // 按虚拟机名称排序 // pattern:asc|desc OrderByGuest string `json:"order_by_guest"` diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/compute/storage.go b/vendor/yunion.io/x/onecloud/pkg/apis/compute/storage.go index c41ce0b5..81d7dc55 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/compute/storage.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/compute/storage.go @@ -77,6 +77,9 @@ type StorageCreateInput struct { // example: 192.168.222.3,192.168.222.4,192.168.222.99 RbdMonHost string `json:"rbd_mon_host"` + // enable ceph messenger v2 + EnableMessengerV2 *bool `json:"enable_messenger_v2"` + // swagger:ignore MonHost string @@ -117,7 +120,6 @@ type StorageCreateInput struct { CLVMVgName string // SLVM VG Name SLVMVgName string - MasterHost string Lvmlockd bool } @@ -180,6 +182,9 @@ type StorageDetails struct { // 超分比 CommitBound float32 `json:"commit_bound"` + + // master host name + MasterHostName string `json:"master_host_name"` } func (self StorageDetails) GetMetricTags() map[string]string { @@ -239,6 +244,9 @@ type StorageUpdateInput struct { // example: AQDigB9dtnDAKhAAxS6X4zi4BPR/lIle4nf4Dw== RbdKey string `json:"rbd_key"` + 
// enable ceph messenger v2 + EnableMessengerV2 *bool `json:"enable_messenger_v2"` + RbdTimeoutInput // swagger:ignore diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/compute/storage_const.go b/vendor/yunion.io/x/onecloud/pkg/apis/compute/storage_const.go index 19070f6c..5b82874b 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/compute/storage_const.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/compute/storage_const.go @@ -126,6 +126,16 @@ const ( STORAGE_ECLOUD_SSD = compute.STORAGE_ECLOUD_SSD // 高性能盘 STORAGE_ECLOUD_SSDEBS = compute.STORAGE_ECLOUD_SSDEBS // 性能优化盘 STORAGE_ECLOUD_SYSTEM = compute.STORAGE_ECLOUD_SYSTEM // 系统盘 + + STORAGE_BAIDU_SSD = compute.STORAGE_BAIDU_SSD + STORAGE_BAIDU_PREMIUM_SSD = compute.STORAGE_BAIDU_PREMIUM_SSD + STORAGE_BAIDU_HDD = compute.STORAGE_BAIDU_HDD + STORAGE_BAIDU_ENHANCED_SSD_PL1 = compute.STORAGE_BAIDU_ENHANCED_SSD_PL1 + STORAGE_BAIDU_ENHANCED_SSD_PL2 = compute.STORAGE_BAIDU_ENHANCED_SSD_PL2 + STORAGE_BAIDU_ENHANCED_SSD_PL3 = compute.STORAGE_BAIDU_ENHANCED_SSD_PL3 + + // zettakit + STORAGE_ZETTAKIT_NORMAL = compute.STORAGE_ZETTAKIT_NORMAL ) const ( @@ -223,6 +233,9 @@ type StorageResourceInput struct { type StorageFilterListInputBase struct { StorageResourceInput + // 以host过滤 + StorageHostId string `json:"storage_host_id"` + // 以存储名称排序 // pattern:asc|desc OrderByStorage string `json:"order_by_storage"` diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/compute/zz_generated.model.go b/vendor/yunion.io/x/onecloud/pkg/apis/compute/zz_generated.model.go index bff97c87..24595d0e 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/compute/zz_generated.model.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/compute/zz_generated.model.go @@ -273,13 +273,6 @@ type SCloudaccount struct { RegionId string `json:"region_id"` } -// SCloudimage is an autogenerated struct via yunion.io/x/onecloud/pkg/compute/models.SCloudimage. -type SCloudimage struct { - apis.SStandaloneResourceBase - apis.SExternalizedResourceBase - SCloudregionResourceBase -} - // SCloudprovider is an autogenerated struct via yunion.io/x/onecloud/pkg/compute/models.SCloudprovider. type SCloudprovider struct { apis.SEnabledStatusStandaloneResourceBase @@ -311,33 +304,6 @@ type SCloudprovider struct { SProjectMappingResourceBase } -// SCloudproviderCapability is an autogenerated struct via yunion.io/x/onecloud/pkg/compute/models.SCloudproviderCapability. -type SCloudproviderCapability struct { - apis.SResourceBase - CloudproviderId string `json:"cloudprovider_id"` - CloudregionId string `json:"cloudregion_id"` - Capability string `json:"capability"` -} - -// SCloudproviderQuota is an autogenerated struct via yunion.io/x/onecloud/pkg/compute/models.SCloudproviderQuota. -type SCloudproviderQuota struct { - apis.SStandaloneResourceBase - apis.SExternalizedResourceBase - SManagedResourceBase - SCloudregionResourceBase - // 配额范围 - // cloudregion: 区域级别 - // cloudprovider: 云订阅级别 - QuotaRange string `json:"quota_range"` - // 已使用的配额 - // -1代表未从云平台拿到已使用配额信息 - UsedCount int64 `json:"used_count"` - // 最大配额限制 - MaxCount int64 `json:"max_count"` - // 配额类型 - QuotaType string `json:"quota_type"` -} - // SCloudproviderResourceBase is an autogenerated struct via yunion.io/x/onecloud/pkg/compute/models.SCloudproviderResourceBase. 
type SCloudproviderResourceBase struct { CloudproviderId string `json:"cloudprovider_id"` @@ -384,6 +350,12 @@ type SContainer struct { GuestId string `json:"guest_id"` // Spec stores all container running options Spec *ContainerSpec `json:"spec"` + // 启动时间 + StartedAt time.Time `json:"started_at"` + // 上次退出时间 + LastFinishedAt time.Time `json:"last_finished_at"` + // 重启次数 + RestartCount int `json:"restart_count"` } // SDBInstance is an autogenerated struct via yunion.io/x/onecloud/pkg/compute/models.SDBInstance. @@ -580,6 +552,8 @@ type SDisk struct { BackupId string `json:"backup_id"` // 文件系统 FsFormat string `json:"fs_format"` + // 文件系统特性 + FsFeatures *DiskFsFeatures `json:"fs_features"` // 磁盘类型 // sys: 系统盘 // data: 数据盘 @@ -658,13 +632,6 @@ type SDnsZoneResourceBase struct { DnsZoneId string `json:"dns_zone_id"` } -// SDnsZoneVpc is an autogenerated struct via yunion.io/x/onecloud/pkg/compute/models.SDnsZoneVpc. -type SDnsZoneVpc struct { - apis.SJointResourceBase - SDnsZoneResourceBase - VpcId string `json:"vpc_id"` -} - // SDynamicschedtag is an autogenerated struct via yunion.io/x/onecloud/pkg/compute/models.SDynamicschedtag. type SDynamicschedtag struct { apis.SStandaloneResourceBase @@ -945,7 +912,7 @@ type SExternalProject struct { // SFileSystem is an autogenerated struct via yunion.io/x/onecloud/pkg/compute/models.SFileSystem. type SFileSystem struct { - apis.SVirtualResourceBase + apis.SSharableVirtualResourceBase apis.SExternalizedResourceBase SManagedResourceBase SBillingResourceBase diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/const.go b/vendor/yunion.io/x/onecloud/pkg/apis/const.go index 46d58367..0015ed2f 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/const.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/const.go @@ -142,3 +142,11 @@ var ( func IsARM(osArch string) bool { return utils.IsInStringArray(osArch, ARCH_ARM) } + +func IsIllegalSearchDomain(domain string) bool { + switch domain { + case "cloud.onecloud.io": + return true + } + return false +} diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/container.go b/vendor/yunion.io/x/onecloud/pkg/apis/container.go index 1dd636ef..1eb93ca6 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/container.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/container.go @@ -43,9 +43,26 @@ type ContainerLifecyle struct { PostStart *ContainerLifecyleHandler `json:"post_start"` } +type ContainerProcMountType string + +const ( + // DefaultProcMount uses the container runtime defaults for readonly and masked + // paths for /proc. Most container runtimes mask certain paths in /proc to avoid + // accidental security exposure of special devices or information. + ContainerDefaultProcMount ContainerProcMountType = "Default" + + // UnmaskedProcMount bypasses the default masking behavior of the container + // runtime and ensures the newly created /proc the container stays in tact with + // no modifications. + ContainerUnmaskedProcMount ContainerProcMountType = "Unmasked" +) + type ContainerSecurityContext struct { RunAsUser *int64 `json:"run_as_user,omitempty"` RunAsGroup *int64 `json:"run_as_group,omitempty"` + // procMount denotes the type of proc mount to use for the containers. 
+ // The default is DefaultProcMount which uses the container runtime defaults for readonly and masked paths. + ProcMount ContainerProcMountType `json:"proc_mount"` } type ContainerResources struct { @@ -57,6 +74,10 @@ type ContainerResources struct { PidsMax *int `json:"pids_max"` // DevicesAllow will be set to devices.allow DevicesAllow []string `json:"devices_allow"` + // This flag only affects the cpuset controller. If the clone_children + // flag is enabled in a cgroup, a new cpuset cgroup will copy its + // configuration from the parent during initialization. + CpusetCloneChildren bool `json:"cpuset_clone_children"` } type ContainerSpec struct { @@ -64,6 +85,8 @@ type ContainerSpec struct { Image string `json:"image"` // Image pull policy ImagePullPolicy ImagePullPolicy `json:"image_pull_policy"` + // Image credential id + ImageCredentialId string `json:"image_credential_id"` // Command to execute (i.e., entrypoint for docker) Command []string `json:"command"` // Args for the Command (i.e. command for docker) @@ -149,11 +172,13 @@ var ( ) type ContainerVolumeMount struct { - Type ContainerVolumeMountType `json:"type"` - Disk *ContainerVolumeMountDisk `json:"disk"` - HostPath *ContainerVolumeMountHostPath `json:"host_path"` - Text *ContainerVolumeMountText `json:"text"` - CephFS *ContainerVolumeMountCephFS `json:"ceph_fs"` + // 用于标识当前 pod volume mount 的唯一性 + UniqueName string `json:"unique_name"` + Type ContainerVolumeMountType `json:"type"` + Disk *ContainerVolumeMountDisk `json:"disk"` + HostPath *ContainerVolumeMountHostPath `json:"host_path"` + Text *ContainerVolumeMountText `json:"text"` + CephFS *ContainerVolumeMountCephFS `json:"ceph_fs"` // Mounted read-only if true, read-write otherwise (false or unspecified). ReadOnly bool `json:"read_only"` // Path within the container at which the volume should be mounted. 
Must @@ -203,12 +228,24 @@ func (o ContainerVolumeMountDiskOverlay) IsValid() error { return nil } +type ContainerVolumeMountDiskPostOverlay struct { + // 宿主机底层目录 + HostLowerDir []string `json:"host_lower_dir"` + // 合并后要挂载到容器的目录 + ContainerTargetDir string `json:"container_target_dir"` +} + type ContainerVolumeMountDisk struct { - Index *int `json:"index,omitempty"` - Id string `json:"id"` - SubDirectory string `json:"sub_directory"` - StorageSizeFile string `json:"storage_size_file"` - Overlay *ContainerVolumeMountDiskOverlay `json:"overlay"` + Index *int `json:"index,omitempty"` + Id string `json:"id"` + SubDirectory string `json:"sub_directory"` + StorageSizeFile string `json:"storage_size_file"` + // lower overlay 设置,disk 的 volume 会作为 upper,最终 merged 的目录会传给容器 + Overlay *ContainerVolumeMountDiskOverlay `json:"overlay"` + // case insensitive feature is incompatible with overlayfs + CaseInsensitivePaths []string `json:"case_insensitive_paths"` + // 当 disk volume 挂载完后,需要 overlay 的目录设置 + PostOverlay []*ContainerVolumeMountDiskPostOverlay `json:"post_overlay"` } type ContainerVolumeMountHostPathType string diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/host/container.go b/vendor/yunion.io/x/onecloud/pkg/apis/host/container.go index ecec721b..6c530336 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/host/container.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/host/container.go @@ -21,12 +21,14 @@ import ( ) type ContainerVolumeMountDisk struct { - Index *int `json:"index,omitempty"` - Id string `json:"id"` - TemplateId string `json:"template_id"` - SubDirectory string `json:"sub_directory"` - StorageSizeFile string `json:"storage_size_file"` - Overlay *apis.ContainerVolumeMountDiskOverlay `json:"overlay"` + Index *int `json:"index,omitempty"` + Id string `json:"id"` + TemplateId string `json:"template_id"` + SubDirectory string `json:"sub_directory"` + StorageSizeFile string `json:"storage_size_file"` + Overlay *apis.ContainerVolumeMountDiskOverlay `json:"overlay"` + CaseInsensitivePaths []string `json:"case_insensitive_paths"` + PostOverlay []*apis.ContainerVolumeMountDiskPostOverlay `json:"post_overlay"` } type ContainerVolumeMountCephFS struct { @@ -38,11 +40,13 @@ type ContainerVolumeMountCephFS struct { } type ContainerVolumeMount struct { - Type apis.ContainerVolumeMountType `json:"type"` - Disk *ContainerVolumeMountDisk `json:"disk"` - HostPath *apis.ContainerVolumeMountHostPath `json:"host_path"` - Text *apis.ContainerVolumeMountText `json:"text"` - CephFS *ContainerVolumeMountCephFS `json:"ceph_fs"` + // 用于标识当前 pod volume mount 的唯一性 + UniqueName string `json:"unique_name"` + Type apis.ContainerVolumeMountType `json:"type"` + Disk *ContainerVolumeMountDisk `json:"disk"` + HostPath *apis.ContainerVolumeMountHostPath `json:"host_path"` + Text *apis.ContainerVolumeMountText `json:"text"` + CephFS *ContainerVolumeMountCephFS `json:"ceph_fs"` // Mounted read-only if true, read-write otherwise (false or unspecified). ReadOnly bool `json:"read_only"` // Path within the container at which the volume should be mounted. 
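The post_overlay machinery above lets extra host directories be overlay-mounted onto a disk volume after the volume itself is mounted; the add/remove inputs (in pkg/apis/compute/container.go earlier in this patch) address the target volume mount by index. A sketch of building the add input; the paths are purely illustrative:

package main

import (
	"fmt"

	"yunion.io/x/jsonutils"

	"yunion.io/x/onecloud/pkg/apis"
	computeapi "yunion.io/x/onecloud/pkg/apis/compute"
)

func main() {
	input := computeapi.ContainerVolumeMountAddPostOverlayInput{
		Index: 0, // first volume mount of the container
		PostOverlay: []*apis.ContainerVolumeMountDiskPostOverlay{
			{
				HostLowerDir:       []string{"/opt/models/base"},
				ContainerTargetDir: "/models",
			},
		},
	}
	fmt.Println(jsonutils.Marshal(input))
}

The matching remove input takes the same post_overlay list plus the UseLazy and ClearLayers flags controlling how the overlay is torn down.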
Must @@ -58,8 +62,9 @@ type ContainerVolumeMount struct { type ContainerSpec struct { apis.ContainerSpec - VolumeMounts []*ContainerVolumeMount `json:"volume_mounts"` - Devices []*ContainerDevice `json:"devices"` + ImageCredentialToken string `json:"image_credential_token"` + VolumeMounts []*ContainerVolumeMount `json:"volume_mounts"` + Devices []*ContainerDevice `json:"devices"` } type ContainerDevice struct { @@ -130,4 +135,5 @@ type ContainerStopInput struct { Timeout int64 `json:"timeout"` ShmSizeMB int `json:"shm_size_mb"` ContainerName string `json:"container_name"` + Force bool `json:"force"` } diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/identity/aksk.go b/vendor/yunion.io/x/onecloud/pkg/apis/identity/aksk.go index a41b7b51..715d95ce 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/identity/aksk.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/identity/aksk.go @@ -26,6 +26,7 @@ const ( RECOVERY_SECRETS_TYPE = "recovery_secret" OIDC_CREDENTIAL_TYPE = "oidc" ENCRYPT_KEY_TYPE = "enc_key" + CONTAINER_IMAGE_TYPE = "container_image" ) type SAccessKeySecretBlob struct { diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/identity/consts.go b/vendor/yunion.io/x/onecloud/pkg/apis/identity/consts.go index 0c67ccda..a302c673 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/identity/consts.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/identity/consts.go @@ -236,6 +236,7 @@ var ( // kubeserver blacklist options // ############################ "running_mode", + "enable_default_policy", }, } ) diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/identity/credential.go b/vendor/yunion.io/x/onecloud/pkg/apis/identity/credential.go index 94ec45d0..668f3e8d 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/identity/credential.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/identity/credential.go @@ -50,3 +50,15 @@ type CredentialCreateInput struct { // Ignore KeyHash string `json:"key_hash"` } + +type CredentialContainerImageBlob struct { + Username string `json:"username"` + Password string `json:"password"` + Auth string `json:"auth"` + ServerAddress string `json:"server_address,omitempty"` + // IdentityToken is used to authenticate the user and get + // an access token for the registry. 
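CredentialContainerImageBlob mirrors the shape of a Docker config.json auth entry, and is presumably the payload stored under the new container_image credential type from aksk.go. A sketch of populating it; the base64 "user:password" convention for Auth comes from Docker's format rather than anything this struct enforces, and every value below is invented:

package main

import (
	"encoding/base64"
	"fmt"

	identityapi "yunion.io/x/onecloud/pkg/apis/identity"
)

func main() {
	blob := identityapi.CredentialContainerImageBlob{
		Username:      "puller",
		Password:      "secret",
		Auth:          base64.StdEncoding.EncodeToString([]byte("puller:secret")),
		ServerAddress: "registry.example.com",
	}
	fmt.Printf("%+v\n", blob)
}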
+ IdentityToken string `json:"identity_token,omitempty"` + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registry_token,omitempty"` +} diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/monitor/alert.go b/vendor/yunion.io/x/onecloud/pkg/apis/monitor/alert.go index 612762ce..f1364168 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/monitor/alert.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/monitor/alert.go @@ -15,10 +15,12 @@ package monitor import ( + "reflect" "time" "yunion.io/x/jsonutils" "yunion.io/x/pkg/errors" + "yunion.io/x/pkg/gotypes" "yunion.io/x/onecloud/pkg/apis" ) @@ -84,20 +86,37 @@ type AlertSetting struct { Conditions []AlertCondition `json:"conditions"` } +func (s AlertSetting) String() string { + return jsonutils.Marshal(s).String() +} + +func (s AlertSetting) IsZero() bool { + return len(s.Conditions) == 0 +} + type AlertCondition struct { - Type string `json:"type"` - Query AlertQuery `json:"query"` - Reducer Condition `json:"reducer"` - Evaluator Condition `json:"evaluator"` - Operator string `json:"operator"` + Type string `json:"type"` + Query AlertQuery `json:"query"` + Reducer Condition `json:"reducer"` + ReducerOrder ResultReducerOrder `json:"reducer_order"` + Evaluator Condition `json:"evaluator"` + Operator string `json:"operator"` } +type ResultReducerOrder string + +const ( + RESULT_REDUCER_ORDER_ASC ResultReducerOrder = "asc" + RESULT_REDUCER_ORDER_DESC ResultReducerOrder = "desc" +) + type AlertQuery struct { Model MetricQuery `json:"model"` From string `json:"from"` To string `json:"to"` // 查询结果 reducer,执行 p95 这些操作 - ResultReducer *Condition `json:"result_reducer"` + ResultReducer *Condition `json:"result_reducer"` + ResultReducerOrder ResultReducerOrder `json:"result_reducer_order"` } type AlertCreateInput struct { @@ -188,6 +207,7 @@ type EvalMatch struct { Tags map[string]string `json:"tags"` Unit string `json:"unit"` AlertDetails jsonutils.JSONObject `json:"alert_details"` + IsRecovery bool `json:"is_recovery"` } type AlertTestRunOutput struct { @@ -212,3 +232,9 @@ type AlertPauseInput struct { Paused bool `json:"paused"` } + +func init() { + gotypes.RegisterSerializable(reflect.TypeOf(&AlertSetting{}), func() gotypes.ISerializable { + return &AlertSetting{} + }) +} diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/monitor/alertdashboard.go b/vendor/yunion.io/x/onecloud/pkg/apis/monitor/alertdashboard.go index 0c2622c3..4df79b05 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/monitor/alertdashboard.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/monitor/alertdashboard.go @@ -15,8 +15,6 @@ package monitor import ( - "yunion.io/x/jsonutils" - "yunion.io/x/onecloud/pkg/apis" ) @@ -41,7 +39,7 @@ type AlertPanelDetail struct { PanelName string `json:"panel_name"` PanelId string `json:"panel_id"` Refresh string `json:"refresh"` - Setting jsonutils.JSONObject + Setting *AlertSetting PanelDetails } @@ -53,3 +51,10 @@ type AlertClonePanelInput struct { type AlertCloneDashboardInput struct { CloneName string `json:"clone_name"` } + +type AlertPanelSetOrderInput struct { + Order []struct { + PanelId string `json:"panel_id"` + Index int `json:"index"` + } `json:"order"` +} diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/monitor/alertjoint.go b/vendor/yunion.io/x/onecloud/pkg/apis/monitor/alertjoint.go new file mode 100644 index 00000000..2d2e5aad --- /dev/null +++ b/vendor/yunion.io/x/onecloud/pkg/apis/monitor/alertjoint.go @@ -0,0 +1,28 @@ +// Copyright 2019 Yunion +// +// Licensed under the Apache License, Version 
2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monitor + +import "yunion.io/x/onecloud/pkg/apis" + +type AlertJointCreateInput struct { + apis.Meta + + AlertId string `json:"alert_id"` +} + +type AlertJointListInput struct { + apis.JointResourceBaseListInput + AlertIds []string `json:"alert_ids"` +} diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/monitor/alertnotification.go b/vendor/yunion.io/x/onecloud/pkg/apis/monitor/alertnotification.go index 77f55ac0..14322b72 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/monitor/alertnotification.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/monitor/alertnotification.go @@ -35,12 +35,7 @@ type AlertJointResourceBaseDetails struct { type AlertnotificationDetails struct { AlertJointResourceBaseDetails Notification string `json:"notification"` -} - -type AlertJointCreateInput struct { - apis.Meta - - AlertId string `json:"alert_id"` + Frequency int64 `json:"frequency"` } type AlertnotificationCreateInput struct { @@ -50,3 +45,7 @@ type AlertnotificationCreateInput struct { UsedBy string `json:"used_by"` Params jsonutils.JSONObject `json:"params"` } + +type AlertNotificationListInput struct { + AlertJointListInput +} diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/monitor/alertrecord.go b/vendor/yunion.io/x/onecloud/pkg/apis/monitor/alertrecord.go index 1dc9e950..3dbaaacb 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/monitor/alertrecord.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/monitor/alertrecord.go @@ -39,6 +39,7 @@ type AlertRecordListInput struct { ResType string `json:"res_type"` Alerting bool `json:"alerting"` ResName string `json:"res_name"` + ResId string `json:"res_id"` } type AlertRecordDetails struct { diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/monitor/template.go b/vendor/yunion.io/x/onecloud/pkg/apis/monitor/template.go index 5900f33a..8f35f4cf 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/monitor/template.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/monitor/template.go @@ -19,10 +19,10 @@ type NotificationTemplateCreateInput struct { } type NotificationTemplateConfig struct { - Title string `json:"title"` - Name string `json:"name"` - ResourceName string `json:"resource_name"` - Matches []EvalMatch `json:"matches"` + Title string `json:"title"` + Name string `json:"name"` + ResourceName string `json:"resource_name"` + Matches []*EvalMatch `json:"matches"` // PrevAlertState AlertStateType `json:"prev_alert_state"` // State AlertStateType `json:"state"` NoDataFound bool `json:"no_data"` diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/monitor/unifiedmonitor_query.go b/vendor/yunion.io/x/onecloud/pkg/apis/monitor/unifiedmonitor_query.go index 64fd91d9..9bc30638 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/monitor/unifiedmonitor_query.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/monitor/unifiedmonitor_query.go @@ -23,7 +23,7 @@ var ( UNIFIED_MONITOR_FIELD_OPT_TYPE = []string{"Aggregations", "Selectors"} UNIFIED_MONITOR_GROUPBY_OPT_TYPE = []string{"time", "tag", "fill"} UNIFIED_MONITOR_FIELD_OPT_VALUE = map[string][]string{ - 
"Aggregations": {"MEAN", "SUM"}, // {"COUNT", "DISTINCT", "INTEGRAL", "MEAN", "MEDIAN", "MODE", "STDDEV", "SUM"}, + "Aggregations": {"MEAN", "SUM", "MAX", "MIN"}, // {"COUNT", "DISTINCT", "INTEGRAL", "MEAN", "MEDIAN", "MODE", "STDDEV", "SUM"}, "Selectors": {"BOTTOM", "FIRST", "LAST", "MAX", "MIN", "TOP"}, } UNIFIED_MONITOR_GROUPBY_OPT_VALUE = map[string][]string{ @@ -65,6 +65,27 @@ var ( } ) +func GetMeasurementTagIdKeyByResType(resType string) string { + return MEASUREMENT_TAG_ID[resType] +} + +func GetMeasurementTagIdKeyByResTypeWithDefault(resType string) string { + tagId := GetMeasurementTagIdKeyByResType(resType) + if len(tagId) == 0 { + tagId = "host_id" + } + return tagId +} + +func GetMeasurementResourceId(tags map[string]string, resType string) string { + return tags[GetMeasurementTagIdKeyByResType(resType)] +} + +func GetResourceIdFromTagWithDefault(tags map[string]string, resType string) string { + tagId := GetMeasurementTagIdKeyByResTypeWithDefault(resType) + return tags[tagId] +} + type MetricFunc struct { FieldOptType []string `json:"field_opt_type"` FieldOptValue map[string][]string `json:"field_opt_value"` diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/monitor/zz_generated.model.go b/vendor/yunion.io/x/onecloud/pkg/apis/monitor/zz_generated.model.go index 55178c15..0fd70a45 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/monitor/zz_generated.model.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/monitor/zz_generated.model.go @@ -29,11 +29,11 @@ type SAlert struct { apis.SStatusStandaloneResourceBase SMonitorScopedResource // Frequency is evaluate period - Frequency int64 `json:"frequency"` - Settings jsonutils.JSONObject `json:"settings"` - Level string `json:"level"` - Message string `json:"message"` - UsedBy string `json:"used_by"` + Frequency int64 `json:"frequency"` + Settings *AlertSetting `json:"settings"` + Level string `json:"level"` + Message string `json:"message"` + UsedBy string `json:"used_by"` // Silenced bool ExecutionError string `json:"execution_error"` // If an alert rule has a configured `For` and the query violates the configured threshold @@ -65,14 +65,15 @@ type SAlertDashboardPanel struct { apis.SVirtualJointResourceBase DashboardId string `json:"dashboard_id"` PanelId string `json:"panel_id"` + Index int `json:"index"` } // SAlertPanel is an autogenerated struct via yunion.io/x/onecloud/pkg/monitor/models.SAlertPanel. type SAlertPanel struct { apis.SStatusStandaloneResourceBase apis.SScopedResourceBase - Settings jsonutils.JSONObject `json:"settings"` - Message string `json:"message"` + Settings *AlertSetting `json:"settings"` + Message string `json:"message"` } // SAlertRecord is an autogenerated struct via yunion.io/x/onecloud/pkg/monitor/models.SAlertRecord. 
diff --git a/vendor/yunion.io/x/onecloud/pkg/apis/scheduler/api.go b/vendor/yunion.io/x/onecloud/pkg/apis/scheduler/api.go index 5baf8900..dd2e2aa5 100644 --- a/vendor/yunion.io/x/onecloud/pkg/apis/scheduler/api.go +++ b/vendor/yunion.io/x/onecloud/pkg/apis/scheduler/api.go @@ -87,8 +87,12 @@ type ScheduleInput struct { OsArch string `json:"os_arch"` ResetCpuNumaPin bool `json:"reset_cpu_numa_pin"` - // For Migrate - CpuNumaPin []SCpuNumaPin `json:"cpu_numa_pin"` + ExtraCpuCount int `json:"extra_cpu_count"` + CpuNumaPin []SCpuNumaPin `json:"cpu_numa_pin"` + PreferNumaNodes []int `json:"prefer_numa_nodes"` + + // GuestIds + GuestIds []string `json:"guest_ids"` HostMemPageSizeKB int `json:"host_mem_page_size"` SkipKernelCheck *bool `json:"skip_kernel_check"` @@ -198,9 +202,10 @@ type SCpuPin struct { } type SCpuNumaPin struct { - CpuPin []int - NodeId int - MemSizeMB *int + CpuPin []int + NodeId int + MemSizeMB *int + ExtraCpuCount int } type CandidateResource struct { diff --git a/vendor/yunion.io/x/onecloud/pkg/cloudcommon/cmdline/parser.go b/vendor/yunion.io/x/onecloud/pkg/cloudcommon/cmdline/parser.go index 17e884bc..594ca6b9 100644 --- a/vendor/yunion.io/x/onecloud/pkg/cloudcommon/cmdline/parser.go +++ b/vendor/yunion.io/x/onecloud/pkg/cloudcommon/cmdline/parser.go @@ -148,6 +148,23 @@ func ParseDiskConfig(diskStr string, idx int) (*compute.DiskConfig, error) { return nil, errors.Errorf("invalid disk fs %s, allow choices: %s", str, osprofile.FS_TYPES) } diskConfig.Fs = str + case "fs_features": + if diskConfig.Fs == "" { + return nil, errors.Errorf("disk fs is required") + } + diskConfig.FsFeatures = &compute.DiskFsFeatures{} + for _, feature := range strings.Split(str, ",") { + if diskConfig.Fs == "ext4" { + if diskConfig.FsFeatures.Ext4 == nil { + diskConfig.FsFeatures.Ext4 = &compute.DiskFsExt4Features{} + } + if feature == "casefold" { + diskConfig.FsFeatures.Ext4.CaseInsensitive = true + } else { + return nil, errors.Errorf("invalid feature %s of %s", feature, diskConfig.Fs) + } + } + } case "format": if !utils.IsInStringArray(str, osprofile.IMAGE_FORMAT_TYPES) { return nil, errors.Errorf("invalid disk format %s, allow choices: %s", str, osprofile.IMAGE_FORMAT_TYPES) diff --git a/vendor/yunion.io/x/onecloud/pkg/cloudcommon/consts/db.go b/vendor/yunion.io/x/onecloud/pkg/cloudcommon/consts/db.go index f23999a3..172bc44c 100644 --- a/vendor/yunion.io/x/onecloud/pkg/cloudcommon/consts/db.go +++ b/vendor/yunion.io/x/onecloud/pkg/cloudcommon/consts/db.go @@ -31,8 +31,18 @@ var ( localTaskWorkerCount int enableChangeOwnerAutoRename = false + + enableDefaultPolicy = true ) +func SetDefaultPolicy(enable bool) { + enableDefaultPolicy = enable +} + +func IsEnableDefaultPolicy() bool { + return enableDefaultPolicy == true +} + func SetDefaultDB(dialect, connStr string) { defaultDBDialect = dialect defaultDBConnectionString = connStr diff --git a/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/opslog_const.go b/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/opslog_const.go index 940386ad..fb7ec286 100644 --- a/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/opslog_const.go +++ b/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/opslog_const.go @@ -333,4 +333,10 @@ const ( ACT_STOP_RESCUE_FAILED = "stop_rescue_failed" ACT_RE_BILLING = "re_billing" + + ACT_CLONE = "clone" + ACT_CLONE_FAILED = "clone_failed" + + ACT_REBUILD = "rebuild" + ACT_REBUILD_FAILED = "rebuild_failed" ) diff --git a/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/sharablebase.go 
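In the fs_features branch of ParseDiskConfig above, token order matters: fs must be set before fs_features appears, and casefold (mapped to ext4's CaseInsensitive) is the only feature the parser accepts so far. A usage sketch; the full disk spec grammar is richer than the string shown here:

package main

import (
	"fmt"

	"yunion.io/x/onecloud/pkg/cloudcommon/cmdline"
)

func main() {
	conf, err := cmdline.ParseDiskConfig("fs=ext4:fs_features=casefold", 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(conf.FsFeatures.Ext4.CaseInsensitive) // true
}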
b/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/sharablebase.go index e4f66a43..1b0d5233 100644 --- a/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/sharablebase.go +++ b/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/sharablebase.go @@ -346,9 +346,17 @@ func SharableManagerFilterByOwner(ctx context.Context, manager IStandaloneModelM )) } if !result.ProjectTags.IsEmpty() && resScope == rbacscope.ScopeProject { + subq := manager.Query("id") policyTagFilters := tagutils.STagFilters{} policyTagFilters.AddFilters(result.ProjectTags) - q = ObjectIdQueryWithTagFilters(ctx, q, "tenant_id", "project", policyTagFilters) + subq = ObjectIdQueryWithTagFilters(ctx, subq, "tenant_id", "project", policyTagFilters) + q = q.Filter(sqlchemy.OR( + sqlchemy.In(q.Field("id"), subq.SubQuery()), + sqlchemy.AND( + sqlchemy.IsTrue(q.Field("is_public")), + sqlchemy.Equals(q.Field("public_scope"), rbacscope.ScopeSystem), + ), + )) } if !result.ObjectTags.IsEmpty() { policyTagFilters := tagutils.STagFilters{} diff --git a/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/standalone_anon.go b/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/standalone_anon.go index 3236ad14..2d4c9d45 100644 --- a/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/standalone_anon.go +++ b/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/standalone_anon.go @@ -369,20 +369,21 @@ func (model *SStandaloneAnonResourceBase) SetOrganizationMetadataAll(ctx context return errors.Wrap(err, "SetAllOrganization") } } - { - userTags := make(map[string]interface{}) - for k, _ := range meta { - if strings.HasPrefix(k, ORGANIZATION_TAG_PREFIX) { - k = k[len(ORGANIZATION_TAG_PREFIX):] - } - k = USER_TAG_PREFIX + k - userTags[k] = "none" - } - err := Metadata.SetValuesWithLog(ctx, model, userTags, userCred) - if err != nil { - return errors.Wrap(err, "SetValuesWithLog userTags") - } - } + // 避免加入组织架构后,项目所在的层级会移除此项目 + //{ + // userTags := make(map[string]interface{}) + // for k, _ := range meta { + // if strings.HasPrefix(k, ORGANIZATION_TAG_PREFIX) { + // k = k[len(ORGANIZATION_TAG_PREFIX):] + // } + // k = USER_TAG_PREFIX + k + // userTags[k] = "none" + // } + // err := Metadata.SetValuesWithLog(ctx, model, userTags, userCred) + // if err != nil { + // return errors.Wrap(err, "SetValuesWithLog userTags") + // } + //} return nil } diff --git a/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/statusbase.go b/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/statusbase.go index fde4c803..38d894fd 100644 --- a/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/statusbase.go +++ b/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/statusbase.go @@ -21,6 +21,7 @@ import ( "yunion.io/x/jsonutils" "yunion.io/x/pkg/errors" + "yunion.io/x/pkg/util/sets" "yunion.io/x/pkg/utils" "yunion.io/x/sqlchemy" @@ -105,7 +106,13 @@ func statusBaseSetStatus(ctx context.Context, model IStatusBaseModel, userCred m } OpsLog.LogEvent(model, ACT_UPDATE_STATUS, notes, userCred) success := true - if strings.Contains(status, "fail") || status == apis.STATUS_UNKNOWN || status == api.CLOUD_PROVIDER_DISCONNECTED { + isFail := false + for _, sub := range []string{"fail", "crash"} { + if strings.Contains(status, sub) { + isFail = true + } + } + if isFail || sets.NewString(apis.STATUS_UNKNOWN, api.CLOUD_PROVIDER_DISCONNECTED, api.CONTAINER_STATUS_CRASH_LOOP_BACK_OFF).Has(status) { success = false } logclient.AddSimpleActionLog(model, logclient.ACT_UPDATE_STATUS, notes, userCred, success) diff --git a/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/taskman/noop_task.go 
b/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/taskman/noop_task.go new file mode 100644 index 00000000..824952ad --- /dev/null +++ b/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/taskman/noop_task.go @@ -0,0 +1,50 @@ +// Copyright 2019 Yunion +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package taskman + +import ( + "context" + + "yunion.io/x/jsonutils" + "yunion.io/x/pkg/errors" + + "yunion.io/x/onecloud/pkg/cloudcommon/db" + "yunion.io/x/onecloud/pkg/mcclient" +) + +type NoopTask struct { + STask +} + +func init() { + RegisterTask(NoopTask{}) +} + +func (task *NoopTask) OnInit(ctx context.Context, obj db.IStandaloneModel, body jsonutils.JSONObject) { + task.SetStageComplete(ctx, nil) +} + +func StartNoopTask(ctx context.Context, userCred mcclient.TokenCredential, obj db.IStandaloneModel, parentTaskId string) error { + params := jsonutils.NewDict() + task, err := TaskManager.NewTask(ctx, "NoopTask", obj, userCred, params, parentTaskId, "") + if err != nil { + return errors.Wrap(err, "NewTask") + } + err = task.ScheduleRun(nil) + if err != nil { + return errors.Wrap(err, "ScheduleRun") + } + return nil +} diff --git a/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/taskman/subtasks.go b/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/taskman/subtasks.go index 586e7078..f3e2eecc 100644 --- a/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/taskman/subtasks.go +++ b/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/taskman/subtasks.go @@ -43,6 +43,7 @@ func init() { "subtask", "subtasks", )} + SubTaskManager.SetVirtualObject(SubTaskManager) } type SSubTask struct { diff --git a/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/taskman/tasks.go b/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/taskman/tasks.go index b35ffb4d..dbe1e396 100644 --- a/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/taskman/tasks.go +++ b/vendor/yunion.io/x/onecloud/pkg/cloudcommon/db/taskman/tasks.go @@ -210,26 +210,26 @@ func (manager *STaskManager) FetchTaskById(taskId string) *STask { return manager.fetchTask(taskId) } -func (self *STask) ValidateDeleteCondition(ctx context.Context, info jsonutils.JSONObject) error { +func (task *STask) ValidateDeleteCondition(ctx context.Context, info jsonutils.JSONObject) error { return httperrors.NewForbiddenError("forbidden") } -func (self *STask) ValidateUpdateCondition(ctx context.Context) error { +func (task *STask) ValidateUpdateCondition(ctx context.Context) error { return httperrors.NewForbiddenError("forbidden") } -func (self *STask) BeforeInsert() { - if len(self.Id) == 0 { - self.Id = stringutils.UUID4() +func (task *STask) BeforeInsert() { + if len(task.Id) == 0 { + task.Id = stringutils.UUID4() } } -func (self *STask) GetId() string { - return self.Id +func (task *STask) GetId() string { + return task.Id } -func (self *STask) GetName() string { - return self.TaskName +func (task *STask) GetName() string { + return task.TaskName } func (task *STask) saveStartAt() { @@ -431,8 +431,9 @@ func (manager *STaskManager) NewParallelTask( parentTask := 
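NoopTask above completes itself in OnInit, giving callers a way to run an object through the task machinery when there is no real work to do (for example, to satisfy a flow that expects a task to flip the object's status). A usage sketch; the wrapping function is hypothetical:

package example

import (
	"context"

	"yunion.io/x/pkg/errors"

	"yunion.io/x/onecloud/pkg/cloudcommon/db"
	"yunion.io/x/onecloud/pkg/cloudcommon/db/taskman"
	"yunion.io/x/onecloud/pkg/mcclient"
)

func startNoop(ctx context.Context, userCred mcclient.TokenCredential, obj db.IStandaloneModel) error {
	// "" means the noop task has no parent task to notify.
	if err := taskman.StartNoopTask(ctx, userCred, obj, ""); err != nil {
		return errors.Wrap(err, "StartNoopTask")
	}
	return nil
}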
task.GetParentTask()
 	if parentTask != nil {
-		st := SSubTask{TaskId: parentTask.Id, Stage: parentTask.Stage, SubtaskId: task.Id}
-		err := SubTaskManager.TableSpec().Insert(ctx, &st)
+		st := &SSubTask{TaskId: parentTask.Id, Stage: parentTask.Stage, SubtaskId: task.Id}
+		st.SetModelManager(SubTaskManager, st)
+		err := SubTaskManager.TableSpec().Insert(ctx, st)
 		if err != nil {
 			log.Errorf("Subtask insert error %s", err)
 			return nil, err
@@ -661,7 +662,7 @@ func execITask(taskValue reflect.Value, task *STask, odata jsonutils.JSONObject,
 		}
 	}()
 
-	log.Debugf("Call %s %s %#v", task.TaskName, stageName, params)
+	log.Debugf("Call %s(%s) %s %#v", task.TaskName, task.Id, stageName, params)
 	funcValue.Call(params)
 
 	// call save request context
@@ -673,41 +674,41 @@ func (task *STask) ScheduleRun(data jsonutils.JSONObject) error {
 	return runTask(task.Id, data)
 }
 
-func (self *STask) IsSubtask() bool {
-	return self.HasParentTask()
+func (task *STask) IsSubtask() bool {
+	return task.HasParentTask()
 }
 
-func (self *STask) GetParentTaskId() string {
-	if len(self.ParentTaskId) > 0 {
-		return self.ParentTaskId
+func (task *STask) GetParentTaskId() string {
+	if len(task.ParentTaskId) > 0 {
+		return task.ParentTaskId
 	}
-	parentTaskId, _ := self.Params.GetString(PARENT_TASK_ID_KEY)
+	parentTaskId, _ := task.Params.GetString(PARENT_TASK_ID_KEY)
 	if len(parentTaskId) > 0 {
 		return parentTaskId
 	}
 	return ""
 }
 
-func (self *STask) HasParentTask() bool {
-	parentTaskId := self.GetParentTaskId()
+func (task *STask) HasParentTask() bool {
+	parentTaskId := task.GetParentTaskId()
 	if len(parentTaskId) > 0 {
 		return true
 	}
 	return false
 }
 
-func (self *STask) GetParentTask() *STask {
-	parentTaskId := self.GetParentTaskId()
+func (task *STask) GetParentTask() *STask {
+	parentTaskId := task.GetParentTaskId()
 	if len(parentTaskId) > 0 {
 		return TaskManager.fetchTask(parentTaskId)
 	}
 	return nil
 }
 
-func (self *STask) GetRequestContext() appctx.AppContextData {
+func (task *STask) GetRequestContext() appctx.AppContextData {
 	ctxData := appctx.AppContextData{}
-	if self.Params != nil {
-		ctxJson, _ := self.Params.Get(REQUEST_CONTEXT_KEY)
+	if task.Params != nil {
+		ctxJson, _ := task.Params.Get(REQUEST_CONTEXT_KEY)
 		if ctxJson != nil {
 			ctxJson.Unmarshal(&ctxData)
 		}
@@ -715,30 +716,30 @@ func (self *STask) GetRequestContext() appctx.AppContextData {
 	return ctxData
 }
 
-func (self *STask) SaveRequestContext(data *appctx.AppContextData) {
+func (task *STask) SaveRequestContext(data *appctx.AppContextData) {
 	jsonData := jsonutils.Marshal(data)
-	log.Debugf("SaveRequestContext %s param %s", jsonData, self.Params)
-	_, err := db.Update(self, func() error {
-		params := self.Params.CopyExcludes(REQUEST_CONTEXT_KEY)
+	log.Debugf("SaveRequestContext %s(%s) %s param %s", task.TaskName, task.Id, jsonData, task.Params)
+	_, err := db.Update(task, func() error {
+		params := task.Params.CopyExcludes(REQUEST_CONTEXT_KEY)
 		params.Add(jsonData, REQUEST_CONTEXT_KEY)
-		self.Params = params
-		self.EndAt = timeutils.UtcNow()
+		task.Params = params
+		task.EndAt = timeutils.UtcNow()
 		return nil
 	})
-	log.Debugf("Params: %s", self.Params)
+	log.Debugf("Params: %s(%s) %s", task.TaskName, task.Id, task.Params)
 	if err != nil {
 		log.Errorf("save_request_context fail %s", err)
 	}
 }
 
-func (self *STask) SaveParams(data *jsonutils.JSONDict) error {
-	return self.SetStage("", data)
+func (task *STask) SaveParams(data *jsonutils.JSONDict) error {
+	return task.SetStage("", data)
 }
 
-func (self *STask) SetStage(stageName string, data *jsonutils.JSONDict) error {
-	_, err := db.Update(self, func() error {
+func (task *STask) SetStage(stageName string, data *jsonutils.JSONDict) error {
+	_, err := db.Update(task, func() error {
 		params := jsonutils.NewDict()
-		params.Update(self.Params)
+		params.Update(task.Params)
 		if data != nil {
 			params.Update(data)
 		}
@@ -750,16 +751,16 @@ func (self *STask) SetStage(stageName string, data *jsonutils.JSONDict) error {
 			}
 			stageList := stages.(*jsonutils.JSONArray)
 			stageData := jsonutils.NewDict()
-			stageData.Add(jsonutils.NewString(self.Stage), "name")
+			stageData.Add(jsonutils.NewString(task.Stage), "name")
 			stageData.Add(jsonutils.NewTimeString(time.Now()), "complete_at")
 			stageList.Add(stageData)
-			self.Stage = stageName
+			task.Stage = stageName
 		}
-		self.Params = params
+		task.Params = params
 		return nil
 	})
 	if err != nil {
-		log.Errorf("set_stage fail %s", err)
+		log.Errorf("Task %s(%s) set_stage %s fail %s", task.TaskName, task.Id, stageName, err)
 	}
 	return err
 }
@@ -781,7 +782,7 @@ func (task *STask) GetObjectStr() string {
 }
 
 func (task *STask) SetStageComplete(ctx context.Context, data *jsonutils.JSONDict) {
-	log.Infof("XXX TASK %s complete", task.TaskName)
+	log.Infof("XXX TASK %s(%s) complete", task.TaskName, task.Id)
 	task.SetStage(TASK_STAGE_COMPLETE, data)
 	task.SetProgressAndStatus(100, taskStatusDone)
 	if data == nil {
@@ -795,19 +796,19 @@ func (task *STask) SetStageComplete(ctx context.Context, data *jsonutils.JSONDic
 	task.NotifyParentTaskComplete(ctx, data, false)
 }
 
-func (self *STask) SetStageFailed(ctx context.Context, reason jsonutils.JSONObject) {
-	if self.Stage == TASK_STAGE_FAILED {
-		log.Warningf("Task %s has been failed", self.TaskName)
+func (task *STask) SetStageFailed(ctx context.Context, reason jsonutils.JSONObject) {
+	if task.Stage == TASK_STAGE_FAILED {
+		log.Warningf("Task %s(%s) has been failed", task.TaskName, task.Id)
 		return
 	}
-	log.Infof("XXX TASK %s failed: %s on stage %s", self.TaskName, reason, self.Stage)
+	log.Infof("XXX TASK %s(%s) failed: %s on stage %s", task.TaskName, task.Id, reason, task.Stage)
 	reasonDict := jsonutils.NewDict()
-	reasonDict.Add(jsonutils.NewString(self.Stage), "stage")
+	reasonDict.Add(jsonutils.NewString(task.Stage), "stage")
 	if reason != nil {
 		reasonDict.Add(reason, "reason")
 	}
 	reason = reasonDict
-	prevFailed, _ := self.Params.Get("__failed_reason")
+	prevFailed, _ := task.Params.Get("__failed_reason")
 	if prevFailed != nil {
 		switch prevFailed.(type) {
 		case *jsonutils.JSONArray:
@@ -819,17 +820,17 @@ func (self *STask) SetStageFailed(ctx context.Context, reason jsonutils.JSONObje
 	}
 	data := jsonutils.NewDict()
 	data.Add(reason, "__failed_reason")
-	self.SetStage(TASK_STAGE_FAILED, data)
-	self.SetProgressAndStatus(100, taskStatusDone)
-	self.NotifyParentTaskFailure(ctx, reason)
+	task.SetStage(TASK_STAGE_FAILED, data)
+	task.SetProgressAndStatus(100, taskStatusDone)
+	task.NotifyParentTaskFailure(ctx, reason)
 }
 
-func (self *STask) NotifyParentTaskComplete(ctx context.Context, body *jsonutils.JSONDict, failed bool) {
-	log.Infof("notify_parent_task_complete: %s params %s", self.TaskName, self.Params)
-	parentTaskId := self.GetParentTaskId()
-	parentTaskNotify, _ := self.Params.GetString(PARENT_TASK_NOTIFY_KEY)
+func (task *STask) NotifyParentTaskComplete(ctx context.Context, body *jsonutils.JSONDict, failed bool) {
+	log.Infof("notify_parent_task_complete: %s(%s) params %s", task.TaskName, task.Id, task.Params)
+	parentTaskId := task.GetParentTaskId()
+	parentTaskNotify, _ := task.Params.GetString(PARENT_TASK_NOTIFY_KEY)
 	if len(parentTaskId) > 0 {
-		subTask := SubTaskManager.GetSubTask(parentTaskId, self.Id)
+		subTask := SubTaskManager.GetSubTask(parentTaskId, task.Id)
 		if subTask != nil {
 			subTask.SaveResults(failed, body)
 		}
@@ -871,19 +872,19 @@ func notifyRemoteTask(ctx context.Context, notifyUrl string, taskid string, body
 	log.Infof("Notify remote URL %s(%s) get acked: %s!", notifyUrl, taskid, body.String())
 }
 
-func (self *STask) NotifyParentTaskFailure(ctx context.Context, reason jsonutils.JSONObject) {
+func (task *STask) NotifyParentTaskFailure(ctx context.Context, reason jsonutils.JSONObject) {
 	body := jsonutils.NewDict()
 	body.Add(jsonutils.NewString("error"), "__status__")
-	body.Add(jsonutils.NewString(self.TaskName), "__task_name__")
+	body.Add(jsonutils.NewString(task.TaskName), "__task_name__")
 	body.Add(reason, "__reason__")
-	self.NotifyParentTaskComplete(ctx, body, true)
+	task.NotifyParentTaskComplete(ctx, body, true)
 }
 
-func (self *STask) IsCurrentStageComplete() bool {
-	totalSubtasksCnt, _ := SubTaskManager.GetTotalSubtasksCount(self.Id, self.Stage)
-	initSubtasksCnt, _ := SubTaskManager.GetInitSubtasksCount(self.Id, self.Stage)
-	log.Debugf("Task %s IsCurrentStageComplete totalSubtasks %d initSubtasks %d ", self.String(), totalSubtasksCnt, initSubtasksCnt)
-	self.SetProgress(float32(totalSubtasksCnt-initSubtasksCnt) / float32(totalSubtasksCnt))
+func (task *STask) IsCurrentStageComplete() bool {
+	totalSubtasksCnt, _ := SubTaskManager.GetTotalSubtasksCount(task.Id, task.Stage)
+	initSubtasksCnt, _ := SubTaskManager.GetInitSubtasksCount(task.Id, task.Stage)
+	log.Debugf("Task %s IsCurrentStageComplete totalSubtasks %d initSubtasks %d ", task.String(), totalSubtasksCnt, initSubtasksCnt)
+	task.SetProgress(float32(totalSubtasksCnt-initSubtasksCnt) / float32(totalSubtasksCnt))
 	if totalSubtasksCnt > 0 && initSubtasksCnt == 0 {
 		return true
 	} else {
@@ -891,10 +892,10 @@ func (self *STask) IsCurrentStageComplete() bool {
 	}
 }
 
-func (self *STask) GetPendingUsage(quota quotas.IQuota, index int) error {
+func (task *STask) GetPendingUsage(quota quotas.IQuota, index int) error {
 	key := pendingUsageKey(index)
-	if self.Params.Contains(key) {
-		quotaJson, err := self.Params.Get(key)
+	if task.Params.Contains(key) {
+		quotaJson, err := task.Params.Get(key)
 		if err != nil {
 			return errors.Wrapf(err, "task.Params.Get %s", key)
 		}
@@ -914,12 +915,12 @@ func pendingUsageKey(index int) string {
 	return key
 }
 
-func (self *STask) SetPendingUsage(quota quotas.IQuota, index int) error {
-	_, err := db.Update(self, func() error {
+func (task *STask) SetPendingUsage(quota quotas.IQuota, index int) error {
+	_, err := db.Update(task, func() error {
 		key := pendingUsageKey(index)
-		params := self.Params.CopyExcludes(key)
+		params := task.Params.CopyExcludes(key)
 		params.Add(jsonutils.Marshal(quota), key)
-		self.Params = params
+		task.Params = params
 		return nil
 	})
 	if err != nil {
@@ -928,11 +929,11 @@ func (self *STask) SetPendingUsage(quota quotas.IQuota, index int) error {
 	return err
 }
 
-func (self *STask) ClearPendingUsage(index int) error {
-	_, err := db.Update(self, func() error {
+func (task *STask) ClearPendingUsage(index int) error {
+	_, err := db.Update(task, func() error {
 		key := pendingUsageKey(index)
-		params := self.Params.CopyExcludes(key)
-		self.Params = params
+		params := task.Params.CopyExcludes(key)
+		task.Params = params
 		return nil
 	})
 	if err != nil {
@@ -941,24 +942,24 @@ func (self *STask) ClearPendingUsage(index int) error {
 	return err
 }
 
-func (self *STask) GetParams() *jsonutils.JSONDict {
-	return self.Params
+func (task *STask) GetParams() *jsonutils.JSONDict {
+	return task.Params
 }
 
-func (self *STask) GetUserCred() mcclient.TokenCredential {
-	return self.UserCred
+func (task *STask) GetUserCred() mcclient.TokenCredential {
+	return task.UserCred
 }
 
-func (self *STask) GetTaskId() string {
-	return self.GetId()
+func (task *STask) GetTaskId() string {
+	return task.GetId()
}
 
-func (self *STask) GetObject() db.IStandaloneModel {
-	return self.taskObject
+func (task *STask) GetObject() db.IStandaloneModel {
+	return task.taskObject
 }
 
-func (self *STask) GetObjects() []db.IStandaloneModel {
-	return self.taskObjects
+func (task *STask) GetObjects() []db.IStandaloneModel {
+	return task.taskObjects
 }
 
 func (task *STask) GetTaskRequestHeader() http.Header {
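
Note on the tasks.go hunk above: the SSubTask row is now built as a pointer and given its model manager before insertion. A self-contained sketch of that pattern with stand-in types (not the real taskman API):

package main

import "fmt"

// Stand-in types; the real SSubTask / SubTaskManager live in the taskman
// package. The point: a hand-built row must carry a reference to its
// manager before Insert, or manager-dependent behavior is skipped.
type subTaskManager struct{ table string }

type subTask struct {
	TaskId, Stage, SubtaskId string
	manager                  *subTaskManager
}

func (t *subTask) SetModelManager(m *subTaskManager) { t.manager = m }

func (m *subTaskManager) Insert(t *subTask) error {
	if t.manager == nil {
		return fmt.Errorf("row has no model manager set")
	}
	fmt.Printf("INSERT INTO %s VALUES (%q, %q, %q)\n", m.table, t.TaskId, t.Stage, t.SubtaskId)
	return nil
}

func main() {
	mgr := &subTaskManager{table: "subtasks_tbl"}
	st := &subTask{TaskId: "parent-id", Stage: "on_init", SubtaskId: "child-id"}
	st.SetModelManager(mgr) // set before Insert, mirroring the fix above
	if err := mgr.Insert(st); err != nil {
		fmt.Println(err)
	}
}
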
diff --git a/vendor/yunion.io/x/onecloud/pkg/cloudcommon/options/options.go b/vendor/yunion.io/x/onecloud/pkg/cloudcommon/options/options.go
index cd3c1fed..97da6399 100644
--- a/vendor/yunion.io/x/onecloud/pkg/cloudcommon/options/options.go
+++ b/vendor/yunion.io/x/onecloud/pkg/cloudcommon/options/options.go
@@ -19,7 +19,7 @@ import (
 	"crypto/x509"
 	"encoding/pem"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"os"
@@ -123,6 +123,7 @@ type BaseOptions struct {
 	EnableAppProfiling bool `help:"enable profiling API" default:"false"`
 	EnableChangeOwnerAutoRename bool `help:"Allows renaming when changing names" default:"false"`
+	EnableDefaultPolicy bool `help:"Enable default policies" default:"true"`
 }
 
 const (
@@ -220,7 +221,7 @@ func (opt *EtcdOptions) GetEtcdTLSConfig() (*tls.Config, error) {
 		opt.EtcdUseTLS = true
 	}
 	if opt.EtcdCacert != "" {
-		data, err := ioutil.ReadFile(opt.EtcdCacert)
+		data, err := os.ReadFile(opt.EtcdCacert)
 		if err != nil {
 			return nil, errors.Wrap(err, "read cacert file")
 		}
@@ -377,7 +378,7 @@ func parseOptions(optStruct interface{}, args []string, configFileName string, s
 		h.Init()
 		log.DisableColors()
 		log.Logger().AddHook(h)
-		log.Logger().Out = ioutil.Discard
+		log.Logger().Out = io.Discard
 		atexit.Register(atexit.ExitHandler{
 			Prio:   atexit.PRIO_LOG_CLOSE,
 			Reason: "deinit log rotate hook",
@@ -393,6 +394,7 @@ func parseOptions(optStruct interface{}, args []string, configFileName string, s
 		consts.SetRegion(optionsRef.Region)
 	}
 
+	consts.SetDefaultPolicy(optionsRef.EnableDefaultPolicy)
 	consts.SetDomainizedNamespace(optionsRef.DomainizedNamespace)
 
 	consts.SetTaskWorkerCount(optionsRef.TaskWorkerCount)
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/app.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/app.go
index fa71be31..4d0d58a8 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/app.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/app.go
@@ -39,6 +39,8 @@ import (
 	"yunion.io/x/onecloud/pkg/util/stringutils2"
 )
 
+// +onecloud:swagger-gen-model-singular=webapp
+// +onecloud:swagger-gen-model-plural=webapps
 type SAppManager struct {
 	db.SVirtualResourceBaseManager
 	db.SExternalizedResourceBaseManager
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/app_environment.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/app_environment.go
index 563180e3..5be6fa51 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/app_environment.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/app_environment.go
@@ -30,6 +30,8 @@ import (
 	"yunion.io/x/onecloud/pkg/util/stringutils2"
 )
 
+// +onecloud:swagger-gen-model-singular=webappenvironment
+// +onecloud:swagger-gen-model-plural=webappenvironments
 type SAppEnvironmentManager struct {
 	db.SVirtualResourceBaseManager
 	db.SExternalizedResourceBaseManager
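
The EnableDefaultPolicy option above is declared through the same tag-driven option style as its neighbors. A stripped-down, illustrative reconstruction of how a default:"true" struct tag can seed a bool option (the real parser in pkg/cloudcommon/options does much more):

package main

import (
	"fmt"
	"reflect"
	"strconv"
)

// Illustrative only: a minimal version of mapping `default:"true"` onto a
// bool field of an options struct via reflection.
type options struct {
	EnableDefaultPolicy bool `help:"Enable default policies" default:"true"`
}

func applyDefaults(v interface{}) {
	rv := reflect.ValueOf(v).Elem()
	rt := rv.Type()
	for i := 0; i < rt.NumField(); i++ {
		if d, ok := rt.Field(i).Tag.Lookup("default"); ok && rv.Field(i).Kind() == reflect.Bool {
			b, _ := strconv.ParseBool(d)
			rv.Field(i).SetBool(b)
		}
	}
}

func main() {
	opt := &options{}
	applyDefaults(opt)
	fmt.Println("EnableDefaultPolicy:", opt.EnableDefaultPolicy) // true
}
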
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/backup_storage.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/backup_storage.go
index f8778415..9cc0c938 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/backup_storage.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/backup_storage.go
@@ -33,6 +33,8 @@ import (
 	"yunion.io/x/onecloud/pkg/util/stringutils2"
 )
 
+// +onecloud:swagger-gen-model-singular=backupstorage
+// +onecloud:swagger-gen-model-plural=backupstorages
 type SBackupStorageManager struct {
 	db.SEnabledStatusInfrasResourceBaseManager
 }
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/baremetalagents.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/baremetalagents.go
index 652645d2..7d533c01 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/baremetalagents.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/baremetalagents.go
@@ -32,6 +32,8 @@ import (
 	"yunion.io/x/onecloud/pkg/util/stringutils2"
 )
 
+// +onecloud:swagger-gen-model-singular=baremetalagent
+// +onecloud:swagger-gen-model-plural=baremetalagents
 type SBaremetalagentManager struct {
 	db.SStandaloneResourceBaseManager
 	SZoneResourceBaseManager
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/capabilities.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/capabilities.go
index f824e9a8..5435c5b9 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/capabilities.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/capabilities.go
@@ -854,8 +854,8 @@ func getIsolatedDeviceInfo(ctx context.Context, userCred mcclient.TokenCredentia
 	}
 	hosts := hostQuery.SubQuery()
 
-	q := devices.Query(devices.Field("model"), devices.Field("dev_type"), devices.Field("nvme_size_mb"))
-	q = q.Filter(sqlchemy.NotIn(devices.Field("dev_type"), []string{api.USB_TYPE, api.NIC_TYPE, api.NVME_PT_TYPE}))
+	q := devices.Query(hosts.Field("host_type"), devices.Field("model"), devices.Field("dev_type"), devices.Field("nvme_size_mb"))
+	q = q.Filter(sqlchemy.NotIn(devices.Field("dev_type"), []string{api.USB_TYPE, api.NIC_TYPE}))
 	if zone != nil {
 		q = q.Join(hosts, sqlchemy.Equals(devices.Field("host_id"), hosts.Field("id")))
 		q = q.Filter(sqlchemy.Equals(hosts.Field("zone_id"), zone.Id))
@@ -863,6 +863,8 @@ func getIsolatedDeviceInfo(ctx context.Context, userCred mcclient.TokenCredentia
 		subq := getRegionZoneSubq(region)
 		q = q.Join(hosts, sqlchemy.Equals(devices.Field("host_id"), hosts.Field("id")))
 		q = q.Filter(sqlchemy.In(hosts.Field("zone_id"), subq))
+	} else {
+		q = q.Join(hosts, sqlchemy.Equals(devices.Field("host_id"), hosts.Field("id")))
 	}
 	/*if len(domainId) > 0 {
 		subq := getDomainManagerSubq(domainId)
@@ -871,7 +873,7 @@ func getIsolatedDeviceInfo(ctx context.Context, userCred mcclient.TokenCredentia
 			sqlchemy.IsNullOrEmpty(hosts.Field("manager_id")),
 		))
 	}*/
-	q = q.GroupBy(devices.Field("model"), devices.Field("dev_type"), devices.Field("nvme_size_mb"))
+	q = q.GroupBy(hosts.Field("host_type"), devices.Field("model"), devices.Field("dev_type"), devices.Field("nvme_size_mb"))
 
 	rows, err := q.Rows()
 	if err != nil {
@@ -886,7 +888,8 @@ func getIsolatedDeviceInfo(ctx context.Context, userCred mcclient.TokenCredentia
 		var sizeMB int
 		var vdev bool
 		var hypervisor string
-		rows.Scan(&m, &t, &sizeMB)
+		var hostType string
+		rows.Scan(&hostType, &m, &t, &sizeMB)
 
 		if m == "" {
 			continue
@@ -900,6 +903,10 @@ func getIsolatedDeviceInfo(ctx context.Context, userCred mcclient.TokenCredentia
 			hypervisor = api.HYPERVISOR_KVM
 		}
 
+		if hostType == api.HOST_TYPE_ZETTAKIT {
+			hypervisor = api.HYPERVISOR_ZETTAKIT
+		}
+
 		gpus = append(gpus, PCIDevModelTypes{m, t, sizeMB, vdev, hypervisor})
 
 		if !utils.IsInStringArray(m, gpuModels) {
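
The capabilities hunk above prepends hosts.Field("host_type") to both the select list and the group-by, and grows rows.Scan by the same leading column. An illustrative database/sql fragment of why Scan order must track the select list (table and column names here are assumptions, not the sqlchemy API):

package example

import (
	"database/sql"
	"fmt"
	// a driver such as github.com/go-sql-driver/mysql would be imported for
	// its side effects in a real program: _ "github.com/go-sql-driver/mysql"
)

// scanDevices assumes db came from sql.Open. When host_type is prepended to
// the SELECT list, every Scan call must grow in the same position, or the
// remaining columns silently shift into the wrong variables.
func scanDevices(db *sql.DB) error {
	rows, err := db.Query(`SELECT h.host_type, d.model, d.dev_type, d.nvme_size_mb
		FROM isolated_devices d JOIN hosts h ON d.host_id = h.id`)
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var hostType, model, devType string
		var sizeMB int
		// host_type scans first because it is selected first
		if err := rows.Scan(&hostType, &model, &devType, &sizeMB); err != nil {
			return err
		}
		fmt.Println(hostType, model, devType, sizeMB)
	}
	return rows.Err()
}
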
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/cloudaccounts.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/cloudaccounts.go
index 787f4681..c1c8ecaf 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/cloudaccounts.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/cloudaccounts.go
@@ -145,7 +145,6 @@ type SCloudaccount struct {
 	Sysinfo jsonutils.JSONObject `get:"domain"`
 
 	// 品牌信息, 一般和provider相同
-	// example: DStack
 	Brand string `width:"64" charset:"utf8" nullable:"true" list:"domain" create:"optional"`
 
 	// 额外信息
@@ -2300,18 +2299,18 @@ func (manager *SCloudaccountManager) AutoSyncCloudaccountStatusTask(ctx context.
 	for i := range accounts {
 		if accounts[i].GetEnabled() && accounts[i].shouldProbeStatus() && accounts[i].CanSync() {
 			id, name, account := accounts[i].Id, accounts[i].Name, &accounts[i]
-			cloudaccountProbeMutex.Lock()
-			if _, ok := cloudaccountProbe[id]; ok {
-				cloudaccountProbeMutex.Unlock()
+			cloudaccountPendingSyncsMutex.Lock()
+			if _, ok := cloudaccountPendingSyncs[id]; ok {
+				cloudaccountPendingSyncsMutex.Unlock()
 				continue
 			}
-			cloudaccountProbe[id] = struct{}{}
-			cloudaccountProbeMutex.Unlock()
+			cloudaccountPendingSyncs[id] = struct{}{}
+			cloudaccountPendingSyncsMutex.Unlock()
 			RunSyncCloudAccountTask(ctx, func() {
 				defer func() {
-					cloudaccountProbeMutex.Lock()
-					defer cloudaccountProbeMutex.Unlock()
-					delete(cloudaccountProbe, id)
+					cloudaccountPendingSyncsMutex.Lock()
+					defer cloudaccountPendingSyncsMutex.Unlock()
+					delete(cloudaccountPendingSyncs, id)
 				}()
 				log.Debugf("syncAccountStatus %s %s", id, name)
 				idctx := context.WithValue(ctx, "id", id)
@@ -2495,9 +2494,6 @@ func (account *SCloudaccount) syncAccountStatus(ctx context.Context, userCred mc
 var (
 	cloudaccountPendingSyncs      = map[string]struct{}{}
 	cloudaccountPendingSyncsMutex = &sync.Mutex{}
-
-	cloudaccountProbe      = map[string]struct{}{}
-	cloudaccountProbeMutex = &sync.Mutex{}
 )
 
 func (account *SCloudaccount) SubmitSyncAccountTask(ctx context.Context, userCred mcclient.TokenCredential, waitChan chan error) {
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/cloudimages.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/cloudimages.go
index 0624ec3e..3ca1f747 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/cloudimages.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/cloudimages.go
@@ -27,6 +27,7 @@ import (
 	"yunion.io/x/onecloud/pkg/util/yunionmeta"
 )
 
+// +onecloud:swagger-gen-ignore
 type SCloudimageManager struct {
 	db.SStandaloneResourceBaseManager
 	db.SExternalizedResourceBaseManager
@@ -46,6 +47,7 @@ func init() {
 	CloudimageManager.SetVirtualObject(CloudimageManager)
 }
 
+// +onecloud:swagger-gen-ignore
 type SCloudimage struct {
 	db.SStandaloneResourceBase
 	db.SExternalizedResourceBase
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/cloudprovider_quotas.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/cloudprovider_quotas.go
index 8d90b79c..31c0d83b 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/cloudprovider_quotas.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/cloudprovider_quotas.go
@@ -54,6 +54,7 @@ func init() {
 	CloudproviderQuotaManager.SetVirtualObject(CloudproviderQuotaManager)
 }
 
+// +onecloud:swagger-gen-ignore
 type SCloudproviderQuota struct {
 	db.SStandaloneResourceBase
 	db.SExternalizedResourceBase
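
The cloudaccounts hunk above collapses the probe bookkeeping into the existing cloudaccountPendingSyncs set. The mutex-guarded dedupe pattern in isolation, as a runnable sketch:

package main

import (
	"fmt"
	"sync"
	"time"
)

// Standalone sketch of the pattern: a mutex-guarded set ensures at most one
// sync per account id is in flight; the slot is freed when the work ends.
var (
	pending      = map[string]struct{}{}
	pendingMutex = &sync.Mutex{}
)

func trySync(id string, work func()) bool {
	pendingMutex.Lock()
	if _, ok := pending[id]; ok {
		pendingMutex.Unlock()
		return false // a sync for this id is already running
	}
	pending[id] = struct{}{}
	pendingMutex.Unlock()

	go func() {
		defer func() {
			pendingMutex.Lock()
			defer pendingMutex.Unlock()
			delete(pending, id)
		}()
		work()
	}()
	return true
}

func main() {
	trySync("account-1", func() { time.Sleep(10 * time.Millisecond) })
	fmt.Println(trySync("account-1", func() {})) // false: deduplicated
	time.Sleep(20 * time.Millisecond)
	fmt.Println(trySync("account-1", func() {})) // true: slot freed
}
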
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/cloudprovidercapacities.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/cloudprovidercapacities.go
index 1135ef9e..0eb7bb6a 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/cloudprovidercapacities.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/cloudprovidercapacities.go
@@ -45,6 +45,7 @@ func init() {
 	CloudproviderCapabilityManager.SetVirtualObject(CloudproviderCapabilityManager)
 }
 
+// +onecloud:swagger-gen-ignore
 type SCloudproviderCapability struct {
 	db.SResourceBase
 
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/cloudsync.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/cloudsync.go
index 88b2fd7b..c96d87a1 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/cloudsync.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/cloudsync.go
@@ -104,6 +104,9 @@ func syncRegionQuotas(ctx context.Context, userCred mcclient.TokenCredential, sy
 		return remoteRegion.GetICloudQuotas()
 	}()
 	if err != nil {
+		if errors.Cause(err) == cloudprovider.ErrNotImplemented || errors.Cause(err) == cloudprovider.ErrNotSupported {
+			return nil
+		}
 		msg := fmt.Sprintf("GetICloudQuotas for region %s failed %s", remoteRegion.GetName(), err)
 		log.Errorf(msg)
 		return err
@@ -209,6 +212,9 @@ func syncRegionEips(
 		return remoteRegion.GetIEips()
 	}()
 	if err != nil {
+		if errors.Cause(err) == cloudprovider.ErrNotImplemented || errors.Cause(err) == cloudprovider.ErrNotSupported {
+			return
+		}
 		msg := fmt.Sprintf("GetIEips for region %s failed %s", remoteRegion.GetName(), err)
 		log.Errorf(msg)
 		return
@@ -236,6 +242,9 @@ func syncRegionBuckets(ctx context.Context, userCred mcclient.TokenCredential, s
 		return remoteRegion.GetIBuckets()
 	}()
 	if err != nil {
+		if errors.Cause(err) == cloudprovider.ErrNotImplemented || errors.Cause(err) == cloudprovider.ErrNotSupported {
+			return
+		}
 		msg := fmt.Sprintf("GetIBuckets for region %s failed %s", remoteRegion.GetName(), err)
 		log.Errorf(msg)
 		return
@@ -272,6 +281,9 @@ func syncRegionVPCs(
 		return remoteRegion.GetIVpcs()
 	}()
 	if err != nil {
+		if errors.Cause(err) == cloudprovider.ErrNotImplemented || errors.Cause(err) == cloudprovider.ErrNotSupported {
+			return
+		}
 		msg := fmt.Sprintf("GetVpcs for region %s failed %s", remoteRegion.GetName(), err)
 		log.Errorf(msg)
 		return
@@ -977,6 +989,7 @@ func syncZoneHosts(
 			syncHostNics(ctx, userCred, syncResults, provider, &localHosts[i], remoteHosts[i])
 			// syncHostWires(ctx, userCred, syncResults, provider, &localHosts[i], remoteHosts[i])
 			syncHostVMs(ctx, userCred, syncResults, provider, driver, &localHosts[i], remoteHosts[i], syncRange)
+			syncHostIsolateDevices(ctx, userCred, syncResults, provider, driver, &localHosts[i], remoteHosts[i], syncRange)
 		}()
 	}
 	return newCachePairs
@@ -1096,13 +1109,40 @@ func syncHostVMs(ctx context.Context, userCred mcclient.TokenCredential, syncRes
 				return
 			}
 
-			syncVMPeripherals(ctx, userCred, syncVMPairs[i].Local, syncVMPairs[i].Remote, localHost, provider, driver)
+			SyncVMPeripherals(ctx, userCred, syncVMPairs[i].Local, syncVMPairs[i].Remote, localHost, provider, driver)
 		}()
 	}
 }
 
-func syncVMPeripherals(
+func syncHostIsolateDevices(ctx context.Context, userCred mcclient.TokenCredential, syncResults SSyncResultSet, provider *SCloudprovider, driver cloudprovider.ICloudProvider, localHost *SHost, remoteHost cloudprovider.ICloudHost, syncRange *SSyncRange) {
+	devs, err := func() ([]cloudprovider.IsolateDevice, error) {
+		defer syncResults.AddRequestCost(HostManager)()
+		return remoteHost.GetIsolateDevices()
+	}()
+	if err != nil {
+		if errors.Cause(err) == cloudprovider.ErrNotImplemented || errors.Cause(err) == cloudprovider.ErrNotSupported {
+			return
+		}
+		msg := fmt.Sprintf("GetIsolateDevices for host %s failed %s", remoteHost.GetName(), err)
+		log.Errorf(msg)
+		return
+	}
+
+	result := func() compare.SyncResult {
+		defer syncResults.AddSqlCost(HostManager)()
+		return localHost.SyncHostIsolateDevices(ctx, userCred, driver, devs, provider.GetOwnerId(), syncRange.Xor)
+	}()
+
+	syncResults.Add(HostManager, result)
+
+	msg := result.Result()
+	notes := fmt.Sprintf("SyncHostIsolateDevices for host %s result: %s", localHost.Name, msg)
+	log.Infof(notes)
+	provider.SyncError(result, notes, userCred)
+}
+
+func SyncVMPeripherals(
 	ctx context.Context,
 	userCred mcclient.TokenCredential,
 	local *SGuest,
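
The guards added throughout cloudsync.go all test errors.Cause against cloudprovider.ErrNotImplemented / ErrNotSupported before logging, so providers that simply lack a resource type are skipped instead of reported as failures. A standalone sketch of the same guard, with stdlib sentinels standing in for the cloudprovider ones:

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for cloudprovider.ErrNotImplemented / ErrNotSupported; the real
// sentinels live in the cloudmux cloudprovider package.
var (
	errNotImplemented = errors.New("not implemented")
	errNotSupported   = errors.New("not supported")
)

// isUnsupported centralizes the check the patch repeats before each sync.
func isUnsupported(err error) bool {
	return errors.Is(err, errNotImplemented) || errors.Is(err, errNotSupported)
}

func syncQuotas(fetch func() ([]string, error)) error {
	quotas, err := fetch()
	if err != nil {
		if isUnsupported(err) {
			return nil // silently skip providers without this capability
		}
		return fmt.Errorf("fetch quotas: %w", err)
	}
	fmt.Println("syncing", len(quotas), "quotas")
	return nil
}

func main() {
	_ = syncQuotas(func() ([]string, error) { return nil, errNotSupported })
	_ = syncQuotas(func() ([]string, error) { return []string{"cpu"}, nil })
}
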
@@ -1137,6 +1177,10 @@ func syncVMPeripherals(
 	if result.IsError() {
 		log.Errorf("syncVMInstanceSnapshots error %v", result.AllError())
 	}
+	err = syncVMIsolateDevices(ctx, userCred, provider, local, remote)
+	if err != nil && errors.Cause(err) != cloudprovider.ErrNotSupported && errors.Cause(err) != cloudprovider.ErrNotImplemented {
+		logclient.AddSimpleActionLog(local, logclient.ACT_CLOUD_SYNC, errors.Wrapf(err, "syncVMIsolateDevice"), userCred, false)
+	}
 }
 
 func syncVMNics(
@@ -1206,6 +1250,52 @@ func syncVMSecgroups(ctx context.Context, userCred mcclient.TokenCredential, pro
 	return localVM.SyncVMSecgroups(ctx, userCred, secgroupIds)
 }
 
+func syncVMIsolateDevices(ctx context.Context, userCred mcclient.TokenCredential, provider *SCloudprovider, localVM *SGuest, remoteVM cloudprovider.ICloudVM) error {
+	devIds, err := remoteVM.GetIsolateDeviceIds()
+	if err != nil {
+		return errors.Wrap(err, "remoteVM.GetIsolateDeviceIds")
+	}
+	return localVM.SyncVMIsolateDevices(ctx, userCred, devIds)
+}
+
+func (self *SGuest) SyncVMIsolateDevices(ctx context.Context, userCred mcclient.TokenCredential, externalIds []string) error {
+	host, err := self.GetHost()
+	if err != nil {
+		return err
+	}
+	devs, err := self.GetIsolatedDevices()
+	if err != nil {
+		return errors.Wrapf(err, "GetIsolatedDevices")
+	}
+	for i := range devs {
+		if !utils.IsInStringArray(devs[i].ExternalId, externalIds) {
+			_, err = db.Update(&devs[i], func() error {
+				devs[i].GuestId = ""
+				return nil
+			})
+			if err != nil {
+				return err
+			}
+		}
+	}
+	sq := HostManager.Query("id").Equals("manager_id", host.ManagerId).SubQuery()
+	q := IsolatedDeviceManager.Query().In("host_id", sq).In("external_id", externalIds)
+	err = db.FetchModelObjects(IsolatedDeviceManager, q, &devs)
+	if err != nil {
+		return err
+	}
+	for i := range devs {
+		_, err = db.Update(&devs[i], func() error {
+			devs[i].GuestId = self.Id
+			return nil
+		})
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 func syncSkusFromPrivateCloud(
 	ctx context.Context,
 	userCred mcclient.TokenCredential,
@@ -1981,6 +2071,9 @@ func syncRegionSnapshots(
 		return remoteRegion.GetISnapshots()
 	}()
 	if err != nil {
+		if errors.Cause(err) == cloudprovider.ErrNotImplemented || errors.Cause(err) == cloudprovider.ErrNotSupported {
+			return
+		}
 		msg := fmt.Sprintf("GetISnapshots for region %s failed %s", remoteRegion.GetName(), err)
 		log.Errorf(msg)
 		return
@@ -2016,6 +2109,9 @@ func syncRegionSnapshotPolicies(
 		return remoteRegion.GetISnapshotPolicies()
 	}()
 	if err != nil {
+		if errors.Cause(err) == cloudprovider.ErrNotImplemented || errors.Cause(err) == cloudprovider.ErrNotSupported {
+			return
+		}
 		log.Errorf("GetISnapshotPolicies for region %s failed %s", remoteRegion.GetName(), err)
 		return
 	}
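
SyncVMIsolateDevices above reconciles local isolated devices against the external ids the cloud reports: stale attachments are cleared, matching devices are (re)attached. The reconcile step in miniature, with simplified types:

package main

import "fmt"

// Self-contained sketch: detach local devices whose external id is no longer
// reported for the guest, then attach every device the cloud still reports.
type device struct {
	ExternalId string
	GuestId    string
}

func reconcile(local []*device, remoteIds map[string]bool, guestId string) {
	for _, d := range local {
		if remoteIds[d.ExternalId] {
			d.GuestId = guestId // still attached remotely
		} else {
			d.GuestId = "" // detached remotely, clear locally
		}
	}
}

func main() {
	devs := []*device{{ExternalId: "gpu-1", GuestId: "vm-a"}, {ExternalId: "gpu-2", GuestId: "vm-a"}}
	reconcile(devs, map[string]bool{"gpu-2": true}, "vm-a")
	for _, d := range devs {
		fmt.Printf("%s -> %q\n", d.ExternalId, d.GuestId)
	}
}
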
@@ -2048,6 +2144,9 @@ func syncRegionNetworkInterfaces(
 		return remoteRegion.GetINetworkInterfaces()
 	}()
 	if err != nil {
+		if errors.Cause(err) == cloudprovider.ErrNotImplemented || errors.Cause(err) == cloudprovider.ErrNotSupported {
+			return
+		}
 		msg := fmt.Sprintf("GetINetworkInterfaces for region %s failed %s", remoteRegion.GetName(), err)
 		log.Errorf(msg)
 		return
@@ -2084,6 +2183,9 @@ func syncRegionNetworkInterfaces(
 func syncInterfaceAddresses(ctx context.Context, userCred mcclient.TokenCredential, localInterface *SNetworkInterface, remoteInterface cloudprovider.ICloudNetworkInterface) {
 	addresses, err := remoteInterface.GetICloudInterfaceAddresses()
 	if err != nil {
+		if errors.Cause(err) == cloudprovider.ErrNotImplemented || errors.Cause(err) == cloudprovider.ErrNotSupported {
+			return
+		}
 		msg := fmt.Sprintf("GetICloudInterfaceAddresses for networkinterface %s failed %s", remoteInterface.GetName(), err)
 		log.Errorf(msg)
 		return
@@ -2811,6 +2913,9 @@ func syncGlobalVpcs(ctx context.Context, userCred mcclient.TokenCredential, sync
 		}
 		secgroups, err := remoteVpcs[i].GetISecurityGroups()
 		if err != nil {
+			if errors.Cause(err) == cloudprovider.ErrNotImplemented || errors.Cause(err) == cloudprovider.ErrNotSupported {
+				continue
+			}
 			log.Errorf("GetISecurityGroup for global vpc %s error: %v", localVpcs[i].Name, err)
 			continue
 		}
@@ -2849,6 +2954,9 @@ func syncTablestore(
 		return remoteRegion.GetICloudTablestores()
 	}()
 	if err != nil {
+		if errors.Cause(err) == cloudprovider.ErrNotImplemented || errors.Cause(err) == cloudprovider.ErrNotSupported {
+			return nil
+		}
 		msg := fmt.Sprintf("GetICloudTablestores for region %s failed %s", remoteRegion.GetName(), err)
 		log.Errorf(msg)
 		return err
@@ -2879,6 +2987,9 @@ func syncModelartsPools(
 ) error {
 	ipools, err := remoteRegion.GetIModelartsPools()
 	if err != nil {
+		if errors.Cause(err) == cloudprovider.ErrNotImplemented || errors.Cause(err) == cloudprovider.ErrNotSupported {
+			return nil
+		}
 		msg := fmt.Sprintf("GetIModelartsPools for provider %s failed %s", err, ipools)
 		log.Errorf(msg)
 		return err
@@ -2901,6 +3012,9 @@ func syncModelartsPoolSkus(
 ) error {
 	ipools, err := remoteRegion.GetIModelartsPoolSku()
 	if err != nil {
+		if errors.Cause(err) == cloudprovider.ErrNotImplemented || errors.Cause(err) == cloudprovider.ErrNotSupported {
+			return nil
+		}
 		msg := fmt.Sprintf("GetIModelartsPoolSku for provider %s failed %s", err, ipools)
 		log.Errorf(msg)
 		return err
@@ -2923,6 +3037,9 @@ func syncMiscResources(
 ) error {
 	exts, err := remoteRegion.GetIMiscResources()
 	if err != nil {
+		if errors.Cause(err) == cloudprovider.ErrNotImplemented || errors.Cause(err) == cloudprovider.ErrNotSupported {
+			return nil
+		}
 		msg := fmt.Sprintf("GetIMiscResources for provider %s failed %v", provider.Name, err)
 		log.Errorf(msg)
 		return err
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/containers.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/containers.go
index 0c167713..90b3ef6a 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/containers.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/containers.go
@@ -16,6 +16,7 @@ package models
 
 import (
 	"context"
+	"encoding/base64"
 	"fmt"
 	"path/filepath"
 	"strings"
@@ -31,13 +32,16 @@ import (
 	"yunion.io/x/onecloud/pkg/apis"
 	api "yunion.io/x/onecloud/pkg/apis/compute"
 	hostapi "yunion.io/x/onecloud/pkg/apis/host"
+	identityapi "yunion.io/x/onecloud/pkg/apis/identity"
 	imageapi "yunion.io/x/onecloud/pkg/apis/image"
 	"yunion.io/x/onecloud/pkg/cloudcommon/consts"
 	"yunion.io/x/onecloud/pkg/cloudcommon/db"
 	"yunion.io/x/onecloud/pkg/cloudcommon/db/taskman"
+	"yunion.io/x/onecloud/pkg/compute/options"
 	"yunion.io/x/onecloud/pkg/httperrors"
 	"yunion.io/x/onecloud/pkg/mcclient"
 	"yunion.io/x/onecloud/pkg/mcclient/auth"
+	identitymod "yunion.io/x/onecloud/pkg/mcclient/modules/identity"
 	kubemod "yunion.io/x/onecloud/pkg/mcclient/modules/k8s"
 )
"yunion.io/x/onecloud/pkg/mcclient" "yunion.io/x/onecloud/pkg/mcclient/auth" + identitymod "yunion.io/x/onecloud/pkg/mcclient/modules/identity" kubemod "yunion.io/x/onecloud/pkg/mcclient/modules/k8s" ) @@ -74,7 +78,7 @@ type SContainer struct { Spec *api.ContainerSpec `length:"long" create:"required" list:"user" update:"user"` // 启动时间 - StartedAt time.Time `nullable:"false" created_at:"false" index:"true" get:"user" list:"user" json:"started_at"` + StartedAt time.Time `nullable:"true" created_at:"false" index:"true" get:"user" list:"user" json:"started_at"` // 上次退出时间 LastFinishedAt time.Time `nullable:"true" created_at:"false" index:"true" get:"user" list:"user" json:"last_finished_at"` @@ -145,22 +149,27 @@ func (m *SContainerManager) ValidateCreateData(ctx context.Context, userCred mcc } pod := obj.(*SGuest) input.GuestId = pod.GetId() - if err := m.ValidateSpec(ctx, userCred, &input.Spec, pod); err != nil { + if err := m.ValidateSpec(ctx, userCred, &input.Spec, pod, nil); err != nil { return nil, errors.Wrap(err, "validate spec") } return input, nil } -func (m *SContainerManager) ValidateSpec(ctx context.Context, userCred mcclient.TokenCredential, spec *api.ContainerSpec, pod *SGuest) error { +func (m *SContainerManager) ValidateSpec(ctx context.Context, userCred mcclient.TokenCredential, spec *api.ContainerSpec, pod *SGuest, ctr *SContainer) error { if spec.ImagePullPolicy == "" { spec.ImagePullPolicy = apis.ImagePullPolicyIfNotPresent } if !sets.NewString(apis.ImagePullPolicyAlways, apis.ImagePullPolicyIfNotPresent).Has(string(spec.ImagePullPolicy)) { return httperrors.NewInputParameterError("invalid image_pull_policy %s", spec.ImagePullPolicy) } + if spec.ImageCredentialId != "" { + if _, err := m.GetImageCredential(ctx, userCred, spec.ImageCredentialId); err != nil { + return errors.Wrapf(err, "get image credential by id: %s", spec.ImageCredentialId) + } + } if pod != nil { - if err := m.ValidateSpecVolumeMounts(ctx, userCred, pod, spec); err != nil { + if err := m.ValidateSpecVolumeMounts(ctx, userCred, pod, spec, ctr); err != nil { return errors.Wrap(err, "ValidateSpecVolumeMounts") } for idx, dev := range spec.Devices { @@ -213,12 +222,32 @@ func (m *SContainerManager) ValidateSpecDevice(ctx context.Context, userCred mcc return drv.ValidateCreateData(ctx, userCred, pod, dev) } -func (m *SContainerManager) ValidateSpecVolumeMounts(ctx context.Context, userCred mcclient.TokenCredential, pod *SGuest, spec *api.ContainerSpec) error { +func (m *SContainerManager) ValidateSpecVolumeMounts(ctx context.Context, userCred mcclient.TokenCredential, pod *SGuest, spec *api.ContainerSpec, ctr *SContainer) error { relation, err := m.GetVolumeMountRelations(pod, spec) if err != nil { return errors.Wrap(err, "GetVolumeMountRelations") } + + curCtrs, _ := m.GetContainersByPod(pod.GetId()) + volUniqNames := sets.NewString() + for idx := range curCtrs { + if ctr != nil && ctr.GetId() == curCtrs[idx].GetId() { + continue + } + for _, vol := range curCtrs[idx].Spec.VolumeMounts { + if vol.UniqueName != "" { + volUniqNames.Insert(vol.UniqueName) + } + } + } for idx, vm := range spec.VolumeMounts { + if vm.UniqueName != "" { + if volUniqNames.Has(vm.UniqueName) { + return httperrors.NewDuplicateNameError("volume_mount unique_name %s", fmt.Sprintf("%s: %s, index: %d", vm.UniqueName, jsonutils.Marshal(vm), idx)) + } else { + volUniqNames.Insert(vm.UniqueName) + } + } newVm, err := m.ValidateSpecVolumeMount(ctx, userCred, pod, vm) if err != nil { return errors.Wrapf(err, "validate volume mount %s", 
@@ -343,6 +372,35 @@ func (m *SContainerManager) validateSpecProbeHandler(probe apis.ContainerProbeHa
 	return nil
 }
 
+func (m *SContainerManager) startBatchTask(ctx context.Context, userCred mcclient.TokenCredential, taskName string, ctrs []SContainer, taskData *jsonutils.JSONDict, parentTaskId string) error {
+	ctrPtrs := make([]db.IStandaloneModel, len(ctrs))
+	for i := range ctrs {
+		ctrPtrs[i] = &ctrs[i]
+	}
+	task, err := taskman.TaskManager.NewParallelTask(ctx, taskName, ctrPtrs, userCred, taskData, parentTaskId, "")
+	if err != nil {
+		return errors.Wrapf(err, "NewParallelTask %s", taskName)
+	}
+	return task.ScheduleRun(nil)
+}
+
+func (m *SContainerManager) StartBatchStartTask(ctx context.Context, userCred mcclient.TokenCredential, ctrs []SContainer, parentTaskId string) error {
+	return m.startBatchTask(ctx, userCred, "ContainerBatchStartTask", ctrs, nil, parentTaskId)
+}
+
+func (m *SContainerManager) StartBatchStopTask(ctx context.Context, userCred mcclient.TokenCredential, ctrs []SContainer, timeout int, parentTaskId string) error {
+	params := make([]api.ContainerStopInput, len(ctrs))
+	for i := range ctrs {
+		params[i] = api.ContainerStopInput{
+			Timeout: timeout,
+		}
+	}
+	taskParams := jsonutils.NewDict()
+	taskParams.Add(jsonutils.Marshal(params), "params")
+
+	return m.startBatchTask(ctx, userCred, "ContainerBatchStopTask", ctrs, taskParams, parentTaskId)
+}
+
 func (c *SContainer) PostCreate(ctx context.Context, userCred mcclient.TokenCredential, ownerId mcclient.IIdentityProvider, query jsonutils.JSONObject, data jsonutils.JSONObject) {
 	c.SVirtualResourceBase.PostCreate(ctx, userCred, ownerId, query, data)
 	if !jsonutils.QueryBoolean(data, "skip_task", false) {
@@ -371,7 +429,7 @@ func (c *SContainer) ValidateUpdateData(ctx context.Context, userCred mcclient.T
 	}
 	input.VirtualResourceBaseUpdateInput = baseInput
 
-	if err := GetContainerManager().ValidateSpec(ctx, userCred, &input.Spec, c.GetPod()); err != nil {
+	if err := GetContainerManager().ValidateSpec(ctx, userCred, &input.Spec, c.GetPod(), c); err != nil {
 		return nil, errors.Wrap(err, "validate spec")
 	}
 
@@ -398,12 +456,14 @@ func (vm *ContainerVolumeMountRelation) toHostDiskMount(disk *apis.ContainerVolu
 		return nil, errors.Errorf("fetch disk by id %s", disk.Id)
 	}
 	ret := &hostapi.ContainerVolumeMountDisk{
-		Index:           disk.Index,
-		Id:              disk.Id,
-		TemplateId:      diskObj.TemplateId,
-		SubDirectory:    disk.SubDirectory,
-		StorageSizeFile: disk.StorageSizeFile,
-		Overlay:         disk.Overlay,
+		Index:                disk.Index,
+		Id:                   disk.Id,
+		TemplateId:           diskObj.TemplateId,
+		SubDirectory:         disk.SubDirectory,
+		StorageSizeFile:      disk.StorageSizeFile,
+		Overlay:              disk.Overlay,
+		CaseInsensitivePaths: disk.CaseInsensitivePaths,
+		PostOverlay:          disk.PostOverlay,
 	}
 	return ret, nil
 }
@@ -438,6 +498,7 @@ func (vm *ContainerVolumeMountRelation) toCephFSMount(fs *apis.ContainerVolumeMo
 func (vm *ContainerVolumeMountRelation) ToHostMount(ctx context.Context, userCred mcclient.TokenCredential) (*hostapi.ContainerVolumeMount, error) {
 	ret := &hostapi.ContainerVolumeMount{
+		UniqueName: vm.VolumeMount.UniqueName,
 		Type:       vm.VolumeMount.Type,
 		Disk:       nil,
 		CephFS:     nil,
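
startBatchTask above fans a single parallel task out over many containers; for batch stop, taskData carries one ContainerStopInput per container, aligned by index. A generic fan-out analogue for illustration (NewParallelTask itself is the taskman API shown in the hunk):

package main

import (
	"fmt"
	"sync"
)

// Generic analogue of the batch start/stop fan-out: one unit of work per
// object, all scheduled together; a WaitGroup stands in for the task engine.
func runBatch(ids []string, stop bool, timeout int) {
	var wg sync.WaitGroup
	for _, id := range ids {
		wg.Add(1)
		go func(id string) {
			defer wg.Done()
			if stop {
				fmt.Printf("stopping container %s (timeout %ds)\n", id, timeout)
			} else {
				fmt.Printf("starting container %s\n", id)
			}
		}(id)
	}
	wg.Wait()
}

func main() {
	runBatch([]string{"ctr-1", "ctr-2"}, true, 30)
}
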
@@ -500,8 +561,13 @@ func (c *SContainer) StartStartTask(ctx context.Context, userCred mcclient.Token
 }
 
 func (c *SContainer) PerformStop(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data *api.ContainerStopInput) (jsonutils.JSONObject, error) {
-	if !sets.NewString(api.CONTAINER_STATUS_RUNNING, api.CONTAINER_STATUS_STOP_FAILED).Has(c.Status) {
-		return nil, httperrors.NewInvalidStatusError("Can't stop container in status %s", c.Status)
+	if !data.Force {
+		if !sets.NewString(
+			api.CONTAINER_STATUS_RUNNING,
+			api.CONTAINER_STATUS_PROBING,
+			api.CONTAINER_STATUS_STOP_FAILED).Has(c.Status) {
+			return nil, httperrors.NewInvalidStatusError("Can't stop container in status %s", c.Status)
+		}
 	}
 	return nil, c.StartStopTask(ctx, userCred, data, "")
 }
@@ -529,18 +595,81 @@ func (c *SContainer) StartSyncStatusTask(ctx context.Context, userCred mcclient.
 }
 
 func (c *SContainer) CustomizeDelete(ctx context.Context, userCred mcclient.TokenCredential, query, data jsonutils.JSONObject) error {
-	return c.StartDeleteTask(ctx, userCred, "")
+	return c.StartDeleteTask(ctx, userCred, "", jsonutils.QueryBoolean(data, "purge", false))
 }
 
-func (c *SContainer) StartDeleteTask(ctx context.Context, userCred mcclient.TokenCredential, parentTaskId string) error {
+func (c *SContainer) StartDeleteTask(ctx context.Context, userCred mcclient.TokenCredential, parentTaskId string, purge bool) error {
 	c.SetStatus(ctx, userCred, api.CONTAINER_STATUS_DELETING, "")
-	task, err := taskman.TaskManager.NewTask(ctx, "ContainerDeleteTask", c, userCred, nil, parentTaskId, "", nil)
+	task, err := taskman.TaskManager.NewTask(ctx, "ContainerDeleteTask", c, userCred, jsonutils.Marshal(map[string]interface{}{
+		"purge": purge,
+	}).(*jsonutils.JSONDict), parentTaskId, "", nil)
 	if err != nil {
 		return errors.Wrap(err, "NewTask")
 	}
 	return task.ScheduleRun(nil)
 }
 
+func (m *SContainerManager) GetImageCredential(ctx context.Context, userCred mcclient.TokenCredential, id string) (*apis.ContainerPullImageAuthConfig, error) {
+	s := auth.GetSession(ctx, userCred, options.Options.Region)
+	ret, err := identitymod.Credentials.GetById(s, id, nil)
+	if err != nil {
+		if errors.Cause(err) == errors.ErrNotFound || strings.Contains(err.Error(), "NotFound") {
+			ret, err = identitymod.Credentials.GetByName(s, id, nil)
+			if err != nil {
+				return nil, errors.Wrapf(err, "get credential by id or name of %s", id)
+			}
+		} else {
+			return nil, errors.Wrap(err, "get credentials by id")
+		}
+	}
+	credType, _ := ret.GetString("type")
+	if credType != identityapi.CONTAINER_IMAGE_TYPE {
+		return nil, httperrors.NewNotSupportedError("unsupported credential type %s", credType)
+	}
+	blobStr, err := ret.GetString("blob")
+	if err != nil {
+		return nil, errors.Wrap(err, "get blob")
+	}
+	obj, err := jsonutils.ParseString(blobStr)
+	if err != nil {
+		return nil, errors.Wrapf(err, "json parse string: %s", blobStr)
+	}
+	blob := new(identityapi.CredentialContainerImageBlob)
+	if err := obj.Unmarshal(blob); err != nil {
+		return nil, errors.Wrap(err, "unmarshal blob")
+	}
+	out := &apis.ContainerPullImageAuthConfig{
+		Username:      blob.Username,
+		Password:      blob.Password,
+		Auth:          blob.Auth,
+		ServerAddress: blob.ServerAddress,
+		IdentityToken: blob.IdentityToken,
+		RegistryToken: blob.RegistryToken,
+	}
+	return out, nil
+}
+
+func (c *SContainer) GetImageCredential(ctx context.Context, userCred mcclient.TokenCredential) (*apis.ContainerPullImageAuthConfig, error) {
+	if c.Spec.ImageCredentialId == "" {
+		return nil, errors.Wrap(errors.ErrEmpty, "image_credential_id is empty")
+	}
+	return GetContainerManager().GetImageCredential(ctx, userCred, c.Spec.ImageCredentialId)
+}
+
+func (c *SContainer) GetHostPullImageInput(ctx context.Context, userCred mcclient.TokenCredential) (*hostapi.ContainerPullImageInput, error) {
+	input := &hostapi.ContainerPullImageInput{
+		Image:      c.Spec.Image,
+		PullPolicy: c.Spec.ImagePullPolicy,
+	}
+	if c.Spec.ImageCredentialId != "" {
+		cred, err := c.GetImageCredential(ctx, userCred)
+		if err != nil {
+			return nil, errors.Wrapf(err, "GetImageCredential %s", c.Spec.ImageCredentialId)
+		}
+		input.Auth = cred
+	}
+	return input, nil
+}
+
 func (c *SContainer) StartPullImageTask(ctx context.Context, userCred mcclient.TokenCredential, input *hostapi.ContainerPullImageInput, parentTaskId string) error {
 	c.SetStatus(ctx, userCred, api.CONTAINER_STATUS_PULLING_IMAGE, "")
 	task, err := taskman.TaskManager.NewTask(ctx, "ContainerPullImageTask", c, userCred, jsonutils.Marshal(input).(*jsonutils.JSONDict), parentTaskId, "", nil)
@@ -592,6 +721,11 @@ func (c *SContainer) ToHostContainerSpec(ctx context.Context, userCred mcclient.
 		VolumeMounts: mounts,
 		Devices:      ctrDevs,
 	}
+	pullInput, err := c.GetHostPullImageInput(ctx, userCred)
+	if err != nil {
+		return nil, errors.Wrap(err, "GetHostPullImageInput")
+	}
+	hSpec.ImageCredentialToken = base64.StdEncoding.EncodeToString([]byte(jsonutils.Marshal(pullInput.Auth).String()))
 	return hSpec, nil
 }
@@ -695,14 +829,15 @@ func (c *SContainer) GetDetailsExecInfo(ctx context.Context, userCred mcclient.T
 }
 
 func (c *SContainer) PerformExecSync(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, input *api.ContainerExecSyncInput) (jsonutils.JSONObject, error) {
-	if c.Status != api.CONTAINER_STATUS_RUNNING {
+	if !api.ContainerRunningStatus.Has(c.GetStatus()) {
 		return nil, httperrors.NewInvalidStatusError("Can't exec container in status %s", c.Status)
 	}
 	return c.GetPodDriver().RequestExecSyncContainer(ctx, userCred, c, input)
 }
 
-func (c *SContainer) PerformSetResourcesLimit(ctx context.Context, userCred mcclient.TokenCredential, _ jsonutils.JSONObject, limit *apis.ContainerResources) (jsonutils.JSONObject, error) {
-	if err := c.ValidateResourcesLimit(limit); err != nil {
+func (c *SContainer) PerformSetResourcesLimit(ctx context.Context, userCred mcclient.TokenCredential, _ jsonutils.JSONObject, input *api.ContainerResourcesSetInput) (jsonutils.JSONObject, error) {
+	limit := &input.ContainerResources
+	if err := c.ValidateResourcesLimit(limit, input.DisableLimitCheck); err != nil {
 		return nil, errors.Wrap(err, "ValidateResourcesLimit")
 	}
 	if _, err := db.Update(c, func() error {
@@ -717,7 +852,7 @@ func (c *SContainer) PerformSetResourcesLimit(ctx context.Context, userCred mccl
 	return c.GetPodDriver().RequestSetContainerResourcesLimit(ctx, userCred, c, limit)
 }
 
-func (c *SContainer) ValidateResourcesLimit(limit *apis.ContainerResources) error {
+func (c *SContainer) ValidateResourcesLimit(limit *apis.ContainerResources, disableLimitCheck bool) error {
 	if limit == nil {
 		return httperrors.NewInputParameterError("limit cannot be nil")
 	}
@@ -726,8 +861,10 @@ func (c *SContainer) ValidateResourcesLimit(limit *apis.ContainerResources) erro
 		if *limit.CpuCfsQuota <= 0 {
 			return httperrors.NewInputParameterError("invalid cpu_cfs_quota %f", *limit.CpuCfsQuota)
 		}
-		if *limit.CpuCfsQuota > float64(pod.VcpuCount) {
-			return httperrors.NewInputParameterError("cpu_cfs_quota %f can't large than %d", *limit.CpuCfsQuota, pod.VcpuCount)
+		if !disableLimitCheck {
+			if *limit.CpuCfsQuota > float64(pod.VcpuCount) {
+				return httperrors.NewInputParameterError("cpu_cfs_quota %f can't be larger than %d", *limit.CpuCfsQuota, pod.VcpuCount)
+			}
 		}
 	}
 	return nil
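
ToHostContainerSpec above serializes the pull auth to JSON and base64-encodes it into ImageCredentialToken for the host agent. The encode/decode round trip, with illustrative field names and encoding/json standing in for jsonutils:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// Illustrative auth payload; the real struct is apis.ContainerPullImageAuthConfig.
type authConfig struct {
	Username string `json:"username"`
	Password string `json:"password"`
}

func main() {
	// Controller side: marshal then base64-encode into the host spec.
	raw, _ := json.Marshal(authConfig{Username: "puller", Password: "secret"})
	token := base64.StdEncoding.EncodeToString(raw)
	fmt.Println("token:", token)

	// Host side: reverse both steps.
	decoded, err := base64.StdEncoding.DecodeString(token)
	if err != nil {
		panic(err)
	}
	var auth authConfig
	if err := json.Unmarshal(decoded, &auth); err != nil {
		panic(err)
	}
	fmt.Println("username:", auth.Username)
}
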
@@ -776,6 +913,10 @@ func (c *SContainer) PerformStatus(ctx context.Context, userCred mcclient.TokenC
 	if input.RestartCount > 0 {
 		c.RestartCount = input.RestartCount
 	}
+	if api.ContainerRunningStatus.Has(input.Status) {
+		// 当容器状态是运行时 restart_count 重新计数
+		c.RestartCount = 0
+	}
 	if input.StartedAt != nil {
 		c.StartedAt = *input.StartedAt
 	}
@@ -870,3 +1011,133 @@ func (c *SContainer) StartCommit(ctx context.Context, userCred mcclient.TokenCre
 	}
 	return task.ScheduleRun(nil)
 }
+
+func (c *SContainer) isPostOverlayExist(vm *apis.ContainerVolumeMount, ov *apis.ContainerVolumeMountDiskPostOverlay) bool {
+	for _, cov := range vm.Disk.PostOverlay {
+		if ov.ContainerTargetDir == cov.ContainerTargetDir {
+			return true
+		}
+	}
+	return false
+}
+
+func (c *SContainer) validateVolumeMountPostOverlayAction(action string, index int, ovs []*apis.ContainerVolumeMountDiskPostOverlay) (*apis.ContainerVolumeMount, error) {
+	if !api.ContainerExitedStatus.Has(c.Status) && !api.ContainerRunningStatus.Has(c.Status) {
+		return nil, httperrors.NewInvalidStatusError("can't %s post overlay on status %s", action, c.Status)
+	}
+	if index >= len(c.Spec.VolumeMounts) {
+		return nil, httperrors.NewInputParameterError("index %d out of volume_mount size %d", index, len(c.Spec.VolumeMounts))
+	}
+	vm := new(apis.ContainerVolumeMount)
+	curVm := c.Spec.VolumeMounts[index]
+	if err := jsonutils.Marshal(curVm).Unmarshal(vm); err != nil {
+		return nil, errors.Wrap(err, "use json unmarshal to new volume mount")
+	}
+	if vm.Type != apis.CONTAINER_VOLUME_MOUNT_TYPE_DISK {
+		return nil, httperrors.NewInputParameterError("invalid volume mount type %s", vm.Type)
+	}
+	return vm, nil
+}
+
+func (c *SContainer) GetVolumeMountCopy(index int) (*apis.ContainerVolumeMount, error) {
+	if index >= len(c.Spec.VolumeMounts) {
+		return nil, httperrors.NewInputParameterError("index %d out of volume_mount size %d", index, len(c.Spec.VolumeMounts))
+	}
+	vm := new(apis.ContainerVolumeMount)
+	curVm := c.Spec.VolumeMounts[index]
+	if err := jsonutils.Marshal(curVm).Unmarshal(vm); err != nil {
+		return nil, errors.Wrap(err, "use json unmarshal to new volume mount")
+	}
+	return vm, nil
+}
+
+func (c *SContainer) getPostOverlayVolumeMount(
+	index int,
+	updateF func(mount *apis.ContainerVolumeMount) (*apis.ContainerVolumeMount, error),
+) (*apis.ContainerVolumeMount, error) {
+	vm, err := c.GetVolumeMountCopy(index)
+	if err != nil {
+		return nil, err
+	}
+	return updateF(vm)
+}
+
+func (c *SContainer) GetAddPostOverlayVolumeMount(index int, ovs []*apis.ContainerVolumeMountDiskPostOverlay) (*apis.ContainerVolumeMount, error) {
+	return c.getPostOverlayVolumeMount(index, func(vm *apis.ContainerVolumeMount) (*apis.ContainerVolumeMount, error) {
+		if vm.Disk.PostOverlay == nil {
+			vm.Disk.PostOverlay = []*apis.ContainerVolumeMountDiskPostOverlay{}
+		}
+		vm.Disk.PostOverlay = append(vm.Disk.PostOverlay, ovs...)
+		return vm, nil
+	})
+}
+
+func (c *SContainer) GetRemovePostOverlayVolumeMount(index int, ovs []*apis.ContainerVolumeMountDiskPostOverlay) (*apis.ContainerVolumeMount, error) {
+	return c.getPostOverlayVolumeMount(index, func(vm *apis.ContainerVolumeMount) (*apis.ContainerVolumeMount, error) {
+		// remove post overlay
+		for _, ov := range ovs {
+			vm.Disk = c.removePostOverlay(vm.Disk, ov)
+		}
+		return vm, nil
+	})
+}
+
+func (c *SContainer) PerformAddVolumeMountPostOverlay(ctx context.Context, userCred mcclient.TokenCredential, _ jsonutils.JSONObject, input *api.ContainerVolumeMountAddPostOverlayInput) (jsonutils.JSONObject, error) {
+	vm, err := c.validateVolumeMountPostOverlayAction("add", input.Index, input.PostOverlay)
+	if err != nil {
+		return nil, err
+	}
+	for _, ov := range input.PostOverlay {
+		isExist := c.isPostOverlayExist(vm, ov)
+		if isExist {
+			return nil, httperrors.NewInputParameterError("post overlay %s already exists", ov.ContainerTargetDir)
+		}
+	}
+	return nil, c.StartAddVolumeMountPostOverlayTask(ctx, userCred, input, "")
+}
+
+func (c *SContainer) StartAddVolumeMountPostOverlayTask(ctx context.Context, userCred mcclient.TokenCredential, input *api.ContainerVolumeMountAddPostOverlayInput, parentTaskId string) error {
+	c.SetStatus(ctx, userCred, api.CONTAINER_STATUS_ADD_POST_OVERLY, "")
+	task, err := taskman.TaskManager.NewTask(ctx, "ContainerAddVolumeMountPostOverlayTask", c, userCred, jsonutils.Marshal(input).(*jsonutils.JSONDict), parentTaskId, "", nil)
+	if err != nil {
+		return errors.Wrap(err, "New ContainerAddVolumeMountPostOverlayTask")
+	}
+	return task.ScheduleRun(nil)
+}
+
+func (c *SContainer) StartRemoveVolumeMountPostOverlayTask(ctx context.Context, userCred mcclient.TokenCredential, input *api.ContainerVolumeMountRemovePostOverlayInput, parentTaskId string) error {
+	c.SetStatus(ctx, userCred, api.CONTAINER_STATUS_REMOVE_POST_OVERLY, "")
+	task, err := taskman.TaskManager.NewTask(ctx, "ContainerRemoveVolumeMountPostOverlayTask", c, userCred, jsonutils.Marshal(input).(*jsonutils.JSONDict), parentTaskId, "", nil)
+	if err != nil {
+		return errors.Wrap(err, "New ContainerRemoveVolumeMountPostOverlayTask")
+	}
+	return task.ScheduleRun(nil)
+}
+
+func (c *SContainer) PerformRemoveVolumeMountPostOverlay(ctx context.Context, userCred mcclient.TokenCredential, _ jsonutils.JSONObject, input *api.ContainerVolumeMountRemovePostOverlayInput) (jsonutils.JSONObject, error) {
+	vm, err := c.validateVolumeMountPostOverlayAction("remove", input.Index, input.PostOverlay)
+	if err != nil {
+		return nil, err
+	}
+	if len(vm.Disk.PostOverlay) == 0 {
+		return nil, httperrors.NewInputParameterError("no post overlay")
+	}
+	for _, ov := range input.PostOverlay {
+		isExist := c.isPostOverlayExist(vm, ov)
+		if !isExist {
+			return nil, httperrors.NewInputParameterError("post overlay %s does not exist", ov.ContainerTargetDir)
+		}
+	}
+	return nil, c.StartRemoveVolumeMountPostOverlayTask(ctx, userCred, input, "")
+}
+
+func (c *SContainer) removePostOverlay(vmd *apis.ContainerVolumeMountDisk, ov *apis.ContainerVolumeMountDiskPostOverlay) *apis.ContainerVolumeMountDisk {
+	curOvs := make([]*apis.ContainerVolumeMountDiskPostOverlay, 0, len(vmd.PostOverlay))
+	for _, cov := range vmd.PostOverlay {
+		if cov.ContainerTargetDir != ov.ContainerTargetDir {
+			curOvs = append(curOvs, cov)
+		}
+	}
+	vmd.PostOverlay = curOvs
+	return vmd
+}
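
GetVolumeMountCopy above deep-copies the stored spec entry via a marshal/unmarshal round trip so post-overlay edits never alias c.Spec, and removePostOverlay rebuilds the slice by filtering rather than splicing in place. The copy trick in isolation, with encoding/json standing in for jsonutils:

package main

import (
	"encoding/json"
	"fmt"
)

// Illustrative type; the real one is apis.ContainerVolumeMount.
type volumeMount struct {
	Type        string   `json:"type"`
	PostOverlay []string `json:"post_overlay,omitempty"`
}

// deepCopy round-trips through JSON so the copy shares no slices or
// pointers with the original.
func deepCopy(in *volumeMount) (*volumeMount, error) {
	buf, err := json.Marshal(in)
	if err != nil {
		return nil, err
	}
	out := new(volumeMount)
	if err := json.Unmarshal(buf, out); err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	orig := &volumeMount{Type: "disk"}
	cp, _ := deepCopy(orig)
	cp.PostOverlay = append(cp.PostOverlay, "/opt/data")
	fmt.Println(len(orig.PostOverlay), len(cp.PostOverlay)) // 0 1
}
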
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_accounts.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_accounts.go
index 5ebf77e6..02a12f36 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_accounts.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_accounts.go
@@ -38,6 +38,8 @@ import (
 	"yunion.io/x/onecloud/pkg/util/stringutils2"
 )
 
+// +onecloud:swagger-gen-model-singular=dbinstanceaccount
+// +onecloud:swagger-gen-model-plural=dbinstanceaccounts
 type SDBInstanceAccountManager struct {
 	db.SStatusStandaloneResourceBaseManager
 	SDBInstanceResourceBaseManager
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_backups.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_backups.go
index 8a3fbbe2..7ea0a527 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_backups.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_backups.go
@@ -37,6 +37,8 @@ import (
 	"yunion.io/x/onecloud/pkg/util/stringutils2"
 )
 
+// +onecloud:swagger-gen-model-singular=dbinstancebackup
+// +onecloud:swagger-gen-model-plural=dbinstancebackups
 type SDBInstanceBackupManager struct {
 	db.SVirtualResourceBaseManager
 	db.SExternalizedResourceBaseManager
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_databases.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_databases.go
index 123488b0..ea8a26d4 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_databases.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_databases.go
@@ -35,6 +35,8 @@ import (
 	"yunion.io/x/onecloud/pkg/util/stringutils2"
 )
 
+// +onecloud:swagger-gen-model-singular=dbinstancedatabase
+// +onecloud:swagger-gen-model-plural=dbinstancedatabases
 type SDBInstanceDatabaseManager struct {
 	db.SStatusStandaloneResourceBaseManager
 	db.SExternalizedResourceBaseManager
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_parameters.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_parameters.go
index 45c7878d..ed45ccb6 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_parameters.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_parameters.go
@@ -32,6 +32,8 @@ import (
 	"yunion.io/x/onecloud/pkg/util/stringutils2"
 )
 
+// +onecloud:swagger-gen-model-singular=dbinstanceparameter
+// +onecloud:swagger-gen-model-plural=dbinstanceparameters
 type SDBInstanceParameterManager struct {
 	db.SStandaloneResourceBaseManager
 	db.SExternalizedResourceBaseManager
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_privileges.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_privileges.go
index 0204c96f..002a2efc 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_privileges.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_privileges.go
@@ -31,6 +31,8 @@ import (
 	"yunion.io/x/onecloud/pkg/mcclient"
 )
 
+// +onecloud:swagger-gen-model-singular=dbinstanceprivilege
+// +onecloud:swagger-gen-model-plural=dbinstanceprivileges
 type SDBInstancePrivilegeManager struct {
 	db.SResourceBaseManager
 	db.SExternalizedResourceBaseManager
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_skus.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_skus.go
index 8e68cc66..4f5cced1 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_skus.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstance_skus.go
@@ -40,6 +40,8 @@ import (
 	"yunion.io/x/onecloud/pkg/util/yunionmeta"
 )
 
+// +onecloud:swagger-gen-model-singular=dbinstance_sku
+// +onecloud:swagger-gen-model-plural=dbinstance_skus
 type SDBInstanceSkuManager struct {
 	db.SEnabledStatusStandaloneResourceBaseManager
 	db.SExternalizedResourceBaseManager
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstances.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstances.go
index 6fdedf06..1a7a9047 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstances.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/dbinstances.go
@@ -53,6 +53,8 @@ import (
 	"yunion.io/x/onecloud/pkg/util/stringutils2"
 )
 
+// +onecloud:swagger-gen-model-singular=dbinstance
+// +onecloud:swagger-gen-model-plural=dbinstances
 type SDBInstanceManager struct {
 	db.SVirtualResourceBaseManager
 	db.SExternalizedResourceBaseManager
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/diskbackups.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/diskbackups.go
index fc5d9d85..e6ac1e84 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/diskbackups.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/diskbackups.go
@@ -36,6 +36,8 @@ import (
 	"yunion.io/x/onecloud/pkg/util/stringutils2"
 )
 
+// +onecloud:swagger-gen-model-singular=diskbackup
+// +onecloud:swagger-gen-model-plural=diskbackups
 type SDiskBackupManager struct {
 	db.SVirtualResourceBaseManager
 	SDiskResourceBaseManager
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/disks.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/disks.go
index 986ea8f0..f5658658 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/disks.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/disks.go
@@ -115,6 +115,8 @@ type SDisk struct {
 
 	// 文件系统
 	FsFormat string `width:"32" charset:"ascii" nullable:"true" list:"user" json:"fs_format"`
+	// 文件系统特性
+	FsFeatures *api.DiskFsFeatures `length:"medium" nullable:"true" list:"user" json:"fs_features"`
 
 	// 磁盘类型
 	// sys: 系统盘
@@ -414,7 +416,9 @@ func (self *SDisk) CustomizeCreate(ctx context.Context, userCred mcclient.TokenC
 	if err := data.Unmarshal(input); err != nil {
 		return errors.Wrap(err, "Unmarshal json")
 	}
-	self.fetchDiskInfo(input.DiskConfig)
+	if err := self.fetchDiskInfo(input.DiskConfig); err != nil {
+		return errors.Wrap(err, "fetch disk info")
+	}
 	err := self.SEncryptedResource.CustomizeCreate(ctx, userCred, ownerId, data, "disk-"+pinyinutils.Text2Pinyin(self.Name))
 	if err != nil {
 		return errors.Wrap(err, "SEncryptedResource.CustomizeCreate")
@@ -868,6 +872,9 @@ func (self *SDisk) StartAllocate(ctx context.Context, host *SHost, storage *SSto
 	}
 	if len(fsFormat) > 0 {
 		input.FsFormat = fsFormat
+		if self.FsFeatures != nil {
+			input.FsFeatures = self.FsFeatures
+		}
 	}
 	if self.IsEncrypted() {
 		var err error
@@ -2220,7 +2227,7 @@ func parseIsoInfo(ctx context.Context, userCred mcclient.TokenCredential, imageI
 	return image, nil
 }
 
-func (self *SDisk) fetchDiskInfo(diskConfig *api.DiskConfig) {
+func (self *SDisk) fetchDiskInfo(diskConfig *api.DiskConfig) error {
 	if len(diskConfig.SnapshotId) > 0 {
 		self.SnapshotId = diskConfig.SnapshotId
 		self.DiskType = diskConfig.DiskType
@@ -2241,6 +2248,12 @@ func (self *SDisk) fetchDiskInfo(diskConfig *api.DiskConfig) {
 	if len(diskConfig.Fs) > 0 {
 		self.FsFormat = diskConfig.Fs
 	}
+	if diskConfig.FsFeatures != nil {
+		self.FsFeatures = diskConfig.FsFeatures
+		if self.FsFeatures.Ext4 != nil && self.FsFormat != "ext4" {
+			return httperrors.NewInputParameterError("only ext4 fs can set fs_features.ext4, current is %q", self.FsFormat)
+		}
+	}
 	if self.FsFormat == "swap" {
 		self.DiskType = api.DISK_TYPE_SWAP
 		self.Nonpersistent = true
@@ -2260,6 +2273,7 @@ func (self *SDisk) fetchDiskInfo(diskConfig *api.DiskConfig) {
 	self.DiskFormat = diskConfig.Format
 	self.DiskSize = diskConfig.SizeMb
 	self.OsArch = diskConfig.OsArch
+	return nil
 }
 
 type DiskInfo struct {
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/dnsrecords.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/dnsrecords.go
index be9a1e85..eca50650 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/dnsrecords.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/dnsrecords.go
@@ -44,6 +44,8 @@ import (
 	"yunion.io/x/onecloud/pkg/util/stringutils2"
 )
 
+// +onecloud:swagger-gen-model-singular=dnsrecord
+// +onecloud:swagger-gen-model-plural=dnsrecords
 type SDnsRecordManager struct {
 	db.SEnabledStatusStandaloneResourceBaseManager
 	db.SExternalizedResourceBaseManager
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/dnsvpcs.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/dnsvpcs.go
index aadb6d11..06d09505 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/dnsvpcs.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/dnsvpcs.go
@@ -21,6 +21,7 @@ import (
 	"yunion.io/x/onecloud/pkg/mcclient"
 )
 
+// +onecloud:swagger-gen-ignore
 type SDnsZoneVpcManager struct {
 	db.SJointResourceBaseManager
 }
@@ -43,6 +44,7 @@ func init() {
 	})
 }
 
+// +onecloud:swagger-gen-ignore
 type SDnsZoneVpc struct {
 	db.SJointResourceBase
 	SDnsZoneResourceBase
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticcache_accounts.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticcache_accounts.go
index 221085d6..d16840bc 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticcache_accounts.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticcache_accounts.go
@@ -40,7 +40,8 @@ import (
 	"yunion.io/x/onecloud/pkg/util/stringutils2"
 )
 
-// SElasticcache.Account
+// +onecloud:swagger-gen-model-singular=elasticcacheaccount
+// +onecloud:swagger-gen-model-plural=elasticcacheaccounts
 type SElasticcacheAccountManager struct {
 	db.SStatusStandaloneResourceBaseManager
 	db.SExternalizedResourceBaseManager
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticcache_acls.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticcache_acls.go
index 7205da15..37ca788e 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticcache_acls.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticcache_acls.go
@@ -38,7 +38,8 @@ import (
 	"yunion.io/x/onecloud/pkg/util/stringutils2"
 )
 
-// SElasticcache.Acl
+// +onecloud:swagger-gen-model-singular=elasticcacheacl
+// +onecloud:swagger-gen-model-plural=elasticcacheacls
 type SElasticcacheAclManager struct {
 	db.SStandaloneResourceBaseManager
 	db.SExternalizedResourceBaseManager
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticcache_backups.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticcache_backups.go
index 75ae48e9..97a09fbd 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticcache_backups.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticcache_backups.go
@@ -37,7 +37,8 @@ import (
 	"yunion.io/x/onecloud/pkg/util/stringutils2"
 )
 
-// SElasticcache.Backup
+// +onecloud:swagger-gen-model-singular=elasticcachebackup
+// +onecloud:swagger-gen-model-plural=elasticcachebackups
 type SElasticcacheBackupManager struct {
 	db.SStatusStandaloneResourceBaseManager
 	db.SExternalizedResourceBaseManager
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticcache_parameters.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticcache_parameters.go
index 133cad83..f9568efe 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticcache_parameters.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticcache_parameters.go
@@ -34,7 +34,8 @@ import (
 	"yunion.io/x/onecloud/pkg/util/stringutils2"
 )
 
-// SElasticcache.Parameter
+// +onecloud:swagger-gen-model-singular=elasticcacheparameter
+// +onecloud:swagger-gen-model-plural=elasticcacheparameters
 type SElasticcacheParameterManager struct {
 	db.SStandaloneResourceBaseManager
 	db.SExternalizedResourceBaseManager
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticcache_skus.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticcache_skus.go
index a7eb4ab4..05e5fa78 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticcache_skus.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticcache_skus.go
@@ -42,6 +42,8 @@ import (
 	"yunion.io/x/onecloud/pkg/util/yunionmeta"
 )
 
+// +onecloud:swagger-gen-model-singular=elasticcachesku
+// +onecloud:swagger-gen-model-plural=elasticcacheskus
 type SElasticcacheSkuManager struct {
 	db.SStatusStandaloneResourceBaseManager
 	db.SExternalizedResourceBaseManager
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticips.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticips.go
index 65756a89..0ec56a36 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticips.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/elasticips.go
@@ -49,6 +49,8 @@ import (
 	"yunion.io/x/onecloud/pkg/util/stringutils2"
 )
 
+// +onecloud:swagger-gen-model-singular=eip
+// +onecloud:swagger-gen-model-plural=eips
 type SElasticipManager struct {
 	db.SVirtualResourceBaseManager
 	db.SExternalizedResourceBaseManager
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/external_projects.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/external_projects.go
index 3183ba65..f870726a 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/external_projects.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/external_projects.go
@@ -45,6 +45,8 @@ import (
 	"yunion.io/x/onecloud/pkg/util/stringutils2"
 )
 
+// +onecloud:swagger-gen-model-singular=externalproject
+// +onecloud:swagger-gen-model-plural=externalprojects
 type SExternalProjectManager struct {
 	db.SVirtualResourceBaseManager
 	db.SExternalizedResourceBaseManager
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/filesystem.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/filesystem.go
index a787d6dd..96f3bf0f 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/filesystem.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/filesystem.go
@@ -42,7 +42,7 @@ import (
 )
 
 type SFileSystemManager struct {
-	db.SVirtualResourceBaseManager
+	db.SSharableVirtualResourceBaseManager
 	db.SExternalizedResourceBaseManager
 	SManagedResourceBaseManager
 	SCloudregionResourceBaseManager
@@ -55,7 +55,7 @@ var FileSystemManager *SFileSystemManager
 
 func init() {
 	FileSystemManager = &SFileSystemManager{
-		SVirtualResourceBaseManager: db.NewVirtualResourceBaseManager(
+		SSharableVirtualResourceBaseManager: db.NewSharableVirtualResourceBaseManager(
 			SFileSystem{},
 			"file_systems_tbl",
 			"file_system",
@@ -66,7 +66,7 @@ func init() {
 }
 
 type SFileSystem struct {
-	db.SVirtualResourceBase
db.SSharableVirtualResourceBase db.SExternalizedResourceBase SManagedResourceBase SBillingResourceBase @@ -111,9 +111,9 @@ func (manager *SFileSystemManager) ListItemFilter( query api.FileSystemListInput, ) (*sqlchemy.SQuery, error) { var err error - q, err = manager.SVirtualResourceBaseManager.ListItemFilter(ctx, q, userCred, query.VirtualResourceListInput) + q, err = manager.SSharableVirtualResourceBaseManager.ListItemFilter(ctx, q, userCred, query.SharableVirtualResourceListInput) if err != nil { - return nil, errors.Wrapf(err, "SVirtualResourceBaseManager.ListItemFilter") + return nil, errors.Wrapf(err, "SSharableVirtualResourceBaseManager.ListItemFilter") } q, err = manager.SExternalizedResourceBaseManager.ListItemFilter(ctx, q, userCred, query.ExternalizedResourceBaseListInput) if err != nil { @@ -192,7 +192,7 @@ func (man *SFileSystemManager) ValidateCreateData(ctx context.Context, userCred input.ExpiredAt = billingCycle.EndAt(tm) } - input.VirtualResourceCreateInput, err = man.SVirtualResourceBaseManager.ValidateCreateData(ctx, userCred, ownerId, query, input.VirtualResourceCreateInput) + input.SharableVirtualResourceCreateInput, err = man.SSharableVirtualResourceBaseManager.ValidateCreateData(ctx, userCred, ownerId, query, input.SharableVirtualResourceCreateInput) if err != nil { return input, err } @@ -200,7 +200,7 @@ func (man *SFileSystemManager) ValidateCreateData(ctx context.Context, userCred } func (fileSystem *SFileSystem) PostCreate(ctx context.Context, userCred mcclient.TokenCredential, ownerId mcclient.IIdentityProvider, query jsonutils.JSONObject, data jsonutils.JSONObject) { - fileSystem.SVirtualResourceBase.PostCreate(ctx, userCred, ownerId, query, data) + fileSystem.SSharableVirtualResourceBase.PostCreate(ctx, userCred, ownerId, query, data) fileSystem.StartCreateTask(ctx, userCred, jsonutils.GetAnyString(data, []string{"network_id"}), "") } @@ -231,15 +231,15 @@ func (manager SFileSystemManager) FetchCustomizeColumns( isList bool, ) []api.FileSystemDetails { rows := make([]api.FileSystemDetails, len(objs)) - virtRows := manager.SVirtualResourceBaseManager.FetchCustomizeColumns(ctx, userCred, query, objs, fields, isList) + virtRows := manager.SSharableVirtualResourceBaseManager.FetchCustomizeColumns(ctx, userCred, query, objs, fields, isList) regionRows := manager.SCloudregionResourceBaseManager.FetchCustomizeColumns(ctx, userCred, query, objs, fields, isList) mRows := manager.SManagedResourceBaseManager.FetchCustomizeColumns(ctx, userCred, query, objs, fields, isList) zoneIds := make([]string, len(objs)) for i := range rows { rows[i] = api.FileSystemDetails{ - VirtualResourceDetails: virtRows[i], - CloudregionResourceInfo: regionRows[i], - ManagedResourceInfo: mRows[i], + SharableVirtualResourceDetails: virtRows[i], + CloudregionResourceInfo: regionRows[i], + ManagedResourceInfo: mRows[i], } nas := objs[i].(*SFileSystem) zoneIds[i] = nas.ZoneId @@ -261,9 +261,9 @@ func (manager *SFileSystemManager) ListItemExportKeys(ctx context.Context, keys stringutils2.SSortedStrings, ) (*sqlchemy.SQuery, error) { var err error - q, err = manager.SVirtualResourceBaseManager.ListItemExportKeys(ctx, q, userCred, keys) + q, err = manager.SSharableVirtualResourceBaseManager.ListItemExportKeys(ctx, q, userCred, keys) if err != nil { - return nil, errors.Wrap(err, "SVirtualResourceBaseManager.ListItemExportKeys") + return nil, errors.Wrap(err, "SSharableVirtualResourceBaseManager.ListItemExportKeys") } q, err = manager.SCloudregionResourceBaseManager.ListItemExportKeys(ctx, q, 
userCred, keys)
 	if err != nil {
@@ -275,7 +275,7 @@ func (manager *SFileSystemManager) ListItemExportKeys(ctx context.Context,
 
 func (manager *SFileSystemManager) QueryDistinctExtraField(q *sqlchemy.SQuery, field string) (*sqlchemy.SQuery, error) {
 	var err error
-	q, err = manager.SVirtualResourceBaseManager.QueryDistinctExtraField(q, field)
+	q, err = manager.SSharableVirtualResourceBaseManager.QueryDistinctExtraField(q, field)
 	if err == nil {
 		return q, nil
 	}
@@ -299,9 +299,9 @@ func (manager *SFileSystemManager) OrderByExtraFields(
 ) (*sqlchemy.SQuery, error) {
 	var err error
-	q, err = manager.SVirtualResourceBaseManager.OrderByExtraFields(ctx, q, userCred, query.VirtualResourceListInput)
+	q, err = manager.SSharableVirtualResourceBaseManager.OrderByExtraFields(ctx, q, userCred, query.SharableVirtualResourceListInput)
 	if err != nil {
-		return nil, errors.Wrap(err, "SVirtualResourceBaseManager.OrderByExtraFields")
+		return nil, errors.Wrap(err, "SSharableVirtualResourceBaseManager.OrderByExtraFields")
 	}
 	q, err = manager.SManagedResourceBaseManager.OrderByExtraFields(ctx, q, userCred, query.ManagedResourceListInput)
 	if err != nil {
@@ -452,14 +452,14 @@ func (fileSystem *SFileSystem) RealDelete(ctx context.Context, userCred mcclient
 			return errors.Wrapf(err, "mount target %s real delete", mts[i].DomainName)
 		}
 	}
-	return fileSystem.SVirtualResourceBase.Delete(ctx, userCred)
+	return fileSystem.SSharableVirtualResourceBase.Delete(ctx, userCred)
 }
 
 func (fileSystem *SFileSystem) ValidateDeleteCondition(ctx context.Context, info jsonutils.JSONObject) error {
 	if fileSystem.DisableDelete.IsTrue() {
 		return httperrors.NewInvalidStatusError("FileSystem is locked, cannot delete")
 	}
-	return fileSystem.SVirtualResourceBase.ValidateDeleteCondition(ctx, nil)
+	return fileSystem.SSharableVirtualResourceBase.ValidateDeleteCondition(ctx, nil)
 }
 
 func (fileSystem *SFileSystem) SyncAllWithCloudFileSystem(ctx context.Context, userCred mcclient.TokenCredential, fs cloudprovider.ICloudFileSystem) error {
@@ -589,6 +589,33 @@ func (fileSystem *SFileSystem) StartSyncstatus(ctx context.Context, userCred mcc
 	return StartResourceSyncStatusTask(ctx, userCred, fileSystem, "FileSystemSyncstatusTask", parentTaskId)
 }
 
+// Set quota size (CephFS)
+func (fileSystem *SFileSystem) PerformSetQuota(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, input *api.FileSystemSetQuotaInput) (jsonutils.JSONObject, error) {
+	if input.MaxFiles == nil && input.MaxGb == nil {
+		return nil, httperrors.NewMissingParameterError("max_gb")
+	}
+	var openTask = true
+	count, err := taskman.TaskManager.QueryTasksOfObject(fileSystem, time.Now().Add(-3*time.Minute), &openTask).CountWithError()
+	if err != nil {
+		return nil, err
+	}
+	if count > 0 {
+		return nil, httperrors.NewBadRequestError("Nas has %d active tasks, can't set quota", count)
+	}
+
+	return nil, fileSystem.StartSetQuotaTask(ctx, userCred, input)
+}
+
+func (fileSystem *SFileSystem) StartSetQuotaTask(ctx context.Context, userCred mcclient.TokenCredential, input *api.FileSystemSetQuotaInput) error {
+	params := jsonutils.Marshal(input).(*jsonutils.JSONDict)
+	task, err := taskman.TaskManager.NewTask(ctx, "FileSystemSetQuotaTask", fileSystem, userCred, params, "", "", nil)
+	if err != nil {
+		return err
+	}
+	fileSystem.SetStatus(ctx, userCred, api.NAS_STATUS_EXTENDING, "set quota")
+	return task.ScheduleRun(nil)
+}
+
 func (fileSystem *SFileSystem) GetIRegion(ctx context.Context) (cloudprovider.ICloudRegion, error) {
 	provider, err := fileSystem.GetDriver(ctx)
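// Editor's aside (not part of the patch): PerformSetQuota above requires at least
// one of max_files / max_gb and refuses to run while another task was active on the
// filesystem in the last three minutes. A minimal, self-contained sketch of the same
// validation rule; setQuotaInput and validateSetQuota are hypothetical names, not
// onecloud APIs.
package main

import (
	"errors"
	"fmt"
)

type setQuotaInput struct {
	MaxFiles *int64 // maximum number of files; nil means "not specified"
	MaxGb    *int64 // capacity quota in GB; nil means "not specified"
}

// validateSetQuota mirrors the guard in PerformSetQuota: at least one field must be set.
func validateSetQuota(in setQuotaInput) error {
	if in.MaxFiles == nil && in.MaxGb == nil {
		return errors.New("missing parameter: one of max_gb or max_files is required")
	}
	return nil
}

func main() {
	gb := int64(100)
	fmt.Println(validateSetQuota(setQuotaInput{}))           // missing parameter error
	fmt.Println(validateSetQuota(setQuotaInput{MaxGb: &gb})) // <nil>
}
// End of editor's aside.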
 	if err != nil {
@@ -684,7 +711,7 @@ func (fileSystem *SFileSystem) OnMetadataUpdated(ctx context.Context, userCred m
 }
 
 func (fileSystem *SFileSystem) GetShortDesc(ctx context.Context) *jsonutils.JSONDict {
-	desc := fileSystem.SVirtualResourceBase.GetShortDesc(ctx)
+	desc := fileSystem.SSharableVirtualResourceBase.GetShortDesc(ctx)
 	region, _ := fileSystem.GetRegion()
 	provider := fileSystem.GetCloudprovider()
 	info := MakeCloudProviderInfo(region, nil, provider)
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/globalvpcs.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/globalvpcs.go
index 3dcc1883..6c068030 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/globalvpcs.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/globalvpcs.go
@@ -40,6 +40,8 @@ import (
 	"yunion.io/x/onecloud/pkg/util/stringutils2"
 )
 
+// +onecloud:swagger-gen-model-singular=globalvpc
+// +onecloud:swagger-gen-model-plural=globalvpcs
 type SGlobalVpcManager struct {
 	db.SEnabledStatusInfrasResourceBaseManager
 	db.SExternalizedResourceBaseManager
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/groups.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/groups.go
index 482049ec..a424e33c 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/groups.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/groups.go
@@ -46,6 +46,8 @@ const (
 	RDS_TYPE = "RDS"
 )
 
+// +onecloud:swagger-gen-model-singular=instancegroup
+// +onecloud:swagger-gen-model-plural=instancegroups
 type SGroupManager struct {
 	db.SVirtualResourceBaseManager
 	db.SEnabledResourceBaseManager
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/guest_actions.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/guest_actions.go
index 454b0b37..fad8c9ae 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/guest_actions.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/guest_actions.go
@@ -29,6 +29,7 @@ import (
 	"yunion.io/x/jsonutils"
 	"yunion.io/x/log"
 	"yunion.io/x/pkg/errors"
+	"yunion.io/x/pkg/gotypes"
 	"yunion.io/x/pkg/tristate"
 	"yunion.io/x/pkg/util/billing"
 	"yunion.io/x/pkg/util/httputils"
@@ -55,6 +56,7 @@ import (
 	"yunion.io/x/onecloud/pkg/cloudcommon/policy"
 	"yunion.io/x/onecloud/pkg/cloudcommon/userdata"
 	"yunion.io/x/onecloud/pkg/cloudcommon/validators"
+	"yunion.io/x/onecloud/pkg/compute/baremetal"
 	guestdriver_types "yunion.io/x/onecloud/pkg/compute/guestdrivers/types"
 	"yunion.io/x/onecloud/pkg/compute/options"
 	"yunion.io/x/onecloud/pkg/httperrors"
@@ -588,7 +590,7 @@ func (self *SGuest) StartMigrateTask(
 	data.Set("guest_status", jsonutils.NewString(guestStatus))
 	dedicateMigrateTask := "GuestMigrateTask"
-	if !utils.IsInStringArray(self.GetHypervisor(), []string{api.HYPERVISOR_KVM, api.HYPERVISOR_POD}) {
+	if len(self.ExternalId) > 0 {
 		dedicateMigrateTask = "ManagedGuestMigrateTask" // managed private cloud
 	}
 	self.SetStatus(ctx, userCred, vmStatus, "")
@@ -647,7 +649,7 @@ func (self *SGuest) StartGuestLiveMigrateTask(
 	data.Set("guest_status", jsonutils.NewString(guestStatus))
 	dedicateMigrateTask := "GuestLiveMigrateTask"
-	if self.GetHypervisor() != api.HYPERVISOR_KVM {
+	if len(self.ExternalId) > 0 {
 		dedicateMigrateTask = "ManagedGuestLiveMigrateTask" // managed private cloud
 	}
 	if task, err := taskman.TaskManager.NewTask(ctx, dedicateMigrateTask, self, userCred, data, parentTaskId, "", nil); err != nil {
@@ -1558,14 +1560,18 @@ func (self *SGuest) StartGueststartTask(
 	data *jsonutils.JSONDict, parentTaskId string,
 ) error {
 	schedStart := self.Hypervisor == api.HYPERVISOR_KVM && self.guestDisksStorageTypeIsShared()
+	startFromCreate := false
+	if !gotypes.IsNil(data) {
+		startFromCreate = jsonutils.QueryBoolean(data, "start_from_create", false)
+	}
 	if options.Options.IgnoreNonrunningGuests {
 		host := HostManager.FetchHostById(self.HostId)
-		if host != nil && host.EnableNumaAllocate {
+		if !startFromCreate && host != nil && host.EnableNumaAllocate {
 			schedStart = true
 		}
 	}
-	if self.CpuNumaPin != nil {
+	if !startFromCreate && self.CpuNumaPin != nil {
 		// clean cpu numa pin
 		err := self.SetCpuNumaPin(ctx, userCred, nil, nil)
 		if err != nil {
@@ -2308,17 +2314,43 @@ func (self *SGuest) AttachIsolatedDevices(ctx context.Context, userCred mcclient
 			}
 		}
 		if dev.DevType == api.LEGACY_VGPU_TYPE {
-			devs, err := self.GetIsolatedDevices()
+			attachedGpus, err := self.GetIsolatedDevices()
 			if err != nil {
 				return errors.Wrap(err, "get isolated devices")
 			}
-			for i := range devs {
-				if devs[i].DevType == api.LEGACY_VGPU_TYPE {
+			for i := range attachedGpus {
+				if attachedGpus[i].DevType == api.LEGACY_VGPU_TYPE {
 					return httperrors.NewBadRequestError("Nvidia vgpu count exceed > 1")
-				} else if utils.IsInStringArray(devs[i].DevType, api.VALID_GPU_TYPES) {
+				} else if utils.IsInStringArray(attachedGpus[i].DevType, api.VALID_GPU_TYPES) {
 					return httperrors.NewBadRequestError("Nvidia vgpu can't passthrough with other gpus")
 				}
 			}
+		} else if dev.DevType == api.CONTAINER_DEV_NVIDIA_MPS {
+			allDevs, err := IsolatedDeviceManager.GetUnusedDevsOnHost(host.Id, devModel, -1)
+			if err != nil {
+				return httperrors.NewInternalServerError("fetch GPUs failed: %s", err)
+			}
+			attachedGpus, err := self.GetIsolatedDevices()
+			if err != nil {
+				return httperrors.NewInternalServerError("get attached isolated devices: %s", err)
+			}
+			attachedAddrs := map[string]struct{}{}
+			for i := range attachedGpus {
+				addr := strings.Split(attachedGpus[i].Addr, "-")[0]
+				attachedAddrs[addr] = struct{}{}
+			}
+			validDevs := []SIsolatedDevice{}
+			for i := range allDevs {
+				devAddr := strings.Split(allDevs[i].Addr, "-")[0]
+				if _, ok := attachedAddrs[devAddr]; ok {
+					continue
+				}
+				validDevs = append(validDevs, allDevs[i])
+			}
+			if len(validDevs) < count {
+				return httperrors.NewInsufficientResourceError("require %d %s isolated devices, host %s does not have enough", count, devModel, host.GetName())
+			}
+			devs = validDevs[:count]
 		}
 		unusedDevs = append(unusedDevs, devs...)
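// Editor's aside (not part of the patch): the CONTAINER_DEV_NVIDIA_MPS branch above
// deduplicates by the base PCI address — everything before the first "-" in Addr —
// so two MPS slices of the same physical GPU are never attached to one guest. A
// self-contained sketch of that filter, with a hypothetical device type:
package main

import (
	"fmt"
	"strings"
)

type device struct{ Addr string }

// filterByBaseAddr keeps only devices whose base PCI address is not already attached.
func filterByBaseAddr(all, attached []device) []device {
	used := map[string]struct{}{}
	for _, d := range attached {
		used[strings.Split(d.Addr, "-")[0]] = struct{}{}
	}
	valid := []device{}
	for _, d := range all {
		if _, ok := used[strings.Split(d.Addr, "-")[0]]; ok {
			continue // another slice of this physical GPU is already attached
		}
		valid = append(valid, d)
	}
	return valid
}

func main() {
	all := []device{{"0000:3b:00.0-mps-0"}, {"0000:3b:00.0-mps-1"}, {"0000:af:00.0-mps-0"}}
	attached := []device{{"0000:3b:00.0-mps-2"}}
	fmt.Println(filterByBaseAddr(all, attached)) // only the 0000:af:00.0 slice remains
}
// End of editor's aside.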
 	}
@@ -2863,10 +2895,22 @@ func (self *SGuest) PerformDetachnetwork(
 	return nil, nil
 }
 
-func (guest *SGuest) fixDefaultGateway(ctx context.Context, userCred mcclient.TokenCredential) error {
+func (guest *SGuest) fixDefaultGatewayByNics(ctx context.Context, userCred mcclient.TokenCredential, nics []SGuestnetwork) (bool, error) {
 	defaultGwCnt := 0
+	for i := range nics {
+		if nics[i].Virtual || len(nics[i].TeamWith) > 0 {
+			continue
+		}
+		if nics[i].IsDefault {
+			defaultGwCnt++
+		}
+	}
+
+	if defaultGwCnt == 1 {
+		return false, nil
+	}
+
 	nicList := netutils2.SNicInfoList{}
-	nics, _ := guest.GetNetworks("")
 	for i := range nics {
 		if nics[i].Virtual || len(nics[i].TeamWith) > 0 {
 			continue
@@ -2874,22 +2918,24 @@ func (guest *SGuest) fixDefaultGateway(ctx context.Context, userCred mcclient.To
 		net, _ := nics[i].GetNetwork()
 		if net != nil {
 			nicList = nicList.Add(nics[i].IpAddr, nics[i].MacAddr, net.GuestGateway)
-			if nics[i].IsDefault {
-				defaultGwCnt++
-			}
 		}
 	}
-	if defaultGwCnt != 1 {
-		gwMac, _ := nicList.FindDefaultNicMac()
-		if gwMac != "" {
-			err := guest.setDefaultGateway(ctx, userCred, gwMac)
-			if err != nil {
-				log.Errorf("setDefaultGateway fail %s", err)
-				return errors.Wrap(err, "setDefaultGateway")
-			}
+
+	gwMac, _ := nicList.FindDefaultNicMac()
+	if gwMac != "" {
+		err := guest.setDefaultGateway(ctx, userCred, gwMac)
+		if err != nil {
+			log.Errorf("setDefaultGateway fail %s", err)
+			return true, errors.Wrap(err, "setDefaultGateway")
 		}
 	}
-	return nil
+	return true, nil
+}
+
+func (guest *SGuest) fixDefaultGateway(ctx context.Context, userCred mcclient.TokenCredential) error {
+	nics, _ := guest.GetNetworks("")
+	_, err := guest.fixDefaultGatewayByNics(ctx, userCred, nics)
+	return err
 }
 
 // Attach network interface
@@ -3176,6 +3222,9 @@ func (self *SGuest) PerformChangeConfig(ctx context.Context, userCred mcclient.T
 	if added := confs.AddedCpu(); added > 0 {
 		pendingUsage.Cpu = added
 	}
+	if added := confs.AddedExtraCpu(); added > 0 {
+		pendingUsage.Cpu += added
+	}
 	if added := confs.AddedMem(); added > 0 {
 		pendingUsage.Memory = added
 	}
@@ -3199,7 +3248,7 @@ func (self *SGuest) PerformChangeConfig(ctx context.Context, userCred mcclient.T
 	return nil, nil
 }
 
-func (self *SGuest) ChangeConfToSchedDesc(addCpu, addMem int, schedInputDisks []*api.DiskConfig) *schedapi.ScheduleInput {
+func (self *SGuest) ChangeConfToSchedDesc(addCpu, addExtraCpu, addMem int, schedInputDisks []*api.DiskConfig) *schedapi.ScheduleInput {
 	region, _ := self.GetRegion()
 	devs, _ := self.GetIsolatedDevices()
 	desc := &schedapi.ScheduleInput{
@@ -3218,6 +3267,7 @@ func (self *SGuest) ChangeConfToSchedDesc(addCpu, addMem int, schedInputDisks []
 		OsArch:            self.OsArch,
 		ChangeConfig:      true,
 		HasIsolatedDevice: len(devs) > 0,
+		ExtraCpuCount:     addExtraCpu,
 	}
 	return desc
 }
@@ -6535,3 +6585,16 @@ func (self *SGuest) PerformSyncOsInfo(ctx context.Context, userCred mcclient.Tok
 		return nil, self.startQgaSyncOsInfoTask(ctx, userCred, "")
 	}
 }
+
+func (g *SGuest) PerformSetRootDiskMatcher(ctx context.Context, userCred mcclient.TokenCredential, _ jsonutils.JSONObject, data *api.BaremetalRootDiskMatcher) (jsonutils.JSONObject, error) {
+	if g.GetHypervisor() != api.HYPERVISOR_BAREMETAL {
+		return nil, httperrors.NewNotAcceptableError("only %s supports setting root disk matcher", api.HYPERVISOR_BAREMETAL)
+	}
+	if err := baremetal.ValidateRootDiskMatcher(data); err != nil {
+		return nil, err
+	}
+	if err := g.SetMetadata(ctx, api.BAREMETAL_SERVER_METATA_ROOT_DISK_MATCHER, jsonutils.Marshal(data), userCred); err != nil {
+		return nil, errors.Wrapf(err, "set %s", api.BAREMETAL_SERVER_METATA_ROOT_DISK_MATCHER)
%s", api.BAREMETAL_SERVER_METATA_ROOT_DISK_MATCHER) + } + return nil, nil +} diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/guest_queries.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/guest_queries.go index 6b292053..64ffff5a 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/guest_queries.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/guest_queries.go @@ -824,6 +824,7 @@ func fetchGuestIsolatedDevices(guestIds []string) map[string][]api.SIsolatedDevi dev.GuestId = devs[i].GuestId dev.Addr = devs[i].Addr dev.VendorDeviceId = devs[i].VendorDeviceId + dev.NumaNode = byte(devs[i].NumaNode) gdevs, ok := ret[devs[i].GuestId] if !ok { gdevs = make([]api.SIsolatedDevice, 0) diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/guests.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/guests.go index 9186ef2b..2d1388d1 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/guests.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/guests.go @@ -129,6 +129,8 @@ type SGuest struct { VmemSize int `nullable:"false" list:"user" create:"required"` // CPU 内存绑定信息 CpuNumaPin jsonutils.JSONObject `nullable:"true" get:"user" update:"user" create:"optional"` + // 额外分配的 CPU 数量 + ExtraCpuCount int `nullable:"false" default:"0" list:"user" create:"optional"` // 启动顺序 BootOrder string `width:"8" charset:"ascii" nullable:"true" default:"cdn" list:"user" update:"user" create:"optional"` @@ -1324,8 +1326,9 @@ func (guest *SGuest) SetCpuNumaPin( vcpuId := 0 for i := range schedCpuNumaPin { cpuNumaPin[i] = api.SCpuNumaPin{ - SizeMB: schedCpuNumaPin[i].MemSizeMB, - NodeId: schedCpuNumaPin[i].NodeId, + SizeMB: schedCpuNumaPin[i].MemSizeMB, + NodeId: schedCpuNumaPin[i].NodeId, + ExtraCpuCount: schedCpuNumaPin[i].ExtraCpuCount, } if len(schedCpuNumaPin[i].CpuPin) > 0 { @@ -3083,17 +3086,21 @@ func (self *SGuest) SyncRemoveCloudVM(ctx context.Context, userCred mcclient.Tok } return q }) - if err == nil { - _, err = db.Update(self, func() error { - self.HostId = host.GetId() - self.Status = iVM.GetStatus() - self.PowerStates = iVM.GetPowerStates() - self.InferPowerStates() - return nil - }) - return err + if err != nil { + log.Errorf("fetch vm %s(%s) host by id %s error: %v", self.Name, self.ExternalId, hostId, err) + return nil } + _, err = db.Update(self, func() error { + self.HostId = host.GetId() + self.Status = iVM.GetStatus() + self.PowerStates = iVM.GetPowerStates() + self.InferPowerStates() + return nil + }) + return err } + // 公有云实例, 因为翻页查询导致实例返回结果漏查,且GetIHostId一般返回为空 + return nil } else if errors.Cause(err) != cloudprovider.ErrNotFound { return errors.Wrap(err, "GetIVMById") } @@ -3147,7 +3154,7 @@ func (guest *SGuest) SyncAllWithCloudVM(ctx context.Context, userCred mcclient.T return errors.Wrap(err, "guest.syncWithCloudVM") } - syncVMPeripherals(ctx, userCred, guest, extVM, host, provider, driver) + SyncVMPeripherals(ctx, userCred, guest, extVM, host, provider, driver) return nil } @@ -4318,7 +4325,7 @@ func (self *SGuest) allocSriovNicDevice( } netConfig.SriovDevice.NetworkIndex = &gn.Index netConfig.SriovDevice.WireId = net.WireId - err = self.createIsolatedDeviceOnHost(ctx, userCred, host, netConfig.SriovDevice, pendingUsageZone, nil) + err = self.createIsolatedDeviceOnHost(ctx, userCred, host, netConfig.SriovDevice, pendingUsageZone, nil, nil) if err != nil { return errors.Wrap(err, "self.createIsolatedDeviceOnHost") } @@ -4507,7 +4514,7 @@ func (self *SGuest) attachNVMEDevice( ) error { gd := self.GetGuestDisk(disk.Id) diskConfig.NVMEDevice.DiskIndex = 
&gd.Index - err := self.createIsolatedDeviceOnHost(ctx, userCred, host, diskConfig.NVMEDevice, pendingUsage, nil) + err := self.createIsolatedDeviceOnHost(ctx, userCred, host, diskConfig.NVMEDevice, pendingUsage, nil, nil) if err != nil { return errors.Wrap(err, "self.createIsolatedDeviceOnHost") } @@ -4660,12 +4667,23 @@ func (self *SGuest) createDiskOnHost( } func (self *SGuest) CreateIsolatedDeviceOnHost(ctx context.Context, userCred mcclient.TokenCredential, host *SHost, devs []*api.IsolatedDeviceConfig, pendingUsage quotas.IQuota) error { + var numaNodes []int + if self.CpuNumaPin != nil { + numaNodes = make([]int, 0) + cpuNumaPin := make([]schedapi.SCpuNumaPin, 0) + self.CpuNumaPin.Unmarshal(&cpuNumaPin) + + for i := range cpuNumaPin { + numaNodes = append(numaNodes, cpuNumaPin[i].NodeId) + } + } + usedDeviceMap := map[string]*SIsolatedDevice{} for _, devConfig := range devs { if devConfig.DevType == api.NIC_TYPE || devConfig.DevType == api.NVME_PT_TYPE { continue } - err := self.createIsolatedDeviceOnHost(ctx, userCred, host, devConfig, pendingUsage, usedDeviceMap) + err := self.createIsolatedDeviceOnHost(ctx, userCred, host, devConfig, pendingUsage, usedDeviceMap, numaNodes) if err != nil { return err } @@ -4673,11 +4691,11 @@ func (self *SGuest) CreateIsolatedDeviceOnHost(ctx context.Context, userCred mcc return nil } -func (self *SGuest) createIsolatedDeviceOnHost(ctx context.Context, userCred mcclient.TokenCredential, host *SHost, devConfig *api.IsolatedDeviceConfig, pendingUsage quotas.IQuota, usedDevMap map[string]*SIsolatedDevice) error { +func (self *SGuest) createIsolatedDeviceOnHost(ctx context.Context, userCred mcclient.TokenCredential, host *SHost, devConfig *api.IsolatedDeviceConfig, pendingUsage quotas.IQuota, usedDevMap map[string]*SIsolatedDevice, preferNumaNodes []int) error { lockman.LockClass(ctx, QuotaManager, self.ProjectId) defer lockman.ReleaseClass(ctx, QuotaManager, self.ProjectId) - err := IsolatedDeviceManager.attachHostDeviceToGuestByDesc(ctx, self, host, devConfig, userCred, usedDevMap) + err := IsolatedDeviceManager.attachHostDeviceToGuestByDesc(ctx, self, host, devConfig, userCred, usedDevMap, preferNumaNodes) if err != nil { return err } @@ -5185,6 +5203,8 @@ func (self *SGuest) GetJsonDescAtHypervisor(ctx context.Context, host *SHost) *a LightMode: self.RescueMode, Hypervisor: self.GetHypervisor(), + + EnableEsxiSwap: options.Options.EnableEsxiSwap, } if len(self.BackupHostId) > 0 { @@ -5222,6 +5242,10 @@ func (self *SGuest) GetJsonDescAtHypervisor(ctx context.Context, host *SHost) *a // nics, domain desc.Domain = options.Options.DNSDomain nics, _ := self.GetNetworks("") + changed, _ := self.fixDefaultGatewayByNics(ctx, auth.AdminCredential(), nics) + if changed { + nics, _ = self.GetNetworks("") + } for _, nic := range nics { nicDesc := nic.getJsonDescAtHost(ctx, host) desc.Nics = append(desc.Nics, nicDesc) @@ -5331,6 +5355,7 @@ func (self *SGuest) GetJsonDescAtBaremetal(ctx context.Context, host *SHost) *ap desc.DiskConfig = host.getDiskConfig() + self.fixDefaultGateway(ctx, auth.AdminCredential()) netifs := host.GetAllNetInterfaces() desc.Domain = options.Options.DNSDomain @@ -6249,6 +6274,7 @@ func (self *SGuest) ToSchedDesc() *schedapi.ScheduleInput { config.Hypervisor = self.GetHypervisor() desc.ServerConfig = *config desc.OsArch = self.OsArch + desc.ExtraCpuCount = self.ExtraCpuCount return desc } diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/hosts.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/hosts.go index 
16666e82..b51d07ad 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/hosts.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/hosts.go @@ -2632,6 +2632,94 @@ func (self *SGuest) Purge(ctx context.Context, userCred mcclient.TokenCredential return self.purge(ctx, userCred) } +func (hh *SHost) GetIsolateDevices() ([]SIsolatedDevice, error) { + q := IsolatedDeviceManager.Query().Equals("host_id", hh.Id) + ret := []SIsolatedDevice{} + err := db.FetchModelObjects(IsolatedDeviceManager, q, &ret) + if err != nil { + return nil, err + } + return ret, nil +} + +func (hh *SHost) SyncHostIsolateDevices(ctx context.Context, userCred mcclient.TokenCredential, iprovider cloudprovider.ICloudProvider, devs []cloudprovider.IsolateDevice, syncOwnerId mcclient.IIdentityProvider, xor bool) compare.SyncResult { + lockman.LockRawObject(ctx, IsolatedDeviceManager.Keyword(), hh.Id) + defer lockman.ReleaseRawObject(ctx, IsolatedDeviceManager.Keyword(), hh.Id) + + result := compare.SyncResult{} + + dbDevs, err := hh.GetIsolateDevices() + if err != nil { + result.Error(errors.Wrapf(err, "GetIsolateDevices")) + return result + } + + removed := make([]SIsolatedDevice, 0) + commondb := make([]SIsolatedDevice, 0) + commonext := make([]cloudprovider.IsolateDevice, 0) + added := make([]cloudprovider.IsolateDevice, 0) + duplicated := make(map[string][]cloudprovider.IsolateDevice) + + err = compare.CompareSets2(dbDevs, devs, &removed, &commondb, &commonext, &added, &duplicated) + if err != nil { + result.Error(err) + return result + } + + for i := 0; i < len(removed); i += 1 { + err := removed[i].Delete(ctx, userCred) + if err != nil { + result.DeleteError(err) + continue + } + result.Delete() + } + + if !xor { + for i := 0; i < len(commondb); i += 1 { + err := commondb[i].syncWithCloudIsolateDevice(ctx, userCred, commonext[i]) + if err != nil { + result.UpdateError(err) + continue + } + result.Update() + } + } + + for i := 0; i < len(added); i += 1 { + err := hh.newIsolateDevice(ctx, userCred, added[i]) + if err != nil { + result.AddError(err) + continue + } + result.Add() + } + + if len(duplicated) > 0 { + errs := make([]error, 0) + for k, vms := range duplicated { + errs = append(errs, errors.Wrapf(errors.ErrDuplicateId, "Duplicate Id %s (%d)", k, len(vms))) + } + result.AddError(errors.NewAggregate(errs)) + } + + return result +} + +func (hh *SHost) newIsolateDevice(ctx context.Context, userCred mcclient.TokenCredential, dev cloudprovider.IsolateDevice) error { + ret := &SIsolatedDevice{} + ret.SetModelManager(IsolatedDeviceManager, ret) + ret.HostId = hh.Id + ret.ExternalId = dev.GetGlobalId() + ret.Name = dev.GetName() + ret.Model = dev.GetModel() + ret.Addr = dev.GetAddr() + ret.DevType = dev.GetDevType() + ret.NumaNode = dev.GetNumaNode() + ret.VendorDeviceId = dev.GetVendorDeviceId() + return IsolatedDeviceManager.TableSpec().Insert(ctx, ret) +} + func (hh *SHost) SyncHostVMs(ctx context.Context, userCred mcclient.TokenCredential, iprovider cloudprovider.ICloudProvider, vms []cloudprovider.ICloudVM, syncOwnerId mcclient.IIdentityProvider, xor bool) ([]SGuestSyncResult, compare.SyncResult) { lockman.LockRawObject(ctx, GuestManager.Keyword(), hh.Id) defer lockman.ReleaseRawObject(ctx, GuestManager.Keyword(), hh.Id) @@ -3679,6 +3767,17 @@ func (manager *SHostManager) FetchCustomizeColumns( if devs, ok := isolatedDeviceMap[hostIds[i]]; ok { rows[i].IsolatedDeviceCount = len(devs) + for j := range devs { + dev := devs[j] + if rows[i].IsolatedDeviceTypeCount == nil { + rows[i].IsolatedDeviceTypeCount = 
make(map[string]int, 0) + } + if cnt, ok := rows[i].IsolatedDeviceTypeCount[dev.DevType]; ok { + rows[i].IsolatedDeviceTypeCount[dev.DevType] = cnt + 1 + } else { + rows[i].IsolatedDeviceTypeCount[dev.DevType] = 1 + } + } rows[i].ReservedResourceForGpu = hosts[i].GetDevsReservedResource(devs) } @@ -4644,13 +4743,34 @@ func (hh *SHost) StartSyncAllGuestsStatusTask(ctx context.Context, userCred mccl } } +func (hh *SHost) GetStoragesByMasterHost() ([]string, error) { + sq := StorageManager.Query() + sq = sq.In("storage_type", api.SHARED_STORAGE) + sq = sq.Filter(sqlchemy.OR(sqlchemy.Equals(sq.Field("master_host"), hh.Id), sqlchemy.IsNullOrEmpty(sq.Field("master_host")))) + subq := sq.SubQuery() + hsq := HoststorageManager.Query().Equals("host_id", hh.Id) + hsq = hsq.Join(subq, sqlchemy.Equals(subq.Field("id"), hsq.Field("storage_id"))) + + hostStorages := make([]SHoststorage, 0) + if err := hsq.All(&hostStorages); err != nil && err != sql.ErrNoRows { + return nil, errors.Wrap(err, "get hostStorages") + } else if err == sql.ErrNoRows { + return nil, nil + } + storages := make([]string, len(hostStorages)) + for i := range storages { + storages[i] = hostStorages[i].StorageId + } + return storages, nil +} + func (hh *SHost) PerformPing(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, input api.SHostPingInput) (jsonutils.JSONObject, error) { if hh.HostType == api.HOST_TYPE_BAREMETAL { return nil, httperrors.NewNotSupportedError("ping host type %s not support", hh.HostType) } if input.WithData { // piggyback storage stats info - log.Debugf("host ping %s", jsonutils.Marshal(input)) + log.Debugf("host ping %#v", input) for _, si := range input.StorageStats { storageObj, err := StorageManager.FetchById(si.StorageId) if err != nil { @@ -4696,12 +4816,17 @@ func (hh *SHost) PerformPing(ctx context.Context, userCred mcclient.TokenCredent } result := jsonutils.NewDict() result.Set("name", jsonutils.NewString(hh.GetName())) - dependSvcs := []string{"ntpd", "kafka", apis.SERVICE_TYPE_INFLUXDB, apis.SERVICE_TYPE_VICTORIA_METRICS, "elasticsearch"} + dependSvcs := []string{"ntpd", "kafka", apis.SERVICE_TYPE_INFLUXDB, apis.SERVICE_TYPE_VICTORIA_METRICS, "elasticsearch", "opentsdb"} catalog := auth.GetCatalogData(dependSvcs, options.Options.Region) if catalog == nil { return nil, fmt.Errorf("Get catalog error") } result.Set("catalog", catalog) + if storages, err := hh.GetStoragesByMasterHost(); err != nil { + return nil, err + } else { + result.Set("master_host_storages", jsonutils.NewStringArray(storages)) + } appParams := appsrv.AppContextGetParams(ctx) if appParams != nil { @@ -4713,6 +4838,50 @@ func (hh *SHost) PerformPing(ctx context.Context, userCred mcclient.TokenCredent return result, nil } +func (host *SHost) getHostNodeReservePercent(reservedCpusStr string) (map[string]float32, error) { + reservedCpuset, err := cpuset.Parse(reservedCpusStr) + if err != nil { + return nil, errors.Wrap(err, "cpuset parse reserved cpus") + } + + topoObj, err := host.SysInfo.Get("topology") + if err != nil { + return nil, errors.Wrap(err, "get topology from host sys_info") + } + info := new(hostapi.HostTopology) + if err := topoObj.Unmarshal(info); err != nil { + return nil, errors.Wrap(err, "Unmarshal host topology struct") + } + nodecpus := map[int]int{} + nodeReservedCpus := map[int]int{} + for i := range info.Nodes { + cSet := cpuset.NewBuilder() + for j := 0; j < len(info.Nodes[i].Cores); j++ { + for k := 0; k < len(info.Nodes[i].Cores[j].LogicalProcessors); k++ { + if 
reservedCpuset.Contains(info.Nodes[i].Cores[j].LogicalProcessors[k]) {
+					if cnt, ok := nodeReservedCpus[info.Nodes[i].ID]; !ok {
+						nodeReservedCpus[info.Nodes[i].ID] = 1
+					} else {
+						nodeReservedCpus[info.Nodes[i].ID] = 1 + cnt
+					}
+				}
+
+				cSet.Add(info.Nodes[i].Cores[j].LogicalProcessors[k])
+			}
+		}
+		nodecpus[info.Nodes[i].ID] = cSet.Result().Size()
+	}
+	reserveRate := map[string]float32{}
+	for nodeId, cnt := range nodecpus {
+		reserveCnt, ok := nodeReservedCpus[nodeId]
+		if !ok {
+			reserveCnt = 0
+		}
+		reserveRate[strconv.Itoa(nodeId)] = float32(reserveCnt) / float32(cnt)
+	}
+	return reserveRate, nil
+}
+
 func (host *SHost) getHostLogicalCores() ([]int, error) {
 	cpuObj, err := host.SysInfo.Get("cpu_info")
 	if err != nil {
@@ -4768,14 +4937,6 @@ func (hh *SHost) PerformReserveCpus(
 		return nil, httperrors.NewNotSupportedError("host type %s not support reserve cpus", hh.HostType)
 	}
 
-	cnt, err := hh.GetRunningGuestCount()
-	if err != nil {
-		return nil, err
-	}
-	if cnt > 0 {
-		return nil, httperrors.NewBadRequestError("host %s has %d guests, can't update reserve cpus", hh.Id, cnt)
-	}
-
 	if input.Cpus == "" {
 		return nil, httperrors.NewInputParameterError("missing cpus")
 	}
@@ -4810,11 +4971,27 @@ func (hh *SHost) PerformReserveCpus(
 		}
 	}
 
+	if len(input.Cpus) > 0 {
+		reservePercent, err := hh.getHostNodeReservePercent(input.Cpus)
+		if err != nil {
+			return nil, errors.Errorf("failed getHostNodeReservePercent: %s", err)
+		}
+		err = hh.SetMetadata(ctx, api.HOSTMETA_RESERVED_CPUS_RATE, reservePercent, userCred)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		err = hh.RemoveMetadata(ctx, api.HOSTMETA_RESERVED_CPUS_RATE, userCred)
+		if err != nil {
+			return nil, err
+		}
+	}
+
 	err = hh.SetMetadata(ctx, api.HOSTMETA_RESERVED_CPUS_INFO, input, userCred)
 	if err != nil {
 		return nil, err
 	}
-	if hh.CpuReserved < cs.Size() {
+	if hh.CpuReserved != cs.Size() {
 		_, err = db.Update(hh, func() error {
 			hh.CpuReserved = cs.Size()
 			return nil
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/hostwires.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/hostwires.go
index fad1c747..809ca5f4 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/hostwires.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/hostwires.go
@@ -59,7 +59,7 @@ type SHostwireDeprecated struct {
 	Bridge string `width:"64" charset:"ascii" nullable:"false" list:"domain" update:"domain" create:"domain_required"`
 	// Interface name
-	Interface string `width:"16" charset:"ascii" nullable:"false" list:"domain" update:"domain" create:"domain_required"`
+	Interface string `width:"64" charset:"ascii" nullable:"false" list:"domain" update:"domain" create:"domain_required"`
 	// Whether this is the primary address
 	IsMaster bool `nullable:"true" default:"false" list:"domain" update:"domain" create:"domain_optional"`
 	// MAC address
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/instance_backup.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/instance_backup.go
index 81125f7f..3d23855e 100644
--- a/vendor/yunion.io/x/onecloud/pkg/compute/models/instance_backup.go
+++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/instance_backup.go
@@ -77,6 +77,8 @@ type SInstanceBackup struct {
 	SizeMb int `nullable:"false" list:"user"`
 }
 
+// +onecloud:swagger-gen-model-singular=instancebackup
+// +onecloud:swagger-gen-model-plural=instancebackups
 type SInstanceBackupManager struct {
 	db.SVirtualResourceBaseManager
 	SManagedResourceBaseManager
diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/isolated_devices.go
b/vendor/yunion.io/x/onecloud/pkg/compute/models/isolated_devices.go index e5b4ea99..18c743b8 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/isolated_devices.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/isolated_devices.go @@ -18,11 +18,14 @@ import ( "context" "database/sql" "fmt" + "math" "reflect" "sort" + "strconv" "strings" "time" + "yunion.io/x/cloudmux/pkg/cloudprovider" "yunion.io/x/jsonutils" "yunion.io/x/log" "yunion.io/x/pkg/errors" @@ -33,6 +36,7 @@ import ( "yunion.io/x/sqlchemy" api "yunion.io/x/onecloud/pkg/apis/compute" + hostapi "yunion.io/x/onecloud/pkg/apis/host" "yunion.io/x/onecloud/pkg/apis/notify" "yunion.io/x/onecloud/pkg/cloudcommon/consts" "yunion.io/x/onecloud/pkg/cloudcommon/db" @@ -64,6 +68,7 @@ var VENDOR_ID_MAP = api.VENDOR_ID_MAP type SIsolatedDeviceManager struct { db.SStandaloneResourceBaseManager + db.SExternalizedResourceBaseManager SHostResourceBaseManager } @@ -87,6 +92,7 @@ func init() { type SIsolatedDevice struct { db.SStandaloneResourceBase + db.SExternalizedResourceBase SHostResourceBase `width:"36" charset:"ascii" nullable:"false" default:"" index:"true" list:"domain" create:"domain_required"` // # PCI / GPU-HPC / GPU-VGA / USB / NIC @@ -309,6 +315,10 @@ func (manager *SIsolatedDeviceManager) ListItemFilter( if err != nil { return nil, errors.Wrap(err, "SHostResourceBaseManager.ListItemFilter") } + q, err = manager.SExternalizedResourceBaseManager.ListItemFilter(ctx, q, userCred, query.ExternalizedResourceBaseListInput) + if err != nil { + return nil, errors.Wrap(err, "SExternalizedResourceBaseManager.ListItemFilter") + } if query.Gpu != nil && *query.Gpu { q = q.Startswith("dev_type", "GPU") @@ -334,7 +344,7 @@ func (manager *SIsolatedDeviceManager) ListItemFilter( } if !query.ShowBaremetalIsolatedDevices { - sq := HostManager.Query("id").In("host_type", []string{api.HOST_TYPE_HYPERVISOR, api.HOST_TYPE_CONTAINER}).SubQuery() + sq := HostManager.Query("id").In("host_type", []string{api.HOST_TYPE_HYPERVISOR, api.HOST_TYPE_CONTAINER, api.HOST_TYPE_ZETTAKIT}).SubQuery() q = q.In("host_id", sq) } @@ -469,6 +479,17 @@ func (self *SIsolatedDevice) getVendor() string { } } +func GetVendorByVendorDeviceId(vendorDeviceId string) string { + parts := strings.Split(vendorDeviceId, ":") + vendorId := parts[0] + vendor, ok := ID_VENDOR_MAP[vendorId] + if ok { + return vendor + } else { + return vendorId + } +} + func (self *SIsolatedDevice) IsGPU() bool { return strings.HasPrefix(self.DevType, "GPU") || sets.NewString(api.CONTAINER_GPU_TYPES...).Has(self.DevType) } @@ -562,13 +583,16 @@ func (manager *SIsolatedDeviceManager) _isValidDeviceInfo(config *api.IsolatedDe return nil } -func (manager *SIsolatedDeviceManager) attachHostDeviceToGuestByDesc(ctx context.Context, guest *SGuest, host *SHost, devConfig *api.IsolatedDeviceConfig, userCred mcclient.TokenCredential, usedDevMap map[string]*SIsolatedDevice) error { +func (manager *SIsolatedDeviceManager) attachHostDeviceToGuestByDesc( + ctx context.Context, guest *SGuest, host *SHost, devConfig *api.IsolatedDeviceConfig, + userCred mcclient.TokenCredential, usedDevMap map[string]*SIsolatedDevice, preferNumaNodes []int, +) error { if len(devConfig.Id) > 0 { return manager.attachSpecificDeviceToGuest(ctx, guest, devConfig, userCred) } else if len(devConfig.DevicePath) > 0 { - return manager.attachHostDeviceToGuestByDevicePath(ctx, guest, host, devConfig, userCred, usedDevMap) + return manager.attachHostDeviceToGuestByDevicePath(ctx, guest, host, devConfig, userCred, usedDevMap, 
preferNumaNodes) } else { - return manager.attachHostDeviceToGuestByModel(ctx, guest, host, devConfig, userCred, usedDevMap) + return manager.attachHostDeviceToGuestByModel(ctx, guest, host, devConfig, userCred, usedDevMap, preferNumaNodes) } } @@ -584,7 +608,7 @@ func (manager *SIsolatedDeviceManager) attachSpecificDeviceToGuest(ctx context.C return guest.attachIsolatedDevice(ctx, userCred, dev, devConfig.NetworkIndex, devConfig.DiskIndex) } -func (manager *SIsolatedDeviceManager) attachHostDeviceToGuestByDevicePath(ctx context.Context, guest *SGuest, host *SHost, devConfig *api.IsolatedDeviceConfig, userCred mcclient.TokenCredential, usedDevMap map[string]*SIsolatedDevice) error { +func (manager *SIsolatedDeviceManager) attachHostDeviceToGuestByDevicePath(ctx context.Context, guest *SGuest, host *SHost, devConfig *api.IsolatedDeviceConfig, userCred mcclient.TokenCredential, usedDevMap map[string]*SIsolatedDevice, preferNumaNodes []int) error { if len(devConfig.Model) == 0 || len(devConfig.DevicePath) == 0 { return fmt.Errorf("Model or DevicePath is empty: %#v", devConfig) } @@ -601,7 +625,7 @@ func (manager *SIsolatedDeviceManager) attachHostDeviceToGuestByDevicePath(ctx c } } if selectedDev.Id == "" { - return fmt.Errorf("Can't found unused model %s device_path %s on host %s", devConfig.Model, devConfig.DevicePath, host.Id) + selectedDev = devs[0] } return guest.attachIsolatedDevice(ctx, userCred, &selectedDev, devConfig.NetworkIndex, devConfig.DiskIndex) } @@ -636,7 +660,125 @@ func (pq *SorttedGroupDevs) Pop() interface{} { return item } -func (manager *SIsolatedDeviceManager) attachHostDeviceToGuestByModel(ctx context.Context, guest *SGuest, host *SHost, devConfig *api.IsolatedDeviceConfig, userCred mcclient.TokenCredential, usedDevMap map[string]*SIsolatedDevice) error { +type SNodeIsolateDevicesInfo struct { + TotalDevCount int + ReservedRate float32 +} + +func (manager *SIsolatedDeviceManager) getDevNodesUsedRate( + ctx context.Context, host *SHost, devConfig *api.IsolatedDeviceConfig, topo *hostapi.HostTopology, +) (map[string]SNodeIsolateDevicesInfo, error) { + devs, err := manager.findHostDevsByDevConfig(devConfig.Model, devConfig.DevType, host.Id, devConfig.WireId) + if err != nil || len(devs) == 0 { + return nil, fmt.Errorf("Can't found model %s on host %s", devConfig.Model, host.Id) + } + mapDevs := map[string][]SIsolatedDevice{} + for i := range devs { + dev := devs[i] + devPath := dev.DevicePath + var gdevs []SIsolatedDevice + + gdevs, ok := mapDevs[devPath] + if !ok { + gdevs = []SIsolatedDevice{dev} + } else { + gdevs = append(gdevs, dev) + } + mapDevs[devPath] = gdevs + } + nodesGroupDevs := map[string]SorttedGroupDevs{} + for devPath, mappedDevs := range mapDevs { + numaNode := strconv.Itoa(int(mappedDevs[0].NumaNode)) + if _, ok := nodesGroupDevs[numaNode]; ok { + nodesGroupDevs[numaNode] = append(nodesGroupDevs[numaNode], &GroupDevs{ + DevPath: devPath, + Devs: mappedDevs, + }) + } else { + groupDevs := make(SorttedGroupDevs, 0) + nodesGroupDevs[numaNode] = append(groupDevs, &GroupDevs{ + DevPath: devPath, + Devs: mappedDevs, + }) + } + } + + reserveRate := map[string]float32{} + reserveRateStr := host.GetMetadata(ctx, api.HOSTMETA_RESERVED_CPUS_RATE, nil) + reserveRateJ, err := jsonutils.ParseString(reserveRateStr) + if err != nil { + return nil, errors.Wrap(err, "parse reserveRateStr") + } + err = reserveRateJ.Unmarshal(&reserveRate) + if err != nil { + return nil, errors.Wrap(err, "unmarshal reserveRateStr") + } + + nodeNoDevIds := map[int]int{} + for i := range 
topo.Nodes { + nodeId := strconv.Itoa(topo.Nodes[i].ID) + if _, ok := nodesGroupDevs[nodeId]; !ok { + nodeInt, _ := strconv.Atoi(nodeId) + nodeNoDevIds[nodeInt] = -1 + } + } + // + //for nodeId, _ := range reserveRate { + // if _, ok := nodesGroupDevs[nodeId]; !ok { + // nodeInt, _ := strconv.Atoi(nodeId) + // nodeNoDevIds[nodeInt] = -1 + // } + //} + + reserveNodes := map[string][]string{} + for i := range topo.Nodes { + if _, ok := nodeNoDevIds[topo.Nodes[i].ID]; ok { + minDistance := int(math.MaxInt16) + selectNodeId := "" + for nodeId, _ := range nodesGroupDevs { + nodeInt, _ := strconv.Atoi(nodeId) + if topo.Nodes[i].Distances[nodeInt] < minDistance { + selectNodeId = strconv.Itoa(nodeInt) + minDistance = topo.Nodes[i].Distances[nodeInt] + } + } + noDevNodeId := strconv.Itoa(topo.Nodes[i].ID) + log.Debugf("node %s select node %s", noDevNodeId, selectNodeId) + if nodes, ok := reserveNodes[selectNodeId]; ok { + reserveNodes[selectNodeId] = append(nodes, noDevNodeId) + } else { + reserveNodes[selectNodeId] = []string{noDevNodeId} + } + } + } + reserveRates := map[string]SNodeIsolateDevicesInfo{} + for nodeId, devGroups := range nodesGroupDevs { + nodeCnt := 1 + nodeReserveRate := reserveRate[nodeId] + if nodes, ok := reserveNodes[nodeId]; ok { + for i := range nodes { + nodeReserveRate += reserveRate[nodes[i]] + nodeCnt += 1 + } + } + nodeReserveRate = nodeReserveRate / float32(nodeCnt) + devCnt := 0 + for i := range devGroups { + devCnt += len(devGroups[i].Devs) + } + reserveRates[nodeId] = SNodeIsolateDevicesInfo{ + TotalDevCount: devCnt, + ReservedRate: nodeReserveRate, + } + log.Debugf("node %v nodeCnt %v nodeReserveRate %v", nodeId, nodeCnt, nodeReserveRate) + } + return reserveRates, nil +} + +func (manager *SIsolatedDeviceManager) attachHostDeviceToGuestByModel( + ctx context.Context, guest *SGuest, host *SHost, devConfig *api.IsolatedDeviceConfig, + userCred mcclient.TokenCredential, usedDevMap map[string]*SIsolatedDevice, preferNumaNodes []int, +) error { if len(devConfig.Model) == 0 { return fmt.Errorf("Not found model from info: %#v", devConfig) } @@ -645,8 +787,8 @@ func (manager *SIsolatedDeviceManager) attachHostDeviceToGuestByModel(ctx contex if err != nil || len(devs) == 0 { return fmt.Errorf("Can't found model %s on host %s", devConfig.Model, host.Id) } - // 1. group devices by device_path - groupDevs := make(SorttedGroupDevs, 0) + // 1. 
group devices by device_path and numa nodes + //groupDevs := make(SorttedGroupDevs, 0) mapDevs := map[string][]SIsolatedDevice{} for i := range devs { dev := devs[i] @@ -661,41 +803,187 @@ func (manager *SIsolatedDeviceManager) attachHostDeviceToGuestByModel(ctx contex } mapDevs[devPath] = gdevs } - for devPath, mappedDevs := range mapDevs { - groupDevs = append(groupDevs, &GroupDevs{ - DevPath: devPath, - Devs: mappedDevs, - }) - } - sort.Sort(groupDevs) - var preferNumaNode int8 = -1 - for _, dev := range usedDevMap { - if dev.NumaNode >= 0 { - preferNumaNode = dev.NumaNode - break + var groupDevs SorttedGroupDevs + if len(preferNumaNodes) > 0 { + groupDevs = make(SorttedGroupDevs, 0) + for devPath, mappedDevs := range mapDevs { + groupDevs = append(groupDevs, &GroupDevs{ + DevPath: devPath, + Devs: mappedDevs, + }) + } + } else { + nodesGroupDevs := map[int8]SorttedGroupDevs{} + for devPath, mappedDevs := range mapDevs { + numaNode := mappedDevs[0].NumaNode + if _, ok := nodesGroupDevs[numaNode]; ok { + nodesGroupDevs[numaNode] = append(nodesGroupDevs[numaNode], &GroupDevs{ + DevPath: devPath, + Devs: mappedDevs, + }) + } else { + groupDevs := make(SorttedGroupDevs, 0) + nodesGroupDevs[numaNode] = append(groupDevs, &GroupDevs{ + DevPath: devPath, + Devs: mappedDevs, + }) + } + } + + var selectedNode int8 = -1 + if len(nodesGroupDevs) == 1 { + for nodeId := range nodesGroupDevs { + selectedNode = nodeId + } + } else { + reservedCpusStr := host.GetMetadata(ctx, api.HOSTMETA_RESERVED_CPUS_INFO, nil) + if len(reservedCpusStr) > 0 { + topoObj, err := host.SysInfo.Get("topology") + if err != nil { + return errors.Wrap(err, "get topology from host sys_info") + } + topo := new(hostapi.HostTopology) + if err := topoObj.Unmarshal(topo); err != nil { + return errors.Wrap(err, "Unmarshal host topology struct") + } + nodesReserveRate, err := manager.getDevNodesUsedRate(ctx, host, devConfig, topo) + if err != nil { + return err + } + var selectedNodeUtil float32 = 1.0 + for nodeId, gds := range nodesGroupDevs { + freeDevCnt := 0 + for i := range gds { + freeDevCnt += len(gds[i].Devs) + } + + nodeTotalCnt := nodesReserveRate[strconv.Itoa(int(nodeId))].TotalDevCount + usedDevCnt := nodeTotalCnt - freeDevCnt + + nodeReserveRate := nodesReserveRate[strconv.Itoa(int(nodeId))].ReservedRate + nodeCnt := (1 - nodeReserveRate) * float32(nodeTotalCnt) + nodeutil := float32(usedDevCnt) / nodeCnt + log.Debugf("selectedNodeUtil node %v util %v usedDevCnt %v totalDevCnt %v", nodeId, nodeutil, usedDevCnt, nodeCnt) + if nodeutil < selectedNodeUtil { + selectedNodeUtil = nodeutil + selectedNode = nodeId + } + } + } else { + var selectedNodeDevCnt = 0 + for nodeId, gds := range nodesGroupDevs { + devCnt := 0 + for i := range gds { + devCnt += len(gds[i].Devs) + } + if devCnt > selectedNodeDevCnt { + selectedNodeDevCnt = devCnt + selectedNode = nodeId + } + } + } } + log.Debugf("selectedNodeUtil node %v", selectedNode) + groupDevs = nodesGroupDevs[selectedNode] } + sort.Sort(groupDevs) var selectedDev *SIsolatedDevice - if preferNumaNode >= 0 { + if len(preferNumaNodes) > 0 { + topoObj, err := host.SysInfo.Get("topology") + if err != nil { + return errors.Wrap(err, "get topology from host sys_info") + } + hostTopo := new(hostapi.HostTopology) + if err := topoObj.Unmarshal(hostTopo); err != nil { + return errors.Wrap(err, "Unmarshal host topology struct") + } + + if len(groupDevs) == 1 && groupDevs[0].DevPath == "" { + minDistancesDevIdx := -1 + minDistances := math.MaxInt32 + for i := range groupDevs[0].Devs { + if 
groupDevs[0].Devs[i].NumaNode < 0 { + continue + } + devNodeId := groupDevs[0].Devs[i].NumaNode + for j := range hostTopo.Nodes { + if hostTopo.Nodes[j].ID == int(devNodeId) { + devDistance := 0 + for k := range preferNumaNodes { + devDistance += hostTopo.Nodes[j].Distances[preferNumaNodes[k]] + } + if devDistance < minDistances { + minDistances = devDistance + minDistancesDevIdx = i + } + } + } + } + if minDistancesDevIdx >= 0 { + selectedDev = &groupDevs[0].Devs[minDistancesDevIdx] + } + } else { + minDistancesGroupIdx := -1 + minDistances := math.MaxInt32 + log.Infof("devtype %s grouplength %d", groupDevs[0].Devs[0].DevType, len(groupDevs)) + + for i := range groupDevs { + if groupDevs[i].Devs[0].NumaNode < 0 { + continue + } + devNodeId := groupDevs[i].Devs[0].NumaNode + for j := range hostTopo.Nodes { + if hostTopo.Nodes[j].ID == int(devNodeId) { + devDistance := 0 + for k := range preferNumaNodes { + devDistance += hostTopo.Nodes[j].Distances[preferNumaNodes[k]] + } + if devDistance < minDistances { + minDistances = devDistance + minDistancesGroupIdx = i + } + } + } + } + if minDistancesGroupIdx >= 0 { + selectedDev = &groupDevs[minDistancesGroupIdx].Devs[0] + } + } + } + if selectedDev == nil { for i := range groupDevs { - if groupDevs[i].DevPath == "" { + if groupDevs[i].DevPath != "" { for j := range groupDevs[i].Devs { - if groupDevs[i].Devs[j].NumaNode == preferNumaNode { + dev := groupDevs[i].Devs[j] + devAddr := strings.Split(dev.Addr, "-")[0] + if _, ok := usedDevMap[devAddr]; ok { + continue + } else { selectedDev = &groupDevs[i].Devs[j] break } } - } else if groupDevs[i].Devs[0].NumaNode == preferNumaNode { - selectedDev = &groupDevs[i].Devs[0] + } else { + dev := groupDevs[i].Devs[0] + devAddr := strings.Split(dev.Addr, "-")[0] + if _, ok := usedDevMap[devAddr]; ok { + continue + } else { + selectedDev = &groupDevs[i].Devs[0] + } + } + if selectedDev != nil { break } } } + if selectedDev == nil { selectedDev = &groupDevs[0].Devs[0] } + return guest.attachIsolatedDevice(ctx, userCred, selectedDev, devConfig.NetworkIndex, devConfig.DiskIndex) } @@ -758,6 +1046,29 @@ func (manager *SIsolatedDeviceManager) findHostUnusedByDevConfig(model, devType, return manager.findHostUnusedByDevAttr(model, "dev_type", devType, hostId, wireId) } +func (manager *SIsolatedDeviceManager) findHostDevsByDevConfig(model, devType, hostId, wireId string) ([]SIsolatedDevice, error) { + return manager.findHostDevsByDevAttr(model, "dev_type", devType, hostId, wireId) +} +func (manager *SIsolatedDeviceManager) findHostDevsByDevAttr(model, attrKey, attrVal, hostId, wireId string) ([]SIsolatedDevice, error) { + devs := make([]SIsolatedDevice, 0) + q := manager.Query() + q = q.Equals("model", model).Equals("host_id", hostId) + if attrVal != "" { + q.Equals(attrKey, attrVal) + } + if wireId != "" { + wire := WireManager.FetchWireById(wireId) + if wire.VpcId == api.DEFAULT_VPC_ID { + q = q.Equals("wire_id", wireId) + } + } + err := db.FetchModelObjects(manager, q, &devs) + if err != nil { + return nil, err + } + return devs, nil +} + func (manager *SIsolatedDeviceManager) findHostUnusedByDevAttr(model, attrKey, attrVal, hostId, wireId string) ([]SIsolatedDevice, error) { devs := make([]SIsolatedDevice, 0) q := manager.findUnusedQuery() @@ -933,6 +1244,73 @@ func (man *SIsolatedDeviceManager) GetSpecShouldCheckStatus(query *jsonutils.JSO return true, nil } +func (man *SIsolatedDeviceManager) BatchGetModelSpecs(statusCheck bool) (jsonutils.JSONObject, error) { + hostQ := HostManager.Query() + q := 
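// Editor's aside (not part of the patch): when preferNumaNodes is set, the selection
// logic above picks the device whose NUMA node has the smallest summed distance to
// the guest's preferred nodes, using the host topology's distance matrix. A
// self-contained sketch of that choice; numaNode and closestNode are hypothetical
// names, not onecloud APIs.
package main

import "fmt"

type numaNode struct {
	ID        int
	Distances []int // Distances[j] = distance from this node to node j
}

// closestNode returns the candidate node with the minimum total distance
// to the preferred nodes, or -1 if there are no candidates.
func closestNode(topo []numaNode, candidates, preferred []int) int {
	best, bestDist := -1, int(^uint(0)>>1) // start at max int
	for _, c := range candidates {
		for _, n := range topo {
			if n.ID != c {
				continue
			}
			d := 0
			for _, p := range preferred {
				d += n.Distances[p]
			}
			if d < bestDist {
				bestDist, best = d, c
			}
		}
	}
	return best
}

func main() {
	topo := []numaNode{
		{ID: 0, Distances: []int{10, 21}},
		{ID: 1, Distances: []int{21, 10}},
	}
	// Devices exist on both nodes; the guest's memory is pinned to node 1.
	fmt.Println(closestNode(topo, []int{0, 1}, []int{1})) // prints 1
}
// End of editor's aside.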
man.Query("vendor_device_id", "model", "dev_type") + if statusCheck { + q = q.IsNullOrEmpty("guest_id") + hostQ = hostQ.Equals("status", api.BAREMETAL_RUNNING).IsTrue("enabled"). + In("host_type", []string{api.HOST_TYPE_HYPERVISOR, api.HOST_TYPE_CONTAINER, api.HOST_TYPE_ZETTAKIT}) + } + hostSQ := hostQ.SubQuery() + q.Join(hostSQ, sqlchemy.Equals(q.Field("host_id"), hostSQ.Field("id"))) + + q.AppendField(hostSQ.Field("host_type")) + q.GroupBy(hostSQ.Field("host_type"), q.Field("vendor_device_id"), q.Field("model"), q.Field("dev_type")) + q.AppendField(sqlchemy.COUNT("*")) + + rows, err := q.Rows() + if err != nil { + return nil, errors.Wrap(err, "failed get specs") + } + defer rows.Close() + res := jsonutils.NewDict() + + for rows.Next() { + var hostType, vendorDeviceId, m, t string + var count int + if err := rows.Scan(&vendorDeviceId, &m, &t, &hostType, &count); err != nil { + return nil, errors.Wrap(err, "get model spec scan rows") + } + vendor := GetVendorByVendorDeviceId(vendorDeviceId) + specKeys := man.getSpecKeys(vendor, m, t) + specKey := GetSpecIdentKey(specKeys) + spec := man.getSpecByRows(hostType, vendorDeviceId, m, t, &count) + res.Set(specKey, spec) + } + + return res, nil +} + +func (man *SIsolatedDeviceManager) getSpecByRows(hostType, vendorDeviceId, model, devType string, count *int) *jsonutils.JSONDict { + var vdev bool + var hypervisor string + if utils.IsInStringArray(devType, api.VITRUAL_DEVICE_TYPES) { + vdev = true + } + if utils.IsInStringArray(devType, api.VALID_CONTAINER_DEVICE_TYPES) { + hypervisor = api.HYPERVISOR_POD + } else { + hypervisor = api.HYPERVISOR_KVM + } + if hostType == api.HOST_TYPE_ZETTAKIT { + hypervisor = api.HYPERVISOR_ZETTAKIT + } + + ret := jsonutils.NewDict() + ret.Set("virtual_dev", jsonutils.NewBool(vdev)) + ret.Set("hypervisor", jsonutils.NewString(hypervisor)) + ret.Set("dev_type", jsonutils.NewString(devType)) + ret.Set("model", jsonutils.NewString(model)) + ret.Set("pci_id", jsonutils.NewString(vendorDeviceId)) + ret.Set("vendor", jsonutils.NewString(GetVendorByVendorDeviceId(vendorDeviceId))) + if count != nil { + ret.Set("count", jsonutils.NewInt(int64(*count))) + } + + return ret +} + type GpuSpec struct { DevType string `json:"dev_type,allowempty"` Model string `json:"model,allowempty"` @@ -942,34 +1320,17 @@ type GpuSpec struct { } func (self *SIsolatedDevice) GetSpec(statusCheck bool) *jsonutils.JSONDict { + host := self.getHost() if statusCheck { if len(self.GuestId) > 0 { return nil } - host := self.getHost() if host.Status != api.BAREMETAL_RUNNING || !host.GetEnabled() || - (host.HostType != api.HOST_TYPE_HYPERVISOR && host.HostType != api.HOST_TYPE_CONTAINER) { + (host.HostType != api.HOST_TYPE_HYPERVISOR && host.HostType != api.HOST_TYPE_CONTAINER && host.HostType != api.HOST_TYPE_ZETTAKIT) { return nil } } - var vdev bool - var hypervisor string - if utils.IsInStringArray(self.DevType, api.VITRUAL_DEVICE_TYPES) { - vdev = true - } - if utils.IsInStringArray(self.DevType, api.VALID_CONTAINER_DEVICE_TYPES) { - hypervisor = api.HYPERVISOR_POD - } else { - hypervisor = api.HYPERVISOR_KVM - } - ret := jsonutils.NewDict() - ret.Set("virtual_dev", jsonutils.NewBool(vdev)) - ret.Set("hypervisor", jsonutils.NewString(hypervisor)) - ret.Set("dev_type", jsonutils.NewString(self.DevType)) - ret.Set("model", jsonutils.NewString(self.Model)) - ret.Set("pci_id", jsonutils.NewString(self.VendorDeviceId)) - ret.Set("vendor", jsonutils.NewString(self.getVendor())) - return ret + return IsolatedDeviceManager.getSpecByRows(host.HostType, 
self.VendorDeviceId, self.Model, self.DevType, nil) } func (self *SIsolatedDevice) GetGpuSpec() *GpuSpec { @@ -986,6 +1347,10 @@ func (man *SIsolatedDeviceManager) GetSpecIdent(spec *jsonutils.JSONDict) []stri devType, _ := spec.GetString("dev_type") vendor, _ := spec.GetString("vendor") model, _ := spec.GetString("model") + return man.getSpecKeys(vendor, model, devType) +} + +func (man *SIsolatedDeviceManager) getSpecKeys(vendor, model, devType string) []string { keys := []string{ fmt.Sprintf("type:%s", devType), fmt.Sprintf("vendor:%s", vendor), @@ -1128,7 +1493,10 @@ func (manager *SIsolatedDeviceManager) GetAllDevsOnHost(hostId string) ([]SIsola func (manager *SIsolatedDeviceManager) GetUnusedDevsOnHost(hostId string, model string, count int) ([]SIsolatedDevice, error) { devs := make([]SIsolatedDevice, 0) - q := manager.Query().Equals("host_id", hostId).Equals("model", model).IsNullOrEmpty("guest_id").Limit(count) + q := manager.Query().Equals("host_id", hostId).Equals("model", model).IsNullOrEmpty("guest_id") + if count > 0 { + q = q.Limit(count) + } err := db.FetchModelObjects(manager, q, &devs) if err != nil { return nil, err @@ -1238,6 +1606,19 @@ func (model *SIsolatedDevice) GetOwnerId() mcclient.IIdentityProvider { return nil } +func (model *SIsolatedDevice) syncWithCloudIsolateDevice(ctx context.Context, userCred mcclient.TokenCredential, dev cloudprovider.IsolateDevice) error { + _, err := db.Update(model, func() error { + model.Name = dev.GetName() + model.Model = dev.GetModel() + model.Addr = dev.GetAddr() + model.DevType = dev.GetDevType() + model.NumaNode = dev.GetNumaNode() + model.VendorDeviceId = dev.GetVendorDeviceId() + return nil + }) + return err +} + func (model *SIsolatedDevice) SetNetworkIndex(idx int) error { _, err := db.Update(model, func() error { model.NetworkIndex = idx diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/kube_clusters.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/kube_clusters.go index 5666106b..23c00ef7 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/kube_clusters.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/kube_clusters.go @@ -42,6 +42,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=cloud_kube_cluster +// +onecloud:swagger-gen-model-plural=cloud_kube_clusters type SKubeClusterManager struct { db.SEnabledStatusInfrasResourceBaseManager db.SExternalizedResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/kube_node_pools.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/kube_node_pools.go index a94239ee..6faa9f1a 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/kube_node_pools.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/kube_node_pools.go @@ -37,6 +37,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=cloud_kube_node_pool +// +onecloud:swagger-gen-model-plural=cloud_kube_node_pools type SKubeNodePoolManager struct { db.SStatusStandaloneResourceBaseManager db.SExternalizedResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/kube_nodes.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/kube_nodes.go index 65b983b3..6c1a10f4 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/kube_nodes.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/kube_nodes.go @@ -35,6 +35,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=cloud_kube_node +// 
+onecloud:swagger-gen-model-plural=cloud_kube_nodes type SKubeNodeManager struct { db.SStatusStandaloneResourceBaseManager db.SExternalizedResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalanceracls.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalanceracls.go index 8aeab6c2..64e00d12 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalanceracls.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalanceracls.go @@ -36,6 +36,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=loadbalanceracl +// +onecloud:swagger-gen-model-plural=loadbalanceracls type SLoadbalancerAclManager struct { SLoadbalancerLogSkipper diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalanceragents.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalanceragents.go index fec0cf2f..ff0321ab 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalanceragents.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalanceragents.go @@ -44,6 +44,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=loadbalanceragent +// +onecloud:swagger-gen-model-plural=loadbalanceragents type SLoadbalancerAgentManager struct { SLoadbalancerLogSkipper db.SStandaloneResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancerbackendgroups.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancerbackendgroups.go index 16b4ff1c..005e1018 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancerbackendgroups.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancerbackendgroups.go @@ -40,6 +40,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=loadbalancerbackendgroup +// +onecloud:swagger-gen-model-plural=loadbalancerbackendgroups type SLoadbalancerBackendGroupManager struct { SLoadbalancerLogSkipper db.SStatusStandaloneResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancerbackends.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancerbackends.go index d41bcd91..2b9d805c 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancerbackends.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancerbackends.go @@ -40,6 +40,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=loadbalancerbackend +// +onecloud:swagger-gen-model-plural=loadbalancerbackends type SLoadbalancerBackendManager struct { SLoadbalancerLogSkipper db.SStatusStandaloneResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancercertificates.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancercertificates.go index 5d88017f..4d9af777 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancercertificates.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancercertificates.go @@ -44,6 +44,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=loadbalancercertificate +// +onecloud:swagger-gen-model-plural=loadbalancercertificates type SLoadbalancerCertificateManager struct { SLoadbalancerLogSkipper db.SSharableVirtualResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancerclusters.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancerclusters.go index 7b72d342..0981d9f4 100644 --- 
a/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancerclusters.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancerclusters.go @@ -35,6 +35,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=loadbalancercluster +// +onecloud:swagger-gen-model-plural=loadbalancerclusters type SLoadbalancerClusterManager struct { db.SStandaloneResourceBaseManager SZoneResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancerlistenerrules.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancerlistenerrules.go index 69b11cc6..2c88693a 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancerlistenerrules.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancerlistenerrules.go @@ -37,6 +37,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=loadbalancerlistenerrule +// +onecloud:swagger-gen-model-plural=loadbalancerlistenerrules type SLoadbalancerListenerRuleManager struct { SLoadbalancerLogSkipper db.SStatusStandaloneResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancerlisteners.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancerlisteners.go index 740c5ebf..3ebabb20 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancerlisteners.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancerlisteners.go @@ -39,6 +39,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=loadbalancerlistener +// +onecloud:swagger-gen-model-plural=loadbalancerlisteners type SLoadbalancerListenerManager struct { SLoadbalancerLogSkipper db.SStatusStandaloneResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancers.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancers.go index 4e868c3d..7616bf01 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancers.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/loadbalancers.go @@ -749,6 +749,59 @@ func (lb *SLoadbalancer) ValidateUpdateData(ctx context.Context, userCred mcclie return data, nil } +type SLoadbalancerUsageCount struct { + Id string + api.LoadbalancerUsage +} + +func (lm *SLoadbalancerManager) query(manager db.IModelManager, field string, lbIds []string, filter func(*sqlchemy.SQuery) *sqlchemy.SQuery) *sqlchemy.SSubQuery { + q := manager.Query() + + if filter != nil { + q = filter(q) + } + + sq := q.SubQuery() + + return sq.Query( + sq.Field("loadbalancer_id"), + sqlchemy.COUNT(field), + ).In("loadbalancer_id", lbIds).GroupBy(sq.Field("loadbalancer_id")).SubQuery() +} + +func (manager *SLoadbalancerManager) TotalResourceCount(lbIds []string) (map[string]api.LoadbalancerUsage, error) { + // backendGroup + lbgSQ := manager.query(LoadbalancerBackendGroupManager, "backend_group_cnt", lbIds, nil) + // listener + lisSQ := manager.query(LoadbalancerListenerManager, "listener_cnt", lbIds, nil) + + lb := manager.Query().SubQuery() + lbQ := lb.Query( + sqlchemy.SUM("backend_group_count", lbgSQ.Field("backend_group_cnt")), + sqlchemy.SUM("listener_count", lisSQ.Field("listener_cnt")), + ) + + lbQ.AppendField(lbQ.Field("id")) + + lbQ = lbQ.LeftJoin(lbgSQ, sqlchemy.Equals(lbQ.Field("id"), lbgSQ.Field("loadbalancer_id"))) + lbQ = lbQ.LeftJoin(lisSQ, sqlchemy.Equals(lbQ.Field("id"), lisSQ.Field("loadbalancer_id"))) + + lbQ = lbQ.Filter(sqlchemy.In(lbQ.Field("id"), lbIds)).GroupBy(lbQ.Field("id")) + + lbCount := 
[]SLoadbalancerUsageCount{} + err := lbQ.All(&lbCount) + if err != nil { + return nil, errors.Wrapf(err, "lbQ.All") + } + + result := map[string]api.LoadbalancerUsage{} + for i := range lbCount { + result[lbCount[i].Id] = lbCount[i].LoadbalancerUsage + } + + return result, nil +} + func (man *SLoadbalancerManager) FetchCustomizeColumns( ctx context.Context, userCred mcclient.TokenCredential, @@ -838,6 +891,12 @@ func (man *SLoadbalancerManager) FetchCustomizeColumns( }) } + usage, err := man.TotalResourceCount(lbIds) + if err != nil { + log.Errorf("TotalResourceCount error: %v", err) + return rows + } + for i := range rows { eip, ok := eipMap[lbIds[i]] if ok { @@ -850,6 +909,7 @@ func (man *SLoadbalancerManager) FetchCustomizeColumns( rows[i].BackendGroup = bg } rows[i].Secgroups, _ = groups[lbIds[i]] + rows[i].LoadbalancerUsage, _ = usage[lbIds[i]] } return rows diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/mongodb.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/mongodb.go index 23038a14..41b76577 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/mongodb.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/mongodb.go @@ -43,6 +43,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=mongodb +// +onecloud:swagger-gen-model-plural=mongodbs type SMongoDBManager struct { db.SVirtualResourceBaseManager db.SExternalizedResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/natdtable.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/natdtable.go index 0bde0112..21b0a1ee 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/natdtable.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/natdtable.go @@ -34,6 +34,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=natdentry +// +onecloud:swagger-gen-model-plural=natdentries type SNatDEntryManager struct { SNatEntryManager } diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/natgateways.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/natgateways.go index 43c96f0b..a58aed8f 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/natgateways.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/natgateways.go @@ -43,6 +43,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=natgateway +// +onecloud:swagger-gen-model-plural=natgateways type SNatGatewayManager struct { db.SStatusInfrasResourceBaseManager db.SExternalizedResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/natstable.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/natstable.go index c4845f89..0233e097 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/natstable.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/natstable.go @@ -34,6 +34,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=natsentry +// +onecloud:swagger-gen-model-plural=natsentries type SNatSEntryManager struct { SNatEntryManager SNetworkResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/net_tap_flows.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/net_tap_flows.go index bee59449..5606d3fa 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/net_tap_flows.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/net_tap_flows.go @@ -34,6 +34,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=tap_flow +//
+onecloud:swagger-gen-model-plural=tap_flows type SNetTapFlowManager struct { db.SEnabledStatusStandaloneResourceBaseManager } diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/net_tap_services.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/net_tap_services.go index 7005d307..96884cc5 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/net_tap_services.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/net_tap_services.go @@ -37,6 +37,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=tap_service +// +onecloud:swagger-gen-model-plural=tap_services type SNetTapServiceManager struct { db.SEnabledStatusStandaloneResourceBaseManager } diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/netinterfaces.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/netinterfaces.go index fe849101..84b8dc9b 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/netinterfaces.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/netinterfaces.go @@ -56,7 +56,7 @@ type SNetInterface struct { // Bridge名称 Bridge string `width:"64" charset:"ascii" nullable:"true"` // 接口名称 - Interface string `width:"16" charset:"ascii" nullable:"true"` + Interface string `width:"64" charset:"ascii" nullable:"true"` } // +onecloud:swagger-gen-ignore diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/networkaddresses.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/networkaddresses.go index 77a20ece..4353abd6 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/networkaddresses.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/networkaddresses.go @@ -37,6 +37,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=networkaddress +// +onecloud:swagger-gen-model-plural=networkaddresses type SNetworkAddressManager struct { db.SStandaloneAnonResourceBaseManager SNetworkResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/networkinterfaces.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/networkinterfaces.go index f4272fa6..f44a0bfd 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/networkinterfaces.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/networkinterfaces.go @@ -33,6 +33,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=networkinterface +// +onecloud:swagger-gen-model-plural=networkinterfaces type SNetworkInterfaceManager struct { db.SStatusInfrasResourceBaseManager db.SExternalizedResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/networks.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/networks.go index ff91a69e..e618a27e 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/networks.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/networks.go @@ -516,8 +516,10 @@ func (snet *SNetwork) GetNTP() string { func (snet *SNetwork) GetDomain() string { if len(snet.GuestDomain) > 0 { return snet.GuestDomain - } else { + } else if !apis.IsIllegalSearchDomain(options.Options.DNSDomain) { return options.Options.DNSDomain + } else { + return "" } } diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/pod_driver.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/pod_driver.go index 3fff890c..70542294 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/pod_driver.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/pod_driver.go @@ -44,5 +44,8 @@ type IPodDriver interface { RequestCommitContainer(ctx context.Context, 
userCred mcclient.TokenCredential, task IContainerTask) error RequestSaveVolumeMountImage(ctx context.Context, userCred mcclient.TokenCredential, task IContainerTask) error RequestExecSyncContainer(ctx context.Context, userCred mcclient.TokenCredential, ctr *SContainer, input *compute.ContainerExecSyncInput) (jsonutils.JSONObject, error) - RequestSetContainerResourcesLimit(ctx context.Context, cred mcclient.TokenCredential, c *SContainer, limit *apis.ContainerResources) (jsonutils.JSONObject, error) + RequestSetContainerResourcesLimit(ctx context.Context, userCred mcclient.TokenCredential, c *SContainer, limit *apis.ContainerResources) (jsonutils.JSONObject, error) + + RequestAddVolumeMountPostOverlay(ctx context.Context, userCred mcclient.TokenCredential, task IContainerTask) error + RequestRemoveVolumeMountPostOverlay(ctx context.Context, userCred mcclient.TokenCredential, task IContainerTask) error } diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/purge.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/purge.go index 236ce33d..97fa9ea4 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/purge.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/purge.go @@ -63,6 +63,10 @@ func (self *SCloudregion) purgeAll(ctx context.Context, managerId string) error if err != nil { return errors.Wrapf(err, "purgeResources") } + err = self.purgeQuotas(ctx, managerId) + if err != nil { + return errors.Wrapf(err, "purgeQuotas") + } // fix #20036 avoid zone residue caused by undeleted regional subnets zones, err := self.GetZones() @@ -268,6 +272,20 @@ func (self *SNatGateway) purge(ctx context.Context, userCred mcclient.TokenCrede return self.SInfrasResourceBase.Delete(ctx, userCred) } +func (self *SCloudregion) purgeQuotas(ctx context.Context, managerId string) error { + quotas := CloudproviderQuotaManager.Query("id").Equals("manager_id", managerId).Equals("cloudregion_id", self.Id) + pairs := []purgePair{ + {manager: CloudproviderQuotaManager, key: "id", q: quotas}, + } + for i := range pairs { + err := pairs[i].purgeAll(ctx) + if err != nil { + return err + } + } + return nil +} + func (self *SCloudregion) purgeResources(ctx context.Context, managerId string) error { buckets := BucketManager.Query("id").Equals("manager_id", managerId).Equals("cloudregion_id", self.Id) ess := ElasticSearchManager.Query("id").Equals("manager_id", managerId).Equals("cloudregion_id", self.Id) @@ -903,8 +921,10 @@ func (cprvd *SCloudprovider) purge(ctx context.Context, userCred mcclient.TokenC dnszones := DnsZoneManager.Query("id").Equals("manager_id", cprvd.Id) records := DnsRecordManager.Query("id").In("dns_zone_id", dnszones.SubQuery()) dnsVpcs := DnsZoneVpcManager.Query("row_id").In("dns_zone_id", dnszones.SubQuery()) + quotas := CloudproviderQuotaManager.Query("id").Equals("manager_id", cprvd.Id) pairs := []purgePair{ + {manager: CloudproviderQuotaManager, key: "id", q: quotas}, {manager: DnsZoneVpcManager, key: "row_id", q: dnsVpcs}, {manager: DnsRecordManager, key: "id", q: records}, {manager: DnsZoneManager, key: "id", q: dnszones}, diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/regiondrivers.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/regiondrivers.go index de20ac5c..5ef67fc4 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/regiondrivers.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/regiondrivers.go @@ -101,6 +101,7 @@ type ISecurityGroupDriver interface { RequestCreateSecurityGroup(ctx context.Context, userCred mcclient.TokenCredential, secgroup *SSecurityGroup, rules 
api.SSecgroupRuleResourceSet) error // Filter by whether the security group belongs to a vpc or a region GetSecurityGroupFilter(vpc *SVpc) (func(q *sqlchemy.SQuery) *sqlchemy.SQuery, error) + GetDefaultSecurityGroupNamePrefix() string CreateDefaultSecurityGroup(ctx context.Context, userCred mcclient.TokenCredential, ownerId mcclient.IIdentityProvider, vpc *SVpc) (*SSecurityGroup, error) RequestPrepareSecurityGroups(ctx context.Context, userCred mcclient.TokenCredential, ownerId mcclient.IIdentityProvider, secgroups []SSecurityGroup, vpc *SVpc, callback func(ids []string) error, task taskman.ITask) error RequestDeleteSecurityGroup(ctx context.Context, userCred mcclient.TokenCredential, secgroup *SSecurityGroup, task taskman.ITask) error diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/resource_syncstatus.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/resource_syncstatus.go index 08ac6833..3b879bb5 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/resource_syncstatus.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/resource_syncstatus.go @@ -43,6 +43,5 @@ func StartResourceSyncStatusTask(ctx context.Context, userCred mcclient.TokenCre return err } obj.SetStatus(ctx, userCred, apis.STATUS_SYNC_STATUS, "perform_syncstatus") - task.ScheduleRun(nil) - return nil + return task.ScheduleRun(nil) } diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/scaling_activity.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/scaling_activity.go index 9b798f7d..a887ab8d 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/scaling_activity.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/scaling_activity.go @@ -29,6 +29,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=scalingactivity +// +onecloud:swagger-gen-model-plural=scalingactivities type SScalingActivityManager struct { db.SStatusStandaloneResourceBaseManager SScalingGroupResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/scaling_group.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/scaling_group.go index ad53f568..1a835e11 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/scaling_group.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/scaling_group.go @@ -39,6 +39,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=scalinggroup +// +onecloud:swagger-gen-model-plural=scalinggroups type SScalingGroupManager struct { db.SVirtualResourceBaseManager SCloudregionResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/scaling_policy.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/scaling_policy.go index 8589341f..8241e54a 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/scaling_policy.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/scaling_policy.go @@ -36,6 +36,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=scalingpolicy +// +onecloud:swagger-gen-model-plural=scalingpolicies type SScalingPolicyManager struct { db.SVirtualResourceBaseManager SScalingGroupResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/secgrouprules.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/secgrouprules.go index 4ff29a33..2707ae61 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/secgrouprules.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/secgrouprules.go @@ -42,6 +42,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +//
+onecloud:swagger-gen-model-singular=secgrouprule +// +onecloud:swagger-gen-model-plural=secgrouprules type SSecurityGroupRuleManager struct { db.SResourceBaseManager db.SStatusResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/secgroups.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/secgroups.go index 76326a48..cc8d4c58 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/secgroups.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/secgroups.go @@ -50,6 +50,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=secgroup +// +onecloud:swagger-gen-model-plural=secgroups type SSecurityGroupManager struct { db.SSharableVirtualResourceBaseManager db.SExternalizedResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/server_skus.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/server_skus.go index 796517da..4c590b46 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/server_skus.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/server_skus.go @@ -49,6 +49,8 @@ import ( "yunion.io/x/onecloud/pkg/util/yunionmeta" ) +// +onecloud:swagger-gen-model-singular=serversku +// +onecloud:swagger-gen-model-plural=serverskus type SServerSkuManager struct { db.SEnabledStatusStandaloneResourceBaseManager SCloudregionResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/service_catalog.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/service_catalog.go index 2f66af5f..50266e33 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/service_catalog.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/service_catalog.go @@ -33,6 +33,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=servicecatalog +// +onecloud:swagger-gen-model-plural=servicecatalogs type SServiceCatalogManager struct { db.SSharableVirtualResourceBaseManager } diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/snapshotpolicy.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/snapshotpolicy.go index 6a10e2cd..7e864ee3 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/snapshotpolicy.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/snapshotpolicy.go @@ -39,6 +39,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=snapshotpolicy +// +onecloud:swagger-gen-model-plural=snapshotpolicies type SSnapshotPolicyManager struct { db.SVirtualResourceBaseManager db.SExternalizedResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/snapshots.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/snapshots.go index 0fa27f31..c87e8bb4 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/snapshots.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/snapshots.go @@ -206,6 +206,11 @@ func (manager *SSnapshotManager) ListItemFilter( q = q.In("disk_id", gdq) } + if query.Unused { + sq := DiskManager.Query("id").Distinct().SubQuery() + q = q.NotIn("disk_id", sq) + } + return q, nil } diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/specs.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/specs.go index 7696ddba..cceed013 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/specs.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/specs.go @@ -38,6 +38,10 @@ type ISpecModel interface { GetSpec(statusCheck bool) *jsonutils.JSONDict } +type IBatchSpecModel interface { + BatchGetModelSpecs(statusCheck bool) 
(jsonutils.JSONObject, error) +} + func GetAllModelSpecs(ctx context.Context, userCred mcclient.TokenCredential, query *jsonutils.JSONDict) (jsonutils.JSONObject, error) { mans := []ISpecModelManager{HostManager, IsolatedDeviceManager, GuestManager} return GetModelsSpecs(ctx, userCred, query, mans...) @@ -80,15 +84,20 @@ func GetModelSpec(manager ISpecModelManager, model ISpecModel) (jsonutils.JSONOb } func getModelSpecs(manager ISpecModelManager, ctx context.Context, userCred mcclient.TokenCredential, query *jsonutils.JSONDict) (jsonutils.JSONObject, error) { - items, err := ListItems(manager, ctx, userCred, query) + statusCheck, err := manager.GetSpecShouldCheckStatus(query) if err != nil { return nil, err } - retDict := jsonutils.NewDict() - statusCheck, err := manager.GetSpecShouldCheckStatus(query) + if bm, ok := manager.(IBatchSpecModel); ok { + return bm.BatchGetModelSpecs(statusCheck) + } + + items, err := ListItems(manager, ctx, userCred, query) if err != nil { return nil, err } + retDict := jsonutils.NewDict() + for _, obj := range items { specObj := obj.(ISpecModel) spec := specObj.GetSpec(statusCheck) diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/sslcertificate.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/sslcertificate.go index 011a8ef3..4f96a951 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/sslcertificate.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/sslcertificate.go @@ -33,6 +33,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=sslcertificate +// +onecloud:swagger-gen-model-plural=sslcertificates type SSSLCertificateManager struct { db.SVirtualResourceBaseManager db.SExternalizedResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/storageresource.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/storageresource.go index 53542432..508c2651 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/storageresource.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/storageresource.go @@ -128,6 +128,17 @@ func (manager *SStorageResourceBaseManager) ListItemFilter( } q = q.Equals("storage_id", storageObj.GetId()) } + if len(query.StorageHostId) > 0 { + hostInput := api.HostResourceInput{} + hostInput.HostId = query.StorageHostId + var err error + _, hostInput, err = ValidateHostResourceInput(ctx, userCred, hostInput) + if err != nil { + return nil, errors.Wrap(err, "ValidateHostResourceInput") + } + hostStoragesQ := HoststorageManager.Query().Equals("host_id", hostInput.HostId).SubQuery() + q = q.Join(hostStoragesQ, sqlchemy.Equals(q.Field("storage_id"), hostStoragesQ.Field("storage_id"))) + } subq := StorageManager.Query("id").Snapshot() subq, err := manager.SZoneResourceBaseManager.ListItemFilter(ctx, subq, userCred, query.ZonalFilterListInput) if err != nil { diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/storages.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/storages.go index 6bac8b2f..1a55b240 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/storages.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/storages.go @@ -96,7 +96,7 @@ type SStorage struct { StoragecacheId string `width:"36" charset:"ascii" nullable:"true" list:"domain" get:"domain" update:"domain" create:"domain_optional"` // master host id - MasterHost string `width:"36" charset:"ascii" nullable:"true" list:"user" create:"optional" update:"user" json:"master_host"` + MasterHost string `width:"36" charset:"ascii" nullable:"true" list:"user" 
json:"master_host"` // indicating whether system disk can be allocated in this storage // whether it can be used as system disk storage @@ -121,7 +121,7 @@ func (self *SStorage) ValidateUpdateData(ctx context.Context, userCred mcclient. if self.StorageConf != nil { confs, _ := self.StorageConf.GetMap() for k, v := range confs { - if input.StorageConf.Contains(k) { + if !input.StorageConf.Contains(k) { continue } input.StorageConf.Set(k, v) @@ -618,6 +618,21 @@ func (manager *SStorageManager) FetchCustomizeColumns( } storage := objs[i].(*SStorage) storageIds[i] = storage.Id + if rows[i].ManagerId == "" && rows[i].MasterHost == "" && + utils.IsInStringArray(storage.StorageType, api.SHARED_STORAGE) { + if host, err := storage.GetMasterHost(); host != nil { + rows[i].MasterHost = host.Id + rows[i].MasterHostName = host.Name + } else { + log.Errorf("storage %s failed get master host %s", storageIds[i], err) + } + } + if rows[i].MasterHost != "" && rows[i].MasterHostName == "" { + if host := HostManager.FetchHostById(rows[i].MasterHost); host != nil { + rows[i].MasterHostName = host.Name + } + } + rows[i].Capacity = storage.GetCapacity() rows[i].VCapacity = int64(float32(rows[i].Capacity) * storage.GetOvercommitBound()) rows[i].ActualUsed = storage.ActualCapacityUsed @@ -750,6 +765,13 @@ func (self *SStorage) GetOvercommitBound() float32 { } func (self *SStorage) GetMasterHost() (*SHost, error) { + if self.MasterHost != "" { + host := HostManager.FetchHostById(self.MasterHost) + if host != nil && host.Enabled.IsTrue() && host.HostStatus == api.HOST_ONLINE { + return host, nil + } + } + hosts := HostManager.Query().SubQuery() hoststorages := HoststorageManager.Query().SubQuery() @@ -757,18 +779,31 @@ func (self *SStorage) GetMasterHost() (*SHost, error) { q = q.Filter(sqlchemy.Equals(hoststorages.Field("storage_id"), self.Id)) q = q.IsTrue("enabled") q = q.Equals("host_status", api.HOST_ONLINE).Asc("id") - if self.MasterHost != "" { - q.Equals("id", self.MasterHost) - } + host := SHost{} host.SetModelManager(HostManager, &host) err := q.First(&host) if err != nil { return nil, errors.Wrapf(err, "q.First") } + + if utils.IsInStringArray(self.StorageType, api.SHARED_STORAGE) { + if err := self.UpdateMasterHost(host.Id); err != nil { + log.Errorf("storage %s update master host failed %s: %s", self.GetName(), host.Id, err) + } + } + return &host, nil } +func (self *SStorage) UpdateMasterHost(hostId string) error { + _, err := db.Update(self, func() error { + self.MasterHost = hostId + return nil + }) + return err +} + func (self *SStorage) GetZoneId() string { if len(self.ZoneId) > 0 { return self.ZoneId @@ -1275,6 +1310,7 @@ func (manager *SStorageManager) totalCapacityQ( storages.Field("capacity"), storages.Field("reserved"), storages.Field("cmtbound"), + storages.Field("actual_capacity_used"), storages.Field("storage_type"), storages.Field("medium_type"), stmt.Field("used_capacity"), @@ -1298,6 +1334,7 @@ type StorageStat struct { Capacity int Reserved int Cmtbound float32 + ActualCapacityUsed int64 StorageType string MediumType string UsedCapacity int @@ -1311,16 +1348,17 @@ type StorageStat struct { } type StoragesCapacityStat struct { - Capacity int64 - CapacityVirtual float64 - CapacityUsed int64 - CountUsed int - CapacityUnready int64 - CountUnready int - AttachedCapacity int64 - CountAttached int - DetachedCapacity int64 - CountDetached int + Capacity int64 + CapacityVirtual float64 + CapacityUsed int64 + ActualCapacityUsed int64 + CountUsed int + CapacityUnready int64 + CountUnready int + AttachedCapacity int64 +
CountAttached int + DetachedCapacity int64 + CountDetached int MediumeCapacity map[string]int64 StorageTypeCapacity map[string]int64 @@ -1342,6 +1380,7 @@ func (manager *SStorageManager) calculateCapacity(q *sqlchemy.SQuery) StoragesCa tCapa int64 = 0 tVCapa float64 = 0 tUsed int64 = 0 + aUsed int64 = 0 cUsed int = 0 tFailed int64 = 0 cFailed int = 0 @@ -1384,6 +1423,7 @@ func (manager *SStorageManager) calculateCapacity(q *sqlchemy.SQuery) StoragesCa tVCapa += float64(stat.Capacity-stat.Reserved) * float64(stat.Cmtbound) mCapaUsed, sCapaUsed = add(mCapaUsed, sCapaUsed, stat.MediumType, stat.StorageType, int64(stat.UsedCapacity)) tUsed += int64(stat.UsedCapacity) + aUsed += int64(stat.ActualCapacityUsed) cUsed += stat.UsedCount tFailed += int64(stat.FailedCapacity) mFailed, sFailed = add(mFailed, sFailed, stat.MediumType, stat.StorageType, int64(stat.FailedCapacity)) @@ -1401,6 +1441,7 @@ func (manager *SStorageManager) calculateCapacity(q *sqlchemy.SQuery) StoragesCa StorageTypeCapacity: sCapa, CapacityVirtual: tVCapa, CapacityUsed: tUsed, + ActualCapacityUsed: aUsed, MediumeCapacityUsed: mCapaUsed, StorageTypeCapacityUsed: sCapaUsed, CountUsed: cUsed, @@ -1454,7 +1495,9 @@ func (self *SStorage) createDisk(ctx context.Context, name string, diskConfig *a disk.SetModelManager(DiskManager, &disk) disk.Name = name - disk.fetchDiskInfo(diskConfig) + if err := disk.fetchDiskInfo(diskConfig); err != nil { + return nil, errors.Wrap(err, "fetchDiskInfo") + } disk.StorageId = self.Id disk.AutoDelete = autoDelete @@ -2044,3 +2087,33 @@ func (storage *SStorage) GetDetailsHardwareInfo(ctx context.Context, userCred mc func (storage *SStorage) PerformSetHardwareInfo(ctx context.Context, userCred mcclient.TokenCredential, _ jsonutils.JSONObject, data *api.StorageHardwareInfo) (*api.StorageHardwareInfo, error) { return data, storage.setHardwareInfo(ctx, userCred, data) } + +func StoragesCleanRecycleDiskfiles(ctx context.Context, userCred mcclient.TokenCredential, isStart bool) { + // get shared storages + q := StorageManager.Query().IsNullOrEmpty("manager_id") + q = q.In("storage_type", api.SHARED_STORAGE) + + storages := make([]SStorage, 0) + err := q.All(&storages) + if err != nil { + log.Errorf("StoragesCleanRecycleDiskfiles failed get storages %s", err) + return + } + + for i := range storages { + storages[i].SetModelManager(StorageManager, &storages[i]) + log.Infof("storage %s start clean recycle diskfiles", storages[i].GetName()) + host, err := storages[i].GetMasterHost() + if err != nil { + log.Errorf("StoragesCleanRecycleDiskfiles storage %s failed get master host: %s", storages[i].GetName(), err) + continue + } + url := fmt.Sprintf("/storages/%s/clean-recycle-diskfiles", storages[i].Id) + body := jsonutils.NewDict() + _, err = host.Request(ctx, userCred, "POST", url, mcclient.GetTokenHeaders(userCred), body) + if err != nil { + log.Errorf("StoragesCleanRecycleDiskfiles storage %s request failed %s", storages[i].GetName(), err) + continue + } + } +} diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/vpcs.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/vpcs.go index 1671dbf5..14c21ddc 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/vpcs.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/vpcs.go @@ -2036,14 +2036,14 @@ func (self *SVpc) CheckSecurityGroupConsistent(secgroup *SSecurityGroup) error { if secgroup.VpcId != self.Id { return httperrors.NewInvalidStatusError("The security group does not belong to the vpc") } - } else if len(secgroup.CloudregionId) > 0 { - if 
secgroup.CloudregionId != self.CloudregionId { - return httperrors.NewInvalidStatusError("The security group and vpc are in different areas") - } } else if len(secgroup.GlobalvpcId) > 0 { if secgroup.GlobalvpcId != self.GlobalvpcId { return httperrors.NewInvalidStatusError("The security group and vpc are in different global vpc") } + } else if len(secgroup.CloudregionId) > 0 { + if secgroup.CloudregionId != self.CloudregionId { + return httperrors.NewInvalidStatusError("The security group and vpc are in different areas") + } } return nil } @@ -2059,7 +2059,7 @@ func (self *SVpc) GetSecurityGroups() ([]SSecurityGroup, error) { } func (self *SVpc) GetDefaultSecurityGroup(ownerId mcclient.IIdentityProvider, filter func(q *sqlchemy.SQuery) *sqlchemy.SQuery) (*SSecurityGroup, error) { - q := SecurityGroupManager.Query().Equals("status", api.SECGROUP_STATUS_READY).Like("name", "default%") + q := SecurityGroupManager.Query().Equals("status", api.SECGROUP_STATUS_READY).Like("name", "%"+"default"+"%") q = filter(q) q = q.Filter( diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/models/waf_regexsets.go b/vendor/yunion.io/x/onecloud/pkg/compute/models/waf_regexsets.go index 70a7b21c..eb96ac1a 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/models/waf_regexsets.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/models/waf_regexsets.go @@ -35,6 +35,8 @@ import ( "yunion.io/x/onecloud/pkg/util/stringutils2" ) +// +onecloud:swagger-gen-model-singular=waf_regexset +// +onecloud:swagger-gen-model-plural=waf_regexsets type SWafRegexSetManager struct { db.SStatusInfrasResourceBaseManager db.SExternalizedResourceBaseManager diff --git a/vendor/yunion.io/x/onecloud/pkg/compute/options/options.go b/vendor/yunion.io/x/onecloud/pkg/compute/options/options.go index 83e25f85..0ac27eec 100644 --- a/vendor/yunion.io/x/onecloud/pkg/compute/options/options.go +++ b/vendor/yunion.io/x/onecloud/pkg/compute/options/options.go @@ -142,7 +142,7 @@ type ComputeOptions struct { CloudSyncWorkerCount int `help:"how many current synchronization threads" default:"5"` CloudProviderSyncWorkerCount int `help:"how many current providers synchronize their regions, practically no limit" default:"10"` - CloudAutoSyncIntervalSeconds int `help:"frequency to check auto sync tasks" default:"30"` + CloudAutoSyncIntervalSeconds int `help:"frequency to check auto sync tasks" default:"300"` DefaultSyncIntervalSeconds int `help:"minimal synchronization interval, default 15 minutes" default:"900"` MaxCloudAccountErrorCount int `help:"maximal consecutive error count allow for a cloud account" default:"5"` diff --git a/vendor/yunion.io/x/onecloud/pkg/hostman/guestman/desc/desc.go b/vendor/yunion.io/x/onecloud/pkg/hostman/guestman/desc/desc.go index ee550509..35a7533a 100644 --- a/vendor/yunion.io/x/onecloud/pkg/hostman/guestman/desc/desc.go +++ b/vendor/yunion.io/x/onecloud/pkg/hostman/guestman/desc/desc.go @@ -60,7 +60,8 @@ type SCpuNumaPin struct { Unregular bool NodeId *uint16 `json:",omitempty"` - VcpuPin []SVCpuPin `json:",omitempty"` + VcpuPin []SVCpuPin `json:",omitempty"` + ExtraCpuCount int `json:"extra_cpu_count"` } type SVCpuPin struct { diff --git a/vendor/yunion.io/x/onecloud/pkg/hostman/hostutils/hostutils.go b/vendor/yunion.io/x/onecloud/pkg/hostman/hostutils/hostutils.go index fdb557cd..b9da93d8 100644 --- a/vendor/yunion.io/x/onecloud/pkg/hostman/hostutils/hostutils.go +++ b/vendor/yunion.io/x/onecloud/pkg/hostman/hostutils/hostutils.go @@ -71,11 +71,13 @@ type IHost interface { IsX8664() bool GetHostTopology() 
*hostapi.HostTopology GetReservedCpusInfo() *cpuset.CPUSet + GetReservedMemMb() int IsHugepagesEnabled() bool HugepageSizeKb() int IsNumaAllocateEnabled() bool - CpuCmtBound() int + CpuCmtBound() float32 + MemCmtBound() float32 IsKvmSupport() bool IsNestedVirtualization() bool diff --git a/vendor/yunion.io/x/onecloud/pkg/hostman/isolated_device/isolated_device.go b/vendor/yunion.io/x/onecloud/pkg/hostman/isolated_device/isolated_device.go index 98c17a4b..7f04dffe 100644 --- a/vendor/yunion.io/x/onecloud/pkg/hostman/isolated_device/isolated_device.go +++ b/vendor/yunion.io/x/onecloud/pkg/hostman/isolated_device/isolated_device.go @@ -38,14 +38,39 @@ const ( ) type CloudDeviceInfo struct { - Id string `json:"id"` - GuestId string `json:"guest_id"` - HostId string `json:"host_id"` - DevType string `json:"dev_type"` - VendorDeviceId string `json:"vendor_device_id"` - Addr string `json:"addr"` - DetectedOnHost bool `json:"detected_on_host"` - MdevId string `json:"mdev_id"` + Id string `json:"id"` + GuestId string `json:"guest_id"` + HostId string `json:"host_id"` + DevType string `json:"dev_type"` + VendorDeviceId string `json:"vendor_device_id"` + Addr string `json:"addr"` + DetectedOnHost bool `json:"detected_on_host"` + MdevId string `json:"mdev_id"` + Model string `json:"model"` + WireId string `json:"wire_id"` + OvsOffloadInterface string `json:"ovs_offload_interface"` + IsInfinibandNic bool `json:"is_infiniband_nic"` + NvmeSizeMB int `json:"nvme_size_mb"` + DevicePath string `json:"device_path"` + MpsMemoryLimit int `json:"mps_memory_limit"` + MpsMemoryTotal int `json:"mps_memory_total"` + MpsThreadPercentage int `json:"mps_thread_percentage"` + NumaNode int `json:"numa_node"` + PcieInfo *api.IsolatedDevicePCIEInfo `json:"pcie_info"` + + // The frame rate limiter (FRL) configuration in frames per second + FRL string `json:"frl"` + // The frame buffer size in Mbytes + Framebuffer string `json:"framebuffer"` + // The maximum resolution per display head, eg: 5120x2880 + MaxResolution string `json:"max_resolution"` + // The maximum number of virtual display heads that the vGPU type supports + // In computer graphics and display technology, the term "head" is commonly used to + // describe the physical interface of a display device or display output. + // It refers to a connection point on the monitor, such as HDMI, DisplayPort, or VGA interface. 
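+ // A display head is thus one virtual monitor output; NumHeads below bounds how many such outputs a single vGPU instance of this type can drive.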
+ NumHeads string `json:"num_heads"` + // The maximum number of vGPU instances per physical GPU + MaxInstance string `json:"max_instance"` } type IHost interface { @@ -127,6 +152,7 @@ type IsolatedDeviceManager interface { BatchCustomProbe() AppendDetachedDevice(dev *CloudDeviceInfo) GetQemuParams(devAddrs []string) *QemuParams + CheckDevIsNeedUpdate(dev IDevice, devInfo *CloudDeviceInfo) bool } type isolatedDeviceManager struct { @@ -454,6 +480,54 @@ func (man *isolatedDeviceManager) getSession() *mcclient.ClientSession { return man.host.GetSession() } +func (man *isolatedDeviceManager) CheckDevIsNeedUpdate(dev IDevice, devInfo *CloudDeviceInfo) bool { + if dev.GetDeviceType() != devInfo.DevType { + return true + } + if dev.GetModelName() != devInfo.Model { + return true + } + if dev.GetWireId() != devInfo.WireId { + return true + } + if dev.IsInfinibandNic() != devInfo.IsInfinibandNic { + return true + } + if dev.GetOvsOffloadInterfaceName() != devInfo.OvsOffloadInterface { + return true + } + if dev.GetNVMESizeMB() > 0 && devInfo.NvmeSizeMB > 0 && dev.GetNVMESizeMB() != devInfo.NvmeSizeMB { + return true + } + if numaNode, _ := dev.GetNumaNode(); numaNode != devInfo.NumaNode { + return true + } + if dev.GetMdevId() != devInfo.MdevId { + return true + } + if info := dev.GetPCIEInfo(); info != nil && devInfo.PcieInfo == nil { + return true + } + if profile := dev.GetNVIDIAVgpuProfile(); profile != nil { + if val, _ := profile["frl"]; val != devInfo.FRL { + return true + } + if val, _ := profile["framebuffer"]; val != devInfo.Framebuffer { + return true + } + if val, _ := profile["max_resolution"]; val != devInfo.MaxResolution { + return true + } + if val, _ := profile["num_heads"]; val != devInfo.NumHeads { + return true + } + if val, _ := profile["max_instance"]; val != devInfo.MaxInstance { + return true + } + } + return false +} + func (man *isolatedDeviceManager) GetDeviceByIdent(vendorDevId, addr, mdevId string) IDevice { for _, dev := range man.devices { if dev.GetVendorDeviceId() == vendorDevId && dev.GetAddr() == addr && dev.GetMdevId() == mdevId { @@ -528,6 +602,7 @@ func (man *isolatedDeviceManager) GetQemuParams(devAddrs []string) *QemuParams { type SBaseDevice struct { dev *PCIDevice + originAddr string cloudId string hostId string guestId string @@ -577,12 +652,17 @@ func (dev *SBaseDevice) SetDeviceInfo(info CloudDeviceInfo) { } } -func SyncDeviceInfo(session *mcclient.ClientSession, hostId string, dev IDevice) (jsonutils.JSONObject, error) { +func SyncDeviceInfo(session *mcclient.ClientSession, hostId string, dev IDevice, needUpdate bool) (jsonutils.JSONObject, error) { if len(dev.GetHostId()) == 0 { dev.SetHostId(hostId) } data := GetApiResourceData(dev) if len(dev.GetCloudId()) != 0 { + if !needUpdate { + log.Infof("Update %s isolated_device: do nothing", dev.GetCloudId()) + return nil, nil + } + log.Infof("Update %s isolated_device: %s", dev.GetCloudId(), data.String()) return modules.IsolatedDevices.Update(session, dev.GetCloudId(), data) } @@ -602,7 +682,15 @@ func (dev *SBaseDevice) GetAddr() string { return dev.dev.Addr } -func (dev *SBaseDevice) SetAddr(addr string) { +func (dev *SBaseDevice) GetOriginAddr() string { + if dev.originAddr != "" { + return dev.originAddr + } + return dev.dev.Addr +} + +func (dev *SBaseDevice) SetAddr(addr, originAddr string) { + dev.originAddr = originAddr dev.dev.Addr = addr } diff --git a/vendor/yunion.io/x/onecloud/pkg/hostman/options/options.go b/vendor/yunion.io/x/onecloud/pkg/hostman/options/options.go index 02dcb651..5a5b38f3 
100644 --- a/vendor/yunion.io/x/onecloud/pkg/hostman/options/options.go +++ b/vendor/yunion.io/x/onecloud/pkg/hostman/options/options.go @@ -55,6 +55,11 @@ type SHostBaseOptions struct { ImageCacheCleanupPercentage int `help:"The cleanup threshold ratio of image cache size v.s. total storage size" default:"12"` ImageCacheCleanupOnStartup bool `help:"Cleanup image cache on host startup" default:"false"` ImageCacheCleanupDryRun bool `help:"Dry run cleanup image cache" default:"false"` + + TelegrafKafkaOutputTopic string `json:"telegraf_kafka_output_topic" help:"telegraf kafka output topic"` + TelegrafKafkaOutputSaslUsername string `json:"telegraf_kafka_output_sasl_username" help:"telegraf kafka output sasl_username"` + TelegrafKafkaOutputSaslPassword string `json:"telegraf_kafka_output_sasl_password" help:"telegraf kafka output sasl_password"` + TelegrafKafkaOutputSaslMechanism string `json:"telegraf_kafka_output_sasl_mechanism" help:"telegraf kafka output sasl_mechanism"` } type SHostOptions struct { @@ -105,10 +110,11 @@ type SHostOptions struct { LinuxDefaultRootUser bool `help:"Default account for linux system is root"` WindowsDefaultAdminUser bool `default:"true" help:"Default account for Windows system is Administrator"` - BlockIoScheduler string `help:"Block IO scheduler, deadline or cfq" default:"deadline"` - EnableKsm bool `help:"Enable Kernel Same Page Merging"` - HugepagesOption string `help:"Hugepages option: disable|native|transparent" default:"transparent"` - HugepageSizeMb int `help:"hugepage size mb default 1G" default:"1024"` + BlockIoScheduler string `help:"HDD Block IO scheduler, deadline or cfq" default:"deadline"` + SsdBlockIoScheduler string `help:"SSD Block IO scheduler, none deadline or cfq" default:"none"` + EnableKsm bool `help:"Enable Kernel Same Page Merging"` + HugepagesOption string `help:"Hugepages option: disable|native|transparent" default:"transparent"` + HugepageSizeMb int `help:"hugepage size mb default 1G" default:"1024"` // PrivatePrefixes []string `help:"IPv4 private prefixes"` LocalImagePath []string `help:"Local image storage paths"` @@ -134,6 +140,7 @@ type SHostOptions struct { SetVncPassword bool `default:"true" help:"Auto set vnc password after monitor connected"` UseBootVga bool `default:"false" help:"Use boot VGA GPU for guest"` + EnableStrictCpuBind bool `default:"false" help:"Enable strict cpu bind, one vcpu bind one pcpu"` EnableHostAgentNumaAllocate bool `default:"false" help:"Enable host agent numa allocate"` EnableCpuBinding bool `default:"true" help:"Enable cpu binding and rebalance"` EnableOpenflowController bool `default:"false"` @@ -157,6 +164,8 @@ type SHostOptions struct { MaxReservedMemory int `default:"10240" help:"host reserved memory"` DefaultRequestWorkerCount int `default:"8" help:"default request worker count"` + ContainerStartWorkerCount int `default:"1" help:"container start worker count"` + ContainerStopWorkerCount int `default:"1" help:"container stop worker count"` AllowSwitchVMs bool `help:"allow machines run as switch (spoof mac)" default:"true"` AllowRouterVMs bool `help:"allow machines run as router (spoof ip)" default:"true"` @@ -232,6 +241,7 @@ type SHostOptions struct { EnableDirtyRecoverySeconds int `help:"Seconds to delay enable dirty guests recovery feature, default 15 minutes" default:"900"` EnableContainerCniPortmap bool `help:"Use container cni portmap plugin" default:"false"` + DisableReconcileContainer bool `help:"disable reconcile container" default:"false"` } func (o SHostOptions) HostLocalNetconfPath(br 
string) string { diff --git a/vendor/yunion.io/x/onecloud/pkg/hostman/system_service/telegraf.go b/vendor/yunion.io/x/onecloud/pkg/hostman/system_service/telegraf.go index d3e12640..bc32e1bf 100644 --- a/vendor/yunion.io/x/onecloud/pkg/hostman/system_service/telegraf.go +++ b/vendor/yunion.io/x/onecloud/pkg/hostman/system_service/telegraf.go @@ -17,6 +17,7 @@ package system_service import ( "context" "fmt" + "net/url" "sort" "strings" @@ -28,6 +29,14 @@ import ( "yunion.io/x/onecloud/pkg/util/procutils" ) +const ( + TELEGRAF_INPUT_RADEONTOP = "radeontop" + TELEGRAF_INPUT_RADEONTOP_DEV_PATHS = "device_paths" + TELEGRAF_INPUT_CONF_BIN_PATH = "bin_path" + TELEGAF_INPUT_NETDEV = "ni_rsrc_mon" + TELEGAF_INPUT_VASMI = "vasmi" +) + type STelegraf struct { *SBaseSystemService } @@ -103,19 +112,116 @@ func (s *STelegraf) GetConfig(kwargs map[string]interface{}) string { conf += " timeout = \"30s\"\n" conf += "\n" } + /* + * + * [[outputs.kafka]] + * ## URLs of kafka brokers + * brokers = ["localhost:9092"] + * ## Kafka topic for producer messages + * topic = "telegraf" + * ## Optional SASL Config + * sasl_username = "kafka" + * sasl_password = "secret" + * ## Optional SASL: + * ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI + * ## (defaults to PLAIN) + * sasl_mechanism = "PLAIN" + */ + if kafka, ok := kwargs["kafka"]; ok { + kafkaConf, _ := kafka.(map[string]interface{}) + conf += "[[outputs.kafka]]\n" + for _, k := range []string{ + "brokers", + "topic", + "sasl_username", + "sasl_password", + "sasl_mechanism", + } { + if val, ok := kafkaConf[k]; ok { + if k == "brokers" { + brokers, _ := val.([]string) + for i := range brokers { + brokers[i] = fmt.Sprintf("\"%s\"", brokers[i]) + } + conf += fmt.Sprintf(" brokers = [%s]\n", strings.Join(brokers, ", ")) + } else { + conf += fmt.Sprintf(" %s = \"%s\"\n", k, val) + } + } + } + conf += " compression_codec = 0\n" + conf += " required_acks = -1\n" + conf += " max_retry = 3\n" + conf += " data_format = \"json\"\n" + conf += " json_timestamp_units = \"1ms\"\n" + conf += " routing_tag = \"host\"\n" + conf += "\n" + } + /* + * [[outputs.opentsdb]] + * host = "http://127.0.0.1" + * port = 17000 + * http_batch_size = 50 + * http_path = "/opentsdb/put" + * debug = false + * separator = "_" + */ + if opentsdb, ok := kwargs["opentsdb"]; ok { + opentsdbConf, _ := opentsdb.(map[string]interface{}) + urlstr := opentsdbConf["url"].(string) + urlParts, err := url.Parse(urlstr) + if err != nil { + log.Errorf("malformed opentsdb url: %s: %s", urlstr, err) + } else { + port := urlParts.Port() + if len(port) == 0 { + if urlParts.Scheme == "http" { + port = "80" + } else if urlParts.Scheme == "https" { + port = "443" + } + } + conf += "[[outputs.opentsdb]]\n" + conf += fmt.Sprintf(" host = \"%s://%s\"\n", urlParts.Scheme, urlParts.Hostname()) + conf += fmt.Sprintf(" port = %s\n", port) + conf += " http_batch_size = 50\n" + conf += fmt.Sprintf(" http_path = \"%s\"\n", urlParts.Path) + conf += " debug = false\n" + conf += " separator = \"_\"\n" + conf += "\n" + } + } conf += "[[inputs.cpu]]\n" - conf += " percpu = false\n" + conf += " percpu = true\n" conf += " totalcpu = true\n" conf += " collect_cpu_time = false\n" conf += " report_active = true\n" conf += "\n" conf += "[[inputs.disk]]\n" - conf += " ignore_mount_points = [\"/etc/telegraf\", \"/etc/hosts\", \"/etc/hostname\", \"/etc/resolv.conf\", \"/dev/termination-log\"]\n" - conf += " ignore_fs = [\"tmpfs\", \"devtmpfs\", \"overlay\", \"squashfs\", \"iso9660\", \"rootfs\", \"hugetlbfs\", 
\"autofs\"]\n" + ignoreMountPoints := []string{ + "/etc/telegraf", + "/etc/hosts", + "/etc/hostname", + "/etc/resolv.conf", + "/dev/termination-log", + } + for i := range ignoreMountPoints { + ignoreMountPoints[i] = fmt.Sprintf("%q", ignoreMountPoints[i]) + } + ignorePathSegments := []string{ + "/run/k3s/containerd/", + } + ignorePathSegments = append(ignorePathSegments, kwargs["server_path"].(string)) + for i := range ignorePathSegments { + ignorePathSegments[i] = fmt.Sprintf("%q", ignorePathSegments[i]) + } + conf += " ignore_mount_points = [" + strings.Join(ignoreMountPoints, ", ") + "]\n" + conf += " ignore_path_segments = [" + strings.Join(ignorePathSegments, ", ") + "]\n" + conf += " ignore_fs = [\"tmpfs\", \"devtmpfs\", \"devfs\", \"overlay\", \"squashfs\", \"iso9660\", \"rootfs\", \"hugetlbfs\", \"autofs\", \"aufs\"]\n" conf += "\n" conf += "[[inputs.diskio]]\n" conf += " skip_serial_number = false\n" - conf += " excludes = \"^nbd\"\n" + conf += " excludes = \"^(nbd|loop)\"\n" conf += "\n" conf += "[[inputs.kernel]]\n" conf += "\n" @@ -132,6 +238,8 @@ func (s *STelegraf) GetConfig(kwargs map[string]interface{}) string { conf += "[[inputs.smart]]\n" conf += " path=\"/usr/sbin/smartctl\"\n" conf += "\n" + conf += "[[inputs.sensors]]\n" + conf += "\n" conf += "[[inputs.net]]\n" if nics, ok := kwargs["nics"]; ok { ns, _ := nics.([]map[string]interface{}) @@ -157,6 +265,10 @@ func (s *STelegraf) GetConfig(kwargs map[string]interface{}) string { } conf += "[[inputs.netstat]]\n" conf += "\n" + conf += "[[inputs.bond]]\n" + conf += "\n" + conf += "[[inputs.temp]]\n" + conf += "\n" conf += "[[inputs.nstat]]\n" conf += "\n" conf += "[[inputs.ntpq]]\n" @@ -171,6 +283,8 @@ func (s *STelegraf) GetConfig(kwargs map[string]interface{}) string { conf += "[[inputs.internal]]\n" conf += " collect_memstats = false\n" conf += "\n" + conf += "[[inputs.linux_sysctl_fs]]\n" + conf += "\n" conf += "[[inputs.http_listener_v2]]\n" conf += " service_address = \"127.0.0.1:8087\"\n" conf += " path = \"/write\"\n" @@ -187,6 +301,33 @@ func (s *STelegraf) GetConfig(kwargs map[string]interface{}) string { conf += " keep_field_names = true\n" conf += "\n" } + + if radontop, ok := kwargs[TELEGRAF_INPUT_RADEONTOP]; ok { + radontopMap, _ := radontop.(map[string]interface{}) + devPaths := radontopMap[TELEGRAF_INPUT_RADEONTOP_DEV_PATHS].([]string) + devPathStr := make([]string, len(devPaths)) + for i, devPath := range devPaths { + devPathStr[i] = fmt.Sprintf("\"%s\"", devPath) + } + conf += fmt.Sprintf("[[inputs.%s]]\n", TELEGRAF_INPUT_RADEONTOP) + conf += fmt.Sprintf(" bin_path = \"%s\"\n", radontopMap[TELEGRAF_INPUT_CONF_BIN_PATH].(string)) + conf += fmt.Sprintf(" %s = [%s]\n", TELEGRAF_INPUT_RADEONTOP_DEV_PATHS, strings.Join(devPathStr, ", ")) + conf += "\n" + } + + if netdev, ok := kwargs[TELEGAF_INPUT_NETDEV]; ok { + netdevMap, _ := netdev.(map[string]interface{}) + conf += fmt.Sprintf("[[inputs.%s]]\n", TELEGAF_INPUT_NETDEV) + conf += fmt.Sprintf(" bin_path = \"%s\"\n", netdevMap[TELEGRAF_INPUT_CONF_BIN_PATH].(string)) + conf += "\n" + } + + if vasmi, ok := kwargs[TELEGAF_INPUT_VASMI]; ok { + vasmiMap, _ := vasmi.(map[string]interface{}) + conf += fmt.Sprintf("[[inputs.%s]]\n", TELEGAF_INPUT_VASMI) + conf += fmt.Sprintf(" bin_path = \"%s\"\n", vasmiMap[TELEGRAF_INPUT_CONF_BIN_PATH].(string)) + conf += "\n" + } return conf } diff --git a/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/compute/mod_metadatas.go b/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/compute/mod_metadatas.go index 46746023..277dd3cb 
100644 --- a/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/compute/mod_metadatas.go +++ b/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/compute/mod_metadatas.go @@ -16,7 +16,6 @@ package compute import ( "fmt" - "strings" "yunion.io/x/jsonutils" "yunion.io/x/pkg/util/printutils" @@ -78,22 +77,21 @@ func (this *MetadataManager) getModule(session *mcclient.ClientSession, params j } if len(resources) >= 1 { resource := resources[0] - keyString := resource + "s" - if strings.HasSuffix(resource, "y") && resource != NatGateways.GetKeyword() { - keyString = resource[:len(resource)-1] + "ies" - } find := false + keyStrings := []string{resource, resource + "s", resource[:len(resource)-1] + "ies"} mods, _ := modulebase.GetRegisterdModules() - if utils.IsInStringArray(keyString, mods) { - mod, err := modulebase.GetModule(session, keyString) - if err != nil { - return nil, err + for _, keyString := range keyStrings { + if utils.IsInStringArray(keyString, mods) { + mod, err := modulebase.GetModule(session, keyString) + if err == nil { + service = mod.ServiceType() + find = true + break + } } - service = mod.ServiceType() - find = true } if !find { - return nil, fmt.Errorf("No such module %s", keyString) + return nil, fmt.Errorf("No such module %s", resource) } } } diff --git a/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/identity/mod_credentials.go b/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/identity/mod_credentials.go index 57f3792c..21b81e95 100644 --- a/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/identity/mod_credentials.go +++ b/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/identity/mod_credentials.go @@ -490,6 +490,21 @@ func (manager *SCredentialManager) CreateTotpSecret(s *mcclient.ClientSession, u return totp.Totp, nil } +func (manager *SCredentialManager) CreateContainerImageSecret(s *mcclient.ClientSession, projectId string, name string, blob *api.CredentialContainerImageBlob) (jsonutils.JSONObject, error) { + blobJson := jsonutils.Marshal(blob) + input := &api.CredentialCreateInput{ + Type: api.CONTAINER_IMAGE_TYPE, + ProjectId: projectId, + Blob: blobJson.String(), + } + input.Name = name + obj, err := manager.Create(s, jsonutils.Marshal(input)) + if err != nil { + return nil, errors.Wrap(err, "CreateContainerImageSecret") + } + return obj, nil +} + func (manager *SCredentialManager) SaveRecoverySecrets(s *mcclient.ClientSession, uid string, questions []SRecoverySecret) error { _, err := manager.GetRecoverySecrets(s, uid) if err == nil { diff --git a/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/monitor/alert.go b/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/monitor/alert.go index 5b4f4948..c78fbb60 100644 --- a/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/monitor/alert.go +++ b/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/monitor/alert.go @@ -52,7 +52,7 @@ type SNotificationManager struct { func NewNotificationManager() *SNotificationManager { man := modules.NewMonitorV2Manager( "alert_notification", "alert_notifications", - []string{"id", "name", "type", "is_default", "disable_resolve_message", "send_reminder", "settings"}, + []string{"id", "name", "type", "is_default", "disable_resolve_message", "send_reminder", "frequency", "settings"}, []string{}) return &SNotificationManager{ ResourceManager: &man, @@ -65,7 +65,7 @@ type SAlertnotificationManager struct { func NewAlertnotificationManager() *SAlertnotificationManager { man := modules.NewJointMonitorV2Manager("alertnotification", "alertnotifications", - []string{"Alert_ID", "Alert", "Notification_ID", 
"Notification", "Used_by", "State"}, + []string{"Alert_ID", "Alert", "Notification_ID", "Notification", "Used_by", "State", "Frequency"}, []string{}, Alerts, Notifications) return &SAlertnotificationManager{&man} diff --git a/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/monitor/helper.go b/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/monitor/helper.go index 09919b69..26b29a15 100644 --- a/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/monitor/helper.go +++ b/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/monitor/helper.go @@ -282,10 +282,11 @@ type AlertQuery struct { policy string resultFormat string - selects *AlertQuerySelects - where *AlertQueryWhere - groupBy *AlertQueryGroupBy - resultReducer *monitor.Condition + selects *AlertQuerySelects + where *AlertQueryWhere + groupBy *AlertQueryGroupBy + resultReducer *monitor.Condition + resultReducerOrder monitor.ResultReducerOrder } func NewAlertQuery(database string, measurement string) *AlertQuery { @@ -404,6 +405,11 @@ func (q *AlertQuery) Reducer(rType string, params []float64) *AlertQuery { return q } +func (q *AlertQuery) ReducerOrder(rType monitor.ResultReducerOrder) *AlertQuery { + q.resultReducerOrder = rType + return q +} + type AlertQuerySelects struct { parts []*AlertQuerySelect } @@ -675,8 +681,9 @@ func (input *MetricQueryInput) ToQueryData() *monitor.MetricQueryInput { } data.MetricQuery = []*monitor.AlertQuery{ { - Model: input.query.ToMetricQuery(), - ResultReducer: input.query.resultReducer, + Model: input.query.ToMetricQuery(), + ResultReducer: input.query.resultReducer, + ResultReducerOrder: input.query.resultReducerOrder, }, } diff --git a/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/scheduler/mod_scheduler.go b/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/scheduler/mod_scheduler.go index 24618a7e..a76b0f47 100644 --- a/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/scheduler/mod_scheduler.go +++ b/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/scheduler/mod_scheduler.go @@ -21,6 +21,7 @@ import ( "yunion.io/x/jsonutils" api "yunion.io/x/onecloud/pkg/apis/scheduler" + "yunion.io/x/onecloud/pkg/httperrors" "yunion.io/x/onecloud/pkg/mcclient" "yunion.io/x/onecloud/pkg/mcclient/auth" "yunion.io/x/onecloud/pkg/mcclient/modulebase" @@ -118,6 +119,18 @@ func (this *SchedulerManager) DoForecast(s *mcclient.ClientSession, params jsonu return obj, err } +func (this *SchedulerManager) DoHistoryList(s *mcclient.ClientSession, params jsonutils.JSONObject) (jsonutils.JSONObject, error) { + return this.HistoryList(s, params) +} + +func (this *SchedulerManager) DoHistoryShow(s *mcclient.ClientSession, params jsonutils.JSONObject) (jsonutils.JSONObject, error) { + sessionId, err := params.GetString("session_id") + if err != nil { + return nil, httperrors.NewNotFoundError("session_id") + } + return this.HistoryShow(s, sessionId, params) +} + func (this *SchedulerManager) Cleanup(s *mcclient.ClientSession, params jsonutils.JSONObject) (jsonutils.JSONObject, error) { url := newSchedURL("cleanup") return modulebase.Post(this.ResourceManager, s, url, params, "") diff --git a/vendor/yunion.io/x/onecloud/pkg/mcclient/options/base.go b/vendor/yunion.io/x/onecloud/pkg/mcclient/options/base.go index 6646c1b0..da6a48f0 100644 --- a/vendor/yunion.io/x/onecloud/pkg/mcclient/options/base.go +++ b/vendor/yunion.io/x/onecloud/pkg/mcclient/options/base.go @@ -245,7 +245,7 @@ type BaseListOptions struct { Manager []string `help:"List objects belonging to the cloud provider" json:"manager,omitempty"` Account string `help:"List 
diff --git a/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/scheduler/mod_scheduler.go b/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/scheduler/mod_scheduler.go
index 24618a7e..a76b0f47 100644
--- a/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/scheduler/mod_scheduler.go
+++ b/vendor/yunion.io/x/onecloud/pkg/mcclient/modules/scheduler/mod_scheduler.go
@@ -21,6 +21,7 @@ import (
 	"yunion.io/x/jsonutils"
 
 	api "yunion.io/x/onecloud/pkg/apis/scheduler"
+	"yunion.io/x/onecloud/pkg/httperrors"
 	"yunion.io/x/onecloud/pkg/mcclient"
 	"yunion.io/x/onecloud/pkg/mcclient/auth"
 	"yunion.io/x/onecloud/pkg/mcclient/modulebase"
@@ -118,6 +119,18 @@ func (this *SchedulerManager) DoForecast(s *mcclient.ClientSession, params jsonu
 	return obj, err
 }
 
+func (this *SchedulerManager) DoHistoryList(s *mcclient.ClientSession, params jsonutils.JSONObject) (jsonutils.JSONObject, error) {
+	return this.HistoryList(s, params)
+}
+
+func (this *SchedulerManager) DoHistoryShow(s *mcclient.ClientSession, params jsonutils.JSONObject) (jsonutils.JSONObject, error) {
+	sessionId, err := params.GetString("session_id")
+	if err != nil {
+		return nil, httperrors.NewNotFoundError("session_id")
+	}
+	return this.HistoryShow(s, sessionId, params)
+}
+
 func (this *SchedulerManager) Cleanup(s *mcclient.ClientSession, params jsonutils.JSONObject) (jsonutils.JSONObject, error) {
 	url := newSchedURL("cleanup")
 	return modulebase.Post(this.ResourceManager, s, url, params, "")
diff --git a/vendor/yunion.io/x/onecloud/pkg/mcclient/options/base.go b/vendor/yunion.io/x/onecloud/pkg/mcclient/options/base.go
index 6646c1b0..da6a48f0 100644
--- a/vendor/yunion.io/x/onecloud/pkg/mcclient/options/base.go
+++ b/vendor/yunion.io/x/onecloud/pkg/mcclient/options/base.go
@@ -245,7 +245,7 @@ type BaseListOptions struct {
 	Manager []string `help:"List objects belonging to the cloud provider" json:"manager,omitempty"`
 	Account string `help:"List objects belonging to the cloud account" json:"account,omitempty"`
-	Provider []string `help:"List objects from the provider" choices:"OneCloud|VMware|Aliyun|Apsara|Qcloud|Azure|Aws|Huawei|OpenStack|Ucloud|VolcEngine|ZStack|Google|Ctyun|Cloudpods|Nutanix|BingoCloud|IncloudSphere|JDcloud|Proxmox|Ceph|CephFS|Ecloud|HCSO|HCS|HCSOP|H3C|S3|RemoteFile|Ksyun|Baidu|QingCloud|OracleCloud|SangFor" json:"provider,omitempty"`
+	Provider []string `help:"List objects from the provider" choices:"OneCloud|VMware|Aliyun|Apsara|Qcloud|Azure|Aws|Huawei|OpenStack|Ucloud|VolcEngine|ZStack|Google|Ctyun|Cloudpods|Nutanix|BingoCloud|IncloudSphere|JDcloud|Proxmox|Ceph|CephFS|Ecloud|HCSO|HCS|HCSOP|H3C|S3|RemoteFile|Ksyun|Baidu|QingCloud|OracleCloud|SangFor|ZettaKit|UIS" json:"provider,omitempty"`
 	Brand []string `help:"List objects belonging to a special brand"`
 	CloudEnv string `help:"Cloud environment" choices:"public|private|onpremise|private_or_onpremise" json:"cloud_env,omitempty"`
 	PublicCloud *bool `help:"List objects belonging to public cloud" json:"public_cloud"`
@@ -262,6 +262,8 @@ type BaseListOptions struct {
 	Id []string `help:"filter by id"`
 
 	// Name []string `help:"fitler by name"`
+
+	Status []string `help:"filter by status"`
 }
 
 func (opts *BaseListOptions) addTag(keyPrefix, tagstr string, idx int, params *jsonutils.JSONDict) error {
diff --git a/vendor/yunion.io/x/onecloud/pkg/mcclient/options/webconsole.go b/vendor/yunion.io/x/onecloud/pkg/mcclient/options/webconsole.go
index d8b43131..8d6632ac 100644
--- a/vendor/yunion.io/x/onecloud/pkg/mcclient/options/webconsole.go
+++ b/vendor/yunion.io/x/onecloud/pkg/mcclient/options/webconsole.go
@@ -108,3 +108,8 @@ type WebConsoleServerRdpOptions struct {
 	Height *int
 	Dpi    *int
 }
+
+type WebConsoleContainerExecOptions struct {
+	WebConsoleOptions
+	ID string `help:"Container id or name"`
+}
diff --git a/vendor/yunion.io/x/onecloud/pkg/monitor/tsdb/models.go b/vendor/yunion.io/x/onecloud/pkg/monitor/tsdb/models.go
index f8c39fd0..8cc93f63 100644
--- a/vendor/yunion.io/x/onecloud/pkg/monitor/tsdb/models.go
+++ b/vendor/yunion.io/x/onecloud/pkg/monitor/tsdb/models.go
@@ -88,13 +88,13 @@ func NewQueryResult() *QueryResult {
 
 func FormatRawName(idx int, name string, groupByTags []string, tags map[string]string, diffTagKeys sets.String) string {
 	// when group by tag specified
-	if len(groupByTags) != 0 {
+	/*if len(groupByTags) != 0 {
 		for key, val := range tags {
 			if strings.Contains(strings.Join(groupByTags, ","), key) {
 				return val
 			}
 		}
-	}
+	}*/
 
 	genHint := func(k, v string) string {
 		return fmt.Sprintf("%s=%s", k, v)
diff --git a/vendor/yunion.io/x/onecloud/pkg/monitor/tsdb/query_endpoint.go b/vendor/yunion.io/x/onecloud/pkg/monitor/tsdb/query_endpoint.go
index 80c5af09..52977bad 100644
--- a/vendor/yunion.io/x/onecloud/pkg/monitor/tsdb/query_endpoint.go
+++ b/vendor/yunion.io/x/onecloud/pkg/monitor/tsdb/query_endpoint.go
@@ -25,6 +25,8 @@ import (
 type TsdbQueryEndpoint interface {
 	Query(ctx context.Context, ds *DataSource, query *TsdbQuery) (*Response, error)
 	FilterMeasurement(ctx context.Context, ds *DataSource, from, to string, ms *monitor.InfluxMeasurement, tagFilter *monitor.MetricQueryTag) (*monitor.InfluxMeasurement, error)
+	FillSelect(query *monitor.AlertQuery, isAlert bool) *monitor.AlertQuery
+	FillGroupBy(query *monitor.AlertQuery, inputQuery *monitor.MetricQueryInput, tagId string, isAlert bool) *monitor.AlertQuery
 }
 
 var registry map[string]GetTsdbQueryEndpointFn
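
With the group-by shortcut commented out in FormatRawName above, series names always fall back to the key=value hint form built by genHint. A standalone illustration of that naming scheme (hypothetical helper, not the vendored function):

    package main

    import (
    	"fmt"
    	"sort"
    	"strings"
    )

    // seriesName joins tag pairs as "k=v" hints; keys are sorted so the
    // generated name is stable across runs.
    func seriesName(tags map[string]string) string {
    	keys := make([]string, 0, len(tags))
    	for k := range tags {
    		keys = append(keys, k)
    	}
    	sort.Strings(keys)
    	hints := make([]string, 0, len(keys))
    	for _, k := range keys {
    		hints = append(hints, fmt.Sprintf("%s=%s", k, tags[k]))
    	}
    	return strings.Join(hints, ",")
    }

    func main() {
    	fmt.Println(seriesName(map[string]string{"vm_name": "web-0", "host": "node-1"}))
    	// prints: host=node-1,vm_name=web-0
    }
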
diff --git a/vendor/yunion.io/x/onecloud/pkg/util/fileutils2/fileutils.go b/vendor/yunion.io/x/onecloud/pkg/util/fileutils2/fileutils.go
index 0fb95776..30e81e01 100644
--- a/vendor/yunion.io/x/onecloud/pkg/util/fileutils2/fileutils.go
+++ b/vendor/yunion.io/x/onecloud/pkg/util/fileutils2/fileutils.go
@@ -152,6 +152,10 @@ func GetAllBlkdevsIoSchedulers() ([]string, error) {
 		return nil, errors.Wrap(err, "ioutil.ReadDir(/sys/block)")
 	}
 	for _, b := range blockDevs {
+		// check this is a real block device
+		if !Exists(path.Join("/sys/block", b.Name(), "device")) {
+			continue
+		}
 		if IsBlockDevMounted(b.Name()) {
 			conf, err := GetBlkdevConfig(b.Name(), "queue/scheduler")
 			if err != nil {
@@ -186,10 +190,63 @@ func ChangeAllBlkdevsParams(params map[string]string) {
 			return
 		}
 		for _, b := range blockDevs {
-			if IsBlockDevMounted(b.Name()) {
-				for k, v := range params {
-					ChangeBlkdevParameter(b.Name(), k, v)
-				}
+			if !Exists(path.Join("/sys/block", b.Name(), "device")) {
+				continue
+			}
+			for k, v := range params {
+				ChangeBlkdevParameter(b.Name(), k, v)
+			}
+		}
+	}
+}
+
+func BlockDevIsSsd(dev string) bool {
+	rotational := path.Join("/sys/block", dev, "queue", "rotational")
+	res, err := FileGetContents(rotational)
+	if err != nil {
+		log.Errorf("FileGetContents fail %s %s", rotational, err)
+		return false
+	}
+	return strings.TrimSpace(res) == "0"
+}
+
+func ChangeSsdBlkdevsParams(params map[string]string) {
+	if _, err := os.Stat("/sys/block"); !os.IsNotExist(err) {
+		blockDevs, err := ioutil.ReadDir("/sys/block")
+		if err != nil {
+			log.Errorf("ReadDir /sys/block error: %s", err)
+			return
+		}
+		for _, b := range blockDevs {
+			if !Exists(path.Join("/sys/block", b.Name(), "device")) {
+				continue
+			}
+			if !BlockDevIsSsd(b.Name()) {
+				continue
+			}
+			for k, v := range params {
+				ChangeBlkdevParameter(b.Name(), k, v)
+			}
+		}
+	}
+}
+
+func ChangeHddBlkdevsParams(params map[string]string) {
+	if _, err := os.Stat("/sys/block"); !os.IsNotExist(err) {
+		blockDevs, err := ioutil.ReadDir("/sys/block")
+		if err != nil {
+			log.Errorf("ReadDir /sys/block error: %s", err)
+			return
+		}
+		for _, b := range blockDevs {
+			if !Exists(path.Join("/sys/block", b.Name(), "device")) {
+				continue
+			}
+			if BlockDevIsSsd(b.Name()) {
+				continue
+			}
+			for k, v := range params {
+				ChangeBlkdevParameter(b.Name(), k, v)
 			}
 		}
 	}
 }
diff --git a/vendor/yunion.io/x/onecloud/pkg/util/logclient/consts.go b/vendor/yunion.io/x/onecloud/pkg/util/logclient/consts.go
index 7c7b8147..09fa83d5 100644
--- a/vendor/yunion.io/x/onecloud/pkg/util/logclient/consts.go
+++ b/vendor/yunion.io/x/onecloud/pkg/util/logclient/consts.go
@@ -292,4 +292,7 @@ const (
 	ACT_TRANSFERRED_REJECTED = "trans_rejected"
 	ACT_ADD_RATE             = "add_rate"
 	ACT_REMOVE_RATE          = "remove_rate"
+
+	ACT_CLONE   = "clone"
+	ACT_REBUILD = "rebuild"
 )
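
BlockDevIsSsd reads /sys/block/<dev>/queue/rotational, where 0 means non-rotational, so the two new helpers can tune SSDs and HDDs separately. A usage sketch; the parameter keys are sysfs paths relative to /sys/block/<dev>/ as consumed by ChangeBlkdevParameter, and the scheduler values are illustrative choices, not a recommendation from this patch:

    // fileutils2 is yunion.io/x/onecloud/pkg/util/fileutils2.
    func tuneBlockDevices() {
    	fileutils2.ChangeSsdBlkdevsParams(map[string]string{
    		"queue/scheduler": "none", // illustrative: bypass the I/O scheduler on SSD/NVMe
    	})
    	fileutils2.ChangeHddBlkdevsParams(map[string]string{
    		"queue/scheduler": "mq-deadline", // illustrative choice for rotational disks
    	})
    }
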
diff --git a/vendor/yunion.io/x/onecloud/pkg/util/pod/cgroup.go b/vendor/yunion.io/x/onecloud/pkg/util/pod/cgroup.go
index 704760c3..021e322f 100644
--- a/vendor/yunion.io/x/onecloud/pkg/util/pod/cgroup.go
+++ b/vendor/yunion.io/x/onecloud/pkg/util/pod/cgroup.go
@@ -32,6 +32,7 @@ type CgroupUtil interface {
 	SetCPUCfs(ctrId string, quota int64, period int64) error
 	SetDevicesAllow(ctrId string, allows []string) error
 	SetPidsMax(ctrId string, max int) error
+	SetCpusetCloneChildren(ctrId string) error
 }
 
 type podCgroupV1Util struct {
@@ -92,3 +93,8 @@ func (p podCgroupV1Util) SetPidsMax(ctrId string, max int) error {
 	pidFp := p.getContainerCGFilePath("pids", ctrId, "pids.max")
 	return p.write(pidFp, fmt.Sprintf("%d", max))
 }
+
+func (p podCgroupV1Util) SetCpusetCloneChildren(ctrId string) error {
+	ccFp := p.getContainerCGFilePath("cpuset", ctrId, "cgroup.clone_children")
+	return p.write(ccFp, "1")
+}
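
For context on SetCpusetCloneChildren: under cgroup v1, writing 1 to cgroup.clone_children makes newly created child cpuset cgroups inherit the parent's cpuset.cpus/cpuset.mems instead of starting out empty. A minimal standalone sketch of the same write (the cgroup directory is hypothetical):

    package main

    import (
    	"os"
    	"path/filepath"
    )

    // setCpusetCloneChildren is the plain-file equivalent of the new
    // CgroupUtil method: echo 1 > <dir>/cgroup.clone_children.
    func setCpusetCloneChildren(cgroupDir string) error {
    	fp := filepath.Join(cgroupDir, "cgroup.clone_children")
    	return os.WriteFile(fp, []byte("1"), 0644)
    }

    func main() {
    	_ = setCpusetCloneChildren("/sys/fs/cgroup/cpuset/pod-example") // hypothetical path
    }
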
%d: %v", ctrId, dur, timeout, err) + // set timeout to 0 to stop forcely + timeout = 0 + errs = append(errs, errors.Wrapf(err, "try %d", tries)) + time.Sleep(dur) + } + return errors.NewAggregate(errs) +} + +func (c crictl) StopContainer(ctx context.Context, ctrId string, timeout int64, tryRemove bool, force bool) error { + errs := []error{} + isStopped := false + if force { + if err := c.forceKillContainer(ctx, ctrId); err != nil { + log.Infof("force kill container %s error: %v", ctrId, err) + errs = append(errs, errors.Wrap(err, "forceKillContainer")) + } else { + isStopped = true + } + } else { + maxTries := 5 + if err := c.stopContainerWithRetry(ctx, ctrId, timeout, maxTries); err != nil { + errs = append(errs, errors.Wrap(err, "stopContainer")) + } else { + isStopped = true + } + } + if tryRemove { + // try force remove container + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + if err := c.RemoveContainer(ctx, ctrId); err != nil { + errs = append(errs, errors.Wrapf(err, "try remove container %s", ctrId)) + } else { + return nil + } + } + if isStopped { + return nil + } + return errors.NewAggregate(errs) +} + +func (c crictl) stopContainer(ctx context.Context, ctrId string, timeout int64) error { if _, err := c.GetRuntimeClient().StopContainer(ctx, &runtimeapi.StopContainerRequest{ ContainerId: ctrId, Timeout: timeout, @@ -339,6 +425,54 @@ func (c crictl) StopContainer(ctx context.Context, ctrId string, timeout int64) return nil } +func (c crictl) forceKillContainer(ctx context.Context, ctrId string) error { + cs, err := c.containerStatus(ctx, ctrId, true) + if err != nil { + return errors.Wrap(err, "get containerStatus") + } + info := cs.GetInfo() + infoStr := info["info"] + if infoStr == "" { + return errors.Errorf("empty info: %s", infoStr) + } + infoObj, err := jsonutils.ParseString(infoStr) + if err != nil { + return errors.Wrapf(err, "invalid info: %s", infoStr) + } + pidInt, err := infoObj.Int("pid") + if err != nil { + return errors.Wrapf(err, "get pid from %s", infoObj) + } + pid := fmt.Sprintf("%d", pidInt) + // get ppid + pStatusFile := filepath.Join("/proc", pid, "task", pid, "status") + out, err := procutils.NewRemoteCommandAsFarAsPossible("sh", "-c", fmt.Sprintf("cat %s | grep PPid: | awk '{print $2}'", pStatusFile)).Output() + if err != nil { + return errors.Wrapf(err, "get ppid from %s, out: %s", pStatusFile, out) + } + ppidStr := strings.TrimSpace(string(out)) + ppid, err := strconv.Atoi(ppidStr) + if err != nil { + return errors.Wrapf(err, "invalid ppid str %s from %s", ppidStr, pStatusFile) + } + ppCmdlineFile := filepath.Join("/proc", ppidStr, "cmdline") + ppCmdline, err := procutils.NewRemoteCommandAsFarAsPossible("cat", ppCmdlineFile).Output() + if err != nil { + return errors.Wrapf(err, "get cmdline from %s, out: %s", ppCmdlineFile, ppCmdline) + } + log.Infof("try to kill container %s, pid %s parent process(%d): %s", ctrId, pid, ppid, ppCmdline) + killOut, err := procutils.NewRemoteCommandAsFarAsPossible("kill", "-9", ppidStr).Output() + if err != nil { + killErr := errors.Wrapf(err, "kill -9 %s, out: %s", ppidStr, killOut) + log.Errorf("kill container %s, pid %s parent process(%d): %s, error: %v", ctrId, pid, ppid, ppCmdline, killErr) + return killErr + } + if err := c.stopContainerWithRetry(ctx, ctrId, 0, 5); err != nil { + return errors.Wrapf(err, "stop container %s after kill parent process", ctrId) + } + return nil +} + func (c crictl) RemoveContainer(ctx context.Context, ctrId string) error { _, err := 
diff --git a/vendor/yunion.io/x/onecloud/pkg/util/pod/securitycontext.go b/vendor/yunion.io/x/onecloud/pkg/util/pod/securitycontext.go
new file mode 100644
index 00000000..2a8b4165
--- /dev/null
+++ b/vendor/yunion.io/x/onecloud/pkg/util/pod/securitycontext.go
@@ -0,0 +1,57 @@
+// Copyright 2019 Yunion
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pod
+
+import "yunion.io/x/onecloud/pkg/apis"
+
+// https://github.com/kubernetes/kubernetes/blob/release-1.26/pkg/securitycontext/util.go#L213-L236
+var (
+	// These *must* be kept in sync with moby/moby.
+	// https://github.com/moby/moby/blob/master/oci/defaults.go#L116-L134
+	// @jessfraz will watch changes to those files upstream.
+	defaultMaskedPaths = []string{
+		"/proc/acpi",
+		"/proc/kcore",
+		"/proc/keys",
+		"/proc/latency_stats",
+		"/proc/timer_list",
+		"/proc/timer_stats",
+		"/proc/sched_debug",
+		"/proc/scsi",
+		"/sys/firmware",
+	}
+	defaultReadonlyPaths = []string{
+		"/proc/asound",
+		"/proc/bus",
+		"/proc/fs",
+		"/proc/irq",
+		"/proc/sys",
+		"/proc/sysrq-trigger",
+	}
+)
+
+func GetDefaultMaskedPaths(unmasks apis.ContainerProcMountType) []string {
+	if unmasks == apis.ContainerUnmaskedProcMount {
+		return []string{}
+	}
+	return defaultMaskedPaths
+}
+
+func GetReadonlyPaths(unmasks apis.ContainerProcMountType) []string {
+	if unmasks == apis.ContainerUnmaskedProcMount {
+		return []string{}
+	}
+	return defaultReadonlyPaths
+}
diff --git a/vendor/yunion.io/x/onecloud/pkg/util/sysutils/nics.go b/vendor/yunion.io/x/onecloud/pkg/util/sysutils/nics.go
index 7abf0dd0..7b182205 100644
--- a/vendor/yunion.io/x/onecloud/pkg/util/sysutils/nics.go
+++ b/vendor/yunion.io/x/onecloud/pkg/util/sysutils/nics.go
@@ -71,7 +71,17 @@ func Nics() ([]*types.SNicDevInfo, error) {
 		if carrier == "1" {
 			up = true
 		}
-		mac, _ := net.ParseMAC(GetSysConfigQuiet(filepath.Join(netPath, "address")))
+		macStr := GetSysConfigQuiet(filepath.Join(netPath, "address"))
+		permMacStr := GetSysConfigQuiet(filepath.Join(netPath, "bonding_slave/perm_hwaddr"))
+		var mac net.HardwareAddr
+		if len(permMacStr) > 0 {
+			mac, _ = net.ParseMAC(permMacStr)
+		} else if len(macStr) > 0 {
+			mac, _ = net.ParseMAC(macStr)
+		} else {
+			// no valid mac address
+			continue
+		}
 		mtuStr := GetSysConfigQuiet(filepath.Join(netPath, "mtu"))
 		mtu := 0
 		if len(mtuStr) > 0 {