diff --git a/.gitignore b/.gitignore index 9bc44df588..302416b0f4 100644 --- a/.gitignore +++ b/.gitignore @@ -5,24 +5,13 @@ .idea/* -# command binaries -/cmd/agentctl/agentctl -/cmd/vpp-agent/vpp-agent -/cmd/vpp-agent-ctl/vpp-agent-ctl - -# example binaries -/examples/govpp_call/govpp_call -/examples/grpc_vpp/notifications/notifications -/examples/grpc_vpp/remote_client/remote_client -/examples/idx_bd_cache/idx_bd_cache -/examples/idx_iface_cache/idx_iface_cache -/examples/idx_mapping_lookup/idx_mapping_lookup -/examples/idx_mapping_watcher/idx_mapping_watcher -/examples/idx_veth_cache/idx_veth_cache -/examples/localclient_linux/tap/tap -/examples/localclient_linux/veth/veth -/examples/localclient_vpp/nat/nat -/examples/localclient_vpp/plugins/plugins +# ignore executables (extensionless files) +/cmd/**/* +!/cmd/**/ +!/cmd/**/*.* +/examples/**/* +!/examples/**/ +!/examples/**/*.* # test results /tests/perf/log diff --git a/CHANGELOG.md b/CHANGELOG.md index 8e57af2322..b502ca6094 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -96,10 +96,10 @@ Cn-infra 1.5 brought new and better flavors and it would be a shame not to implement it in the vpp-agent. The old flavors package was removed and replaced with this new concept, - visible in [app package vpp-agent](app/vpp_agent.go). All examples were also updated. + visible in [app package vpp-agent](cmd/vpp-agent/app/vpp_agent.go). All examples were also updated. ## Breaking Changes -- Flavors were replaced with [new way](app) of managing plugins. +- Flavors were replaced with [new way](cmd/vpp-agent/app) of managing plugins. - REST interface URLs were changed, see [readme](plugins/rest/README.md) for complete list. 
## New Features @@ -587,7 +587,7 @@ Ability to extend the behavior of the VPP Agent by creating new plugins on top o New plugins can access API for configured: [VPP Network interfaces](plugins/vpp/ifplugin/ifaceidx), [Bridge domains](plugins/vpp/l2plugin/l2idx) and [VETHs](plugins/linux/ifplugin/ifaceidx) -based on [idxvpp](idxvpp) threadsafe map tailored for VPP data +based on [idxvpp](pkg/idxvpp) threadsafe map tailored for VPP data with advanced features (multiple watchers, secondary indexes). VPP Agent is embeddable in different software projects and with different systems diff --git a/Gopkg.lock b/Gopkg.lock index 79192736ab..46a70f94e1 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -30,12 +30,20 @@ revision = "d6e3b3328b783f23731bc4d058875b0371ff8109" [[projects]] - digest = "1:f9ae348e1f793dcf9ed930ed47136a67343dbd6809c5c91391322267f4476892" + digest = "1:ed77032e4241e3b8329c9304d66452ed196e795876e14be677a546f36b94e67a" + name = "github.com/DataDog/zstd" + packages = ["."] + pruneopts = "UT" + revision = "c7161f8c63c045cbc7ca051dcc969dd0e4054de2" + version = "v1.3.5" + +[[projects]] + digest = "1:bf42be3cb1519bf8018dfd99720b1005ee028d947124cab3ccf965da59381df6" name = "github.com/Microsoft/go-winio" packages = ["."] pruneopts = "UT" - revision = "97e4973ce50b2ff5f09635a57e2b88a037aae829" - version = "v0.4.11" + revision = "7da180ee92d8bd8bb8c37fc560e673e6557c392f" + version = "v0.4.7" [[projects]] branch = "master" @@ -46,39 +54,31 @@ revision = "cd527374f1e5bff4938207604a14f2e38a9cf512" [[projects]] - digest = "1:b6957a1836b6d7e51e5a71391f5c08fa5d866f57e4ac425fa5f183d518a5657b" + digest = "1:59ced12f3862e56e91115f7f24969db8b609b0a6355063b8d1355aceb52d5f14" name = "github.com/Shopify/sarama" packages = [ ".", "mocks", ] pruneopts = "UT" - revision = "ec843464b50d4c8b56403ec9d589cf41ea30e722" - version = "v1.19.0" + revision = "03a43f93cd29dc549e6d9b11892795c206f9c38c" + version = "v1.20.1" [[projects]] branch = "master" - digest = 
"1:fb728f8e2bf32598af96a8ff2d4c9cb394c3853a851dbf72c0b88d3c9887127f" + digest = "1:ec2d85f88fafc85b18d543d3435dbec6a4f895017ce73d45ae7c7fff2b57c29a" name = "github.com/bennyscetbun/jsongo" packages = ["."] pruneopts = "UT" - revision = "88b16279eead33f9fb0749ee13858472cf01b25c" + revision = "a97f4e906f8da60776250125ca8531adfd8ec44f" [[projects]] branch = "master" - digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" + digest = "1:5bb36304653e73c2ced864d49c9f344e7141a7ceef852442edcea212094ebc3c" name = "github.com/beorn7/perks" packages = ["quantile"] pruneopts = "UT" - revision = "3a771d992973f24aa725d07868b467d1ddfceafb" - -[[projects]] - branch = "master" - digest = "1:965fa961ce7d2a2b0586ff1510c7268d9a8db78c0f08e15c3510ce7cceb963a9" - name = "github.com/boltdb/bolt" - packages = ["."] - pruneopts = "UT" - revision = "fd01fc79c553a8e99d512a07e8e0c63d4a3ccfc5" + revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9" [[projects]] digest = "1:7b81d2ed76bf960333a8020c4b8c22abd6072f0b54ad31c66e90e6a17a19315a" @@ -98,27 +98,27 @@ [[projects]] branch = "master" - digest = "1:c307799fe01c92c8049378347b93b658a0f3be2d10a5dab8cc0798f45ee6759a" + digest = "1:c3806bfe3d2c9baa0d4ebd90f0a59c7505969bcaab024b5185d20ccd66c9400c" name = "github.com/buger/goterm" packages = ["."] pruneopts = "UT" - revision = "c206103e1f37c0c6c5c039706305ea2aa6e8ad3b" + revision = "6d19e6a8df12fdfc44a90a24b677a6d04a80b91f" [[projects]] branch = "master" - digest = "1:ddbcc8c842f4e5cf1fbf87f197a38ccf90c3930694b1beddb0776a0bc2d58089" + digest = "1:1ed8f94f16010f9e1a419a818ec8ed5765d04561e6bfcc872e3a5a16fbae08b2" name = "github.com/containerd/console" packages = ["."] pruneopts = "UT" - revision = "0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f" + revision = "c12b1e7919c14469339a5d38f2f8ed9b64a9de23" [[projects]] branch = "master" - digest = "1:e48c63e818c67fbf3d7afe20bba33134ab1a5bf384847385384fd027652a5a96" + digest = 
"1:fc8dbcc2a5de7c093e167828ebbdf551641761d2ad75431d3a167d467a264115" name = "github.com/containerd/continuity" packages = ["pathdriver"] pruneopts = "UT" - revision = "bea7585dbfac2847dbf49a6b8e7738a36c09bc75" + revision = "b2b946a77f5973f420514090d6f6dd58b08303f0" [[projects]] digest = "1:7ae4616edd396840ccc1506d287211ef883d3e93604dde0c9d1dd5721bb449d5" @@ -138,7 +138,7 @@ version = "v3.3.10" [[projects]] - digest = "1:be1b689cdb07159630a2232bc1c9750e4c1cfaf3ff07fd8afec78d018902e882" + digest = "1:85968a9e07d94b5f3d15993b9884ab5fcc41c8b7b6418df8b6a3baa387bb8d68" name = "github.com/coreos/go-systemd" packages = [ "activation", @@ -146,8 +146,8 @@ "util", ] pruneopts = "UT" - revision = "9002847aa1425fb6ac49077c0a630b3b67e0fbfd" - version = "v18" + revision = "39ca1b05acc7ad1220e09f133283b8859a8b71ab" + version = "v17" [[projects]] digest = "1:6e2ff82d2fe11ee35ec8dceb4346b8144a761f1c8655592c4ebe99a92fcec327" @@ -158,20 +158,20 @@ version = "v4" [[projects]] - digest = "1:ec66ad050342a3573ed2f5a4337d51b4c6d5d2a717cc6c9ecf86b081235a5759" + digest = "1:ec52d81d56e55d307da2d3dfc97ab952f9cec61d7df839ba5dd464ec08f9759e" name = "github.com/cyphar/filepath-securejoin" packages = ["."] pruneopts = "UT" - revision = "a261ee33d7a517f054effbf451841abaafe3e0fd" - version = "v0.2.2" + revision = "06bda8370f45268db985f7af15732444d94ed51c" + version = "v0.2.1" [[projects]] - digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" + digest = "1:a2c1d0e43bd3baaa071d1b9ed72c27d78169b2b269f71c105ac4ba34b1be4a39" name = "github.com/davecgh/go-spew" packages = ["spew"] pruneopts = "UT" - revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" - version = "v1.1.1" + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" [[projects]] digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55" @@ -182,8 +182,7 @@ version = "v3.2.0" [[projects]] - branch = "master" - digest = 
"1:9947f1584dea52b039a8653d5e74339619c0b082e38ca579d1ce1a7da2624897" + digest = "1:4ec8504b68b75f562efc4b291cba33194f88eaa4e68d9fc449e5b7ca68539329" name = "github.com/docker/docker" packages = [ "api/types", @@ -197,7 +196,6 @@ "api/types/swarm", "api/types/swarm/runtime", "api/types/versions", - "errdefs", "opts", "pkg/fileutils", "pkg/homedir", @@ -210,23 +208,23 @@ "pkg/system", ] pruneopts = "UT" - revision = "46652b00adba805c17f369aa84565127778976a8" + revision = "3dfb26ab3cbf961298f8ce3f94659b5fe4146ceb" [[projects]] - digest = "1:ade935c55cd6d0367c843b109b09c9d748b1982952031414740750fdf94747eb" + digest = "1:87dcb59127512b84097086504c16595cf8fef35b9e0bfca565dfc06e198158d7" name = "github.com/docker/go-connections" packages = ["nat"] pruneopts = "UT" - revision = "7395e3f8aa162843a74ed6d48e79627d9792ac55" - version = "v0.4.0" + revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d" + version = "v0.3.0" [[projects]] - digest = "1:6f82cacd0af5921e99bf3f46748705239b36489464f4529a1589bc895764fb18" + digest = "1:57d39983d01980c1317c2c5c6dd4b5b0c4a804ad2df800f2f6cbcd6a6d05f6ca" name = "github.com/docker/go-units" packages = ["."] pruneopts = "UT" - revision = "47565b4f722fb6ceae66b95f853feed578a4a51c" - version = "v0.3.3" + revision = "0dadbb0345b35ec7ef35e228dabb8de89a65bf52" + version = "v0.3.2" [[projects]] digest = "1:00d0d550a1f1d7ca03270ebc1e136f21f6b9dab37f0c37e9a90d56d2f7afcff9" @@ -245,11 +243,11 @@ [[projects]] branch = "master" - digest = "1:79f16588b5576b1b3cd90e48d2374cc9a1a8776862d28d8fd0f23b0e15534967" + digest = "1:0448d1c1a596941c608762912fe7c865f88d9ffa45afb8af1edcf1401762bf7e" name = "github.com/eapache/go-xerial-snappy" packages = ["."] pruneopts = "UT" - revision = "776d5712da21bc4762676d614db1d8a64f4238b0" + revision = "040cc1a32f578808623071247fdbd5cc43f37f5f" [[projects]] digest = "1:444b82bfe35c83bbcaf84e310fb81a1f9ece03edfed586483c869e2c046aef69" @@ -259,14 +257,6 @@ revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98" version = "v1.1.0" 
-[[projects]] - digest = "1:f4f6279cb37479954644babd8f8ef00584ff9fa63555d2c6718c1c3517170202" - name = "github.com/elazarl/go-bindata-assetfs" - packages = ["."] - pruneopts = "UT" - revision = "30f82fa23fd844bd5bb1e5f216db87fd77b5eb43" - version = "v1.0.0" - [[projects]] digest = "1:ad7ffa16c33f19ee2c17bce7ee865f5632cdc90d908aa313849900ecb3f722df" name = "github.com/evalphobia/logrus_fluent" @@ -292,7 +282,7 @@ version = "v1.4.7" [[projects]] - digest = "1:e4d1d8f3455dadab4e0f159acaff9075985ddaf6e807247923c088d964f1c845" + digest = "1:027133e2c23fd4ff467e66d928ab4a64f7fd7cb2f77acdb580c483f7389d22d7" name = "github.com/fsouza/go-dockerclient" packages = [ ".", @@ -301,8 +291,8 @@ "internal/term", ] pruneopts = "UT" - revision = "f3bdb27a96799b10006a884ef2fed89284466276" - version = "v1.3.1" + revision = "8842d40dbf5ee062d80f9dc429db31a0fe0cdc73" + version = "v1.2.2" [[projects]] digest = "1:2cd7915ab26ede7d95b8749e6b1f933f1c6d5398030684e6505940a10f31cfda" @@ -321,7 +311,7 @@ version = "v1.0.1" [[projects]] - digest = "1:34a9a60fade37f8009ed4a19e02924198aba3eabfcc120ee5c6002b7de17212d" + digest = "1:991bb96360eb8db70a40e9c496769fe6a7bbac4047f8376494d9837397078327" name = "github.com/go-redis/redis" packages = [ ".", @@ -334,22 +324,31 @@ "internal/util", ] pruneopts = "UT" - revision = "b3d9bf10f6666b2ee5100a6f3f84f4caf3b4e37d" - version = "v6.14.2" + revision = "480db94d33e6088e08d628833b6c0705451d24bb" + version = "v6.13.2" [[projects]] - digest = "1:dae52e85e2f3d0a734799e74ec0711d3ddd673b21a9e83be6c899af64c3a98f6" + digest = "1:57fa4c058c21ce25d0b7272518dd746065117abf6cc706158b0d361202024520" name = "github.com/godbus/dbus" packages = ["."] pruneopts = "UT" - revision = "66d97aec3384421e393c2a76b770a1b5c31d07a8" - version = "v5.0" + revision = "a389bdde4dd695d414e47b755e95e72b7826432c" + version = "v4.1.0" + +[[projects]] + digest = "1:3fdbd53962bb796a751552e6be1fb3dbd293d6d5851c0c48f611a82480769d7c" + name = "github.com/gogo/googleapis" + packages = 
["google/rpc"] + pruneopts = "UT" + revision = "8558fb44d2f1fc223118afc694129d2c2d2924d1" + version = "v1.1.0" [[projects]] - digest = "1:9b56c3d59d7899c60a02f4d0e8592fe70b19f3b6b47553849b20fe63a62efe66" + digest = "1:f5ccd717b5f093cbabc51ee2e7a5979b92f17d217f9031d6d64f337101c408e4" name = "github.com/gogo/protobuf" packages = [ "gogoproto", + "jsonpb", "plugin/compare", "plugin/defaultcheck", "plugin/description", @@ -373,15 +372,26 @@ "protoc-gen-gogo/generator/internal/remap", "protoc-gen-gogo/grpc", "protoc-gen-gogo/plugin", + "sortkeys", + "types", "vanity", "vanity/command", ] + pruneopts = "T" + revision = "4cbf7e384e768b4e01799441fdf2a706a5635ae7" + version = "v1.2.0" + +[[projects]] + digest = "1:a93379ca78a24c7fad97ff3bffdc7f2285ff61f26dd6beedeab0e1391718e95c" + name = "github.com/gogo/status" + packages = ["."] pruneopts = "UT" - revision = "636bf0302bc95575d69441b25a2603156ffdddf1" - version = "v1.1.1" + revision = "23951469965b0104342ad7b9625586150b6baf5b" + version = "v1.0.2" [[projects]] - digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf" + branch = "master" + digest = "1:823d6cdd461935cd426e76e4eec90c22938db664054eabda0bcf63ec0274566e" name = "github.com/golang/protobuf" packages = [ "proto", @@ -391,8 +401,7 @@ "ptypes/timestamp", ] pruneopts = "UT" - revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" - version = "v1.2.0" + revision = "b27b920f9e71b439b873b17bf99f56467623814a" [[projects]] branch = "master" @@ -403,12 +412,12 @@ revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" [[projects]] - digest = "1:c79fb010be38a59d657c48c6ba1d003a8aa651fa56b579d959d74573b7dff8e1" + digest = "1:160eabf7a69910fd74f29c692718bc2437c1c1c7d4c9dea9712357752a70e5df" name = "github.com/gorilla/context" packages = ["."] pruneopts = "UT" - revision = "08b5f424b9271eedf6f9f0ce86cb9396ed337a42" - version = "v1.1.1" + revision = "1ea25387ff6f684839d82767c1733ff4d4d15d0a" + version = "v1.1" [[projects]] digest = 
"1:e73f5b0152105f18bc131fba127d9949305c8693f8a762588a82a48f61756f5f" @@ -419,20 +428,32 @@ version = "v1.6.2" [[projects]] - digest = "1:662f988f80b8e4208a0004d1e9d67543602fdd00b7106a857b8339e3d276ae1e" + digest = "1:a93a4de7b00fb9cc952bc0483e9a32cd273adbbd7f3282bfe89656b2f88fb08c" + name = "github.com/grpc-ecosystem/go-grpc-middleware" + packages = [ + ".", + "auth", + "util/metautils", + ] + pruneopts = "UT" + revision = "c250d6563d4d4c20252cd865923440e829844f4e" + version = "v1.0.0" + +[[projects]] + digest = "1:f8cb7c367c825e0c0be75f17e9b003d39b1240a1535fbbf095a18d7bb0d0c9c9" name = "github.com/hashicorp/consul" packages = ["api"] pruneopts = "UT" - revision = "e8757838a49feeb682c7e6ad6b78694a78b2096b" - version = "v1.3.0" + revision = "e716d1b5f8be252b3e53906c6d5632e0228f30fa" + version = "v1.2.2" [[projects]] - digest = "1:f47d6109c2034cb16bd62b220e18afd5aa9d5a1630fe5d937ad96a4fb7cbb277" + branch = "master" + digest = "1:77cb3be9b21ba7f1a4701e870c84ea8b66e7d74c7c8951c58155fdadae9414ec" name = "github.com/hashicorp/go-cleanhttp" packages = ["."] pruneopts = "UT" - revision = "e8ab9daed8d1ddd2d3c4efba338fe2eeae2e4f18" - version = "v0.5.0" + revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d" [[projects]] branch = "master" @@ -467,16 +488,8 @@ version = "v1.0" [[projects]] - digest = "1:0a69a1c0db3591fcefb47f115b224592c8dfa4368b7ba9fae509d5e16cdc95c8" - name = "github.com/konsorten/go-windows-terminal-sequences" - packages = ["."] - pruneopts = "UT" - revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242" - version = "v1.0.1" - -[[projects]] - branch = "master" - digest = "1:ff5d9579ce740b0f05978eb4df1e8c177829c29b11a90304257d8e7413a061cf" + branch = "dev" + digest = "1:103b685a90ea6f52d661d374fe351ed376c2fe180c25bb59873ec6003e390cdf" name = "github.com/ligato/cn-infra" packages = [ "agent", @@ -488,7 +501,6 @@ "datasync/resync", "datasync/syncbase", "db/keyval", - "db/keyval/bolt", "db/keyval/consul", "db/keyval/etcd", "db/keyval/kvproto", @@ -520,47 +532,47 
@@ "utils/safeclose", ] pruneopts = "T" - revision = "f597195a15762fa2bb1ce5bfdd875ec96c63d153" + revision = "baf4f60d8a6792fd6ade9f93a4227f391b4d5ead" [[projects]] branch = "master" - digest = "1:be40f095cd741773905744f16c1f7a21fd9226ccd0529f019deb7da6d667f71c" + digest = "1:7616dd8d9ddca4d4d8aa0e3793f66015c8c8bf9a3f2387be6be59347f43a75c0" name = "github.com/logrusorgru/aurora.git" packages = ["."] pruneopts = "UT" - revision = "a7b3b318ed4e1ae5b80602b08627267303c68572" + revision = "d694e6f975a9109e2b063829d563a7c153c4b53c" [[projects]] branch = "master" - digest = "1:40bf4b24f9127737b22d3b86ff0bef3c3e9d87604d6260d9f9b9125e7995ff49" + digest = "1:fd0b11e9149f5aa2a2f17c7f577c905a93c04633ae6e0b2b51f486bfe68fadad" name = "github.com/lunixbochs/struc" packages = ["."] pruneopts = "UT" - revision = "02e4c2afbb2ac4bae6876f52c8273fc4cf5a4b0a" + revision = "ef56447db6a068ad9e52bc54a1aff5fb9e1ed2dd" [[projects]] - digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" + digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6" name = "github.com/matttproud/golang_protobuf_extensions" packages = ["pbutil"] pruneopts = "UT" - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" + revision = "3247c84500bff8d9fb6d579d800f20b3e091582c" + version = "v1.0.0" [[projects]] - digest = "1:78bbb1ba5b7c3f2ed0ea1eab57bdd3859aec7e177811563edc41198a760b06af" + branch = "master" + digest = "1:8eb17c2ec4df79193ae65b621cd1c0c4697db3bc317fe6afdc76d7f2746abd05" name = "github.com/mitchellh/go-homedir" packages = ["."] pruneopts = "UT" - revision = "ae18d6b8b3205b561c79e8e5f69bff09736185f4" - version = "v1.0.0" + revision = "3864e76763d94a6df2f9960b16a20a33da9f9a66" [[projects]] - digest = "1:53bc4cd4914cd7cd52139990d5170d6dc99067ae31c56530621b18b35fc30318" + branch = "master" + digest = "1:5ab79470a1d0fb19b041a624415612f8236b3c06070161a910562f2b2d064355" name = "github.com/mitchellh/mapstructure" packages = ["."] pruneopts = 
"UT" - revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe" - version = "v1.1.2" + revision = "f15292f7a699fcc1a38a80977f80a046874ba8ac" [[projects]] branch = "master" @@ -579,7 +591,7 @@ version = "v1.7.4-pre" [[projects]] - digest = "1:7a137fb7718928e473b7d805434ae563ec41790d3d227cdc64e8b14d1cab8a1f" + digest = "1:29f294e6a3b9d30629266b2765a8c203056387941e600405b8e8871c84653042" name = "github.com/onsi/gomega" packages = [ ".", @@ -596,8 +608,8 @@ "types", ] pruneopts = "UT" - revision = "65fb64232476ad9046e57c26cd0bff3d3a8dc6cd" - version = "v1.4.3" + revision = "b6ea1ea48f981d0f615a154a45eabb9dd466556d" + version = "v1.4.1" [[projects]] digest = "1:ee4d4af67d93cc7644157882329023ce9a7bcfce956a079069a9405521c7cc8d" @@ -674,23 +686,23 @@ version = "v1.0.0" [[projects]] - digest = "1:e39a5ee8fcbec487f8fc68863ef95f2b025e0739b0e4aa55558a2b4cf8f0ecf0" + digest = "1:29803f52611cbcc1dfe55b456e9fdac362af7248b3d29d7ea1bec0a12e71dff4" name = "github.com/pierrec/lz4" packages = [ ".", "internal/xxh32", ] pruneopts = "UT" - revision = "635575b42742856941dbc767b44905bb9ba083f6" - version = "v2.0.7" + revision = "1958fd8fff7f115e79725b1288e0b878b3e06b00" + version = "v2.0.3" [[projects]] - digest = "1:40e195917a951a8bf867cd05de2a46aaf1806c50cf92eebf4c16f78cd196f747" + digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b" name = "github.com/pkg/errors" packages = ["."] pruneopts = "UT" - revision = "645ef00459ed84a119197bfb8d8205042c6df63d" - version = "v0.8.0" + revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4" + version = "v0.8.1" [[projects]] digest = "1:d14a5f4bfecf017cb780bdde1b6483e5deb87e12c332544d2c430eda58734bcb" @@ -705,15 +717,15 @@ [[projects]] branch = "master" - digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" + digest = "1:32d10bdfa8f09ecf13598324dba86ab891f11db3c538b6a34d1c3b5b99d7c36b" name = "github.com/prometheus/client_model" packages = ["go"] pruneopts = "UT" - revision = 
"5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" + revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" [[projects]] branch = "master" - digest = "1:db712fde5d12d6cdbdf14b777f0c230f4ff5ab0be8e35b239fc319953ed577a4" + digest = "1:fcce8c26e13e3d5018d5c42de857e8b700354d36afb900dd82bc642383981661" name = "github.com/prometheus/common" packages = [ "expfmt", @@ -721,28 +733,28 @@ "model", ] pruneopts = "UT" - revision = "7e9e6cabbd393fc208072eedef99188d0ce788b6" + revision = "89604d197083d4781071d3c65855d24ecfb0a563" [[projects]] branch = "master" - digest = "1:ef74914912f99c79434d9c09658274678bc85080ebe3ab32bec3940ebce5e1fc" + digest = "1:19504758005dbeb5c6c33e562dd51532da38c3717752e73791296c9288421dbf" name = "github.com/prometheus/procfs" packages = [ ".", "internal/util", - "nfs", + "nfsd", "xfs", ] pruneopts = "UT" - revision = "185b4288413d2a0dd0806f78c90dde719829e5ae" + revision = "85fadb6e89903ef7cca6f6a804474cd5ea85b6e1" [[projects]] branch = "master" - digest = "1:d38f81081a389f1466ec98192cf9115a82158854d6f01e1c23e2e7554b97db71" + digest = "1:c4556a44e350b50a490544d9b06e9fba9c286c21d6c0e47f54f3a9214597298c" name = "github.com/rcrowley/go-metrics" packages = ["."] pruneopts = "UT" - revision = "3113b8401b8a98917cde58f8bbd42a1b1c03b1fd" + revision = "e2704e165165ec55d062f5919b4b29494e9fa790" [[projects]] digest = "1:274f67cb6fed9588ea2521ecdac05a6d62a8c51c074c1fccc6a49a40ba80e925" @@ -761,15 +773,15 @@ version = "v0.9.0" [[projects]] - digest = "1:d567a75ca7997f273776fc257f338d329c2e5877a8105ff8bd0234f75d45d4c0" + digest = "1:606e395bc6f4011ae992e7ae0c613839d20ea097b8a9468e1683b22cefd8fd77" name = "github.com/sirupsen/logrus" packages = [ ".", "hooks/syslog", ] pruneopts = "UT" - revision = "bcd833dfe83d3cebad139e4a29ed79cb2318bf95" - version = "v1.2.0" + revision = "3e01752db0189b9157070a0e1668a620f9a85da2" + version = "v1.0.6" [[projects]] digest = "1:645cabccbb4fa8aab25a956cbcbdf6a6845ca736b2c64e197ca7cbb9d210b939" @@ -780,20 +792,20 @@ version = "v0.0.3" 
[[projects]] - digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2" + digest = "1:dab83a1bbc7ad3d7a6ba1a1cc1760f25ac38cdf7d96a5cdd55cd915a4f5ceaf9" name = "github.com/spf13/pflag" packages = ["."] pruneopts = "UT" - revision = "298182f68c66c05229eb03ac171abe6e309ee79a" - version = "v1.0.3" + revision = "9a97c102cda95a86cec2345a6f09f55a939babf5" + version = "v1.0.2" [[projects]] branch = "master" - digest = "1:e14e467ed00ab98665623c5060fa17e3d7079be560ffc33cabafd05d35894f05" + digest = "1:e865a1cd94806d1bb0eaa0bffba2ccb5e25ac42e1f55328c83d5e3399c9961a4" name = "github.com/syndtr/gocapability" packages = ["capability"] pruneopts = "UT" - revision = "d98352740cb2c55f81556b63d4a1ec64c5a319c2" + revision = "33e07d32887e1e06b7c025f27ce52f62c7990bc0" [[projects]] digest = "1:eaa6698f44de8f2977e93c9b946e60a8af75f565058658aad2df8032b55c84e5" @@ -805,11 +817,11 @@ [[projects]] branch = "master" - digest = "1:2ac7a6137a887542d0775ac7754948d4d34bad1fcbd4a7cbe9013c2211b1b566" + digest = "1:e833d953c8467158fd0250a053ec390dd899945d4c6b87b07920ad431fc80dfe" name = "github.com/unrolled/render" packages = ["."] pruneopts = "UT" - revision = "4c664cb3ad2f9856d1debd49977df56221ab45a9" + revision = "65450fb6b2d3595beca39f969c411db8f8d5c806" [[projects]] digest = "1:b24d38b282bacf9791408a080f606370efa3d364e4b5fd9ba0f7b87786d3b679" @@ -821,14 +833,14 @@ [[projects]] branch = "master" - digest = "1:bbdc3aac716e0d1f799e18b8461e7f5a5e74f9c069b121afd160b8fe2c2ba462" + digest = "1:5f912b2a10701e13b05b826b29620f8ed5a83e855b1d4114dfff489535a96df3" name = "github.com/vishvananda/netlink" packages = [ ".", "nl", ] pruneopts = "UT" - revision = "093e80f9fa21ff7b6d400d4cc54933558fd446a4" + revision = "56b1bd27a9a363862ef67916bb450fe240787d07" [[projects]] branch = "master" @@ -840,7 +852,7 @@ [[projects]] branch = "master" - digest = "1:001a4e7a40e50ff2ef32e2556bca50c4f77daa457db3ac6afc8bea9bb2122cfb" + digest = 
"1:19f86cbdeb3e5cf117b52da99a655e2438ec65ae3240687ef4a520582e825df8" name = "golang.org/x/crypto" packages = [ "bcrypt", @@ -848,11 +860,11 @@ "ssh/terminal", ] pruneopts = "UT" - revision = "4d3f4d9ffa16a13f451c3b2999e9c49e9750bf06" + revision = "3d37316aaa6bd9929127ac9a527abf408178ea7b" [[projects]] branch = "master" - digest = "1:8b5682a2f0d3f944f12b9e87aeb6bbf451b55665ff6b87999c999b2240c390e9" + digest = "1:6b9d693aad7155c20f594c83ffb8e4dc74db309236a99459a384f57264dbad24" name = "golang.org/x/net" packages = [ "context", @@ -867,20 +879,20 @@ "trace", ] pruneopts = "UT" - revision = "610586996380ceef02dd726cc09df7e00a3f8e56" + revision = "26e67e76b6c3f6ce91f7c52def5af501b4e0f3a2" [[projects]] - branch = "master" - digest = "1:417d27a82efb8473554234a282be33d23b0d6adc121e636b55950f913ac071d6" + digest = "1:0dafafed83f125cdc945a014b2dec15e5b5d8cd2d77a2d1e3763120b08ab381b" name = "golang.org/x/sys" packages = [ "unix", "windows", ] pruneopts = "UT" - revision = "9b800f95dbbc54abff0acf7ee32d88ba4e328c89" + revision = "4910a1d54f876d7b22162a85f4d066d3ee649450" [[projects]] + branch = "master" digest = "1:436b24586f8fee329e0dd65fd67c817681420cda1d7f934345c13fe78c212a73" name = "golang.org/x/text" packages = [ @@ -912,42 +924,36 @@ "unicode/rangetable", ] pruneopts = "UT" - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" + revision = "e19ae1496984b1c655b8044a65c0300a3c878dd3" [[projects]] branch = "master" - digest = "1:56b0bca90b7e5d1facf5fbdacba23e4e0ce069d25381b8e2f70ef1e7ebfb9c1a" + digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c" name = "google.golang.org/genproto" packages = ["googleapis/rpc/status"] pruneopts = "UT" - revision = "c830210a61dfaa790e1920f8d0470fc27bc2efbe" + revision = "c66870c02cf823ceb633bcd05be3c7cda29976f4" [[projects]] - digest = "1:8f56475624fb72854d06ca16c2f7032e3cea14a63074e9c199ba8d46431c1127" + digest = "1:c52f29435ecb5b76c37e7f0098b6a50dbe60f8624d820827d0fede75c40199a1" name = 
"google.golang.org/grpc" packages = [ ".", "balancer", "balancer/base", "balancer/roundrobin", - "binarylog/grpc_binarylog_v1", "codes", "connectivity", "credentials", - "credentials/internal", "encoding", "encoding/proto", "grpclog", "health/grpc_health_v1", "internal", "internal/backoff", - "internal/binarylog", "internal/channelz", "internal/envconfig", "internal/grpcrand", - "internal/grpcsync", - "internal/syscall", "internal/transport", "keepalive", "metadata", @@ -961,16 +967,16 @@ "tap", ] pruneopts = "UT" - revision = "df014850f6dee74ba2fc94874043a9f3f75fbfd8" - version = "v1.17.0" + revision = "8dea3dc473e90c8179e519d91302d0597c0ca1d1" + version = "v1.15.0" [[projects]] - digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202" + branch = "v2" + digest = "1:73e6fda93622790d2371344759df06ff5ff2fac64a6b6e8832b792e7402956e7" name = "gopkg.in/yaml.v2" packages = ["."] pruneopts = "UT" - revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" - version = "v2.2.1" + revision = "d670f9405373e636a5a2765eea47fac0c9bc91a4" [solve-meta] analyzer-name = "dep" @@ -983,12 +989,14 @@ "git.fd.io/govpp.git/cmd/binapi-generator", "git.fd.io/govpp.git/core", "github.com/buger/goterm", - "github.com/elazarl/go-bindata-assetfs", "github.com/fsouza/go-dockerclient", - "github.com/ghodss/yaml", "github.com/go-errors/errors", + "github.com/gogo/protobuf/gogoproto", + "github.com/gogo/protobuf/jsonpb", "github.com/gogo/protobuf/proto", "github.com/gogo/protobuf/protoc-gen-gogo", + "github.com/gogo/protobuf/types", + "github.com/gogo/status", "github.com/ligato/cn-infra/agent", "github.com/ligato/cn-infra/config", "github.com/ligato/cn-infra/datasync", @@ -996,8 +1004,8 @@ "github.com/ligato/cn-infra/datasync/kvdbsync/local", "github.com/ligato/cn-infra/datasync/msgsync", "github.com/ligato/cn-infra/datasync/resync", + "github.com/ligato/cn-infra/datasync/syncbase", "github.com/ligato/cn-infra/db/keyval", - "github.com/ligato/cn-infra/db/keyval/bolt", 
"github.com/ligato/cn-infra/db/keyval/consul", "github.com/ligato/cn-infra/db/keyval/etcd", "github.com/ligato/cn-infra/db/keyval/kvproto", @@ -1013,7 +1021,6 @@ "github.com/ligato/cn-infra/logging/logrus", "github.com/ligato/cn-infra/logging/measure", "github.com/ligato/cn-infra/logging/measure/model/apitrace", - "github.com/ligato/cn-infra/messaging", "github.com/ligato/cn-infra/messaging/kafka", "github.com/ligato/cn-infra/rpc/grpc", "github.com/ligato/cn-infra/rpc/prometheus", @@ -1027,6 +1034,7 @@ "github.com/namsral/flag", "github.com/onsi/gomega", "github.com/opencontainers/runc", + "github.com/pkg/errors", "github.com/prometheus/client_golang/prometheus", "github.com/prometheus/client_golang/prometheus/promhttp", "github.com/sirupsen/logrus", @@ -1035,7 +1043,9 @@ "github.com/vishvananda/netlink", "github.com/vishvananda/netns", "golang.org/x/net/context", + "golang.org/x/sys/unix", "google.golang.org/grpc", + "google.golang.org/grpc/codes", ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 309e0f012a..506eedddcd 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -12,7 +12,7 @@ required = [ # Constraints [[constraint]] - branch = "master" + branch = "dev" name = "github.com/ligato/cn-infra" [[constraint]] @@ -27,13 +27,9 @@ required = [ name = "github.com/fsouza/go-dockerclient" version = "1.0" -[[constraint]] - name = "github.com/ghodss/yaml" - version = "1.0.0" - [[constraint]] name = "github.com/gogo/protobuf" - version = "1.1.1" + version = "1.2.0" [[constraint]] branch = "master" @@ -79,6 +75,10 @@ required = [ name = "google.golang.org/grpc" version = "1.14.0" +[[constraint]] + name = "github.com/opencontainers/runc" + version = "v1.0.0-rc5" + # Overrides [[override]] @@ -97,10 +97,6 @@ required = [ name = "github.com/coreos/etcd" version = "=3.3.10" -[[constraint]] - name = "github.com/opencontainers/runc" - version = "v1.0.0-rc5" - # Prunes [prune] @@ -110,3 +106,7 @@ required = [ [[prune.project]] name = 
"github.com/ligato/cn-infra" unused-packages = false + + [[prune.project]] + name = "github.com/gogo/protobuf" + unused-packages = false diff --git a/Makefile b/Makefile index 434adf13f5..74f8bd0a70 100644 --- a/Makefile +++ b/Makefile @@ -28,6 +28,11 @@ build: cmd examples # Clean all clean: clean-cmd clean-examples +# Install commands +agent: + @echo "=> installing agent ${VERSION}" + @go install -ldflags "${LDFLAGS}" -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} ./cmd/vpp-agent + # Install commands install: @echo "=> installing commands ${VERSION}" @@ -38,13 +43,13 @@ install: # Build commands cmd: @echo "=> building commands ${VERSION}" - cd cmd/vpp-agent && go build -i -ldflags "${LDFLAGS}" -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} - cd cmd/vpp-agent-ctl && go build -i -ldflags "${LDFLAGS}" -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} - cd cmd/agentctl && go build -i -ldflags "${LDFLAGS}" -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} + cd cmd/vpp-agent && go build -ldflags "${LDFLAGS}" -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} + cd cmd/vpp-agent-ctl && go build -ldflags "${LDFLAGS}" -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} + cd cmd/agentctl && go build -ldflags "${LDFLAGS}" -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} # Clean commands clean-cmd: - @echo "=> cleaning binaries" + @echo "=> cleaning command binaries" rm -f ./cmd/vpp-agent/vpp-agent rm -f ./cmd/vpp-agent-ctl/vpp-agent-ctl rm -f ./cmd/agentctl/agentctl @@ -52,34 +57,38 @@ clean-cmd: # Build examples examples: @echo "=> building examples" - cd examples/govpp_call && go build -i -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} - cd examples/idx_bd_cache && go build -i -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} - cd examples/idx_iface_cache && go build -i -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} - cd examples/idx_mapping_lookup && go build -i -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} - cd examples/idx_mapping_watcher && go build -i -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} - cd examples/idx_veth_cache && go build -i 
-tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} - cd examples/localclient_linux/tap && go build -i -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} - cd examples/localclient_linux/veth && go build -i -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} - cd examples/localclient_vpp/nat && go build -i -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} - cd examples/localclient_vpp/plugins && go build -i -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} - cd examples/grpc_vpp/remote_client && go build -i -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} - cd examples/grpc_vpp/notifications && go build -i -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} + cd examples/custom_model && go build -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} + cd examples/govpp_call && go build -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} + cd examples/grpc_vpp/remote_client && go build -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} + cd examples/grpc_vpp/notifications && go build -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} + cd examples/kvscheduler/acl && go build -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} + cd examples/kvscheduler/interconnect && go build -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} + cd examples/kvscheduler/l2 && go build -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} + cd examples/kvscheduler/acl && go build -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} + cd examples/kvscheduler/nat && go build -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} + cd examples/kvscheduler/vpp-l3 && go build -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} + cd examples/localclient_linux/tap && go build -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} + cd examples/localclient_linux/veth && go build -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} + cd examples/localclient_vpp/nat && go build -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} + cd examples/localclient_vpp/plugins && go build -tags="${GO_BUILD_TAGS}" ${GO_BUILD_ARGS} # Clean examples clean-examples: @echo "=> cleaning examples" - rm -f examples/govpp_call/govpp_call - rm -f examples/idx_bd_cache/idx_bd_cache - rm -f 
examples/idx_iface_cache/idx_iface_cache - rm -f examples/idx_mapping_lookup/idx_mapping_lookup - rm -f examples/idx_mapping_watcher/idx_mapping_watcher - rm -f examples/idx_veth_cache/idx_veth_cache - rm -f examples/localclient_linux/tap/tap - rm -f examples/localclient_linux/veth/veth - rm -f examples/localclient_vpp/nat/nat - rm -f examples/localclient_vpp/plugins/plugins - rm -f examples/grpc_vpp/notifications/notifications - rm -f examples/grpc_vpp/remote_client/remote_client + cd examples/custom_model && go clean + cd examples/govpp_call && go clean + cd examples/grpc_vpp/remote_client && go clean + cd examples/grpc_vpp/notifications && go clean + cd examples/kvscheduler/acl && go clean + cd examples/kvscheduler/interconnect && go clean + cd examples/kvscheduler/l2 && go clean + cd examples/kvscheduler/acl && go clean + cd examples/kvscheduler/nat && go clean + cd examples/kvscheduler/vpp-l3 && go clean + cd examples/localclient_linux/tap && go clean + cd examples/localclient_linux/veth && go clean + cd examples/localclient_vpp/nat && go clean + cd examples/localclient_vpp/plugins && go clean # Run tests test: @@ -101,21 +110,22 @@ test-cover-xml: test-cover @echo "=> coverage report generated into ${COVER_DIR}/coverage.xml" # Code generation -generate: generate-proto generate-binapi +generate: generate-proto generate-binapi generate-desc-adapters # Get generator tools get-proto-generators: - go install ./vendor/github.com/gogo/protobuf/protoc-gen-gogo + @go install ./vendor/github.com/gogo/protobuf/protoc-gen-gogo # Generate proto models generate-proto: get-proto-generators @echo "=> generating proto" cd plugins/linux/model && go generate cd plugins/vpp/model && go generate + ./scripts/genprotos.sh # Get generator tools get-binapi-generators: - go install ./vendor/git.fd.io/govpp.git/cmd/binapi-generator + @go install ./vendor/git.fd.io/govpp.git/cmd/binapi-generator # Generate binary api generate-binapi: get-binapi-generators @@ -123,6 +133,25 @@ 
generate-binapi: get-binapi-generators cd plugins/vpp/binapi && go generate @echo "=> applying fix patches" find plugins/vpp/binapi -maxdepth 1 -type f -name '*.patch' -exec patch --no-backup-if-mismatch -p1 -i {} \; + @echo + +get-desc-adapter-generator: + @go install ./plugins/kvscheduler/descriptor-adapter + +generate-desc-adapters: get-desc-adapter-generator + @echo "=> generating descriptor adapters" + cd plugins/linuxv2/ifplugin && go generate + cd plugins/linuxv2/l3plugin && go generate + cd plugins/vppv2/aclplugin && go generate + cd plugins/vppv2/ifplugin && go generate + cd plugins/vppv2/ipsecplugin && go generate + cd plugins/vppv2/l2plugin && go generate + cd plugins/vppv2/l3plugin && go generate + cd plugins/vppv2/natplugin && go generate + cd plugins/vppv2/puntplugin && go generate + cd plugins/vppv2/stnplugin && go generate + cd plugins/vppv2/puntplugin && go generate + @echo verify-binapi: @echo "=> verifying binary api" diff --git a/api/configurator/configurator.pb.go b/api/configurator/configurator.pb.go new file mode 100644 index 0000000000..1baa3890da --- /dev/null +++ b/api/configurator/configurator.pb.go @@ -0,0 +1,890 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: configurator/configurator.proto + +package configurator // import "github.com/ligato/vpp-agent/api/configurator" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import linux "github.com/ligato/vpp-agent/api/models/linux" +import vpp "github.com/ligato/vpp-agent/api/models/vpp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// Config groups all supported config data into single message. +type Config struct { + VppConfig *vpp.ConfigData `protobuf:"bytes,1,opt,name=vpp_config,json=vppConfig,proto3" json:"vpp_config,omitempty"` + LinuxConfig *linux.ConfigData `protobuf:"bytes,2,opt,name=linux_config,json=linuxConfig,proto3" json:"linux_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Config) Reset() { *m = Config{} } +func (m *Config) String() string { return proto.CompactTextString(m) } +func (*Config) ProtoMessage() {} +func (*Config) Descriptor() ([]byte, []int) { + return fileDescriptor_configurator_4bd663f44563fc57, []int{0} +} +func (m *Config) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Config.Unmarshal(m, b) +} +func (m *Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Config.Marshal(b, m, deterministic) +} +func (dst *Config) XXX_Merge(src proto.Message) { + xxx_messageInfo_Config.Merge(dst, src) +} +func (m *Config) XXX_Size() int { + return xxx_messageInfo_Config.Size(m) +} +func (m *Config) XXX_DiscardUnknown() { + xxx_messageInfo_Config.DiscardUnknown(m) +} + +var xxx_messageInfo_Config proto.InternalMessageInfo + +func (m *Config) GetVppConfig() *vpp.ConfigData { + if m != nil { + return m.VppConfig + } + return nil +} + +func (m *Config) GetLinuxConfig() *linux.ConfigData { + if m != nil { + return m.LinuxConfig + } + return nil +} + +// Notification groups all notification data into single message. 
+type Notification struct { + // Types that are valid to be assigned to Notification: + // *Notification_VppNotification + // *Notification_LinuxNotification + Notification isNotification_Notification `protobuf_oneof:"notification"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Notification) Reset() { *m = Notification{} } +func (m *Notification) String() string { return proto.CompactTextString(m) } +func (*Notification) ProtoMessage() {} +func (*Notification) Descriptor() ([]byte, []int) { + return fileDescriptor_configurator_4bd663f44563fc57, []int{1} +} +func (m *Notification) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Notification.Unmarshal(m, b) +} +func (m *Notification) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Notification.Marshal(b, m, deterministic) +} +func (dst *Notification) XXX_Merge(src proto.Message) { + xxx_messageInfo_Notification.Merge(dst, src) +} +func (m *Notification) XXX_Size() int { + return xxx_messageInfo_Notification.Size(m) +} +func (m *Notification) XXX_DiscardUnknown() { + xxx_messageInfo_Notification.DiscardUnknown(m) +} + +var xxx_messageInfo_Notification proto.InternalMessageInfo + +type isNotification_Notification interface { + isNotification_Notification() +} + +type Notification_VppNotification struct { + VppNotification *vpp.Notification `protobuf:"bytes,1,opt,name=vpp_notification,json=vppNotification,proto3,oneof"` +} +type Notification_LinuxNotification struct { + LinuxNotification *linux.Notification `protobuf:"bytes,2,opt,name=linux_notification,json=linuxNotification,proto3,oneof"` +} + +func (*Notification_VppNotification) isNotification_Notification() {} +func (*Notification_LinuxNotification) isNotification_Notification() {} + +func (m *Notification) GetNotification() isNotification_Notification { + if m != nil { + return m.Notification + } + return nil +} + +func (m *Notification) 
GetVppNotification() *vpp.Notification { + if x, ok := m.GetNotification().(*Notification_VppNotification); ok { + return x.VppNotification + } + return nil +} + +func (m *Notification) GetLinuxNotification() *linux.Notification { + if x, ok := m.GetNotification().(*Notification_LinuxNotification); ok { + return x.LinuxNotification + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Notification) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Notification_OneofMarshaler, _Notification_OneofUnmarshaler, _Notification_OneofSizer, []interface{}{ + (*Notification_VppNotification)(nil), + (*Notification_LinuxNotification)(nil), + } +} + +func _Notification_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Notification) + // notification + switch x := m.Notification.(type) { + case *Notification_VppNotification: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.VppNotification); err != nil { + return err + } + case *Notification_LinuxNotification: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.LinuxNotification); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Notification.Notification has unexpected type %T", x) + } + return nil +} + +func _Notification_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Notification) + switch tag { + case 1: // notification.vpp_notification + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(vpp.Notification) + err := b.DecodeMessage(msg) + m.Notification = &Notification_VppNotification{msg} + return true, err + case 2: // notification.linux_notification + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(linux.Notification) 
+ err := b.DecodeMessage(msg) + m.Notification = &Notification_LinuxNotification{msg} + return true, err + default: + return false, nil + } +} + +func _Notification_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Notification) + // notification + switch x := m.Notification.(type) { + case *Notification_VppNotification: + s := proto.Size(x.VppNotification) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Notification_LinuxNotification: + s := proto.Size(x.LinuxNotification) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type UpdateRequest struct { + // Update describes config data to be updated. + Update *Config `protobuf:"bytes,1,opt,name=update,proto3" json:"update,omitempty"` + // The full_resync option can be used + // to overwrite all existing data. + FullResync bool `protobuf:"varint,2,opt,name=full_resync,json=fullResync,proto3" json:"full_resync,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateRequest) Reset() { *m = UpdateRequest{} } +func (m *UpdateRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateRequest) ProtoMessage() {} +func (*UpdateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_configurator_4bd663f44563fc57, []int{2} +} +func (m *UpdateRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateRequest.Unmarshal(m, b) +} +func (m *UpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateRequest.Merge(dst, src) +} +func (m *UpdateRequest) XXX_Size() int { + return xxx_messageInfo_UpdateRequest.Size(m) +} +func (m *UpdateRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_UpdateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateRequest proto.InternalMessageInfo + +func (m *UpdateRequest) GetUpdate() *Config { + if m != nil { + return m.Update + } + return nil +} + +func (m *UpdateRequest) GetFullResync() bool { + if m != nil { + return m.FullResync + } + return false +} + +type UpdateResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateResponse) Reset() { *m = UpdateResponse{} } +func (m *UpdateResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateResponse) ProtoMessage() {} +func (*UpdateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_configurator_4bd663f44563fc57, []int{3} +} +func (m *UpdateResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateResponse.Unmarshal(m, b) +} +func (m *UpdateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateResponse.Marshal(b, m, deterministic) +} +func (dst *UpdateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateResponse.Merge(dst, src) +} +func (m *UpdateResponse) XXX_Size() int { + return xxx_messageInfo_UpdateResponse.Size(m) +} +func (m *UpdateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateResponse proto.InternalMessageInfo + +type DeleteRequest struct { + // Delete describes config data to be deleted. 
+ Delete *Config `protobuf:"bytes,1,opt,name=delete,proto3" json:"delete,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } +func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRequest) ProtoMessage() {} +func (*DeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_configurator_4bd663f44563fc57, []int{4} +} +func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteRequest.Unmarshal(m, b) +} +func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteRequest.Merge(dst, src) +} +func (m *DeleteRequest) XXX_Size() int { + return xxx_messageInfo_DeleteRequest.Size(m) +} +func (m *DeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo + +func (m *DeleteRequest) GetDelete() *Config { + if m != nil { + return m.Delete + } + return nil +} + +type DeleteResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } +func (m *DeleteResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteResponse) ProtoMessage() {} +func (*DeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_configurator_4bd663f44563fc57, []int{5} +} +func (m *DeleteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteResponse.Unmarshal(m, b) +} +func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic) +} +func (dst *DeleteResponse) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteResponse.Merge(dst, src) +} +func (m *DeleteResponse) XXX_Size() int { + return xxx_messageInfo_DeleteResponse.Size(m) +} +func (m *DeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo + +type GetRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetRequest) Reset() { *m = GetRequest{} } +func (m *GetRequest) String() string { return proto.CompactTextString(m) } +func (*GetRequest) ProtoMessage() {} +func (*GetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_configurator_4bd663f44563fc57, []int{6} +} +func (m *GetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetRequest.Unmarshal(m, b) +} +func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) +} +func (dst *GetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRequest.Merge(dst, src) +} +func (m *GetRequest) XXX_Size() int { + return xxx_messageInfo_GetRequest.Size(m) +} +func (m *GetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetRequest proto.InternalMessageInfo + +type GetResponse struct { + // Config describes desired config retrieved from agent. 
+ Config *Config `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetResponse) Reset() { *m = GetResponse{} } +func (m *GetResponse) String() string { return proto.CompactTextString(m) } +func (*GetResponse) ProtoMessage() {} +func (*GetResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_configurator_4bd663f44563fc57, []int{7} +} +func (m *GetResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetResponse.Unmarshal(m, b) +} +func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) +} +func (dst *GetResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetResponse.Merge(dst, src) +} +func (m *GetResponse) XXX_Size() int { + return xxx_messageInfo_GetResponse.Size(m) +} +func (m *GetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetResponse proto.InternalMessageInfo + +func (m *GetResponse) GetConfig() *Config { + if m != nil { + return m.Config + } + return nil +} + +type DumpRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DumpRequest) Reset() { *m = DumpRequest{} } +func (m *DumpRequest) String() string { return proto.CompactTextString(m) } +func (*DumpRequest) ProtoMessage() {} +func (*DumpRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_configurator_4bd663f44563fc57, []int{8} +} +func (m *DumpRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DumpRequest.Unmarshal(m, b) +} +func (m *DumpRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DumpRequest.Marshal(b, m, deterministic) +} +func (dst *DumpRequest) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_DumpRequest.Merge(dst, src) +} +func (m *DumpRequest) XXX_Size() int { + return xxx_messageInfo_DumpRequest.Size(m) +} +func (m *DumpRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DumpRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DumpRequest proto.InternalMessageInfo + +type DumpResponse struct { + // Dump describes running config dumped from southbound. + Dump *Config `protobuf:"bytes,1,opt,name=dump,proto3" json:"dump,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DumpResponse) Reset() { *m = DumpResponse{} } +func (m *DumpResponse) String() string { return proto.CompactTextString(m) } +func (*DumpResponse) ProtoMessage() {} +func (*DumpResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_configurator_4bd663f44563fc57, []int{9} +} +func (m *DumpResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DumpResponse.Unmarshal(m, b) +} +func (m *DumpResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DumpResponse.Marshal(b, m, deterministic) +} +func (dst *DumpResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DumpResponse.Merge(dst, src) +} +func (m *DumpResponse) XXX_Size() int { + return xxx_messageInfo_DumpResponse.Size(m) +} +func (m *DumpResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DumpResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DumpResponse proto.InternalMessageInfo + +func (m *DumpResponse) GetDump() *Config { + if m != nil { + return m.Dump + } + return nil +} + +// NotificationRequest represent a notification request which contains +// index of next required message +type NotificationRequest struct { + Idx uint32 `protobuf:"varint,1,opt,name=idx,proto3" json:"idx,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NotificationRequest) Reset() { *m = 
NotificationRequest{} } +func (m *NotificationRequest) String() string { return proto.CompactTextString(m) } +func (*NotificationRequest) ProtoMessage() {} +func (*NotificationRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_configurator_4bd663f44563fc57, []int{10} +} +func (m *NotificationRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NotificationRequest.Unmarshal(m, b) +} +func (m *NotificationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NotificationRequest.Marshal(b, m, deterministic) +} +func (dst *NotificationRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NotificationRequest.Merge(dst, src) +} +func (m *NotificationRequest) XXX_Size() int { + return xxx_messageInfo_NotificationRequest.Size(m) +} +func (m *NotificationRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NotificationRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NotificationRequest proto.InternalMessageInfo + +func (m *NotificationRequest) GetIdx() uint32 { + if m != nil { + return m.Idx + } + return 0 +} + +// Response to notification request 'get'. Returns indexed notification. 
+type NotificationResponse struct { + // Index of following notification + NextIdx uint32 `protobuf:"varint,1,opt,name=next_idx,json=nextIdx,proto3" json:"next_idx,omitempty"` + // Notification data + Notification *Notification `protobuf:"bytes,2,opt,name=notification,proto3" json:"notification,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NotificationResponse) Reset() { *m = NotificationResponse{} } +func (m *NotificationResponse) String() string { return proto.CompactTextString(m) } +func (*NotificationResponse) ProtoMessage() {} +func (*NotificationResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_configurator_4bd663f44563fc57, []int{11} +} +func (m *NotificationResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NotificationResponse.Unmarshal(m, b) +} +func (m *NotificationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NotificationResponse.Marshal(b, m, deterministic) +} +func (dst *NotificationResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NotificationResponse.Merge(dst, src) +} +func (m *NotificationResponse) XXX_Size() int { + return xxx_messageInfo_NotificationResponse.Size(m) +} +func (m *NotificationResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NotificationResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NotificationResponse proto.InternalMessageInfo + +func (m *NotificationResponse) GetNextIdx() uint32 { + if m != nil { + return m.NextIdx + } + return 0 +} + +func (m *NotificationResponse) GetNotification() *Notification { + if m != nil { + return m.Notification + } + return nil +} + +func init() { + proto.RegisterType((*Config)(nil), "configurator.Config") + proto.RegisterType((*Notification)(nil), "configurator.Notification") + proto.RegisterType((*UpdateRequest)(nil), "configurator.UpdateRequest") + proto.RegisterType((*UpdateResponse)(nil), 
"configurator.UpdateResponse") + proto.RegisterType((*DeleteRequest)(nil), "configurator.DeleteRequest") + proto.RegisterType((*DeleteResponse)(nil), "configurator.DeleteResponse") + proto.RegisterType((*GetRequest)(nil), "configurator.GetRequest") + proto.RegisterType((*GetResponse)(nil), "configurator.GetResponse") + proto.RegisterType((*DumpRequest)(nil), "configurator.DumpRequest") + proto.RegisterType((*DumpResponse)(nil), "configurator.DumpResponse") + proto.RegisterType((*NotificationRequest)(nil), "configurator.NotificationRequest") + proto.RegisterType((*NotificationResponse)(nil), "configurator.NotificationResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ConfiguratorClient is the client API for Configurator service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ConfiguratorClient interface { + // Get is used for listing desired config. + Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) + // Update is used for updating desired config. + Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error) + // Delete is used for deleting desired config. + Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*DeleteResponse, error) + // Dump is used for dumping running config. + Dump(ctx context.Context, in *DumpRequest, opts ...grpc.CallOption) (*DumpResponse, error) + // Notify is used for subscribing to notifications. 
+ Notify(ctx context.Context, in *NotificationRequest, opts ...grpc.CallOption) (Configurator_NotifyClient, error) +} + +type configuratorClient struct { + cc *grpc.ClientConn +} + +func NewConfiguratorClient(cc *grpc.ClientConn) ConfiguratorClient { + return &configuratorClient{cc} +} + +func (c *configuratorClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { + out := new(GetResponse) + err := c.cc.Invoke(ctx, "/configurator.Configurator/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configuratorClient) Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error) { + out := new(UpdateResponse) + err := c.cc.Invoke(ctx, "/configurator.Configurator/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configuratorClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*DeleteResponse, error) { + out := new(DeleteResponse) + err := c.cc.Invoke(ctx, "/configurator.Configurator/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configuratorClient) Dump(ctx context.Context, in *DumpRequest, opts ...grpc.CallOption) (*DumpResponse, error) { + out := new(DumpResponse) + err := c.cc.Invoke(ctx, "/configurator.Configurator/Dump", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *configuratorClient) Notify(ctx context.Context, in *NotificationRequest, opts ...grpc.CallOption) (Configurator_NotifyClient, error) { + stream, err := c.cc.NewStream(ctx, &_Configurator_serviceDesc.Streams[0], "/configurator.Configurator/Notify", opts...) 
+ if err != nil { + return nil, err + } + x := &configuratorNotifyClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Configurator_NotifyClient interface { + Recv() (*NotificationResponse, error) + grpc.ClientStream +} + +type configuratorNotifyClient struct { + grpc.ClientStream +} + +func (x *configuratorNotifyClient) Recv() (*NotificationResponse, error) { + m := new(NotificationResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ConfiguratorServer is the server API for Configurator service. +type ConfiguratorServer interface { + // Get is used for listing desired config. + Get(context.Context, *GetRequest) (*GetResponse, error) + // Update is used for updating desired config. + Update(context.Context, *UpdateRequest) (*UpdateResponse, error) + // Delete is used for deleting desired config. + Delete(context.Context, *DeleteRequest) (*DeleteResponse, error) + // Dump is used for dumping running config. + Dump(context.Context, *DumpRequest) (*DumpResponse, error) + // Notify is used for subscribing to notifications. 
+ Notify(*NotificationRequest, Configurator_NotifyServer) error +} + +func RegisterConfiguratorServer(s *grpc.Server, srv ConfiguratorServer) { + s.RegisterService(&_Configurator_serviceDesc, srv) +} + +func _Configurator_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfiguratorServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/configurator.Configurator/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfiguratorServer).Get(ctx, req.(*GetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Configurator_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfiguratorServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/configurator.Configurator/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfiguratorServer).Update(ctx, req.(*UpdateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Configurator_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfiguratorServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/configurator.Configurator/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(ConfiguratorServer).Delete(ctx, req.(*DeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Configurator_Dump_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DumpRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConfiguratorServer).Dump(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/configurator.Configurator/Dump", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConfiguratorServer).Dump(ctx, req.(*DumpRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Configurator_Notify_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(NotificationRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ConfiguratorServer).Notify(m, &configuratorNotifyServer{stream}) +} + +type Configurator_NotifyServer interface { + Send(*NotificationResponse) error + grpc.ServerStream +} + +type configuratorNotifyServer struct { + grpc.ServerStream +} + +func (x *configuratorNotifyServer) Send(m *NotificationResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _Configurator_serviceDesc = grpc.ServiceDesc{ + ServiceName: "configurator.Configurator", + HandlerType: (*ConfiguratorServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _Configurator_Get_Handler, + }, + { + MethodName: "Update", + Handler: _Configurator_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _Configurator_Delete_Handler, + }, + { + MethodName: "Dump", + Handler: _Configurator_Dump_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Notify", + Handler: _Configurator_Notify_Handler, + ServerStreams: true, + }, + }, + Metadata: "configurator/configurator.proto", +} + +func init() { + proto.RegisterFile("configurator/configurator.proto", 
fileDescriptor_configurator_4bd663f44563fc57) +} + +var fileDescriptor_configurator_4bd663f44563fc57 = []byte{ + // 520 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x94, 0x41, 0x6f, 0xd3, 0x30, + 0x14, 0xc7, 0xe9, 0x36, 0x85, 0xf1, 0x92, 0x6e, 0xad, 0xd7, 0x43, 0x17, 0x90, 0x06, 0xbe, 0xb0, + 0x03, 0xa4, 0x68, 0x70, 0x00, 0xaa, 0xed, 0xb0, 0x56, 0x1a, 0x5c, 0x90, 0x88, 0xc4, 0x85, 0x03, + 0x55, 0xd6, 0xb8, 0x25, 0x52, 0x6a, 0x7b, 0x8d, 0x53, 0x75, 0xdf, 0x87, 0xcf, 0xc6, 0xe7, 0x40, + 0xf6, 0x73, 0x54, 0xbb, 0x2a, 0x3d, 0x6c, 0xea, 0xfb, 0xbf, 0xff, 0xfb, 0xbd, 0x17, 0x3f, 0xcb, + 0x70, 0x31, 0x15, 0x7c, 0x56, 0xcc, 0xeb, 0x65, 0xa6, 0xc4, 0x72, 0xe0, 0x06, 0x89, 0x5c, 0x0a, + 0x25, 0x48, 0xe4, 0x6a, 0x71, 0x6f, 0x21, 0x72, 0x56, 0x56, 0x83, 0x95, 0x94, 0xfa, 0x0f, 0x3d, + 0x71, 0xdf, 0xaa, 0x65, 0xc1, 0xeb, 0x35, 0xfe, 0xc7, 0x0c, 0xe5, 0x10, 0x8c, 0x4c, 0x3d, 0x49, + 0x00, 0x56, 0x52, 0x4e, 0x90, 0xd6, 0x6f, 0xbd, 0x6c, 0x5d, 0x86, 0x57, 0xa7, 0x89, 0x66, 0xa0, + 0x61, 0x9c, 0xa9, 0x2c, 0x7d, 0xb6, 0x92, 0xd2, 0xfa, 0x3f, 0x40, 0x64, 0x40, 0x4d, 0xc5, 0x81, + 0xa9, 0xe8, 0x26, 0x48, 0x77, 0x6a, 0x42, 0xa3, 0xa0, 0x40, 0xff, 0xb4, 0x20, 0xfa, 0x26, 0x54, + 0x31, 0x2b, 0xa6, 0x99, 0x2a, 0x04, 0x27, 0x37, 0xd0, 0xd1, 0x6d, 0xb9, 0xa3, 0xd9, 0xe6, 0x5d, + 0xd3, 0xdc, 0x35, 0x7f, 0x79, 0x92, 0x9e, 0xae, 0xa4, 0xf4, 0xea, 0xc7, 0x40, 0x70, 0x0c, 0x8f, + 0x80, 0xc3, 0x9c, 0xd9, 0x61, 0xb6, 0x18, 0x5d, 0xa3, 0xba, 0xe2, 0xed, 0x09, 0x44, 0x6e, 0x3d, + 0xfd, 0x05, 0xed, 0x1f, 0x32, 0xcf, 0x14, 0x4b, 0xd9, 0x43, 0xcd, 0x2a, 0x45, 0xde, 0x40, 0x50, + 0x1b, 0xc1, 0x0e, 0xd7, 0x4b, 0xbc, 0x55, 0xe0, 0xd7, 0xa5, 0xd6, 0x43, 0x2e, 0x20, 0x9c, 0xd5, + 0x65, 0x39, 0x59, 0xb2, 0xea, 0x91, 0x4f, 0xcd, 0x34, 0xc7, 0x29, 0x68, 0x29, 0x35, 0x0a, 0xed, + 0xc0, 0x49, 0xc3, 0xaf, 0xa4, 0xe0, 0x15, 0xa3, 0xd7, 0xd0, 0x1e, 0xb3, 0x92, 0x79, 0x1d, 0x73, + 0x23, 0xec, 0xef, 0x88, 0x1e, 0x0d, 0x6c, 0xca, 0x2d, 0x30, 0x02, 0xb8, 
0x63, 0xca, 0xd2, 0xe8, + 0x10, 0x42, 0x13, 0x61, 0x52, 0xc3, 0xbd, 0x45, 0xff, 0x07, 0x8e, 0x22, 0x6d, 0x43, 0x38, 0xae, + 0x17, 0xb2, 0x61, 0x7d, 0x84, 0x08, 0x43, 0x0b, 0xbb, 0x84, 0xa3, 0xbc, 0x5e, 0xc8, 0xbd, 0x28, + 0xe3, 0xa0, 0xaf, 0xe1, 0xcc, 0x3d, 0xf6, 0xe6, 0x53, 0x3b, 0x70, 0x58, 0xe4, 0x6b, 0x53, 0xdf, + 0x4e, 0xf5, 0x4f, 0xfa, 0x00, 0x3d, 0xdf, 0x68, 0x5b, 0x9d, 0xc3, 0x31, 0x67, 0x6b, 0x35, 0xd9, + 0xd8, 0x9f, 0xea, 0xf8, 0x6b, 0xbe, 0x26, 0x37, 0xfe, 0x0a, 0xed, 0x15, 0x88, 0xfd, 0x69, 0x3c, + 0xa8, 0xe7, 0xbf, 0xfa, 0x7b, 0x00, 0xd1, 0xc8, 0xf1, 0x92, 0xcf, 0x70, 0x78, 0xc7, 0x14, 0xe9, + 0xfb, 0x84, 0xcd, 0x99, 0xc6, 0xe7, 0x3b, 0x32, 0x76, 0xce, 0x11, 0x04, 0xb8, 0x5f, 0xf2, 0xdc, + 0x37, 0x79, 0xb7, 0x2a, 0x7e, 0xb1, 0x3b, 0xb9, 0x81, 0xe0, 0x4e, 0xb7, 0x21, 0xde, 0x45, 0xd9, + 0x86, 0xf8, 0xd7, 0x80, 0x5c, 0xc3, 0x91, 0x5e, 0x16, 0xd9, 0x1a, 0xd6, 0xd9, 0x67, 0x1c, 0xef, + 0x4a, 0xd9, 0xf2, 0xef, 0x10, 0x98, 0x33, 0x7b, 0x24, 0xaf, 0xf6, 0x9c, 0xa4, 0x05, 0xd1, 0x7d, + 0x16, 0x04, 0xbe, 0x6b, 0xdd, 0x0e, 0x7f, 0x7e, 0x9a, 0x17, 0xea, 0x77, 0x7d, 0x9f, 0x4c, 0xc5, + 0x62, 0x50, 0x16, 0xf3, 0x4c, 0x09, 0xfd, 0x56, 0xbd, 0xcd, 0xe6, 0x8c, 0xab, 0x41, 0x26, 0x0b, + 0xef, 0x99, 0x1b, 0xba, 0xc1, 0x7d, 0x60, 0x9e, 0xad, 0xf7, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, + 0x51, 0xa8, 0x29, 0xab, 0x17, 0x05, 0x00, 0x00, +} diff --git a/api/configurator/configurator.proto b/api/configurator/configurator.proto new file mode 100644 index 0000000000..ac11df39d8 --- /dev/null +++ b/api/configurator/configurator.proto @@ -0,0 +1,95 @@ +syntax = "proto3"; + +package configurator; + +option go_package = "github.com/ligato/vpp-agent/api/configurator;configurator"; + +import "models/vpp/vpp.proto"; +import "models/linux/linux.proto"; + +// Config groups all supported config data into single message. +message Config { + vpp.ConfigData vpp_config = 1; + linux.ConfigData linux_config = 2; +} + +// Notification groups all notification data into single message. 
+message Notification { + oneof notification { + vpp.Notification vpp_notification = 1; + linux.Notification linux_notification = 2; + } +} + +// Configurator provides basic operations for managing configuration +// and monitoring state. +service Configurator { + // Get is used for listing desired config. + rpc Get(GetRequest) returns (GetResponse); + + // Update is used for updating desired config. + rpc Update(UpdateRequest) returns (UpdateResponse); + + // Delete is used for deleting desired config. + rpc Delete(DeleteRequest) returns (DeleteResponse); + + // Dump is used for dumping running config. + rpc Dump(DumpRequest) returns (DumpResponse); + + // Notify is used for subscribing to notifications. + rpc Notify(NotificationRequest) returns (stream NotificationResponse); +} + +message UpdateRequest { + // Update describes config data to be updated. + Config update = 1; + + // The full_resync option can be used + // to overwrite all existing data. + bool full_resync = 2; +} + +message UpdateResponse { + +} + +message DeleteRequest { + // Delete describes config data to be deleted. + Config delete = 1; +} + +message DeleteResponse { + +} + +message GetRequest { + +} + +message GetResponse { + // Config describes desired config retrieved from agent. + Config config = 1; +} + +message DumpRequest { + +} + +message DumpResponse { + // Dump describes running config dumped from southbound. + Config dump = 1; +} + +// NotificationRequest represent a notification request which contains +// index of next required message +message NotificationRequest { + uint32 idx = 1; +} + +// Response to notification request 'get'. Returns indexed notification. 
+message NotificationResponse { + // Index of following notification + uint32 next_idx = 1; + // Notification data + Notification notification = 2; +} diff --git a/api/genericmanager/genericmanager.pb.go b/api/genericmanager/genericmanager.pb.go new file mode 100644 index 0000000000..b5e20aefe1 --- /dev/null +++ b/api/genericmanager/genericmanager.pb.go @@ -0,0 +1,1371 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: genericmanager/genericmanager.proto + +package genericmanager // import "github.com/ligato/vpp-agent/api/genericmanager" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import types "github.com/gogo/protobuf/types" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type UpdateResult_Operation int32 + +const ( + UpdateResult_UNSPECIFIED UpdateResult_Operation = 0 + UpdateResult_CREATE UpdateResult_Operation = 1 + UpdateResult_UPDATE UpdateResult_Operation = 2 + UpdateResult_DELETE UpdateResult_Operation = 3 +) + +var UpdateResult_Operation_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "CREATE", + 2: "UPDATE", + 3: "DELETE", +} +var UpdateResult_Operation_value = map[string]int32{ + "UNSPECIFIED": 0, + "CREATE": 1, + "UPDATE": 2, + "DELETE": 3, +} + +func (x UpdateResult_Operation) String() string { + return proto.EnumName(UpdateResult_Operation_name, int32(x)) +} +func (UpdateResult_Operation) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{10, 0} +} + +// Model represents a model description to for recognizing +// different item types. +type Model struct { + // Module describes group of the model. + Module string `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"` + // Version describes concrete version of the model schema. + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + // Type describes name of type described by this model. 
+ Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Model) Reset() { *m = Model{} } +func (m *Model) String() string { return proto.CompactTextString(m) } +func (*Model) ProtoMessage() {} +func (*Model) Descriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{0} +} +func (m *Model) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Model.Unmarshal(m, b) +} +func (m *Model) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Model.Marshal(b, m, deterministic) +} +func (dst *Model) XXX_Merge(src proto.Message) { + xxx_messageInfo_Model.Merge(dst, src) +} +func (m *Model) XXX_Size() int { + return xxx_messageInfo_Model.Size(m) +} +func (m *Model) XXX_DiscardUnknown() { + xxx_messageInfo_Model.DiscardUnknown(m) +} + +var xxx_messageInfo_Model proto.InternalMessageInfo + +func (m *Model) GetModule() string { + if m != nil { + return m.Module + } + return "" +} + +func (m *Model) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Model) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +// Item represents single instance described by the Model. 
+type Item struct { + Id *Item_ID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Data *Data `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Item) Reset() { *m = Item{} } +func (m *Item) String() string { return proto.CompactTextString(m) } +func (*Item) ProtoMessage() {} +func (*Item) Descriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{1} +} +func (m *Item) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Item.Unmarshal(m, b) +} +func (m *Item) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Item.Marshal(b, m, deterministic) +} +func (dst *Item) XXX_Merge(src proto.Message) { + xxx_messageInfo_Item.Merge(dst, src) +} +func (m *Item) XXX_Size() int { + return xxx_messageInfo_Item.Size(m) +} +func (m *Item) XXX_DiscardUnknown() { + xxx_messageInfo_Item.DiscardUnknown(m) +} + +var xxx_messageInfo_Item proto.InternalMessageInfo + +func (m *Item) GetId() *Item_ID { + if m != nil { + return m.Id + } + return nil +} + +func (m *Item) GetData() *Data { + if m != nil { + return m.Data + } + return nil +} + +// ID represents identifier for distinguishing items. 
+type Item_ID struct { + Model *Model `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Item_ID) Reset() { *m = Item_ID{} } +func (m *Item_ID) String() string { return proto.CompactTextString(m) } +func (*Item_ID) ProtoMessage() {} +func (*Item_ID) Descriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{1, 0} +} +func (m *Item_ID) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Item_ID.Unmarshal(m, b) +} +func (m *Item_ID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Item_ID.Marshal(b, m, deterministic) +} +func (dst *Item_ID) XXX_Merge(src proto.Message) { + xxx_messageInfo_Item_ID.Merge(dst, src) +} +func (m *Item_ID) XXX_Size() int { + return xxx_messageInfo_Item_ID.Size(m) +} +func (m *Item_ID) XXX_DiscardUnknown() { + xxx_messageInfo_Item_ID.DiscardUnknown(m) +} + +var xxx_messageInfo_Item_ID proto.InternalMessageInfo + +func (m *Item_ID) GetModel() *Model { + if m != nil { + return m.Model + } + return nil +} + +func (m *Item_ID) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Data represents encoded data for an item. 
+type Data struct { + Any *types.Any `protobuf:"bytes,1,opt,name=any,proto3" json:"any,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Data) Reset() { *m = Data{} } +func (m *Data) String() string { return proto.CompactTextString(m) } +func (*Data) ProtoMessage() {} +func (*Data) Descriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{2} +} +func (m *Data) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Data.Unmarshal(m, b) +} +func (m *Data) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Data.Marshal(b, m, deterministic) +} +func (dst *Data) XXX_Merge(src proto.Message) { + xxx_messageInfo_Data.Merge(dst, src) +} +func (m *Data) XXX_Size() int { + return xxx_messageInfo_Data.Size(m) +} +func (m *Data) XXX_DiscardUnknown() { + xxx_messageInfo_Data.DiscardUnknown(m) +} + +var xxx_messageInfo_Data proto.InternalMessageInfo + +func (m *Data) GetAny() *types.Any { + if m != nil { + return m.Any + } + return nil +} + +// Item status describes status of an item. 
+type ItemStatus struct { + Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ItemStatus) Reset() { *m = ItemStatus{} } +func (m *ItemStatus) String() string { return proto.CompactTextString(m) } +func (*ItemStatus) ProtoMessage() {} +func (*ItemStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{3} +} +func (m *ItemStatus) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ItemStatus.Unmarshal(m, b) +} +func (m *ItemStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ItemStatus.Marshal(b, m, deterministic) +} +func (dst *ItemStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ItemStatus.Merge(dst, src) +} +func (m *ItemStatus) XXX_Size() int { + return xxx_messageInfo_ItemStatus.Size(m) +} +func (m *ItemStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ItemStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ItemStatus proto.InternalMessageInfo + +func (m *ItemStatus) GetStatus() string { + if m != nil { + return m.Status + } + return "" +} + +func (m *ItemStatus) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +type CapabilitiesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CapabilitiesRequest) Reset() { *m = CapabilitiesRequest{} } +func (m *CapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*CapabilitiesRequest) ProtoMessage() {} +func (*CapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{4} +} +func (m *CapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_CapabilitiesRequest.Unmarshal(m, b) +} +func (m *CapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CapabilitiesRequest.Marshal(b, m, deterministic) +} +func (dst *CapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CapabilitiesRequest.Merge(dst, src) +} +func (m *CapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_CapabilitiesRequest.Size(m) +} +func (m *CapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CapabilitiesRequest proto.InternalMessageInfo + +type CapabilitiesResponse struct { + KnownModels []*ModelInfo `protobuf:"bytes,1,rep,name=known_models,json=knownModels,proto3" json:"known_models,omitempty"` + ActiveModules []string `protobuf:"bytes,2,rep,name=active_modules,json=activeModules,proto3" json:"active_modules,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CapabilitiesResponse) Reset() { *m = CapabilitiesResponse{} } +func (m *CapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*CapabilitiesResponse) ProtoMessage() {} +func (*CapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{5} +} +func (m *CapabilitiesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CapabilitiesResponse.Unmarshal(m, b) +} +func (m *CapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CapabilitiesResponse.Marshal(b, m, deterministic) +} +func (dst *CapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CapabilitiesResponse.Merge(dst, src) +} +func (m *CapabilitiesResponse) XXX_Size() int { + return xxx_messageInfo_CapabilitiesResponse.Size(m) +} +func (m *CapabilitiesResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_CapabilitiesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CapabilitiesResponse proto.InternalMessageInfo + +func (m *CapabilitiesResponse) GetKnownModels() []*ModelInfo { + if m != nil { + return m.KnownModels + } + return nil +} + +func (m *CapabilitiesResponse) GetActiveModules() []string { + if m != nil { + return m.ActiveModules + } + return nil +} + +type ModelInfo struct { + Model *Model `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"` + Info map[string]string `protobuf:"bytes,2,rep,name=info,proto3" json:"info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ModelInfo) Reset() { *m = ModelInfo{} } +func (m *ModelInfo) String() string { return proto.CompactTextString(m) } +func (*ModelInfo) ProtoMessage() {} +func (*ModelInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{6} +} +func (m *ModelInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ModelInfo.Unmarshal(m, b) +} +func (m *ModelInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ModelInfo.Marshal(b, m, deterministic) +} +func (dst *ModelInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ModelInfo.Merge(dst, src) +} +func (m *ModelInfo) XXX_Size() int { + return xxx_messageInfo_ModelInfo.Size(m) +} +func (m *ModelInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ModelInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ModelInfo proto.InternalMessageInfo + +func (m *ModelInfo) GetModel() *Model { + if m != nil { + return m.Model + } + return nil +} + +func (m *ModelInfo) GetInfo() map[string]string { + if m != nil { + return m.Info + } + return nil +} + +type SetConfigRequest struct { + Updates []*UpdateItem `protobuf:"bytes,1,rep,name=updates,proto3" json:"updates,omitempty"` + // The 
overwrite_all can be set to true to overwrite all other configuration + // (this is also known as Full Resync) + OverwriteAll bool `protobuf:"varint,2,opt,name=overwrite_all,json=overwriteAll,proto3" json:"overwrite_all,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetConfigRequest) Reset() { *m = SetConfigRequest{} } +func (m *SetConfigRequest) String() string { return proto.CompactTextString(m) } +func (*SetConfigRequest) ProtoMessage() {} +func (*SetConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{7} +} +func (m *SetConfigRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetConfigRequest.Unmarshal(m, b) +} +func (m *SetConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetConfigRequest.Marshal(b, m, deterministic) +} +func (dst *SetConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetConfigRequest.Merge(dst, src) +} +func (m *SetConfigRequest) XXX_Size() int { + return xxx_messageInfo_SetConfigRequest.Size(m) +} +func (m *SetConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetConfigRequest proto.InternalMessageInfo + +func (m *SetConfigRequest) GetUpdates() []*UpdateItem { + if m != nil { + return m.Updates + } + return nil +} + +func (m *SetConfigRequest) GetOverwriteAll() bool { + if m != nil { + return m.OverwriteAll + } + return false +} + +type UpdateItem struct { + // The item describes item to be updated. + // For a delete operation set fields item.Data to nil. + Item *Item `protobuf:"bytes,1,opt,name=item,proto3" json:"item,omitempty"` + // The labels can be used to define user-defined labels for item. 
+ Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateItem) Reset() { *m = UpdateItem{} } +func (m *UpdateItem) String() string { return proto.CompactTextString(m) } +func (*UpdateItem) ProtoMessage() {} +func (*UpdateItem) Descriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{8} +} +func (m *UpdateItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateItem.Unmarshal(m, b) +} +func (m *UpdateItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateItem.Marshal(b, m, deterministic) +} +func (dst *UpdateItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateItem.Merge(dst, src) +} +func (m *UpdateItem) XXX_Size() int { + return xxx_messageInfo_UpdateItem.Size(m) +} +func (m *UpdateItem) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateItem.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateItem proto.InternalMessageInfo + +func (m *UpdateItem) GetItem() *Item { + if m != nil { + return m.Item + } + return nil +} + +func (m *UpdateItem) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +type SetConfigResponse struct { + Results []*UpdateResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetConfigResponse) Reset() { *m = SetConfigResponse{} } +func (m *SetConfigResponse) String() string { return proto.CompactTextString(m) } +func (*SetConfigResponse) ProtoMessage() {} +func (*SetConfigResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{9} +} +func (m *SetConfigResponse) 
XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetConfigResponse.Unmarshal(m, b) +} +func (m *SetConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetConfigResponse.Marshal(b, m, deterministic) +} +func (dst *SetConfigResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetConfigResponse.Merge(dst, src) +} +func (m *SetConfigResponse) XXX_Size() int { + return xxx_messageInfo_SetConfigResponse.Size(m) +} +func (m *SetConfigResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SetConfigResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SetConfigResponse proto.InternalMessageInfo + +func (m *SetConfigResponse) GetResults() []*UpdateResult { + if m != nil { + return m.Results + } + return nil +} + +type UpdateResult struct { + Id *Item_ID `protobuf:"bytes,4,opt,name=id,proto3" json:"id,omitempty"` + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Op UpdateResult_Operation `protobuf:"varint,2,opt,name=op,proto3,enum=genericmanager.UpdateResult_Operation" json:"op,omitempty"` + Status *ItemStatus `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateResult) Reset() { *m = UpdateResult{} } +func (m *UpdateResult) String() string { return proto.CompactTextString(m) } +func (*UpdateResult) ProtoMessage() {} +func (*UpdateResult) Descriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{10} +} +func (m *UpdateResult) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateResult.Unmarshal(m, b) +} +func (m *UpdateResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateResult.Marshal(b, m, deterministic) +} +func (dst *UpdateResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateResult.Merge(dst, src) +} +func (m *UpdateResult) XXX_Size() int { + 
return xxx_messageInfo_UpdateResult.Size(m) +} +func (m *UpdateResult) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateResult.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateResult proto.InternalMessageInfo + +func (m *UpdateResult) GetId() *Item_ID { + if m != nil { + return m.Id + } + return nil +} + +func (m *UpdateResult) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *UpdateResult) GetOp() UpdateResult_Operation { + if m != nil { + return m.Op + } + return UpdateResult_UNSPECIFIED +} + +func (m *UpdateResult) GetStatus() *ItemStatus { + if m != nil { + return m.Status + } + return nil +} + +type GetConfigRequest struct { + Ids []*Item_ID `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetConfigRequest) Reset() { *m = GetConfigRequest{} } +func (m *GetConfigRequest) String() string { return proto.CompactTextString(m) } +func (*GetConfigRequest) ProtoMessage() {} +func (*GetConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{11} +} +func (m *GetConfigRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetConfigRequest.Unmarshal(m, b) +} +func (m *GetConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetConfigRequest.Marshal(b, m, deterministic) +} +func (dst *GetConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetConfigRequest.Merge(dst, src) +} +func (m *GetConfigRequest) XXX_Size() int { + return xxx_messageInfo_GetConfigRequest.Size(m) +} +func (m *GetConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetConfigRequest proto.InternalMessageInfo + +func (m *GetConfigRequest) GetIds() []*Item_ID { + if m != nil { + return m.Ids + } + return nil +} + +type GetConfigResponse struct { + Items 
[]*GetConfigResponse_ConfigItem `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetConfigResponse) Reset() { *m = GetConfigResponse{} } +func (m *GetConfigResponse) String() string { return proto.CompactTextString(m) } +func (*GetConfigResponse) ProtoMessage() {} +func (*GetConfigResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{12} +} +func (m *GetConfigResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetConfigResponse.Unmarshal(m, b) +} +func (m *GetConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetConfigResponse.Marshal(b, m, deterministic) +} +func (dst *GetConfigResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetConfigResponse.Merge(dst, src) +} +func (m *GetConfigResponse) XXX_Size() int { + return xxx_messageInfo_GetConfigResponse.Size(m) +} +func (m *GetConfigResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetConfigResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetConfigResponse proto.InternalMessageInfo + +func (m *GetConfigResponse) GetItems() []*GetConfigResponse_ConfigItem { + if m != nil { + return m.Items + } + return nil +} + +type GetConfigResponse_ConfigItem struct { + Item *Item `protobuf:"bytes,1,opt,name=item,proto3" json:"item,omitempty"` + Status *ItemStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetConfigResponse_ConfigItem) Reset() { *m = GetConfigResponse_ConfigItem{} } +func (m *GetConfigResponse_ConfigItem) String() 
string { return proto.CompactTextString(m) } +func (*GetConfigResponse_ConfigItem) ProtoMessage() {} +func (*GetConfigResponse_ConfigItem) Descriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{12, 0} +} +func (m *GetConfigResponse_ConfigItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetConfigResponse_ConfigItem.Unmarshal(m, b) +} +func (m *GetConfigResponse_ConfigItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetConfigResponse_ConfigItem.Marshal(b, m, deterministic) +} +func (dst *GetConfigResponse_ConfigItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetConfigResponse_ConfigItem.Merge(dst, src) +} +func (m *GetConfigResponse_ConfigItem) XXX_Size() int { + return xxx_messageInfo_GetConfigResponse_ConfigItem.Size(m) +} +func (m *GetConfigResponse_ConfigItem) XXX_DiscardUnknown() { + xxx_messageInfo_GetConfigResponse_ConfigItem.DiscardUnknown(m) +} + +var xxx_messageInfo_GetConfigResponse_ConfigItem proto.InternalMessageInfo + +func (m *GetConfigResponse_ConfigItem) GetItem() *Item { + if m != nil { + return m.Item + } + return nil +} + +func (m *GetConfigResponse_ConfigItem) GetStatus() *ItemStatus { + if m != nil { + return m.Status + } + return nil +} + +func (m *GetConfigResponse_ConfigItem) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +type DumpStateRequest struct { + Keys []string `protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DumpStateRequest) Reset() { *m = DumpStateRequest{} } +func (m *DumpStateRequest) String() string { return proto.CompactTextString(m) } +func (*DumpStateRequest) ProtoMessage() {} +func (*DumpStateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{13} +} +func (m *DumpStateRequest) XXX_Unmarshal(b 
[]byte) error { + return xxx_messageInfo_DumpStateRequest.Unmarshal(m, b) +} +func (m *DumpStateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DumpStateRequest.Marshal(b, m, deterministic) +} +func (dst *DumpStateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DumpStateRequest.Merge(dst, src) +} +func (m *DumpStateRequest) XXX_Size() int { + return xxx_messageInfo_DumpStateRequest.Size(m) +} +func (m *DumpStateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DumpStateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DumpStateRequest proto.InternalMessageInfo + +func (m *DumpStateRequest) GetKeys() []string { + if m != nil { + return m.Keys + } + return nil +} + +type DumpStateResponse struct { + States []*DumpStateResponse_StateItem `protobuf:"bytes,1,rep,name=states,proto3" json:"states,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DumpStateResponse) Reset() { *m = DumpStateResponse{} } +func (m *DumpStateResponse) String() string { return proto.CompactTextString(m) } +func (*DumpStateResponse) ProtoMessage() {} +func (*DumpStateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{14} +} +func (m *DumpStateResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DumpStateResponse.Unmarshal(m, b) +} +func (m *DumpStateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DumpStateResponse.Marshal(b, m, deterministic) +} +func (dst *DumpStateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DumpStateResponse.Merge(dst, src) +} +func (m *DumpStateResponse) XXX_Size() int { + return xxx_messageInfo_DumpStateResponse.Size(m) +} +func (m *DumpStateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DumpStateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DumpStateResponse proto.InternalMessageInfo + +func 
(m *DumpStateResponse) GetStates() []*DumpStateResponse_StateItem { + if m != nil { + return m.States + } + return nil +} + +type DumpStateResponse_StateItem struct { + Item *Item `protobuf:"bytes,1,opt,name=item,proto3" json:"item,omitempty"` + Metadata map[string]string `protobuf:"bytes,2,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DumpStateResponse_StateItem) Reset() { *m = DumpStateResponse_StateItem{} } +func (m *DumpStateResponse_StateItem) String() string { return proto.CompactTextString(m) } +func (*DumpStateResponse_StateItem) ProtoMessage() {} +func (*DumpStateResponse_StateItem) Descriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{14, 0} +} +func (m *DumpStateResponse_StateItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DumpStateResponse_StateItem.Unmarshal(m, b) +} +func (m *DumpStateResponse_StateItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DumpStateResponse_StateItem.Marshal(b, m, deterministic) +} +func (dst *DumpStateResponse_StateItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_DumpStateResponse_StateItem.Merge(dst, src) +} +func (m *DumpStateResponse_StateItem) XXX_Size() int { + return xxx_messageInfo_DumpStateResponse_StateItem.Size(m) +} +func (m *DumpStateResponse_StateItem) XXX_DiscardUnknown() { + xxx_messageInfo_DumpStateResponse_StateItem.DiscardUnknown(m) +} + +var xxx_messageInfo_DumpStateResponse_StateItem proto.InternalMessageInfo + +func (m *DumpStateResponse_StateItem) GetItem() *Item { + if m != nil { + return m.Item + } + return nil +} + +func (m *DumpStateResponse_StateItem) GetMetadata() map[string]string { + if m != nil { + return m.Metadata + } + return nil +} + +type SubscribeRequest struct { + 
Subscriptions []*Subscription `protobuf:"bytes,1,rep,name=subscriptions,proto3" json:"subscriptions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SubscribeRequest) Reset() { *m = SubscribeRequest{} } +func (m *SubscribeRequest) String() string { return proto.CompactTextString(m) } +func (*SubscribeRequest) ProtoMessage() {} +func (*SubscribeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{15} +} +func (m *SubscribeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SubscribeRequest.Unmarshal(m, b) +} +func (m *SubscribeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SubscribeRequest.Marshal(b, m, deterministic) +} +func (dst *SubscribeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubscribeRequest.Merge(dst, src) +} +func (m *SubscribeRequest) XXX_Size() int { + return xxx_messageInfo_SubscribeRequest.Size(m) +} +func (m *SubscribeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SubscribeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SubscribeRequest proto.InternalMessageInfo + +func (m *SubscribeRequest) GetSubscriptions() []*Subscription { + if m != nil { + return m.Subscriptions + } + return nil +} + +type SubscribeResponse struct { + Notifications []*Notification `protobuf:"bytes,1,rep,name=notifications,proto3" json:"notifications,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SubscribeResponse) Reset() { *m = SubscribeResponse{} } +func (m *SubscribeResponse) String() string { return proto.CompactTextString(m) } +func (*SubscribeResponse) ProtoMessage() {} +func (*SubscribeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{16} +} +func (m *SubscribeResponse) XXX_Unmarshal(b []byte) error { + 
return xxx_messageInfo_SubscribeResponse.Unmarshal(m, b) +} +func (m *SubscribeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SubscribeResponse.Marshal(b, m, deterministic) +} +func (dst *SubscribeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubscribeResponse.Merge(dst, src) +} +func (m *SubscribeResponse) XXX_Size() int { + return xxx_messageInfo_SubscribeResponse.Size(m) +} +func (m *SubscribeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SubscribeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SubscribeResponse proto.InternalMessageInfo + +func (m *SubscribeResponse) GetNotifications() []*Notification { + if m != nil { + return m.Notifications + } + return nil +} + +type Subscription struct { + Id *Item_ID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Subscription) Reset() { *m = Subscription{} } +func (m *Subscription) String() string { return proto.CompactTextString(m) } +func (*Subscription) ProtoMessage() {} +func (*Subscription) Descriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{17} +} +func (m *Subscription) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Subscription.Unmarshal(m, b) +} +func (m *Subscription) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Subscription.Marshal(b, m, deterministic) +} +func (dst *Subscription) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subscription.Merge(dst, src) +} +func (m *Subscription) XXX_Size() int { + return xxx_messageInfo_Subscription.Size(m) +} +func (m *Subscription) XXX_DiscardUnknown() { + xxx_messageInfo_Subscription.DiscardUnknown(m) +} + +var xxx_messageInfo_Subscription proto.InternalMessageInfo + +func (m *Subscription) GetId() *Item_ID { + if m != nil { + return m.Id + } + return nil +} + +type 
Notification struct { + Item *Item `protobuf:"bytes,1,opt,name=item,proto3" json:"item,omitempty"` + Status *ItemStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Notification) Reset() { *m = Notification{} } +func (m *Notification) String() string { return proto.CompactTextString(m) } +func (*Notification) ProtoMessage() {} +func (*Notification) Descriptor() ([]byte, []int) { + return fileDescriptor_genericmanager_0eb219cffc7c9a59, []int{18} +} +func (m *Notification) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Notification.Unmarshal(m, b) +} +func (m *Notification) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Notification.Marshal(b, m, deterministic) +} +func (dst *Notification) XXX_Merge(src proto.Message) { + xxx_messageInfo_Notification.Merge(dst, src) +} +func (m *Notification) XXX_Size() int { + return xxx_messageInfo_Notification.Size(m) +} +func (m *Notification) XXX_DiscardUnknown() { + xxx_messageInfo_Notification.DiscardUnknown(m) +} + +var xxx_messageInfo_Notification proto.InternalMessageInfo + +func (m *Notification) GetItem() *Item { + if m != nil { + return m.Item + } + return nil +} + +func (m *Notification) GetStatus() *ItemStatus { + if m != nil { + return m.Status + } + return nil +} + +func init() { + proto.RegisterType((*Model)(nil), "genericmanager.Model") + proto.RegisterType((*Item)(nil), "genericmanager.Item") + proto.RegisterType((*Item_ID)(nil), "genericmanager.Item.ID") + proto.RegisterType((*Data)(nil), "genericmanager.Data") + proto.RegisterType((*ItemStatus)(nil), "genericmanager.ItemStatus") + proto.RegisterType((*CapabilitiesRequest)(nil), "genericmanager.CapabilitiesRequest") + proto.RegisterType((*CapabilitiesResponse)(nil), "genericmanager.CapabilitiesResponse") + proto.RegisterType((*ModelInfo)(nil), 
"genericmanager.ModelInfo") + proto.RegisterMapType((map[string]string)(nil), "genericmanager.ModelInfo.InfoEntry") + proto.RegisterType((*SetConfigRequest)(nil), "genericmanager.SetConfigRequest") + proto.RegisterType((*UpdateItem)(nil), "genericmanager.UpdateItem") + proto.RegisterMapType((map[string]string)(nil), "genericmanager.UpdateItem.LabelsEntry") + proto.RegisterType((*SetConfigResponse)(nil), "genericmanager.SetConfigResponse") + proto.RegisterType((*UpdateResult)(nil), "genericmanager.UpdateResult") + proto.RegisterType((*GetConfigRequest)(nil), "genericmanager.GetConfigRequest") + proto.RegisterType((*GetConfigResponse)(nil), "genericmanager.GetConfigResponse") + proto.RegisterType((*GetConfigResponse_ConfigItem)(nil), "genericmanager.GetConfigResponse.ConfigItem") + proto.RegisterMapType((map[string]string)(nil), "genericmanager.GetConfigResponse.ConfigItem.LabelsEntry") + proto.RegisterType((*DumpStateRequest)(nil), "genericmanager.DumpStateRequest") + proto.RegisterType((*DumpStateResponse)(nil), "genericmanager.DumpStateResponse") + proto.RegisterType((*DumpStateResponse_StateItem)(nil), "genericmanager.DumpStateResponse.StateItem") + proto.RegisterMapType((map[string]string)(nil), "genericmanager.DumpStateResponse.StateItem.MetadataEntry") + proto.RegisterType((*SubscribeRequest)(nil), "genericmanager.SubscribeRequest") + proto.RegisterType((*SubscribeResponse)(nil), "genericmanager.SubscribeResponse") + proto.RegisterType((*Subscription)(nil), "genericmanager.Subscription") + proto.RegisterType((*Notification)(nil), "genericmanager.Notification") + proto.RegisterEnum("genericmanager.UpdateResult_Operation", UpdateResult_Operation_name, UpdateResult_Operation_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// GenericManagerClient is the client API for GenericManager service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GenericManagerClient interface { + // Capabilities returns information about service capabilities + // including list of models supported by the server. + Capabilities(ctx context.Context, in *CapabilitiesRequest, opts ...grpc.CallOption) (*CapabilitiesResponse, error) + // SetConfig is used to update desired configuration. + SetConfig(ctx context.Context, in *SetConfigRequest, opts ...grpc.CallOption) (*SetConfigResponse, error) + // GetConfig is used to readt current configuration. + GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) + // DumpState retrieves the current running state. + DumpState(ctx context.Context, in *DumpStateRequest, opts ...grpc.CallOption) (*DumpStateResponse, error) + // Subscribe is used for subscribing to events. + // Notifications are returned by streaming updates. + Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (GenericManager_SubscribeClient, error) +} + +type genericManagerClient struct { + cc *grpc.ClientConn +} + +func NewGenericManagerClient(cc *grpc.ClientConn) GenericManagerClient { + return &genericManagerClient{cc} +} + +func (c *genericManagerClient) Capabilities(ctx context.Context, in *CapabilitiesRequest, opts ...grpc.CallOption) (*CapabilitiesResponse, error) { + out := new(CapabilitiesResponse) + err := c.cc.Invoke(ctx, "/genericmanager.GenericManager/Capabilities", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *genericManagerClient) SetConfig(ctx context.Context, in *SetConfigRequest, opts ...grpc.CallOption) (*SetConfigResponse, error) { + out := new(SetConfigResponse) + err := c.cc.Invoke(ctx, "/genericmanager.GenericManager/SetConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *genericManagerClient) GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) { + out := new(GetConfigResponse) + err := c.cc.Invoke(ctx, "/genericmanager.GenericManager/GetConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *genericManagerClient) DumpState(ctx context.Context, in *DumpStateRequest, opts ...grpc.CallOption) (*DumpStateResponse, error) { + out := new(DumpStateResponse) + err := c.cc.Invoke(ctx, "/genericmanager.GenericManager/DumpState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *genericManagerClient) Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (GenericManager_SubscribeClient, error) { + stream, err := c.cc.NewStream(ctx, &_GenericManager_serviceDesc.Streams[0], "/genericmanager.GenericManager/Subscribe", opts...) 
+ if err != nil { + return nil, err + } + x := &genericManagerSubscribeClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type GenericManager_SubscribeClient interface { + Recv() (*SubscribeResponse, error) + grpc.ClientStream +} + +type genericManagerSubscribeClient struct { + grpc.ClientStream +} + +func (x *genericManagerSubscribeClient) Recv() (*SubscribeResponse, error) { + m := new(SubscribeResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GenericManagerServer is the server API for GenericManager service. +type GenericManagerServer interface { + // Capabilities returns information about service capabilities + // including list of models supported by the server. + Capabilities(context.Context, *CapabilitiesRequest) (*CapabilitiesResponse, error) + // SetConfig is used to update desired configuration. + SetConfig(context.Context, *SetConfigRequest) (*SetConfigResponse, error) + // GetConfig is used to readt current configuration. + GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) + // DumpState retrieves the current running state. + DumpState(context.Context, *DumpStateRequest) (*DumpStateResponse, error) + // Subscribe is used for subscribing to events. + // Notifications are returned by streaming updates. 
+ Subscribe(*SubscribeRequest, GenericManager_SubscribeServer) error +} + +func RegisterGenericManagerServer(s *grpc.Server, srv GenericManagerServer) { + s.RegisterService(&_GenericManager_serviceDesc, srv) +} + +func _GenericManager_Capabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GenericManagerServer).Capabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/genericmanager.GenericManager/Capabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GenericManagerServer).Capabilities(ctx, req.(*CapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GenericManager_SetConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GenericManagerServer).SetConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/genericmanager.GenericManager/SetConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GenericManagerServer).SetConfig(ctx, req.(*SetConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GenericManager_GetConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GenericManagerServer).GetConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/genericmanager.GenericManager/GetConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GenericManagerServer).GetConfig(ctx, req.(*GetConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GenericManager_DumpState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DumpStateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GenericManagerServer).DumpState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/genericmanager.GenericManager/DumpState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GenericManagerServer).DumpState(ctx, req.(*DumpStateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GenericManager_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscribeRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(GenericManagerServer).Subscribe(m, &genericManagerSubscribeServer{stream}) +} + +type GenericManager_SubscribeServer interface { + Send(*SubscribeResponse) error + grpc.ServerStream +} + +type genericManagerSubscribeServer struct { + grpc.ServerStream +} + +func (x *genericManagerSubscribeServer) Send(m *SubscribeResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _GenericManager_serviceDesc = grpc.ServiceDesc{ + ServiceName: "genericmanager.GenericManager", + HandlerType: (*GenericManagerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Capabilities", + Handler: _GenericManager_Capabilities_Handler, + }, + { + MethodName: "SetConfig", + Handler: _GenericManager_SetConfig_Handler, + }, + { + MethodName: "GetConfig", + Handler: _GenericManager_GetConfig_Handler, + }, + { + MethodName: "DumpState", + Handler: _GenericManager_DumpState_Handler, + }, + 
}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Subscribe", + Handler: _GenericManager_Subscribe_Handler, + ServerStreams: true, + }, + }, + Metadata: "genericmanager/genericmanager.proto", +} + +func init() { + proto.RegisterFile("genericmanager/genericmanager.proto", fileDescriptor_genericmanager_0eb219cffc7c9a59) +} + +var fileDescriptor_genericmanager_0eb219cffc7c9a59 = []byte{ + // 997 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x4f, 0x6f, 0xe3, 0x44, + 0x14, 0xc7, 0x76, 0xda, 0x6e, 0x5e, 0xd2, 0x92, 0x0e, 0x5d, 0xe8, 0x5a, 0x1c, 0x8a, 0x0b, 0xa5, + 0x68, 0xc1, 0x59, 0x05, 0xb4, 0xdd, 0xe5, 0xcf, 0x8a, 0xb6, 0x09, 0x51, 0xc4, 0x76, 0xa9, 0x26, + 0x5b, 0x10, 0x5c, 0xaa, 0x49, 0x32, 0x09, 0xa3, 0xfa, 0x1f, 0xf6, 0x38, 0xab, 0x88, 0x2f, 0xc3, + 0x81, 0x13, 0x37, 0x0e, 0x7c, 0x08, 0x8e, 0x7c, 0x9a, 0xbd, 0xa2, 0x99, 0xb1, 0x1d, 0xc7, 0x4e, + 0xba, 0x1b, 0x09, 0x2e, 0xd6, 0xbc, 0x37, 0xbf, 0xf7, 0x7b, 0x6f, 0x7e, 0xf3, 0x9e, 0x6d, 0x38, + 0x9c, 0x50, 0x8f, 0x86, 0x6c, 0xe8, 0x12, 0x8f, 0x4c, 0x68, 0xd8, 0x5c, 0x34, 0xed, 0x20, 0xf4, + 0xb9, 0x8f, 0x76, 0x16, 0xbd, 0xe6, 0xbd, 0x89, 0xef, 0x4f, 0x1c, 0xda, 0x94, 0xbb, 0x83, 0x78, + 0xdc, 0x24, 0xde, 0x4c, 0x41, 0xad, 0x0b, 0xd8, 0xb8, 0xf0, 0x47, 0xd4, 0x41, 0x6f, 0xc3, 0xa6, + 0xeb, 0x8f, 0x62, 0x87, 0xee, 0x6b, 0x07, 0xda, 0x71, 0x15, 0x27, 0x16, 0xda, 0x87, 0xad, 0x29, + 0x0d, 0x23, 0xe6, 0x7b, 0xfb, 0xba, 0xdc, 0x48, 0x4d, 0x84, 0xa0, 0xc2, 0x67, 0x01, 0xdd, 0x37, + 0xa4, 0x5b, 0xae, 0xad, 0xdf, 0x34, 0xa8, 0xf4, 0x38, 0x75, 0xd1, 0x87, 0xa0, 0xb3, 0x91, 0xa4, + 0xaa, 0xb5, 0xde, 0xb1, 0x0b, 0x55, 0x0a, 0x84, 0xdd, 0x6b, 0x63, 0x9d, 0x8d, 0xd0, 0x31, 0x54, + 0x46, 0x84, 0x13, 0x49, 0x5e, 0x6b, 0xed, 0x15, 0xa1, 0x6d, 0xc2, 0x09, 0x96, 0x08, 0xb3, 0x03, + 0x7a, 0xaf, 0x8d, 0xee, 0xc3, 0x86, 0x2b, 0x0a, 0x4e, 0xb8, 0xef, 0x16, 0x03, 0xe4, 0x69, 0xb0, + 0xc2, 0x88, 0x12, 0x3d, 0xe2, 0xd2, 0xa4, 0x72, 0xb9, 0xb6, 0x6c, 0xa8, 0x08, 0x52, 0x74, 
0x04, + 0x06, 0xf1, 0x66, 0x09, 0xcd, 0x9e, 0xad, 0x24, 0xb2, 0x53, 0x89, 0xec, 0x53, 0x6f, 0x86, 0x05, + 0xc0, 0x7a, 0x02, 0x20, 0xea, 0xed, 0x73, 0xc2, 0xe3, 0x48, 0xc8, 0x14, 0xc9, 0x55, 0x2a, 0x93, + 0xb2, 0x84, 0x4c, 0x2e, 0x8d, 0x22, 0x32, 0x49, 0x93, 0xa5, 0xa6, 0x75, 0x17, 0xde, 0x3a, 0x27, + 0x01, 0x19, 0x30, 0x87, 0x71, 0x46, 0x23, 0x4c, 0x7f, 0x89, 0x69, 0xc4, 0xad, 0x5f, 0x61, 0x6f, + 0xd1, 0x1d, 0x05, 0xbe, 0x17, 0x51, 0xf4, 0x25, 0xd4, 0x6f, 0x3c, 0xff, 0x85, 0x77, 0x2d, 0x4f, + 0x20, 0xd2, 0x18, 0xc7, 0xb5, 0xd6, 0xbd, 0xa5, 0xc7, 0xec, 0x79, 0x63, 0x1f, 0xd7, 0x24, 0x5c, + 0xda, 0x11, 0xfa, 0x00, 0x76, 0xc8, 0x90, 0xb3, 0x29, 0xbd, 0x56, 0xd7, 0x17, 0xed, 0xeb, 0x07, + 0xc6, 0x71, 0x15, 0x6f, 0x2b, 0xef, 0x85, 0x72, 0x5a, 0x7f, 0x68, 0x50, 0xcd, 0x18, 0xd6, 0x93, + 0xf4, 0x04, 0x2a, 0xcc, 0x1b, 0xfb, 0x92, 0xb7, 0xd6, 0x3a, 0x5c, 0x59, 0x97, 0x2d, 0x1e, 0x1d, + 0x8f, 0x87, 0x33, 0x2c, 0x03, 0xcc, 0x13, 0xa8, 0x66, 0x2e, 0xd4, 0x00, 0xe3, 0x86, 0xce, 0x12, + 0x0d, 0xc5, 0x12, 0xed, 0xc1, 0xc6, 0x94, 0x38, 0x71, 0x2a, 0x9f, 0x32, 0x3e, 0xd7, 0x1f, 0x69, + 0x96, 0x0b, 0x8d, 0x3e, 0xe5, 0xe7, 0xbe, 0x37, 0x66, 0x93, 0x44, 0x3d, 0xf4, 0x19, 0x6c, 0xc5, + 0xc1, 0x88, 0x70, 0x9a, 0x0a, 0x64, 0x16, 0x0b, 0xb9, 0x92, 0xdb, 0xe2, 0xe6, 0x70, 0x0a, 0x45, + 0x87, 0xb0, 0xed, 0x4f, 0x69, 0xf8, 0x22, 0x64, 0x9c, 0x5e, 0x13, 0xc7, 0x91, 0xb9, 0xee, 0xe0, + 0x7a, 0xe6, 0x3c, 0x75, 0x1c, 0xeb, 0x4f, 0x0d, 0x60, 0x1e, 0x2c, 0xfa, 0x93, 0x71, 0xea, 0xce, + 0xfb, 0xa4, 0xdc, 0xca, 0x58, 0x22, 0xd0, 0x13, 0xd8, 0x74, 0xc8, 0x40, 0xdc, 0x99, 0xd2, 0xe6, + 0x68, 0x75, 0x49, 0xf6, 0x53, 0x09, 0x54, 0xf2, 0x24, 0x51, 0xe6, 0x63, 0xa8, 0xe5, 0xdc, 0x6b, + 0x49, 0xf4, 0x2d, 0xec, 0xe6, 0x24, 0x4a, 0x3a, 0xe9, 0x21, 0x6c, 0x85, 0x34, 0x8a, 0x1d, 0x9e, + 0x6a, 0xf4, 0xee, 0xf2, 0x82, 0xb0, 0x04, 0xe1, 0x14, 0x6c, 0xbd, 0xd4, 0xa0, 0x9e, 0xdf, 0x49, + 0x66, 0xb9, 0xf2, 0xea, 0x59, 0x2e, 0x97, 0xfc, 0x10, 0x74, 0x3f, 0x90, 0xf5, 0xee, 0xac, 0xd2, + 0x43, 0x25, 0xb1, 0xbf, 0x0b, 
0x68, 0x48, 0x38, 0xf3, 0x3d, 0xac, 0xfb, 0x01, 0x6a, 0x65, 0x63, + 0x66, 0xc8, 0xb4, 0xe6, 0xb2, 0xb4, 0x6a, 0x24, 0xd3, 0x11, 0xb4, 0xbe, 0x86, 0x6a, 0x46, 0x82, + 0xde, 0x84, 0xda, 0xd5, 0xb3, 0xfe, 0x65, 0xe7, 0xbc, 0xf7, 0x4d, 0xaf, 0xd3, 0x6e, 0xbc, 0x81, + 0x00, 0x36, 0xcf, 0x71, 0xe7, 0xf4, 0x79, 0xa7, 0xa1, 0x89, 0xf5, 0xd5, 0x65, 0x5b, 0xac, 0x75, + 0xb1, 0x6e, 0x77, 0x9e, 0x76, 0x9e, 0x77, 0x1a, 0x86, 0xf5, 0x15, 0x34, 0xba, 0xc5, 0x4e, 0xfb, + 0x08, 0x0c, 0x36, 0x4a, 0x15, 0x5c, 0x79, 0x7a, 0x81, 0xb1, 0xfe, 0xd1, 0x61, 0xb7, 0x5b, 0xba, + 0x86, 0x33, 0xd8, 0x10, 0xed, 0x91, 0x52, 0x7c, 0x5c, 0xa4, 0x28, 0x45, 0xd8, 0xca, 0x94, 0x9d, + 0xa5, 0x42, 0xcd, 0x97, 0x1a, 0xc0, 0xdc, 0xbb, 0x46, 0x4f, 0xce, 0x75, 0xd4, 0x5f, 0x57, 0x47, + 0x74, 0x99, 0xf5, 0xb1, 0x21, 0x2b, 0x7e, 0xb4, 0x4e, 0xc5, 0xff, 0x75, 0x67, 0x1f, 0x41, 0xa3, + 0x1d, 0xbb, 0x81, 0x28, 0x91, 0xa6, 0x57, 0x82, 0xa0, 0x72, 0x43, 0x67, 0x4a, 0xd0, 0x2a, 0x96, + 0x6b, 0xeb, 0x77, 0x1d, 0x76, 0x73, 0xc0, 0x44, 0xfb, 0x73, 0x75, 0xfc, 0xec, 0x2d, 0x71, 0xbf, + 0xf4, 0x79, 0x29, 0x86, 0xd8, 0xd2, 0x92, 0x0a, 0x26, 0xa1, 0xe6, 0xdf, 0x1a, 0x54, 0x33, 0xef, + 0x1a, 0xda, 0x5f, 0xc1, 0x1d, 0x97, 0x72, 0x92, 0x7c, 0xdd, 0x44, 0xfa, 0xc7, 0x6b, 0xa4, 0xb7, + 0x2f, 0x92, 0x58, 0x25, 0x65, 0x46, 0x65, 0x7e, 0x01, 0xdb, 0x0b, 0x5b, 0x6b, 0xc9, 0xf9, 0x3d, + 0x34, 0xfa, 0xf1, 0x20, 0x1a, 0x86, 0x6c, 0x90, 0xc9, 0x79, 0x06, 0xdb, 0x91, 0xf2, 0x05, 0x62, + 0x74, 0x56, 0xbe, 0x2d, 0xfa, 0x39, 0x10, 0x5e, 0x0c, 0xb1, 0x7e, 0x80, 0xdd, 0x1c, 0x6f, 0xd6, + 0xf9, 0xdb, 0x9e, 0xcf, 0xd9, 0x98, 0x0d, 0xc9, 0xad, 0xc4, 0xcf, 0x72, 0x20, 0xbc, 0x18, 0x62, + 0x9d, 0x40, 0x3d, 0x9f, 0xf7, 0xb5, 0xff, 0x2b, 0x2c, 0x07, 0xea, 0x79, 0xde, 0xff, 0x77, 0x66, + 0x5a, 0x7f, 0x19, 0xb0, 0xd3, 0x55, 0xa8, 0x0b, 0x85, 0x42, 0x3f, 0x42, 0x3d, 0xff, 0x81, 0x47, + 0xa5, 0x4f, 0xe5, 0x92, 0xbf, 0x02, 0xf3, 0xfd, 0xdb, 0x41, 0x89, 0xb0, 0x97, 0x50, 0xcd, 0x5e, + 0xf7, 0xe8, 0xa0, 0x74, 0x4f, 0x85, 0x57, 0x98, 0xf9, 0xde, 0x2d, 
0x88, 0x39, 0x63, 0x77, 0x35, + 0x63, 0xf7, 0x95, 0x8c, 0xdd, 0x65, 0x8c, 0x59, 0x77, 0x97, 0x19, 0x8b, 0x33, 0x5d, 0x66, 0x2c, + 0x0f, 0x33, 0x86, 0x6a, 0xd6, 0x63, 0x4b, 0x4e, 0x5d, 0x68, 0xeb, 0x25, 0xa7, 0x2e, 0x36, 0xe8, + 0x03, 0xed, 0xec, 0xc1, 0x4f, 0xf6, 0x84, 0xf1, 0x9f, 0xe3, 0x81, 0x3d, 0xf4, 0xdd, 0xa6, 0xc3, + 0x26, 0x84, 0xfb, 0xcd, 0x69, 0x10, 0x7c, 0x42, 0x26, 0xd4, 0xe3, 0x4d, 0x12, 0xb0, 0xc2, 0x1f, + 0xf6, 0x60, 0x53, 0xfe, 0x21, 0x7e, 0xfa, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xeb, 0xd0, 0xb5, + 0xd4, 0x89, 0x0b, 0x00, 0x00, +} diff --git a/api/genericmanager/genericmanager.proto b/api/genericmanager/genericmanager.proto new file mode 100644 index 0000000000..b898270377 --- /dev/null +++ b/api/genericmanager/genericmanager.proto @@ -0,0 +1,158 @@ +syntax = "proto3"; + +package genericmanager; + +option go_package = "github.com/ligato/vpp-agent/api/genericmanager"; + +import "google/protobuf/any.proto"; + +// Model represents a model description to for recognizing +// different item types. +message Model { + // Module describes group of the model. + string module = 1; + // Version describes concrete version of the model schema. + string version = 2; + // Type describes name of type described by this model. + string type = 3; +} + +// Item represents single instance described by the Model. +message Item { + // ID represents identifier for distinguishing items. + message ID { + Model model = 1; + string name = 2; + } + + ID id = 1; + Data data = 2; +} + +// Data represents encoded data for an item. +message Data { + google.protobuf.Any any = 1; +} + +// Item status describes status of an item. +message ItemStatus { + string status = 1; + string message = 2; +} + +//------------------------------------------------------------------------------ + +// GenericManager defines the RPC methods for managing config +// using generic model, allowing extending with custom models. 
+service GenericManager { + // Capabilities returns information about service capabilities + // including list of models supported by the server. + rpc Capabilities (CapabilitiesRequest) returns (CapabilitiesResponse); + + // SetConfig is used to update desired configuration. + rpc SetConfig (SetConfigRequest) returns (SetConfigResponse); + + // GetConfig is used to readt current configuration. + rpc GetConfig (GetConfigRequest) returns (GetConfigResponse); + + // DumpState retrieves the current running state. + rpc DumpState (DumpStateRequest) returns (DumpStateResponse); + + // Subscribe is used for subscribing to events. + // Notifications are returned by streaming updates. + rpc Subscribe (SubscribeRequest) returns (stream SubscribeResponse); +} + +//------------------------------------------------------------------------------ + +message CapabilitiesRequest { + // TODO: query filters +} +message CapabilitiesResponse { + repeated ModelInfo known_models = 1; + repeated string active_modules = 2; +} + +message ModelInfo { + Model model = 1; + map info = 2; +} + +//------------------------------------------------------------------------------ + +message SetConfigRequest { + repeated UpdateItem updates = 1; + // The overwrite_all can be set to true to overwrite all other configuration + // (this is also known as Full Resync) + bool overwrite_all = 2; +} + +message UpdateItem { + // The item describes item to be updated. + // For a delete operation set fields item.Data to nil. + Item item = 1; + // The labels can be used to define user-defined labels for item. 
+ map labels = 2; +} + +message SetConfigResponse { + repeated UpdateResult results = 1; +} + +message UpdateResult { + enum Operation { + UNSPECIFIED = 0; + CREATE = 1; + UPDATE = 2; + DELETE = 3; + } + Item.ID id = 4; + string key = 1; + Operation op = 2; + ItemStatus status = 3; +} + +//------------------------------------------------------------------------------ + +message GetConfigRequest { + repeated Item.ID ids = 1; +} +message GetConfigResponse { + message ConfigItem { + Item item = 1; + ItemStatus status = 2; + map labels = 3; + } + repeated ConfigItem items = 1; +} + +//------------------------------------------------------------------------------ + +message DumpStateRequest { + repeated string keys = 1; +} +message DumpStateResponse { + message StateItem { + Item item = 1; + map metadata = 2; + } + repeated StateItem states = 1; +} + +//------------------------------------------------------------------------------ + +message SubscribeRequest { + repeated Subscription subscriptions = 1; +} +message SubscribeResponse { + repeated Notification notifications = 1; +} + +message Subscription { + Item.ID id = 1; +} + +message Notification { + Item item = 1; + ItemStatus status = 2; +} diff --git a/api/models/linux/interfaces/interface.pb.go b/api/models/linux/interfaces/interface.pb.go new file mode 100644 index 0000000000..0133c99be1 --- /dev/null +++ b/api/models/linux/interfaces/interface.pb.go @@ -0,0 +1,435 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: models/linux/interfaces/interface.proto + +package linux_interfaces // import "github.com/ligato/vpp-agent/api/models/linux/interfaces" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import namespace "github.com/ligato/vpp-agent/api/models/linux/namespace" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Interface_Type int32 + +const ( + Interface_UNDEFINED Interface_Type = 0 + Interface_VETH Interface_Type = 1 + Interface_TAP_TO_VPP Interface_Type = 2 +) + +var Interface_Type_name = map[int32]string{ + 0: "UNDEFINED", + 1: "VETH", + 2: "TAP_TO_VPP", +} +var Interface_Type_value = map[string]int32{ + "UNDEFINED": 0, + "VETH": 1, + "TAP_TO_VPP": 2, +} + +func (x Interface_Type) String() string { + return proto.EnumName(Interface_Type_name, int32(x)) +} +func (Interface_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_interface_81e80a7dbf736c6b, []int{0, 0} +} + +type VethLink_ChecksumOffloading int32 + +const ( + VethLink_CHKSM_OFFLOAD_DEFAULT VethLink_ChecksumOffloading = 0 + VethLink_CHKSM_OFFLOAD_ENABLED VethLink_ChecksumOffloading = 1 + VethLink_CHKSM_OFFLOAD_DISABLED VethLink_ChecksumOffloading = 2 +) + +var VethLink_ChecksumOffloading_name = map[int32]string{ + 0: "CHKSM_OFFLOAD_DEFAULT", + 1: "CHKSM_OFFLOAD_ENABLED", + 2: "CHKSM_OFFLOAD_DISABLED", +} +var VethLink_ChecksumOffloading_value = map[string]int32{ + "CHKSM_OFFLOAD_DEFAULT": 0, + "CHKSM_OFFLOAD_ENABLED": 1, + "CHKSM_OFFLOAD_DISABLED": 2, +} + +func (x VethLink_ChecksumOffloading) String() string { + return proto.EnumName(VethLink_ChecksumOffloading_name, int32(x)) +} +func (VethLink_ChecksumOffloading) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_interface_81e80a7dbf736c6b, []int{1, 0} +} + +type Interface struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type Interface_Type 
`protobuf:"varint,2,opt,name=type,proto3,enum=linux.interfaces.Interface_Type" json:"type,omitempty"` + Namespace *namespace.NetNamespace `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"` + HostIfName string `protobuf:"bytes,4,opt,name=host_if_name,json=hostIfName,proto3" json:"host_if_name,omitempty"` + Enabled bool `protobuf:"varint,5,opt,name=enabled,proto3" json:"enabled,omitempty"` + IpAddresses []string `protobuf:"bytes,6,rep,name=ip_addresses,json=ipAddresses,proto3" json:"ip_addresses,omitempty"` + PhysAddress string `protobuf:"bytes,7,opt,name=phys_address,json=physAddress,proto3" json:"phys_address,omitempty"` + Mtu uint32 `protobuf:"varint,8,opt,name=mtu,proto3" json:"mtu,omitempty"` + // Types that are valid to be assigned to Link: + // *Interface_Veth + // *Interface_Tap + Link isInterface_Link `protobuf_oneof:"link"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Interface) Reset() { *m = Interface{} } +func (m *Interface) String() string { return proto.CompactTextString(m) } +func (*Interface) ProtoMessage() {} +func (*Interface) Descriptor() ([]byte, []int) { + return fileDescriptor_interface_81e80a7dbf736c6b, []int{0} +} +func (m *Interface) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Interface.Unmarshal(m, b) +} +func (m *Interface) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Interface.Marshal(b, m, deterministic) +} +func (dst *Interface) XXX_Merge(src proto.Message) { + xxx_messageInfo_Interface.Merge(dst, src) +} +func (m *Interface) XXX_Size() int { + return xxx_messageInfo_Interface.Size(m) +} +func (m *Interface) XXX_DiscardUnknown() { + xxx_messageInfo_Interface.DiscardUnknown(m) +} + +var xxx_messageInfo_Interface proto.InternalMessageInfo + +type isInterface_Link interface { + isInterface_Link() +} + +type Interface_Veth struct { + Veth *VethLink 
`protobuf:"bytes,20,opt,name=veth,proto3,oneof"` +} +type Interface_Tap struct { + Tap *TapLink `protobuf:"bytes,21,opt,name=tap,proto3,oneof"` +} + +func (*Interface_Veth) isInterface_Link() {} +func (*Interface_Tap) isInterface_Link() {} + +func (m *Interface) GetLink() isInterface_Link { + if m != nil { + return m.Link + } + return nil +} + +func (m *Interface) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Interface) GetType() Interface_Type { + if m != nil { + return m.Type + } + return Interface_UNDEFINED +} + +func (m *Interface) GetNamespace() *namespace.NetNamespace { + if m != nil { + return m.Namespace + } + return nil +} + +func (m *Interface) GetHostIfName() string { + if m != nil { + return m.HostIfName + } + return "" +} + +func (m *Interface) GetEnabled() bool { + if m != nil { + return m.Enabled + } + return false +} + +func (m *Interface) GetIpAddresses() []string { + if m != nil { + return m.IpAddresses + } + return nil +} + +func (m *Interface) GetPhysAddress() string { + if m != nil { + return m.PhysAddress + } + return "" +} + +func (m *Interface) GetMtu() uint32 { + if m != nil { + return m.Mtu + } + return 0 +} + +func (m *Interface) GetVeth() *VethLink { + if x, ok := m.GetLink().(*Interface_Veth); ok { + return x.Veth + } + return nil +} + +func (m *Interface) GetTap() *TapLink { + if x, ok := m.GetLink().(*Interface_Tap); ok { + return x.Tap + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Interface) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Interface_OneofMarshaler, _Interface_OneofUnmarshaler, _Interface_OneofSizer, []interface{}{ + (*Interface_Veth)(nil), + (*Interface_Tap)(nil), + } +} + +func _Interface_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Interface) + // link + switch x := m.Link.(type) { + case *Interface_Veth: + _ = b.EncodeVarint(20<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Veth); err != nil { + return err + } + case *Interface_Tap: + _ = b.EncodeVarint(21<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Tap); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Interface.Link has unexpected type %T", x) + } + return nil +} + +func _Interface_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Interface) + switch tag { + case 20: // link.veth + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(VethLink) + err := b.DecodeMessage(msg) + m.Link = &Interface_Veth{msg} + return true, err + case 21: // link.tap + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TapLink) + err := b.DecodeMessage(msg) + m.Link = &Interface_Tap{msg} + return true, err + default: + return false, nil + } +} + +func _Interface_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Interface) + // link + switch x := m.Link.(type) { + case *Interface_Veth: + s := proto.Size(x.Veth) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Interface_Tap: + s := proto.Size(x.Tap) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func (*Interface) XXX_MessageName() string { + return 
"linux.interfaces.Interface" +} + +type VethLink struct { + PeerIfName string `protobuf:"bytes,1,opt,name=peer_if_name,json=peerIfName,proto3" json:"peer_if_name,omitempty"` + RxChecksumOffloading VethLink_ChecksumOffloading `protobuf:"varint,2,opt,name=rx_checksum_offloading,json=rxChecksumOffloading,proto3,enum=linux.interfaces.VethLink_ChecksumOffloading" json:"rx_checksum_offloading,omitempty"` + TxChecksumOffloading VethLink_ChecksumOffloading `protobuf:"varint,3,opt,name=tx_checksum_offloading,json=txChecksumOffloading,proto3,enum=linux.interfaces.VethLink_ChecksumOffloading" json:"tx_checksum_offloading,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VethLink) Reset() { *m = VethLink{} } +func (m *VethLink) String() string { return proto.CompactTextString(m) } +func (*VethLink) ProtoMessage() {} +func (*VethLink) Descriptor() ([]byte, []int) { + return fileDescriptor_interface_81e80a7dbf736c6b, []int{1} +} +func (m *VethLink) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VethLink.Unmarshal(m, b) +} +func (m *VethLink) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VethLink.Marshal(b, m, deterministic) +} +func (dst *VethLink) XXX_Merge(src proto.Message) { + xxx_messageInfo_VethLink.Merge(dst, src) +} +func (m *VethLink) XXX_Size() int { + return xxx_messageInfo_VethLink.Size(m) +} +func (m *VethLink) XXX_DiscardUnknown() { + xxx_messageInfo_VethLink.DiscardUnknown(m) +} + +var xxx_messageInfo_VethLink proto.InternalMessageInfo + +func (m *VethLink) GetPeerIfName() string { + if m != nil { + return m.PeerIfName + } + return "" +} + +func (m *VethLink) GetRxChecksumOffloading() VethLink_ChecksumOffloading { + if m != nil { + return m.RxChecksumOffloading + } + return VethLink_CHKSM_OFFLOAD_DEFAULT +} + +func (m *VethLink) GetTxChecksumOffloading() VethLink_ChecksumOffloading { + if m != nil { + return 
m.TxChecksumOffloading + } + return VethLink_CHKSM_OFFLOAD_DEFAULT +} + +func (*VethLink) XXX_MessageName() string { + return "linux.interfaces.VethLink" +} + +type TapLink struct { + VppTapIfName string `protobuf:"bytes,1,opt,name=vpp_tap_if_name,json=vppTapIfName,proto3" json:"vpp_tap_if_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TapLink) Reset() { *m = TapLink{} } +func (m *TapLink) String() string { return proto.CompactTextString(m) } +func (*TapLink) ProtoMessage() {} +func (*TapLink) Descriptor() ([]byte, []int) { + return fileDescriptor_interface_81e80a7dbf736c6b, []int{2} +} +func (m *TapLink) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TapLink.Unmarshal(m, b) +} +func (m *TapLink) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TapLink.Marshal(b, m, deterministic) +} +func (dst *TapLink) XXX_Merge(src proto.Message) { + xxx_messageInfo_TapLink.Merge(dst, src) +} +func (m *TapLink) XXX_Size() int { + return xxx_messageInfo_TapLink.Size(m) +} +func (m *TapLink) XXX_DiscardUnknown() { + xxx_messageInfo_TapLink.DiscardUnknown(m) +} + +var xxx_messageInfo_TapLink proto.InternalMessageInfo + +func (m *TapLink) GetVppTapIfName() string { + if m != nil { + return m.VppTapIfName + } + return "" +} + +func (*TapLink) XXX_MessageName() string { + return "linux.interfaces.TapLink" +} +func init() { + proto.RegisterType((*Interface)(nil), "linux.interfaces.Interface") + proto.RegisterType((*VethLink)(nil), "linux.interfaces.VethLink") + proto.RegisterType((*TapLink)(nil), "linux.interfaces.TapLink") + proto.RegisterEnum("linux.interfaces.Interface_Type", Interface_Type_name, Interface_Type_value) + proto.RegisterEnum("linux.interfaces.VethLink_ChecksumOffloading", VethLink_ChecksumOffloading_name, VethLink_ChecksumOffloading_value) +} + +func init() { + proto.RegisterFile("models/linux/interfaces/interface.proto", 
fileDescriptor_interface_81e80a7dbf736c6b) +} + +var fileDescriptor_interface_81e80a7dbf736c6b = []byte{ + // 567 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xcd, 0x6e, 0x9b, 0x4c, + 0x14, 0x0d, 0x86, 0x2f, 0xb1, 0x6f, 0x7e, 0x3e, 0x34, 0x4a, 0x22, 0x62, 0xa9, 0x15, 0xb5, 0xd4, + 0x96, 0x8d, 0x21, 0x4a, 0xbb, 0xcb, 0xca, 0x89, 0xb1, 0x6c, 0xd5, 0xc5, 0x11, 0x21, 0x59, 0x74, + 0x83, 0xc6, 0x78, 0xf8, 0x51, 0x30, 0x8c, 0x60, 0x6c, 0x25, 0x6f, 0xd7, 0x65, 0x9f, 0xa2, 0x9b, + 0xbe, 0x48, 0xc5, 0x18, 0x4c, 0x6b, 0x3b, 0x9b, 0xee, 0xee, 0x3d, 0x9c, 0x33, 0xe7, 0xce, 0xb9, + 0x1a, 0xe0, 0xe3, 0x3c, 0x9d, 0x91, 0x38, 0x37, 0xe2, 0x28, 0x59, 0x3c, 0x1b, 0x51, 0xc2, 0x48, + 0xe6, 0x63, 0x8f, 0xe4, 0x75, 0xa9, 0xd3, 0x2c, 0x65, 0x29, 0x92, 0x39, 0x43, 0xaf, 0x19, 0xed, + 0x6e, 0x10, 0xb1, 0x70, 0x31, 0xd5, 0xbd, 0x74, 0x6e, 0x04, 0x69, 0x90, 0x1a, 0x9c, 0x38, 0x5d, + 0xf8, 0xbc, 0xe3, 0x0d, 0xaf, 0x56, 0x07, 0xb4, 0x3f, 0xfc, 0xe5, 0x94, 0xe0, 0x39, 0xc9, 0x29, + 0xf6, 0x48, 0x5d, 0xad, 0x78, 0x9d, 0xef, 0x22, 0xb4, 0x46, 0x95, 0x0b, 0x42, 0x20, 0x15, 0x04, + 0x45, 0x50, 0x05, 0xad, 0x65, 0xf3, 0x1a, 0x7d, 0x06, 0x89, 0xbd, 0x50, 0xa2, 0x34, 0x54, 0x41, + 0x3b, 0xb9, 0x52, 0xf5, 0xcd, 0xc9, 0xf4, 0xb5, 0x5c, 0x77, 0x5e, 0x28, 0xb1, 0x39, 0x1b, 0x5d, + 0x43, 0x6b, 0x6d, 0xa5, 0x88, 0xaa, 0xa0, 0x1d, 0x5e, 0xbd, 0x29, 0xa5, 0xf5, 0x08, 0x16, 0x61, + 0x56, 0xd5, 0xd8, 0x35, 0x1f, 0xa9, 0x70, 0x14, 0xa6, 0x39, 0x73, 0x23, 0xdf, 0xe5, 0xe3, 0x48, + 0x7c, 0x1c, 0x28, 0xb0, 0x91, 0x5f, 0x28, 0x90, 0x02, 0x07, 0x24, 0xc1, 0xd3, 0x98, 0xcc, 0x94, + 0xff, 0x54, 0x41, 0x6b, 0xda, 0x55, 0x8b, 0xde, 0xc1, 0x51, 0x44, 0x5d, 0x3c, 0x9b, 0x65, 0x24, + 0xcf, 0x49, 0xae, 0xec, 0xab, 0xa2, 0xd6, 0xb2, 0x0f, 0x23, 0xda, 0xab, 0xa0, 0x82, 0x42, 0xc3, + 0x97, 0xbc, 0x22, 0x29, 0x07, 0xfc, 0xf8, 0xc3, 0x02, 0x2b, 0x49, 0x48, 0x06, 0x71, 0xce, 0x16, + 0x4a, 0x53, 0x15, 0xb4, 0x63, 0xbb, 0x28, 0xd1, 0x25, 0x48, 0x4b, 0xc2, 0x42, 
0xe5, 0x94, 0xdf, + 0xa5, 0xbd, 0x1d, 0xc3, 0x23, 0x61, 0xe1, 0x38, 0x4a, 0x9e, 0x86, 0x7b, 0x36, 0x67, 0xa2, 0x2e, + 0x88, 0x0c, 0x53, 0xe5, 0x8c, 0x0b, 0x2e, 0xb6, 0x05, 0x0e, 0xa6, 0x25, 0xbf, 0xe0, 0x75, 0x0c, + 0x90, 0x8a, 0xfc, 0xd0, 0x31, 0xb4, 0x1e, 0xac, 0xbe, 0x39, 0x18, 0x59, 0x66, 0x5f, 0xde, 0x43, + 0x4d, 0x90, 0x1e, 0x4d, 0x67, 0x28, 0x0b, 0xe8, 0x04, 0xc0, 0xe9, 0xdd, 0xb9, 0xce, 0xc4, 0x7d, + 0xbc, 0xbb, 0x93, 0x1b, 0x37, 0xfb, 0x20, 0xc5, 0x51, 0xf2, 0xd4, 0xf9, 0xd9, 0x80, 0x66, 0x65, + 0x5e, 0x44, 0x47, 0x09, 0xc9, 0xd6, 0xd1, 0xad, 0x36, 0x09, 0x05, 0x56, 0x46, 0xe7, 0xc1, 0x79, + 0xf6, 0xec, 0x7a, 0x21, 0xf1, 0x9e, 0xf2, 0xc5, 0xdc, 0x4d, 0x7d, 0x3f, 0x4e, 0xf1, 0x2c, 0x4a, + 0x82, 0x72, 0xc3, 0xdd, 0xd7, 0xaf, 0xa6, 0xdf, 0x96, 0xaa, 0xc9, 0x5a, 0x64, 0x9f, 0x66, 0xcf, + 0xdb, 0x68, 0x61, 0xc2, 0x76, 0x9b, 0x88, 0xff, 0x64, 0xc2, 0x76, 0x98, 0x74, 0x7c, 0x40, 0x3b, + 0xac, 0x2f, 0xe0, 0xec, 0x76, 0xf8, 0xe5, 0xfe, 0xab, 0x3b, 0x19, 0x0c, 0xc6, 0x93, 0x5e, 0xdf, + 0xed, 0x9b, 0x83, 0xde, 0xc3, 0xd8, 0x91, 0xf7, 0xb6, 0x3f, 0x99, 0x56, 0xef, 0x66, 0x6c, 0xf6, + 0x65, 0x01, 0xb5, 0xe1, 0x7c, 0x43, 0x35, 0xba, 0x5f, 0x7d, 0x6b, 0x74, 0x2e, 0xe1, 0xa0, 0xdc, + 0x15, 0x7a, 0x0f, 0xff, 0x2f, 0x29, 0x75, 0x19, 0xa6, 0x1b, 0x09, 0x1f, 0x2d, 0x29, 0x75, 0x30, + 0x5d, 0x65, 0x7c, 0x63, 0xfd, 0xf8, 0xf5, 0x56, 0xf8, 0x36, 0xfc, 0xe3, 0xc9, 0xc6, 0x51, 0x80, + 0x59, 0x6a, 0x2c, 0x29, 0xed, 0xe2, 0x80, 0x24, 0xcc, 0xc0, 0x34, 0x32, 0x5e, 0xf9, 0x1b, 0x5c, + 0x73, 0xc0, 0xad, 0x81, 0xe9, 0x3e, 0x7f, 0xac, 0x9f, 0x7e, 0x07, 0x00, 0x00, 0xff, 0xff, 0x3a, + 0xc6, 0xbf, 0xc4, 0x40, 0x04, 0x00, 0x00, +} diff --git a/api/models/linux/interfaces/interface.proto b/api/models/linux/interfaces/interface.proto new file mode 100644 index 0000000000..b4f5a0f784 --- /dev/null +++ b/api/models/linux/interfaces/interface.proto @@ -0,0 +1,50 @@ +syntax = "proto3"; + +package linux.interfaces; + +option go_package = 
"github.com/ligato/vpp-agent/api/models/linux/interfaces;linux_interfaces"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option (gogoproto.messagename_all) = true; + +import "models/linux/namespace/namespace.proto"; + +message Interface { + enum Type { + UNDEFINED = 0; + VETH = 1; + TAP_TO_VPP = 2; /* TAP created by VPP to have the Linux-side further configured */ + }; + + string name = 1; /* Logical interface name unique across all configured interfaces (mandatory) */ + Type type = 2; /* Interface type (mandatory) */ + + linux.namespace.NetNamespace namespace = 3; + string host_if_name = 4; /* Name of the interface in the host OS. If not set, the host name + is the same as the interface logical name. */ + bool enabled = 5; + repeated string ip_addresses = 6; /* IP addresses in the format / */ + string phys_address = 7; /* MAC address */ + uint32 mtu = 8; /* Maximum transmission unit value */ + + oneof link { + VethLink veth = 20; /* VETH-specific configuration */ + TapLink tap = 21; /* TAP_TO_VPP-specific configuration */ + }; +}; + +message VethLink { + string peer_if_name = 1; /* Name of the VETH peer, i.e. other end of the linux veth (mandatory for VETH) */ + + enum ChecksumOffloading { + CHKSM_OFFLOAD_DEFAULT = 0; + CHKSM_OFFLOAD_ENABLED = 1; + CHKSM_OFFLOAD_DISABLED = 2; + } + ChecksumOffloading rx_checksum_offloading = 2; /* checksum offloading - Rx side (enabled by default) */ + ChecksumOffloading tx_checksum_offloading = 3; /* checksum offloading - Tx side (enabled by default) */ +}; + +message TapLink { + string vpp_tap_if_name = 1; /* Logical name of the VPP TAP interface (mandatory for TAP_TO_VPP) */ +}; diff --git a/api/models/linux/interfaces/keys.go b/api/models/linux/interfaces/keys.go new file mode 100644 index 0000000000..857000380c --- /dev/null +++ b/api/models/linux/interfaces/keys.go @@ -0,0 +1,166 @@ +// Copyright (c) 2017 Cisco and/or its affiliates. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux_interfaces + +import ( + "net" + "strings" + + "github.com/gogo/protobuf/jsonpb" + + "github.com/ligato/vpp-agent/pkg/models" +) + +// ModuleName is the module name used for models. +const ModuleName = "linux.interfaces" + +var ( + ModelInterface = models.Register(&Interface{}, models.Spec{ + Module: ModuleName, + Version: "v2", + Type: "interface", + }) +) + +// InterfaceKey returns the key used in ETCD to store configuration of a particular Linux interface. +func InterfaceKey(name string) string { + return models.Key(&Interface{ + Name: name, + }) +} + +const ( + /* Interface host-name (default ns only, notifications) */ + + // InterfaceHostNameKeyPrefix is the common prefix of all keys representing + // existing Linux interfaces in the default namespace (referenced by host names). + InterfaceHostNameKeyPrefix = "linux/interface/host-name/" + + /* Interface State (derived) */ + + // InterfaceStateKeyPrefix is used as a common prefix for keys derived from + // interfaces to represent the interface admin state (up/down). + InterfaceStateKeyPrefix = "linux/interface/state/" + + // interfaceStateKeyTemplate is a template for (derived) key representing interface + // admin state (up/down). + interfaceStateKeyTemplate = InterfaceStateKeyPrefix + "{ifName}/{ifState}" + + // interface admin state as printed in derived keys. 
+ interfaceUpState = "UP" + interfaceDownState = "DOWN" + + /* Interface Address (derived) */ + + // InterfaceAddressKeyPrefix is used as a common prefix for keys derived from + // interfaces to represent assigned IP addresses. + InterfaceAddressKeyPrefix = "linux/interface/address/" + + // interfaceAddressKeyTemplate is a template for (derived) key representing IP address + // (incl. mask) assigned to a Linux interface (referenced by the logical name). + interfaceAddressKeyTemplate = InterfaceAddressKeyPrefix + "{ifName}/{addr}/{mask}" +) + +/* Interface host-name (default ns only, notifications) */ + +// InterfaceHostNameKey returns key representing Linux interface host name. +func InterfaceHostNameKey(hostName string) string { + return InterfaceHostNameKeyPrefix + hostName +} + +/* Interface State (derived) */ + +// InterfaceStateKey returns key representing admin state of a Linux interface. +func InterfaceStateKey(ifName string, ifIsUp bool) string { + ifState := interfaceDownState + if ifIsUp { + ifState = interfaceUpState + } + key := strings.Replace(interfaceStateKeyTemplate, "{ifName}", ifName, 1) + key = strings.Replace(key, "{ifState}", ifState, 1) + return key +} + +// ParseInterfaceStateKey parses interface name and state from key derived +// from interface by InterfaceStateKey(). +func ParseInterfaceStateKey(key string) (ifName string, ifIsUp bool, isStateKey bool) { + if strings.HasPrefix(key, InterfaceStateKeyPrefix) { + keySuffix := strings.TrimPrefix(key, InterfaceStateKeyPrefix) + keyComps := strings.Split(keySuffix, "/") + if len(keyComps) != 2 { + return "", false, false + } + ifName = keyComps[0] + isStateKey = true + if keyComps[1] == interfaceUpState { + ifIsUp = true + } + return + } + return "", false, false +} + +/* Interface Address (derived) */ + +// InterfaceAddressKey returns key representing IP address assigned to Linux interface. 
+func InterfaceAddressKey(ifName string, address string) string { + var mask string + addrComps := strings.Split(address, "/") + addr := addrComps[0] + if len(addrComps) > 0 { + mask = addrComps[1] + } + key := strings.Replace(interfaceAddressKeyTemplate, "{ifName}", ifName, 1) + key = strings.Replace(key, "{addr}", addr, 1) + key = strings.Replace(key, "{mask}", mask, 1) + return key +} + +// ParseInterfaceAddressKey parses interface address from key derived +// from interface by InterfaceAddressKey(). +func ParseInterfaceAddressKey(key string) (ifName string, ifAddr *net.IPNet, isAddrKey bool) { + var err error + if strings.HasPrefix(key, InterfaceAddressKeyPrefix) { + keySuffix := strings.TrimPrefix(key, InterfaceAddressKeyPrefix) + keyComps := strings.Split(keySuffix, "/") + if len(keyComps) != 3 { + return "", nil, false + } + _, ifAddr, err = net.ParseCIDR(keyComps[1] + "/" + keyComps[2]) + if err != nil { + return "", nil, false + } + ifName = keyComps[0] + isAddrKey = true + return + } + return "", nil, false +} + +// MarshalJSON ensures that field of type 'oneOf' is correctly marshaled +// by using gogo lib marshaller +func (m *Interface) MarshalJSON() ([]byte, error) { + marshaller := &jsonpb.Marshaler{} + str, err := marshaller.MarshalToString(m) + if err != nil { + return nil, err + } + return []byte(str), nil +} + +// UnmarshalJSON ensures that field of type 'oneOf' is correctly unmarshaled +func (m *Interface) UnmarshalJSON(data []byte) error { + return jsonpb.UnmarshalString(string(data), m) +} diff --git a/api/models/linux/l3/arp.pb.go b/api/models/linux/l3/arp.pb.go new file mode 100644 index 0000000000..fd64d0dbfc --- /dev/null +++ b/api/models/linux/l3/arp.pb.go @@ -0,0 +1,100 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: models/linux/l3/arp.proto + +package linux_l3 // import "github.com/ligato/vpp-agent/api/models/linux/l3" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type ARPEntry struct { + Interface string `protobuf:"bytes,1,opt,name=interface,proto3" json:"interface,omitempty"` + IpAddress string `protobuf:"bytes,2,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + HwAddress string `protobuf:"bytes,3,opt,name=hw_address,json=hwAddress,proto3" json:"hw_address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ARPEntry) Reset() { *m = ARPEntry{} } +func (m *ARPEntry) String() string { return proto.CompactTextString(m) } +func (*ARPEntry) ProtoMessage() {} +func (*ARPEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_arp_60ac29f29aa27027, []int{0} +} +func (m *ARPEntry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ARPEntry.Unmarshal(m, b) +} +func (m *ARPEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ARPEntry.Marshal(b, m, deterministic) +} +func (dst *ARPEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ARPEntry.Merge(dst, src) +} +func (m *ARPEntry) XXX_Size() int { + return xxx_messageInfo_ARPEntry.Size(m) +} +func (m *ARPEntry) XXX_DiscardUnknown() { + xxx_messageInfo_ARPEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_ARPEntry 
proto.InternalMessageInfo + +func (m *ARPEntry) GetInterface() string { + if m != nil { + return m.Interface + } + return "" +} + +func (m *ARPEntry) GetIpAddress() string { + if m != nil { + return m.IpAddress + } + return "" +} + +func (m *ARPEntry) GetHwAddress() string { + if m != nil { + return m.HwAddress + } + return "" +} + +func (*ARPEntry) XXX_MessageName() string { + return "linux.l3.ARPEntry" +} +func init() { + proto.RegisterType((*ARPEntry)(nil), "linux.l3.ARPEntry") +} + +func init() { proto.RegisterFile("models/linux/l3/arp.proto", fileDescriptor_arp_60ac29f29aa27027) } + +var fileDescriptor_arp_60ac29f29aa27027 = []byte{ + // 201 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcd, 0x4f, 0x49, + 0xcd, 0x29, 0xd6, 0xcf, 0xc9, 0xcc, 0x2b, 0xad, 0xd0, 0xcf, 0x31, 0xd6, 0x4f, 0x2c, 0x2a, 0xd0, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x00, 0x8b, 0xe9, 0xe5, 0x18, 0x4b, 0xe9, 0xa6, 0x67, + 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xa7, 0xe7, 0xeb, 0x83, 0x15, + 0x24, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0xa8, 0x94, 0xc6, 0xc5, 0xe1, 0x18, + 0x14, 0xe0, 0x9a, 0x57, 0x52, 0x54, 0x29, 0x24, 0xc3, 0xc5, 0x99, 0x99, 0x57, 0x92, 0x5a, 0x94, + 0x96, 0x98, 0x9c, 0x2a, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x84, 0x10, 0x10, 0x92, 0xe5, 0xe2, + 0xca, 0x2c, 0x88, 0x4f, 0x4c, 0x49, 0x29, 0x4a, 0x2d, 0x2e, 0x96, 0x60, 0x82, 0x4a, 0x17, 0x38, + 0x42, 0x04, 0x40, 0xd2, 0x19, 0xe5, 0x70, 0x69, 0x66, 0x88, 0x74, 0x46, 0x39, 0x54, 0xda, 0xc9, + 0xee, 0xc4, 0x63, 0x39, 0xc6, 0x28, 0x0b, 0x24, 0xc7, 0xe5, 0x64, 0xa6, 0x27, 0x96, 0xe4, 0xeb, + 0x97, 0x15, 0x14, 0xe8, 0x26, 0xa6, 0xa7, 0xe6, 0x95, 0xe8, 0x27, 0x16, 0x64, 0xea, 0xa3, 0xf9, + 0xd0, 0x1a, 0xcc, 0x88, 0xcf, 0x31, 0x4e, 0x62, 0x03, 0x3b, 0xd7, 0x18, 0x10, 0x00, 0x00, 0xff, + 0xff, 0x57, 0xca, 0xbc, 0x0c, 0x04, 0x01, 0x00, 0x00, +} diff --git a/api/models/linux/l3/arp.proto 
b/api/models/linux/l3/arp.proto new file mode 100644 index 0000000000..950339f0be --- /dev/null +++ b/api/models/linux/l3/arp.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package linux.l3; + +option go_package = "github.com/ligato/vpp-agent/api/models/linux/l3;linux_l3"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option (gogoproto.messagename_all) = true; + +message ARPEntry { + string interface = 1; + string ip_address = 2; + string hw_address = 3; +} diff --git a/api/models/linux/l3/keys.go b/api/models/linux/l3/keys.go new file mode 100644 index 0000000000..63f5ea9500 --- /dev/null +++ b/api/models/linux/l3/keys.go @@ -0,0 +1,113 @@ +// Copyright (c) 2017 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux_l3 + +import ( + "net" + "strconv" + "strings" + + "github.com/ligato/vpp-agent/pkg/models" +) + +// ModuleName is the module name used for models. 
+const ModuleName = "linux.l3"
+
+var (
+	ModelARPEntry = models.Register(&ARPEntry{}, models.Spec{
+		Module:  ModuleName,
+		Version: "v2",
+		Type:    "arp",
+	}, models.WithNameTemplate("{{.Interface}}/{{.IpAddress}}"))
+
+	ModelRoute = models.Register(&Route{}, models.Spec{
+		Module:  ModuleName,
+		Version: "v2",
+		Type:    "route",
+	}, models.WithNameTemplate(
+		`{{with ipnet .DstNetwork}}{{printf "%s/%d" .IP .MaskSize}}{{end}}/{{.OutgoingInterface}}`,
+	))
+)
+
+// ArpKey returns the key used in ETCD to store configuration of a particular Linux ARP entry.
+func ArpKey(iface, ipAddr string) string {
+	return models.Key(&ARPEntry{
+		Interface: iface,
+		IpAddress: ipAddr,
+	})
+}
+
+// RouteKey returns the key used in ETCD to store configuration of a particular Linux route.
+func RouteKey(dstNetwork, outgoingInterface string) string {
+	return models.Key(&Route{
+		DstNetwork:        dstNetwork,
+		OutgoingInterface: outgoingInterface,
+	})
+}
+
+const (
+	/* Link-local route (derived) */
+
+	// LinkLocalRouteKeyPrefix is a prefix for keys derived from link-local routes.
+	LinkLocalRouteKeyPrefix = "linux/link-local-route/"
+
+	// linkLocalRouteKeyTemplate is a template for key derived from link-local route.
+	linkLocalRouteKeyTemplate = LinkLocalRouteKeyPrefix + "{dest-net}/{dest-mask}/{out-intf}"
+)
+
+/* Link-local Route (derived) */
+
+// StaticLinkLocalRouteKey returns a derived key used to represent link-local route.
+func StaticLinkLocalRouteKey(dstAddr, outgoingInterface string) string {
+	return RouteKeyFromTemplate(linkLocalRouteKeyTemplate, dstAddr, outgoingInterface)
+}
+
+// ParseStaticLinkLocalRouteKey parses route attributes from a key derived from link-local route.
+func ParseStaticLinkLocalRouteKey(key string) (dstNetAddr *net.IPNet, outgoingInterface string, isRouteKey bool) {
+	return parseRouteFromKeySuffix(key, LinkLocalRouteKeyPrefix, "invalid Linux link-local Route key: ")
+}
+
+/* Route helpers */
+
+// RouteKeyFromTemplate fills key template with route attributes.
+// NOTE(review): the error from net.ParseCIDR is discarded; a malformed
+// dstAddr leaves dstNet nil and the next line panics — consider validating
+// dstAddr before key construction.
+func RouteKeyFromTemplate(template, dstAddr, outgoingInterface string) string {
+	_, dstNet, _ := net.ParseCIDR(dstAddr)
+	dstNetAddr := dstNet.IP.String()
+	dstNetMask, _ := dstNet.Mask.Size()
+	key := strings.Replace(template, "{dest-net}", dstNetAddr, 1)
+	key = strings.Replace(key, "{dest-mask}", strconv.Itoa(dstNetMask), 1)
+	key = strings.Replace(key, "{out-intf}", outgoingInterface, 1)
+	return key
+}
+
+// parseRouteFromKeySuffix parses destination network and outgoing interface from a route key suffix.
+// On any mismatch (wrong prefix, wrong component count, bad CIDR) it returns
+// (nil, "", false) without reporting a reason.
+// NOTE(review): the errPrefix parameter is currently unused — either wire it
+// into error reporting or drop it.
+func parseRouteFromKeySuffix(key, prefix, errPrefix string) (dstNetAddr *net.IPNet, outgoingInterface string, isRouteKey bool) {
+	var err error
+	if strings.HasPrefix(key, prefix) {
+		routeSuffix := strings.TrimPrefix(key, prefix)
+		routeComps := strings.Split(routeSuffix, "/")
+		if len(routeComps) != 3 {
+			return nil, "", false
+		}
+		_, dstNetAddr, err = net.ParseCIDR(routeComps[0] + "/" + routeComps[1])
+		if err != nil {
+			return nil, "", false
+		}
+		outgoingInterface = routeComps[2]
+		isRouteKey = true
+		return
+	}
+	return nil, "", false
+}
diff --git a/api/models/linux/l3/route.pb.go b/api/models/linux/l3/route.pb.go
new file mode 100644
index 0000000000..ef5a98e338
--- /dev/null
+++ b/api/models/linux/l3/route.pb.go
@@ -0,0 +1,157 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: models/linux/l3/route.proto
+
+package linux_l3 // import "github.com/ligato/vpp-agent/api/models/linux/l3"
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Route_Scope int32 + +const ( + Route_UNDEFINED Route_Scope = 0 + Route_GLOBAL Route_Scope = 1 + Route_SITE Route_Scope = 2 + Route_LINK Route_Scope = 3 + Route_HOST Route_Scope = 4 +) + +var Route_Scope_name = map[int32]string{ + 0: "UNDEFINED", + 1: "GLOBAL", + 2: "SITE", + 3: "LINK", + 4: "HOST", +} +var Route_Scope_value = map[string]int32{ + "UNDEFINED": 0, + "GLOBAL": 1, + "SITE": 2, + "LINK": 3, + "HOST": 4, +} + +func (x Route_Scope) String() string { + return proto.EnumName(Route_Scope_name, int32(x)) +} +func (Route_Scope) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_route_91d1b915d4741414, []int{0, 0} +} + +type Route struct { + OutgoingInterface string `protobuf:"bytes,1,opt,name=outgoing_interface,json=outgoingInterface,proto3" json:"outgoing_interface,omitempty"` + Scope Route_Scope `protobuf:"varint,2,opt,name=scope,proto3,enum=linux.l3.Route_Scope" json:"scope,omitempty"` + DstNetwork string `protobuf:"bytes,3,opt,name=dst_network,json=dstNetwork,proto3" json:"dst_network,omitempty"` + GwAddr string `protobuf:"bytes,4,opt,name=gw_addr,json=gwAddr,proto3" json:"gw_addr,omitempty"` + Metric uint32 `protobuf:"varint,5,opt,name=metric,proto3" json:"metric,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Route) Reset() { *m = Route{} } +func (m *Route) String() string { return proto.CompactTextString(m) } +func (*Route) ProtoMessage() {} +func (*Route) Descriptor() ([]byte, []int) { + return fileDescriptor_route_91d1b915d4741414, []int{0} +} +func (m *Route) 
XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Route.Unmarshal(m, b) +} +func (m *Route) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Route.Marshal(b, m, deterministic) +} +func (dst *Route) XXX_Merge(src proto.Message) { + xxx_messageInfo_Route.Merge(dst, src) +} +func (m *Route) XXX_Size() int { + return xxx_messageInfo_Route.Size(m) +} +func (m *Route) XXX_DiscardUnknown() { + xxx_messageInfo_Route.DiscardUnknown(m) +} + +var xxx_messageInfo_Route proto.InternalMessageInfo + +func (m *Route) GetOutgoingInterface() string { + if m != nil { + return m.OutgoingInterface + } + return "" +} + +func (m *Route) GetScope() Route_Scope { + if m != nil { + return m.Scope + } + return Route_UNDEFINED +} + +func (m *Route) GetDstNetwork() string { + if m != nil { + return m.DstNetwork + } + return "" +} + +func (m *Route) GetGwAddr() string { + if m != nil { + return m.GwAddr + } + return "" +} + +func (m *Route) GetMetric() uint32 { + if m != nil { + return m.Metric + } + return 0 +} + +func (*Route) XXX_MessageName() string { + return "linux.l3.Route" +} +func init() { + proto.RegisterType((*Route)(nil), "linux.l3.Route") + proto.RegisterEnum("linux.l3.Route_Scope", Route_Scope_name, Route_Scope_value) +} + +func init() { proto.RegisterFile("models/linux/l3/route.proto", fileDescriptor_route_91d1b915d4741414) } + +var fileDescriptor_route_91d1b915d4741414 = []byte{ + // 326 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x90, 0x41, 0x4b, 0xeb, 0x40, + 0x14, 0x85, 0xdf, 0xb4, 0x4d, 0x5e, 0x7b, 0x1f, 0x7d, 0xe4, 0x0d, 0x3c, 0x0d, 0x0a, 0x5a, 0xba, + 0x2a, 0x48, 0x33, 0x60, 0x36, 0x82, 0x20, 0xb6, 0xb4, 0x6a, 0xb0, 0xa4, 0x90, 0xd6, 0x8d, 0x9b, + 0x90, 0x66, 0xa6, 0xe3, 0x60, 0x9a, 0x09, 0x93, 0x89, 0xf5, 0x27, 0xfa, 0x3f, 0xfc, 0x11, 0x6e, + 0x25, 0x53, 0x0b, 0xe2, 0xee, 0x9c, 0xfb, 0xdd, 0x39, 0x73, 0x39, 0x70, 0xbc, 0x91, 0x94, 0x65, + 0x25, 0xc9, 0x44, 0x5e, 
0xbd, 0x92, 0xcc, 0x27, 0x4a, 0x56, 0x9a, 0x79, 0x85, 0x92, 0x5a, 0xe2, + 0xb6, 0x99, 0x7a, 0x99, 0x7f, 0x34, 0xe4, 0x42, 0x3f, 0x55, 0x2b, 0x2f, 0x95, 0x1b, 0xc2, 0x25, + 0x97, 0xc4, 0x2c, 0xac, 0xaa, 0xb5, 0x71, 0xc6, 0x18, 0xb5, 0x7b, 0xd8, 0xff, 0x40, 0x60, 0x45, + 0x75, 0x10, 0x1e, 0x02, 0x96, 0x95, 0xe6, 0x52, 0xe4, 0x3c, 0x16, 0xb9, 0x66, 0x6a, 0x9d, 0xa4, + 0xcc, 0x45, 0x3d, 0x34, 0xe8, 0x44, 0xff, 0xf6, 0x24, 0xd8, 0x03, 0x7c, 0x06, 0x56, 0x99, 0xca, + 0x82, 0xb9, 0x8d, 0x1e, 0x1a, 0xfc, 0x3d, 0xff, 0xef, 0xed, 0x2f, 0xf0, 0x4c, 0x9c, 0xb7, 0xa8, + 0x61, 0xb4, 0xdb, 0xc1, 0xa7, 0xf0, 0x87, 0x96, 0x3a, 0xce, 0x99, 0xde, 0x4a, 0xf5, 0xec, 0x36, + 0x4d, 0x28, 0xd0, 0x52, 0x87, 0xbb, 0x09, 0x3e, 0x84, 0xdf, 0x7c, 0x1b, 0x27, 0x94, 0x2a, 0xb7, + 0x65, 0xa0, 0xcd, 0xb7, 0x23, 0x4a, 0x15, 0x3e, 0x00, 0x7b, 0xc3, 0xb4, 0x12, 0xa9, 0x6b, 0xf5, + 0xd0, 0xa0, 0x1b, 0x7d, 0xb9, 0xfe, 0x35, 0x58, 0xe6, 0x07, 0xdc, 0x85, 0xce, 0x43, 0x38, 0x99, + 0xde, 0x04, 0xe1, 0x74, 0xe2, 0xfc, 0xc2, 0x00, 0xf6, 0xed, 0x6c, 0x3e, 0x1e, 0xcd, 0x1c, 0x84, + 0xdb, 0xd0, 0x5a, 0x04, 0xcb, 0xa9, 0xd3, 0xa8, 0xd5, 0x2c, 0x08, 0xef, 0x9d, 0x66, 0xad, 0xee, + 0xe6, 0x8b, 0xa5, 0xd3, 0x1a, 0x5f, 0xbd, 0xbd, 0x9f, 0xa0, 0xc7, 0x8b, 0x6f, 0x75, 0x65, 0x82, + 0x27, 0x5a, 0x92, 0x97, 0xa2, 0x18, 0x26, 0x9c, 0xe5, 0x9a, 0x24, 0x85, 0x20, 0x3f, 0x5a, 0xbf, + 0x34, 0x22, 0xce, 0xfc, 0x95, 0x6d, 0x0a, 0xf4, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x63, 0x31, + 0x49, 0xba, 0x98, 0x01, 0x00, 0x00, +} diff --git a/api/models/linux/l3/route.proto b/api/models/linux/l3/route.proto new file mode 100644 index 0000000000..a8b0815a45 --- /dev/null +++ b/api/models/linux/l3/route.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; + +package linux.l3; + +option go_package = "github.com/ligato/vpp-agent/api/models/linux/l3;linux_l3"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option (gogoproto.messagename_all) = true; + +message Route { + string outgoing_interface = 1; /* outgoing interface logical name 
(mandatory) */ + + enum Scope { + UNDEFINED = 0; + GLOBAL = 1; + SITE = 2; + LINK = 3; + HOST = 4; + } + Scope scope = 2; /* the scope of the area where the link is valid */ + + string dst_network = 3; /* destination network address in the format
/ (mandatory) */ + string gw_addr = 4; /* gateway IP address */ + uint32 metric = 5; /* routing metric (weight) */ +} diff --git a/api/models/linux/linux.pb.go b/api/models/linux/linux.pb.go new file mode 100644 index 0000000000..b7a141fcb1 --- /dev/null +++ b/api/models/linux/linux.pb.go @@ -0,0 +1,132 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: models/linux/linux.proto + +package linux // import "github.com/ligato/vpp-agent/api/models/linux" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import interfaces "github.com/ligato/vpp-agent/api/models/linux/interfaces" +import l3 "github.com/ligato/vpp-agent/api/models/linux/l3" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type ConfigData struct { + Interfaces []*interfaces.Interface `protobuf:"bytes,10,rep,name=interfaces,proto3" json:"interfaces,omitempty"` + ArpEntries []*l3.ARPEntry `protobuf:"bytes,20,rep,name=arp_entries,json=arpEntries,proto3" json:"arp_entries,omitempty"` + Routes []*l3.Route `protobuf:"bytes,21,rep,name=routes,proto3" json:"routes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConfigData) Reset() { *m = ConfigData{} } +func (m *ConfigData) String() string { return proto.CompactTextString(m) } +func (*ConfigData) ProtoMessage() {} +func (*ConfigData) Descriptor() ([]byte, []int) { + return fileDescriptor_linux_bf9f381edc6f58d1, []int{0} +} +func (m *ConfigData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConfigData.Unmarshal(m, b) +} +func (m *ConfigData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConfigData.Marshal(b, m, deterministic) +} +func (dst *ConfigData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigData.Merge(dst, src) +} +func (m *ConfigData) XXX_Size() int { + return xxx_messageInfo_ConfigData.Size(m) +} +func (m *ConfigData) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigData.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigData proto.InternalMessageInfo + +func (m *ConfigData) GetInterfaces() []*interfaces.Interface { + if m != nil { + return m.Interfaces + } + return nil +} + +func (m *ConfigData) GetArpEntries() []*l3.ARPEntry { + if m != nil { + return m.ArpEntries + } + return nil +} + +func (m *ConfigData) GetRoutes() []*l3.Route { + if m != nil { + return m.Routes + } + return nil +} + +type Notification struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Notification) Reset() { *m = Notification{} } +func (m 
*Notification) String() string { return proto.CompactTextString(m) } +func (*Notification) ProtoMessage() {} +func (*Notification) Descriptor() ([]byte, []int) { + return fileDescriptor_linux_bf9f381edc6f58d1, []int{1} +} +func (m *Notification) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Notification.Unmarshal(m, b) +} +func (m *Notification) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Notification.Marshal(b, m, deterministic) +} +func (dst *Notification) XXX_Merge(src proto.Message) { + xxx_messageInfo_Notification.Merge(dst, src) +} +func (m *Notification) XXX_Size() int { + return xxx_messageInfo_Notification.Size(m) +} +func (m *Notification) XXX_DiscardUnknown() { + xxx_messageInfo_Notification.DiscardUnknown(m) +} + +var xxx_messageInfo_Notification proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ConfigData)(nil), "linux.ConfigData") + proto.RegisterType((*Notification)(nil), "linux.Notification") +} + +func init() { proto.RegisterFile("models/linux/linux.proto", fileDescriptor_linux_bf9f381edc6f58d1) } + +var fileDescriptor_linux_bf9f381edc6f58d1 = []byte{ + // 247 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x8f, 0xbf, 0x4e, 0xc3, 0x30, + 0x10, 0x87, 0x85, 0x10, 0x1d, 0xae, 0x08, 0x24, 0x0b, 0xa4, 0xd0, 0x2e, 0xa8, 0x4b, 0x59, 0xb0, + 0xa5, 0x86, 0xad, 0x13, 0x7f, 0x3a, 0xb0, 0x20, 0xe4, 0x91, 0x05, 0x5d, 0x83, 0x13, 0x4e, 0x4a, + 0x7d, 0x96, 0x73, 0x41, 0xf0, 0x44, 0xbc, 0x26, 0xc2, 0x09, 0x4a, 0xc3, 0x72, 0x3a, 0xfb, 0xfb, + 0x7e, 0xf6, 0x1d, 0x64, 0x3b, 0x7e, 0x73, 0x75, 0x63, 0x6a, 0xf2, 0xed, 0x67, 0x57, 0x75, 0x88, + 0x2c, 0xac, 0x8e, 0xd2, 0x61, 0xb6, 0x1c, 0x09, 0xe4, 0xc5, 0xc5, 0x12, 0x0b, 0xd7, 0x0c, 0x6d, + 0xe7, 0xcf, 0x2e, 0xc6, 0x2f, 0xe5, 0x06, 0x63, 0xe8, 0xd1, 0xfc, 0x3f, 0x8a, 0xdc, 0x4a, 0x9f, + 0x5b, 0x7c, 0x1f, 0x00, 0xdc, 0xb3, 0x2f, 0xa9, 0x7a, 0x40, 0x41, 0xb5, 0x06, 0x18, 0x3e, 0xc9, + 0xe0, 0xf2, 
0xf0, 0x6a, 0xba, 0x9a, 0xeb, 0x6e, 0xb0, 0x01, 0xe8, 0xc7, 0xbf, 0xd6, 0xee, 0xe9, + 0x2a, 0x87, 0x29, 0xc6, 0xf0, 0xea, 0xbc, 0x44, 0x72, 0x4d, 0x76, 0x96, 0xd2, 0xaa, 0x4f, 0xd7, + 0xb9, 0xbe, 0xb5, 0xcf, 0x1b, 0x2f, 0xf1, 0xcb, 0x02, 0xc6, 0xb0, 0xe9, 0x2c, 0xb5, 0x84, 0x49, + 0x9a, 0xa7, 0xc9, 0xce, 0x93, 0x7f, 0x3a, 0xf8, 0xf6, 0xf7, 0xde, 0xf6, 0x78, 0x71, 0x02, 0xc7, + 0x4f, 0x2c, 0x54, 0x52, 0x81, 0x42, 0xec, 0xef, 0x6e, 0x5e, 0x56, 0x15, 0xc9, 0x7b, 0xbb, 0xd5, + 0x05, 0xef, 0x4c, 0x4d, 0x15, 0x0a, 0x9b, 0x8f, 0x10, 0xae, 0xb1, 0x72, 0x5e, 0x0c, 0x06, 0x32, + 0xfb, 0x8b, 0xaf, 0x53, 0xdd, 0x4e, 0xd2, 0xda, 0xf9, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdf, + 0x44, 0x56, 0xfc, 0x7a, 0x01, 0x00, 0x00, +} diff --git a/api/models/linux/linux.proto b/api/models/linux/linux.proto new file mode 100644 index 0000000000..ccf7888a7a --- /dev/null +++ b/api/models/linux/linux.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package linux; + +option go_package = "github.com/ligato/vpp-agent/api/models/linux;linux"; + +import "models/linux/interfaces/interface.proto"; +import "models/linux/l3/arp.proto"; +import "models/linux/l3/route.proto"; + +message ConfigData { + repeated linux.interfaces.Interface interfaces = 10; + + repeated linux.l3.ARPEntry arp_entries = 20; + repeated linux.l3.Route routes = 21; +} + +message Notification { + +} diff --git a/api/models/linux/linux_types.go b/api/models/linux/linux_types.go new file mode 100644 index 0000000000..8579a41fc9 --- /dev/null +++ b/api/models/linux/linux_types.go @@ -0,0 +1,29 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "github.com/ligato/vpp-agent/api/models/linux/interfaces" + "github.com/ligato/vpp-agent/api/models/linux/l3" +) + +type ( + // Interface + Interface = linux_interfaces.Interface + + // L3 + Route = linux_l3.Route + ARPEntry = linux_l3.ARPEntry +) diff --git a/api/models/linux/namespace/keys.go b/api/models/linux/namespace/keys.go new file mode 100644 index 0000000000..ee49bd1a5b --- /dev/null +++ b/api/models/linux/namespace/keys.go @@ -0,0 +1,28 @@ +// Copyright (c) 2017 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux_namespace + +const ( + /* Microservice (notifications) */ + + // MicroserviceKeyPrefix is the common prefix of all keys representing + // existing microservices. + MicroserviceKeyPrefix = "linux/microservice/" +) + +// MicroserviceKey returns key representing existing microservice namespace. 
+func MicroserviceKey(microservice string) string { + return MicroserviceKeyPrefix + microservice +} diff --git a/api/models/linux/namespace/namespace.pb.go b/api/models/linux/namespace/namespace.pb.go new file mode 100644 index 0000000000..a9f6650a69 --- /dev/null +++ b/api/models/linux/namespace/namespace.pb.go @@ -0,0 +1,137 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: models/linux/namespace/namespace.proto + +package linux_namespace // import "github.com/ligato/vpp-agent/api/models/linux/namespace" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type NetNamespace_ReferenceType int32 + +const ( + NetNamespace_UNDEFINED NetNamespace_ReferenceType = 0 + NetNamespace_NSID NetNamespace_ReferenceType = 1 + NetNamespace_PID NetNamespace_ReferenceType = 2 + NetNamespace_FD NetNamespace_ReferenceType = 3 + NetNamespace_MICROSERVICE NetNamespace_ReferenceType = 4 +) + +var NetNamespace_ReferenceType_name = map[int32]string{ + 0: "UNDEFINED", + 1: "NSID", + 2: "PID", + 3: "FD", + 4: "MICROSERVICE", +} +var NetNamespace_ReferenceType_value = map[string]int32{ + "UNDEFINED": 0, + "NSID": 1, + "PID": 2, + "FD": 3, + "MICROSERVICE": 4, +} + +func (x NetNamespace_ReferenceType) String() string { + return proto.EnumName(NetNamespace_ReferenceType_name, int32(x)) +} +func (NetNamespace_ReferenceType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_namespace_a134fbd3ced9f31a, []int{0, 0} +} + +type NetNamespace struct { + Type NetNamespace_ReferenceType `protobuf:"varint,1,opt,name=type,proto3,enum=linux.namespace.NetNamespace_ReferenceType" json:"type,omitempty"` + // Reference defines reference specific + // to the namespace type: + // * namespace ID (NSID) + // * PID number (PID) + // * file path (FD) + // * microservice label (MICROSERVICE) + Reference string `protobuf:"bytes,2,opt,name=reference,proto3" json:"reference,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NetNamespace) Reset() { *m = NetNamespace{} } +func (m *NetNamespace) String() string { return proto.CompactTextString(m) } +func (*NetNamespace) ProtoMessage() {} +func (*NetNamespace) Descriptor() ([]byte, []int) { + return fileDescriptor_namespace_a134fbd3ced9f31a, []int{0} +} +func (m *NetNamespace) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NetNamespace.Unmarshal(m, b) +} +func (m *NetNamespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + 
return xxx_messageInfo_NetNamespace.Marshal(b, m, deterministic) +} +func (dst *NetNamespace) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetNamespace.Merge(dst, src) +} +func (m *NetNamespace) XXX_Size() int { + return xxx_messageInfo_NetNamespace.Size(m) +} +func (m *NetNamespace) XXX_DiscardUnknown() { + xxx_messageInfo_NetNamespace.DiscardUnknown(m) +} + +var xxx_messageInfo_NetNamespace proto.InternalMessageInfo + +func (m *NetNamespace) GetType() NetNamespace_ReferenceType { + if m != nil { + return m.Type + } + return NetNamespace_UNDEFINED +} + +func (m *NetNamespace) GetReference() string { + if m != nil { + return m.Reference + } + return "" +} + +func (*NetNamespace) XXX_MessageName() string { + return "linux.namespace.NetNamespace" +} +func init() { + proto.RegisterType((*NetNamespace)(nil), "linux.namespace.NetNamespace") + proto.RegisterEnum("linux.namespace.NetNamespace_ReferenceType", NetNamespace_ReferenceType_name, NetNamespace_ReferenceType_value) +} + +func init() { + proto.RegisterFile("models/linux/namespace/namespace.proto", fileDescriptor_namespace_a134fbd3ced9f31a) +} + +var fileDescriptor_namespace_a134fbd3ced9f31a = []byte{ + // 270 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xcb, 0xcd, 0x4f, 0x49, + 0xcd, 0x29, 0xd6, 0xcf, 0xc9, 0xcc, 0x2b, 0xad, 0xd0, 0xcf, 0x4b, 0xcc, 0x4d, 0x2d, 0x2e, 0x48, + 0x4c, 0x4e, 0x45, 0xb0, 0xf4, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0xf8, 0xc1, 0x0a, 0xf4, 0xe0, + 0xc2, 0x52, 0xba, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, + 0xe9, 0xf9, 0xfa, 0x60, 0x75, 0x49, 0xa5, 0x69, 0x60, 0x1e, 0x98, 0x03, 0x66, 0x41, 0xf4, 0x2b, + 0xed, 0x62, 0xe4, 0xe2, 0xf1, 0x4b, 0x2d, 0xf1, 0x83, 0xe9, 0x17, 0xb2, 0xe7, 0x62, 0x29, 0xa9, + 0x2c, 0x48, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x33, 0xd2, 0xd6, 0x43, 0x33, 0x5f, 0x0f, 0x59, + 0xb1, 0x5e, 0x50, 0x6a, 0x5a, 0x6a, 0x51, 0x6a, 0x5e, 0x72, 0x6a, 0x48, 0x65, 0x41, 0x6a, 
0x10, + 0x58, 0xa3, 0x90, 0x0c, 0x17, 0x67, 0x11, 0x4c, 0x58, 0x82, 0x49, 0x81, 0x51, 0x83, 0x33, 0x08, + 0x21, 0xa0, 0xe4, 0xcd, 0xc5, 0x8b, 0xa2, 0x49, 0x88, 0x97, 0x8b, 0x33, 0xd4, 0xcf, 0xc5, 0xd5, + 0xcd, 0xd3, 0xcf, 0xd5, 0x45, 0x80, 0x41, 0x88, 0x83, 0x8b, 0xc5, 0x2f, 0xd8, 0xd3, 0x45, 0x80, + 0x51, 0x88, 0x9d, 0x8b, 0x39, 0xc0, 0xd3, 0x45, 0x80, 0x49, 0x88, 0x8d, 0x8b, 0xc9, 0xcd, 0x45, + 0x80, 0x59, 0x48, 0x80, 0x8b, 0xc7, 0xd7, 0xd3, 0x39, 0xc8, 0x3f, 0xd8, 0x35, 0x28, 0xcc, 0xd3, + 0xd9, 0x55, 0x80, 0xc5, 0xc9, 0xe7, 0xc4, 0x63, 0x39, 0xc6, 0x28, 0x37, 0x24, 0x1f, 0xe7, 0x64, + 0xa6, 0x27, 0x96, 0xe4, 0xeb, 0x97, 0x15, 0x14, 0xe8, 0x26, 0xa6, 0xa7, 0xe6, 0x95, 0xe8, 0x27, + 0x16, 0x64, 0xea, 0x63, 0x0f, 0x4a, 0x6b, 0x30, 0x3f, 0x1e, 0xce, 0x4f, 0x62, 0x03, 0x87, 0x88, + 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x96, 0x7e, 0x56, 0x91, 0x7b, 0x01, 0x00, 0x00, +} diff --git a/api/models/linux/namespace/namespace.proto b/api/models/linux/namespace/namespace.proto new file mode 100644 index 0000000000..0675fd22b4 --- /dev/null +++ b/api/models/linux/namespace/namespace.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; + +package linux.namespace; + +option go_package = "github.com/ligato/vpp-agent/api/models/linux/namespace;linux_namespace"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option (gogoproto.messagename_all) = true; + +message NetNamespace { + enum ReferenceType { + UNDEFINED = 0; + NSID = 1; /* named namespace */ + PID = 2; /* namespace of a given process */ + FD = 3; /* namespace referenced by a file handle */ + MICROSERVICE = 4; /* namespace of a docker container running given microservice */ + } + ReferenceType type = 1; + + // Reference defines reference specific + // to the namespace type: + // * namespace ID (NSID) + // * PID number (PID) + // * file path (FD) + // * microservice label (MICROSERVICE) + string reference = 2; +}; diff --git a/api/models/linux/punt/punt.pb.go b/api/models/linux/punt/punt.pb.go new file mode 100644 index 
0000000000..a84a2462ba --- /dev/null +++ b/api/models/linux/punt/punt.pb.go @@ -0,0 +1,448 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: models/linux/punt/punt.proto + +package linux_punt // import "github.com/ligato/vpp-agent/api/models/linux/punt" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type PortBased_L4Protocol int32 + +const ( + PortBased_UNDEFINED_L4 PortBased_L4Protocol = 0 + PortBased_TCP PortBased_L4Protocol = 6 + PortBased_UDP PortBased_L4Protocol = 17 +) + +var PortBased_L4Protocol_name = map[int32]string{ + 0: "UNDEFINED_L4", + 6: "TCP", + 17: "UDP", +} +var PortBased_L4Protocol_value = map[string]int32{ + "UNDEFINED_L4": 0, + "TCP": 6, + "UDP": 17, +} + +func (x PortBased_L4Protocol) String() string { + return proto.EnumName(PortBased_L4Protocol_name, int32(x)) +} +func (PortBased_L4Protocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_punt_808f67b1735e53e4, []int{1, 0} +} + +type PortBased_L3Protocol int32 + +const ( + PortBased_UNDEFINED_L3 PortBased_L3Protocol = 0 + PortBased_IPv4 PortBased_L3Protocol = 1 + PortBased_IPv6 PortBased_L3Protocol = 2 + PortBased_ALL PortBased_L3Protocol = 3 +) + +var PortBased_L3Protocol_name = map[int32]string{ + 0: "UNDEFINED_L3", + 1: "IPv4", + 2: "IPv6", + 3: "ALL", +} +var PortBased_L3Protocol_value = map[string]int32{ + "UNDEFINED_L3": 0, + "IPv4": 1, + "IPv6": 2, + "ALL": 3, +} + +func (x PortBased_L3Protocol) String() string 
{ + return proto.EnumName(PortBased_L3Protocol_name, int32(x)) +} +func (PortBased_L3Protocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_punt_808f67b1735e53e4, []int{1, 1} +} + +// Proxy allows to listen on network socket or unix domain socket, and resend to another network/unix domain socket +type Proxy struct { + // Types that are valid to be assigned to Rx: + // *Proxy_RxPort + // *Proxy_RxSocket + Rx isProxy_Rx `protobuf_oneof:"rx"` + // Types that are valid to be assigned to Tx: + // *Proxy_TxPort + // *Proxy_TxSocket + Tx isProxy_Tx `protobuf_oneof:"tx"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Proxy) Reset() { *m = Proxy{} } +func (m *Proxy) String() string { return proto.CompactTextString(m) } +func (*Proxy) ProtoMessage() {} +func (*Proxy) Descriptor() ([]byte, []int) { + return fileDescriptor_punt_808f67b1735e53e4, []int{0} +} +func (m *Proxy) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Proxy.Unmarshal(m, b) +} +func (m *Proxy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Proxy.Marshal(b, m, deterministic) +} +func (dst *Proxy) XXX_Merge(src proto.Message) { + xxx_messageInfo_Proxy.Merge(dst, src) +} +func (m *Proxy) XXX_Size() int { + return xxx_messageInfo_Proxy.Size(m) +} +func (m *Proxy) XXX_DiscardUnknown() { + xxx_messageInfo_Proxy.DiscardUnknown(m) +} + +var xxx_messageInfo_Proxy proto.InternalMessageInfo + +type isProxy_Rx interface { + isProxy_Rx() +} +type isProxy_Tx interface { + isProxy_Tx() +} + +type Proxy_RxPort struct { + RxPort *PortBased `protobuf:"bytes,1,opt,name=rx_port,json=rxPort,proto3,oneof"` +} +type Proxy_RxSocket struct { + RxSocket *SocketBased `protobuf:"bytes,2,opt,name=rx_socket,json=rxSocket,proto3,oneof"` +} +type Proxy_TxPort struct { + TxPort *PortBased `protobuf:"bytes,3,opt,name=tx_port,json=txPort,proto3,oneof"` +} +type Proxy_TxSocket struct { + TxSocket 
*SocketBased `protobuf:"bytes,4,opt,name=tx_socket,json=txSocket,proto3,oneof"` +} + +func (*Proxy_RxPort) isProxy_Rx() {} +func (*Proxy_RxSocket) isProxy_Rx() {} +func (*Proxy_TxPort) isProxy_Tx() {} +func (*Proxy_TxSocket) isProxy_Tx() {} + +func (m *Proxy) GetRx() isProxy_Rx { + if m != nil { + return m.Rx + } + return nil +} +func (m *Proxy) GetTx() isProxy_Tx { + if m != nil { + return m.Tx + } + return nil +} + +func (m *Proxy) GetRxPort() *PortBased { + if x, ok := m.GetRx().(*Proxy_RxPort); ok { + return x.RxPort + } + return nil +} + +func (m *Proxy) GetRxSocket() *SocketBased { + if x, ok := m.GetRx().(*Proxy_RxSocket); ok { + return x.RxSocket + } + return nil +} + +func (m *Proxy) GetTxPort() *PortBased { + if x, ok := m.GetTx().(*Proxy_TxPort); ok { + return x.TxPort + } + return nil +} + +func (m *Proxy) GetTxSocket() *SocketBased { + if x, ok := m.GetTx().(*Proxy_TxSocket); ok { + return x.TxSocket + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Proxy) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Proxy_OneofMarshaler, _Proxy_OneofUnmarshaler, _Proxy_OneofSizer, []interface{}{ + (*Proxy_RxPort)(nil), + (*Proxy_RxSocket)(nil), + (*Proxy_TxPort)(nil), + (*Proxy_TxSocket)(nil), + } +} + +func _Proxy_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Proxy) + // rx + switch x := m.Rx.(type) { + case *Proxy_RxPort: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RxPort); err != nil { + return err + } + case *Proxy_RxSocket: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RxSocket); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Proxy.Rx has unexpected type %T", x) + } + // tx + switch x := m.Tx.(type) { + case *Proxy_TxPort: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TxPort); err != nil { + return err + } + case *Proxy_TxSocket: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TxSocket); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Proxy.Tx has unexpected type %T", x) + } + return nil +} + +func _Proxy_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Proxy) + switch tag { + case 1: // rx.rx_port + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PortBased) + err := b.DecodeMessage(msg) + m.Rx = &Proxy_RxPort{msg} + return true, err + case 2: // rx.rx_socket + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SocketBased) + err := b.DecodeMessage(msg) + m.Rx = &Proxy_RxSocket{msg} + return true, err + case 3: // tx.tx_port + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PortBased) + err := 
b.DecodeMessage(msg) + m.Tx = &Proxy_TxPort{msg} + return true, err + case 4: // tx.tx_socket + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SocketBased) + err := b.DecodeMessage(msg) + m.Tx = &Proxy_TxSocket{msg} + return true, err + default: + return false, nil + } +} + +func _Proxy_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Proxy) + // rx + switch x := m.Rx.(type) { + case *Proxy_RxPort: + s := proto.Size(x.RxPort) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Proxy_RxSocket: + s := proto.Size(x.RxSocket) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // tx + switch x := m.Tx.(type) { + case *Proxy_TxPort: + s := proto.Size(x.TxPort) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Proxy_TxSocket: + s := proto.Size(x.TxSocket) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func (*Proxy) XXX_MessageName() string { + return "linux.punt.Proxy" +} + +// Define network socket type +type PortBased struct { + L4Protocol PortBased_L4Protocol `protobuf:"varint,1,opt,name=l4_protocol,json=l4Protocol,proto3,enum=linux.punt.PortBased_L4Protocol" json:"l4_protocol,omitempty"` + L3Protocol PortBased_L3Protocol `protobuf:"varint,3,opt,name=l3_protocol,json=l3Protocol,proto3,enum=linux.punt.PortBased_L3Protocol" json:"l3_protocol,omitempty"` + Port uint32 `protobuf:"varint,4,opt,name=port,proto3" json:"port,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PortBased) Reset() { *m = PortBased{} } +func (m *PortBased) String() string { return proto.CompactTextString(m) } +func (*PortBased) ProtoMessage() {} +func (*PortBased) Descriptor() 
([]byte, []int) { + return fileDescriptor_punt_808f67b1735e53e4, []int{1} +} +func (m *PortBased) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PortBased.Unmarshal(m, b) +} +func (m *PortBased) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PortBased.Marshal(b, m, deterministic) +} +func (dst *PortBased) XXX_Merge(src proto.Message) { + xxx_messageInfo_PortBased.Merge(dst, src) +} +func (m *PortBased) XXX_Size() int { + return xxx_messageInfo_PortBased.Size(m) +} +func (m *PortBased) XXX_DiscardUnknown() { + xxx_messageInfo_PortBased.DiscardUnknown(m) +} + +var xxx_messageInfo_PortBased proto.InternalMessageInfo + +func (m *PortBased) GetL4Protocol() PortBased_L4Protocol { + if m != nil { + return m.L4Protocol + } + return PortBased_UNDEFINED_L4 +} + +func (m *PortBased) GetL3Protocol() PortBased_L3Protocol { + if m != nil { + return m.L3Protocol + } + return PortBased_UNDEFINED_L3 +} + +func (m *PortBased) GetPort() uint32 { + if m != nil { + return m.Port + } + return 0 +} + +func (*PortBased) XXX_MessageName() string { + return "linux.punt.PortBased" +} + +// Define unix domain socket type for IPC +type SocketBased struct { + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SocketBased) Reset() { *m = SocketBased{} } +func (m *SocketBased) String() string { return proto.CompactTextString(m) } +func (*SocketBased) ProtoMessage() {} +func (*SocketBased) Descriptor() ([]byte, []int) { + return fileDescriptor_punt_808f67b1735e53e4, []int{2} +} +func (m *SocketBased) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SocketBased.Unmarshal(m, b) +} +func (m *SocketBased) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SocketBased.Marshal(b, m, deterministic) +} +func (dst *SocketBased) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_SocketBased.Merge(dst, src) +} +func (m *SocketBased) XXX_Size() int { + return xxx_messageInfo_SocketBased.Size(m) +} +func (m *SocketBased) XXX_DiscardUnknown() { + xxx_messageInfo_SocketBased.DiscardUnknown(m) +} + +var xxx_messageInfo_SocketBased proto.InternalMessageInfo + +func (m *SocketBased) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (*SocketBased) XXX_MessageName() string { + return "linux.punt.SocketBased" +} +func init() { + proto.RegisterType((*Proxy)(nil), "linux.punt.Proxy") + proto.RegisterType((*PortBased)(nil), "linux.punt.PortBased") + proto.RegisterType((*SocketBased)(nil), "linux.punt.SocketBased") + proto.RegisterEnum("linux.punt.PortBased_L4Protocol", PortBased_L4Protocol_name, PortBased_L4Protocol_value) + proto.RegisterEnum("linux.punt.PortBased_L3Protocol", PortBased_L3Protocol_name, PortBased_L3Protocol_value) +} + +func init() { proto.RegisterFile("models/linux/punt/punt.proto", fileDescriptor_punt_808f67b1735e53e4) } + +var fileDescriptor_punt_808f67b1735e53e4 = []byte{ + // 394 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xcf, 0x8e, 0xd3, 0x30, + 0x10, 0xc6, 0x9b, 0x3f, 0x64, 0xdb, 0x29, 0x20, 0x63, 0x09, 0xb1, 0x42, 0x08, 0x2d, 0x39, 0x71, + 0xd9, 0x64, 0xb5, 0x89, 0xf6, 0xb2, 0x5c, 0x1a, 0x5a, 0x44, 0xa5, 0xa8, 0x8a, 0x02, 0xbd, 0x70, + 0x89, 0xd2, 0x36, 0xa4, 0x11, 0x69, 0x1d, 0xb9, 0x4e, 0x65, 0xde, 0x83, 0x87, 0xe2, 0x3d, 0x38, + 0xf0, 0x1a, 0xc8, 0xd3, 0x90, 0x44, 0xb0, 0xea, 0xc5, 0xfa, 0xcd, 0xd8, 0xdf, 0xf7, 0xd9, 0x23, + 0xc3, 0xab, 0x1d, 0xdb, 0x64, 0xe5, 0xc1, 0x2d, 0x8b, 0x7d, 0x2d, 0xdd, 0xaa, 0xde, 0x0b, 0x5c, + 0x9c, 0x8a, 0x33, 0xc1, 0x28, 0x60, 0xdb, 0x51, 0x9d, 0x97, 0xd7, 0x79, 0x21, 0xb6, 0xf5, 0xca, + 0x59, 0xb3, 0x9d, 0x9b, 0xb3, 0x9c, 0xb9, 0x78, 0x64, 0x55, 0x7f, 0xc5, 0x0a, 0x0b, 0xa4, 0x93, + 0xd4, 0xfe, 0xad, 0xc1, 0xa3, 0x88, 0x33, 0xf9, 0x9d, 0xde, 0xc0, 0x05, 0x97, 0x49, 0xc5, 0xb8, + 
0xb8, 0xd4, 0xae, 0xb4, 0xb7, 0xe3, 0xdb, 0xe7, 0x4e, 0x67, 0xeb, 0x44, 0x8c, 0x8b, 0x20, 0x3d, + 0x64, 0x9b, 0x8f, 0x83, 0xd8, 0xe2, 0x52, 0x95, 0xf4, 0x0e, 0x46, 0x5c, 0x26, 0x07, 0xb6, 0xfe, + 0x96, 0x89, 0x4b, 0x1d, 0x35, 0x2f, 0xfa, 0x9a, 0x4f, 0xb8, 0xf3, 0x57, 0x35, 0xe4, 0xf2, 0xd4, + 0x50, 0x49, 0xa2, 0x49, 0x32, 0xce, 0x25, 0x69, 0xb1, 0x25, 0xda, 0x24, 0xd1, 0x26, 0x99, 0xe7, + 0x93, 0xb4, 0x78, 0x28, 0x9a, 0xa4, 0xc0, 0x04, 0x9d, 0x4b, 0xb5, 0x0a, 0x69, 0xff, 0xd0, 0x61, + 0xd4, 0x7a, 0xd3, 0x09, 0x8c, 0x4b, 0x3f, 0xc1, 0x19, 0xac, 0x59, 0x89, 0x2f, 0x7e, 0x7a, 0x7b, + 0xf5, 0xe0, 0x3d, 0x9c, 0xd0, 0x8f, 0x9a, 0x73, 0x31, 0x94, 0x2d, 0xa3, 0x85, 0xd7, 0x59, 0x18, + 0x67, 0x2d, 0xbc, 0x9e, 0x45, 0xcb, 0x94, 0x82, 0x89, 0x63, 0x50, 0x4f, 0x7a, 0x12, 0x23, 0xdb, + 0x37, 0x00, 0x5d, 0x20, 0x25, 0xf0, 0x78, 0xb9, 0x98, 0xce, 0x3e, 0xcc, 0x17, 0xb3, 0x69, 0x12, + 0xfa, 0x64, 0x40, 0x2f, 0xc0, 0xf8, 0xfc, 0x3e, 0x22, 0x96, 0x82, 0xe5, 0x34, 0x22, 0xcf, 0xec, + 0x7b, 0x80, 0xce, 0xff, 0x1f, 0x85, 0x47, 0x06, 0x74, 0x08, 0xe6, 0x3c, 0x3a, 0xfa, 0x44, 0x6b, + 0xe8, 0x8e, 0xe8, 0x4a, 0x3c, 0x09, 0x43, 0x62, 0xd8, 0x6f, 0x60, 0xdc, 0x9b, 0x1e, 0xde, 0x28, + 0x15, 0x5b, 0x1c, 0xc8, 0x28, 0x46, 0x0e, 0x82, 0x9f, 0xbf, 0x5e, 0x6b, 0x5f, 0xde, 0xf5, 0x3e, + 0x56, 0x59, 0xe4, 0xa9, 0x60, 0xee, 0xb1, 0xaa, 0xae, 0xd3, 0x3c, 0xdb, 0x0b, 0x37, 0xad, 0x0a, + 0xf7, 0xbf, 0x2f, 0x7a, 0x8f, 0x98, 0x28, 0x5c, 0x59, 0x38, 0x27, 0xef, 0x4f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x54, 0x1e, 0xcc, 0xdc, 0xc9, 0x02, 0x00, 0x00, +} diff --git a/api/models/linux/punt/punt.proto b/api/models/linux/punt/punt.proto new file mode 100644 index 0000000000..d554ab194c --- /dev/null +++ b/api/models/linux/punt/punt.proto @@ -0,0 +1,43 @@ +syntax = "proto3"; + +package linux.punt; + +option go_package = "github.com/ligato/vpp-agent/api/models/linux/punt;linux_punt"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option (gogoproto.messagename_all) = true; + +/* Proxy allows to listen on 
network socket or unix domain socket, and resend to another network/unix domain socket */ +message Proxy { + oneof rx { /* Socket to listen */ + PortBased rx_port= 1; + SocketBased rx_socket = 2; + } + oneof tx { /* Socket to redirect */ + PortBased tx_port= 3; + SocketBased tx_socket = 4; + } +} + +/* Define network socket type */ +message PortBased { + enum L4Protocol { /* L4 protocol */ + UNDEFINED_L4 = 0; + TCP = 6; + UDP = 17; + } + L4Protocol l4_protocol = 1; + enum L3Protocol { /* L3 protocol */ + UNDEFINED_L3 = 0; + IPv4 = 1; + IPv6 = 2; + ALL = 3; + } + L3Protocol l3_protocol = 3; + uint32 port = 4; +} + +/* Define unix domain socket type for IPC */ +message SocketBased { + string path = 1; +} diff --git a/api/models/vpp/acl/acl.pb.go b/api/models/vpp/acl/acl.pb.go new file mode 100644 index 0000000000..239ad13501 --- /dev/null +++ b/api/models/vpp/acl/acl.pb.go @@ -0,0 +1,769 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: models/vpp/acl/acl.proto + +package vpp_acl // import "github.com/ligato/vpp-agent/api/models/vpp/acl" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type ACL_Rule_Action int32 + +const ( + ACL_Rule_DENY ACL_Rule_Action = 0 + ACL_Rule_PERMIT ACL_Rule_Action = 1 + ACL_Rule_REFLECT ACL_Rule_Action = 2 +) + +var ACL_Rule_Action_name = map[int32]string{ + 0: "DENY", + 1: "PERMIT", + 2: "REFLECT", +} +var ACL_Rule_Action_value = map[string]int32{ + "DENY": 0, + "PERMIT": 1, + "REFLECT": 2, +} + +func (x ACL_Rule_Action) String() string { + return proto.EnumName(ACL_Rule_Action_name, int32(x)) +} +func (ACL_Rule_Action) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_acl_0795a1de06d41f69, []int{0, 0, 0} +} + +// Access Control List (ACL) +type ACL struct { + // The name of an access list. A device MAY restrict the length + // and value of this name, possibly spaces and special + // characters are not allowed. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Rules []*ACL_Rule `protobuf:"bytes,2,rep,name=rules,proto3" json:"rules,omitempty"` + Interfaces *ACL_Interfaces `protobuf:"bytes,3,opt,name=interfaces,proto3" json:"interfaces,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ACL) Reset() { *m = ACL{} } +func (m *ACL) String() string { return proto.CompactTextString(m) } +func (*ACL) ProtoMessage() {} +func (*ACL) Descriptor() ([]byte, []int) { + return fileDescriptor_acl_0795a1de06d41f69, []int{0} +} +func (m *ACL) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ACL.Unmarshal(m, b) +} +func (m *ACL) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ACL.Marshal(b, m, deterministic) +} +func (dst *ACL) XXX_Merge(src proto.Message) { + xxx_messageInfo_ACL.Merge(dst, src) +} +func (m *ACL) XXX_Size() int { + return xxx_messageInfo_ACL.Size(m) +} +func (m *ACL) XXX_DiscardUnknown() { + xxx_messageInfo_ACL.DiscardUnknown(m) +} + +var xxx_messageInfo_ACL 
proto.InternalMessageInfo + +func (m *ACL) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ACL) GetRules() []*ACL_Rule { + if m != nil { + return m.Rules + } + return nil +} + +func (m *ACL) GetInterfaces() *ACL_Interfaces { + if m != nil { + return m.Interfaces + } + return nil +} + +func (*ACL) XXX_MessageName() string { + return "vpp.acl.ACL" +} + +// List of access list entries (Rules). Each Access Control Rule has +// a list of match criteria and a list of actions. +// Access List entry that can define: +// - IPv4/IPv6 src ip prefix +// - src MAC address mask +// - src MAC address value +// - can be used only for static ACLs. +type ACL_Rule struct { + Action ACL_Rule_Action `protobuf:"varint,1,opt,name=action,proto3,enum=vpp.acl.ACL_Rule_Action" json:"action,omitempty"` + IpRule *ACL_Rule_IpRule `protobuf:"bytes,2,opt,name=ip_rule,json=ipRule,proto3" json:"ip_rule,omitempty"` + MacipRule *ACL_Rule_MacIpRule `protobuf:"bytes,3,opt,name=macip_rule,json=macipRule,proto3" json:"macip_rule,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ACL_Rule) Reset() { *m = ACL_Rule{} } +func (m *ACL_Rule) String() string { return proto.CompactTextString(m) } +func (*ACL_Rule) ProtoMessage() {} +func (*ACL_Rule) Descriptor() ([]byte, []int) { + return fileDescriptor_acl_0795a1de06d41f69, []int{0, 0} +} +func (m *ACL_Rule) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ACL_Rule.Unmarshal(m, b) +} +func (m *ACL_Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ACL_Rule.Marshal(b, m, deterministic) +} +func (dst *ACL_Rule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ACL_Rule.Merge(dst, src) +} +func (m *ACL_Rule) XXX_Size() int { + return xxx_messageInfo_ACL_Rule.Size(m) +} +func (m *ACL_Rule) XXX_DiscardUnknown() { + xxx_messageInfo_ACL_Rule.DiscardUnknown(m) +} + +var xxx_messageInfo_ACL_Rule 
proto.InternalMessageInfo + +func (m *ACL_Rule) GetAction() ACL_Rule_Action { + if m != nil { + return m.Action + } + return ACL_Rule_DENY +} + +func (m *ACL_Rule) GetIpRule() *ACL_Rule_IpRule { + if m != nil { + return m.IpRule + } + return nil +} + +func (m *ACL_Rule) GetMacipRule() *ACL_Rule_MacIpRule { + if m != nil { + return m.MacipRule + } + return nil +} + +func (*ACL_Rule) XXX_MessageName() string { + return "vpp.acl.ACL.Rule" +} + +type ACL_Rule_IpRule struct { + Ip *ACL_Rule_IpRule_Ip `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"` + Icmp *ACL_Rule_IpRule_Icmp `protobuf:"bytes,2,opt,name=icmp,proto3" json:"icmp,omitempty"` + Tcp *ACL_Rule_IpRule_Tcp `protobuf:"bytes,3,opt,name=tcp,proto3" json:"tcp,omitempty"` + Udp *ACL_Rule_IpRule_Udp `protobuf:"bytes,4,opt,name=udp,proto3" json:"udp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ACL_Rule_IpRule) Reset() { *m = ACL_Rule_IpRule{} } +func (m *ACL_Rule_IpRule) String() string { return proto.CompactTextString(m) } +func (*ACL_Rule_IpRule) ProtoMessage() {} +func (*ACL_Rule_IpRule) Descriptor() ([]byte, []int) { + return fileDescriptor_acl_0795a1de06d41f69, []int{0, 0, 0} +} +func (m *ACL_Rule_IpRule) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ACL_Rule_IpRule.Unmarshal(m, b) +} +func (m *ACL_Rule_IpRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ACL_Rule_IpRule.Marshal(b, m, deterministic) +} +func (dst *ACL_Rule_IpRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ACL_Rule_IpRule.Merge(dst, src) +} +func (m *ACL_Rule_IpRule) XXX_Size() int { + return xxx_messageInfo_ACL_Rule_IpRule.Size(m) +} +func (m *ACL_Rule_IpRule) XXX_DiscardUnknown() { + xxx_messageInfo_ACL_Rule_IpRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ACL_Rule_IpRule proto.InternalMessageInfo + +func (m *ACL_Rule_IpRule) GetIp() *ACL_Rule_IpRule_Ip { + if m != nil { + return 
m.Ip + } + return nil +} + +func (m *ACL_Rule_IpRule) GetIcmp() *ACL_Rule_IpRule_Icmp { + if m != nil { + return m.Icmp + } + return nil +} + +func (m *ACL_Rule_IpRule) GetTcp() *ACL_Rule_IpRule_Tcp { + if m != nil { + return m.Tcp + } + return nil +} + +func (m *ACL_Rule_IpRule) GetUdp() *ACL_Rule_IpRule_Udp { + if m != nil { + return m.Udp + } + return nil +} + +func (*ACL_Rule_IpRule) XXX_MessageName() string { + return "vpp.acl.ACL.Rule.IpRule" +} + +// IP used in this Access List Entry. +type ACL_Rule_IpRule_Ip struct { + // Destination IPv4/IPv6 network address (/) + DestinationNetwork string `protobuf:"bytes,1,opt,name=destination_network,json=destinationNetwork,proto3" json:"destination_network,omitempty"` + // Destination IPv4/IPv6 network address (/) + SourceNetwork string `protobuf:"bytes,2,opt,name=source_network,json=sourceNetwork,proto3" json:"source_network,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ACL_Rule_IpRule_Ip) Reset() { *m = ACL_Rule_IpRule_Ip{} } +func (m *ACL_Rule_IpRule_Ip) String() string { return proto.CompactTextString(m) } +func (*ACL_Rule_IpRule_Ip) ProtoMessage() {} +func (*ACL_Rule_IpRule_Ip) Descriptor() ([]byte, []int) { + return fileDescriptor_acl_0795a1de06d41f69, []int{0, 0, 0, 0} +} +func (m *ACL_Rule_IpRule_Ip) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ACL_Rule_IpRule_Ip.Unmarshal(m, b) +} +func (m *ACL_Rule_IpRule_Ip) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ACL_Rule_IpRule_Ip.Marshal(b, m, deterministic) +} +func (dst *ACL_Rule_IpRule_Ip) XXX_Merge(src proto.Message) { + xxx_messageInfo_ACL_Rule_IpRule_Ip.Merge(dst, src) +} +func (m *ACL_Rule_IpRule_Ip) XXX_Size() int { + return xxx_messageInfo_ACL_Rule_IpRule_Ip.Size(m) +} +func (m *ACL_Rule_IpRule_Ip) XXX_DiscardUnknown() { + xxx_messageInfo_ACL_Rule_IpRule_Ip.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ACL_Rule_IpRule_Ip proto.InternalMessageInfo + +func (m *ACL_Rule_IpRule_Ip) GetDestinationNetwork() string { + if m != nil { + return m.DestinationNetwork + } + return "" +} + +func (m *ACL_Rule_IpRule_Ip) GetSourceNetwork() string { + if m != nil { + return m.SourceNetwork + } + return "" +} + +func (*ACL_Rule_IpRule_Ip) XXX_MessageName() string { + return "vpp.acl.ACL.Rule.IpRule.Ip" +} + +type ACL_Rule_IpRule_Icmp struct { + // ICMPv6 flag, if false ICMPv4 will be used + Icmpv6 bool `protobuf:"varint,1,opt,name=icmpv6,proto3" json:"icmpv6,omitempty"` + // Inclusive range representing icmp codes to be used. + IcmpCodeRange *ACL_Rule_IpRule_Icmp_Range `protobuf:"bytes,2,opt,name=icmp_code_range,json=icmpCodeRange,proto3" json:"icmp_code_range,omitempty"` + IcmpTypeRange *ACL_Rule_IpRule_Icmp_Range `protobuf:"bytes,3,opt,name=icmp_type_range,json=icmpTypeRange,proto3" json:"icmp_type_range,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ACL_Rule_IpRule_Icmp) Reset() { *m = ACL_Rule_IpRule_Icmp{} } +func (m *ACL_Rule_IpRule_Icmp) String() string { return proto.CompactTextString(m) } +func (*ACL_Rule_IpRule_Icmp) ProtoMessage() {} +func (*ACL_Rule_IpRule_Icmp) Descriptor() ([]byte, []int) { + return fileDescriptor_acl_0795a1de06d41f69, []int{0, 0, 0, 1} +} +func (m *ACL_Rule_IpRule_Icmp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ACL_Rule_IpRule_Icmp.Unmarshal(m, b) +} +func (m *ACL_Rule_IpRule_Icmp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ACL_Rule_IpRule_Icmp.Marshal(b, m, deterministic) +} +func (dst *ACL_Rule_IpRule_Icmp) XXX_Merge(src proto.Message) { + xxx_messageInfo_ACL_Rule_IpRule_Icmp.Merge(dst, src) +} +func (m *ACL_Rule_IpRule_Icmp) XXX_Size() int { + return xxx_messageInfo_ACL_Rule_IpRule_Icmp.Size(m) +} +func (m *ACL_Rule_IpRule_Icmp) XXX_DiscardUnknown() { + 
xxx_messageInfo_ACL_Rule_IpRule_Icmp.DiscardUnknown(m) +} + +var xxx_messageInfo_ACL_Rule_IpRule_Icmp proto.InternalMessageInfo + +func (m *ACL_Rule_IpRule_Icmp) GetIcmpv6() bool { + if m != nil { + return m.Icmpv6 + } + return false +} + +func (m *ACL_Rule_IpRule_Icmp) GetIcmpCodeRange() *ACL_Rule_IpRule_Icmp_Range { + if m != nil { + return m.IcmpCodeRange + } + return nil +} + +func (m *ACL_Rule_IpRule_Icmp) GetIcmpTypeRange() *ACL_Rule_IpRule_Icmp_Range { + if m != nil { + return m.IcmpTypeRange + } + return nil +} + +func (*ACL_Rule_IpRule_Icmp) XXX_MessageName() string { + return "vpp.acl.ACL.Rule.IpRule.Icmp" +} + +type ACL_Rule_IpRule_Icmp_Range struct { + First uint32 `protobuf:"varint,1,opt,name=first,proto3" json:"first,omitempty"` + Last uint32 `protobuf:"varint,2,opt,name=last,proto3" json:"last,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ACL_Rule_IpRule_Icmp_Range) Reset() { *m = ACL_Rule_IpRule_Icmp_Range{} } +func (m *ACL_Rule_IpRule_Icmp_Range) String() string { return proto.CompactTextString(m) } +func (*ACL_Rule_IpRule_Icmp_Range) ProtoMessage() {} +func (*ACL_Rule_IpRule_Icmp_Range) Descriptor() ([]byte, []int) { + return fileDescriptor_acl_0795a1de06d41f69, []int{0, 0, 0, 1, 0} +} +func (m *ACL_Rule_IpRule_Icmp_Range) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ACL_Rule_IpRule_Icmp_Range.Unmarshal(m, b) +} +func (m *ACL_Rule_IpRule_Icmp_Range) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ACL_Rule_IpRule_Icmp_Range.Marshal(b, m, deterministic) +} +func (dst *ACL_Rule_IpRule_Icmp_Range) XXX_Merge(src proto.Message) { + xxx_messageInfo_ACL_Rule_IpRule_Icmp_Range.Merge(dst, src) +} +func (m *ACL_Rule_IpRule_Icmp_Range) XXX_Size() int { + return xxx_messageInfo_ACL_Rule_IpRule_Icmp_Range.Size(m) +} +func (m *ACL_Rule_IpRule_Icmp_Range) XXX_DiscardUnknown() { + 
xxx_messageInfo_ACL_Rule_IpRule_Icmp_Range.DiscardUnknown(m) +} + +var xxx_messageInfo_ACL_Rule_IpRule_Icmp_Range proto.InternalMessageInfo + +func (m *ACL_Rule_IpRule_Icmp_Range) GetFirst() uint32 { + if m != nil { + return m.First + } + return 0 +} + +func (m *ACL_Rule_IpRule_Icmp_Range) GetLast() uint32 { + if m != nil { + return m.Last + } + return 0 +} + +func (*ACL_Rule_IpRule_Icmp_Range) XXX_MessageName() string { + return "vpp.acl.ACL.Rule.IpRule.Icmp.Range" +} + +// Inclusive range representing destination ports to be used. When +// only lower-port is present, it represents a single port. +type ACL_Rule_IpRule_PortRange struct { + LowerPort uint32 `protobuf:"varint,1,opt,name=lower_port,json=lowerPort,proto3" json:"lower_port,omitempty"` + // If upper port is set, it must + // be greater or equal to lower port + UpperPort uint32 `protobuf:"varint,2,opt,name=upper_port,json=upperPort,proto3" json:"upper_port,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ACL_Rule_IpRule_PortRange) Reset() { *m = ACL_Rule_IpRule_PortRange{} } +func (m *ACL_Rule_IpRule_PortRange) String() string { return proto.CompactTextString(m) } +func (*ACL_Rule_IpRule_PortRange) ProtoMessage() {} +func (*ACL_Rule_IpRule_PortRange) Descriptor() ([]byte, []int) { + return fileDescriptor_acl_0795a1de06d41f69, []int{0, 0, 0, 2} +} +func (m *ACL_Rule_IpRule_PortRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ACL_Rule_IpRule_PortRange.Unmarshal(m, b) +} +func (m *ACL_Rule_IpRule_PortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ACL_Rule_IpRule_PortRange.Marshal(b, m, deterministic) +} +func (dst *ACL_Rule_IpRule_PortRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_ACL_Rule_IpRule_PortRange.Merge(dst, src) +} +func (m *ACL_Rule_IpRule_PortRange) XXX_Size() int { + return xxx_messageInfo_ACL_Rule_IpRule_PortRange.Size(m) +} +func (m 
*ACL_Rule_IpRule_PortRange) XXX_DiscardUnknown() { + xxx_messageInfo_ACL_Rule_IpRule_PortRange.DiscardUnknown(m) +} + +var xxx_messageInfo_ACL_Rule_IpRule_PortRange proto.InternalMessageInfo + +func (m *ACL_Rule_IpRule_PortRange) GetLowerPort() uint32 { + if m != nil { + return m.LowerPort + } + return 0 +} + +func (m *ACL_Rule_IpRule_PortRange) GetUpperPort() uint32 { + if m != nil { + return m.UpperPort + } + return 0 +} + +func (*ACL_Rule_IpRule_PortRange) XXX_MessageName() string { + return "vpp.acl.ACL.Rule.IpRule.PortRange" +} + +type ACL_Rule_IpRule_Tcp struct { + DestinationPortRange *ACL_Rule_IpRule_PortRange `protobuf:"bytes,1,opt,name=destination_port_range,json=destinationPortRange,proto3" json:"destination_port_range,omitempty"` + SourcePortRange *ACL_Rule_IpRule_PortRange `protobuf:"bytes,2,opt,name=source_port_range,json=sourcePortRange,proto3" json:"source_port_range,omitempty"` + // Binary mask for tcp flags to match. MSB order (FIN at position 0). + // Applied as logical AND to tcp flags field of the packet being matched, + // before it is compared with tcp-flags-value. + TcpFlagsMask uint32 `protobuf:"varint,3,opt,name=tcp_flags_mask,json=tcpFlagsMask,proto3" json:"tcp_flags_mask,omitempty"` + // Binary value for tcp flags to match. MSB order (FIN at position 0). + // Before tcp-flags-value is compared with tcp flags field of the packet being matched, + // tcp-flags-mask is applied to packet field value. 
+ TcpFlagsValue uint32 `protobuf:"varint,4,opt,name=tcp_flags_value,json=tcpFlagsValue,proto3" json:"tcp_flags_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ACL_Rule_IpRule_Tcp) Reset() { *m = ACL_Rule_IpRule_Tcp{} } +func (m *ACL_Rule_IpRule_Tcp) String() string { return proto.CompactTextString(m) } +func (*ACL_Rule_IpRule_Tcp) ProtoMessage() {} +func (*ACL_Rule_IpRule_Tcp) Descriptor() ([]byte, []int) { + return fileDescriptor_acl_0795a1de06d41f69, []int{0, 0, 0, 3} +} +func (m *ACL_Rule_IpRule_Tcp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ACL_Rule_IpRule_Tcp.Unmarshal(m, b) +} +func (m *ACL_Rule_IpRule_Tcp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ACL_Rule_IpRule_Tcp.Marshal(b, m, deterministic) +} +func (dst *ACL_Rule_IpRule_Tcp) XXX_Merge(src proto.Message) { + xxx_messageInfo_ACL_Rule_IpRule_Tcp.Merge(dst, src) +} +func (m *ACL_Rule_IpRule_Tcp) XXX_Size() int { + return xxx_messageInfo_ACL_Rule_IpRule_Tcp.Size(m) +} +func (m *ACL_Rule_IpRule_Tcp) XXX_DiscardUnknown() { + xxx_messageInfo_ACL_Rule_IpRule_Tcp.DiscardUnknown(m) +} + +var xxx_messageInfo_ACL_Rule_IpRule_Tcp proto.InternalMessageInfo + +func (m *ACL_Rule_IpRule_Tcp) GetDestinationPortRange() *ACL_Rule_IpRule_PortRange { + if m != nil { + return m.DestinationPortRange + } + return nil +} + +func (m *ACL_Rule_IpRule_Tcp) GetSourcePortRange() *ACL_Rule_IpRule_PortRange { + if m != nil { + return m.SourcePortRange + } + return nil +} + +func (m *ACL_Rule_IpRule_Tcp) GetTcpFlagsMask() uint32 { + if m != nil { + return m.TcpFlagsMask + } + return 0 +} + +func (m *ACL_Rule_IpRule_Tcp) GetTcpFlagsValue() uint32 { + if m != nil { + return m.TcpFlagsValue + } + return 0 +} + +func (*ACL_Rule_IpRule_Tcp) XXX_MessageName() string { + return "vpp.acl.ACL.Rule.IpRule.Tcp" +} + +type ACL_Rule_IpRule_Udp struct { + DestinationPortRange 
*ACL_Rule_IpRule_PortRange `protobuf:"bytes,1,opt,name=destination_port_range,json=destinationPortRange,proto3" json:"destination_port_range,omitempty"` + SourcePortRange *ACL_Rule_IpRule_PortRange `protobuf:"bytes,2,opt,name=source_port_range,json=sourcePortRange,proto3" json:"source_port_range,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ACL_Rule_IpRule_Udp) Reset() { *m = ACL_Rule_IpRule_Udp{} } +func (m *ACL_Rule_IpRule_Udp) String() string { return proto.CompactTextString(m) } +func (*ACL_Rule_IpRule_Udp) ProtoMessage() {} +func (*ACL_Rule_IpRule_Udp) Descriptor() ([]byte, []int) { + return fileDescriptor_acl_0795a1de06d41f69, []int{0, 0, 0, 4} +} +func (m *ACL_Rule_IpRule_Udp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ACL_Rule_IpRule_Udp.Unmarshal(m, b) +} +func (m *ACL_Rule_IpRule_Udp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ACL_Rule_IpRule_Udp.Marshal(b, m, deterministic) +} +func (dst *ACL_Rule_IpRule_Udp) XXX_Merge(src proto.Message) { + xxx_messageInfo_ACL_Rule_IpRule_Udp.Merge(dst, src) +} +func (m *ACL_Rule_IpRule_Udp) XXX_Size() int { + return xxx_messageInfo_ACL_Rule_IpRule_Udp.Size(m) +} +func (m *ACL_Rule_IpRule_Udp) XXX_DiscardUnknown() { + xxx_messageInfo_ACL_Rule_IpRule_Udp.DiscardUnknown(m) +} + +var xxx_messageInfo_ACL_Rule_IpRule_Udp proto.InternalMessageInfo + +func (m *ACL_Rule_IpRule_Udp) GetDestinationPortRange() *ACL_Rule_IpRule_PortRange { + if m != nil { + return m.DestinationPortRange + } + return nil +} + +func (m *ACL_Rule_IpRule_Udp) GetSourcePortRange() *ACL_Rule_IpRule_PortRange { + if m != nil { + return m.SourcePortRange + } + return nil +} + +func (*ACL_Rule_IpRule_Udp) XXX_MessageName() string { + return "vpp.acl.ACL.Rule.IpRule.Udp" +} + +type ACL_Rule_MacIpRule struct { + SourceAddress string `protobuf:"bytes,1,opt,name=source_address,json=sourceAddress,proto3" 
json:"source_address,omitempty"` + SourceAddressPrefix uint32 `protobuf:"varint,2,opt,name=source_address_prefix,json=sourceAddressPrefix,proto3" json:"source_address_prefix,omitempty"` + // Before source-mac-address is compared with source mac address field of the packet + // being matched, source-mac-address-mask is applied to packet field value. + SourceMacAddress string `protobuf:"bytes,3,opt,name=source_mac_address,json=sourceMacAddress,proto3" json:"source_mac_address,omitempty"` + // Source MAC address mask. + // Applied as logical AND with source mac address field of the packet being matched, + // before it is compared with source-mac-address. + SourceMacAddressMask string `protobuf:"bytes,4,opt,name=source_mac_address_mask,json=sourceMacAddressMask,proto3" json:"source_mac_address_mask,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ACL_Rule_MacIpRule) Reset() { *m = ACL_Rule_MacIpRule{} } +func (m *ACL_Rule_MacIpRule) String() string { return proto.CompactTextString(m) } +func (*ACL_Rule_MacIpRule) ProtoMessage() {} +func (*ACL_Rule_MacIpRule) Descriptor() ([]byte, []int) { + return fileDescriptor_acl_0795a1de06d41f69, []int{0, 0, 1} +} +func (m *ACL_Rule_MacIpRule) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ACL_Rule_MacIpRule.Unmarshal(m, b) +} +func (m *ACL_Rule_MacIpRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ACL_Rule_MacIpRule.Marshal(b, m, deterministic) +} +func (dst *ACL_Rule_MacIpRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ACL_Rule_MacIpRule.Merge(dst, src) +} +func (m *ACL_Rule_MacIpRule) XXX_Size() int { + return xxx_messageInfo_ACL_Rule_MacIpRule.Size(m) +} +func (m *ACL_Rule_MacIpRule) XXX_DiscardUnknown() { + xxx_messageInfo_ACL_Rule_MacIpRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ACL_Rule_MacIpRule proto.InternalMessageInfo + +func (m *ACL_Rule_MacIpRule) GetSourceAddress() 
string { + if m != nil { + return m.SourceAddress + } + return "" +} + +func (m *ACL_Rule_MacIpRule) GetSourceAddressPrefix() uint32 { + if m != nil { + return m.SourceAddressPrefix + } + return 0 +} + +func (m *ACL_Rule_MacIpRule) GetSourceMacAddress() string { + if m != nil { + return m.SourceMacAddress + } + return "" +} + +func (m *ACL_Rule_MacIpRule) GetSourceMacAddressMask() string { + if m != nil { + return m.SourceMacAddressMask + } + return "" +} + +func (*ACL_Rule_MacIpRule) XXX_MessageName() string { + return "vpp.acl.ACL.Rule.MacIpRule" +} + +// The set of interfaces that has assigned this ACL on ingres or egress. +type ACL_Interfaces struct { + Egress []string `protobuf:"bytes,1,rep,name=egress,proto3" json:"egress,omitempty"` + Ingress []string `protobuf:"bytes,2,rep,name=ingress,proto3" json:"ingress,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ACL_Interfaces) Reset() { *m = ACL_Interfaces{} } +func (m *ACL_Interfaces) String() string { return proto.CompactTextString(m) } +func (*ACL_Interfaces) ProtoMessage() {} +func (*ACL_Interfaces) Descriptor() ([]byte, []int) { + return fileDescriptor_acl_0795a1de06d41f69, []int{0, 1} +} +func (m *ACL_Interfaces) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ACL_Interfaces.Unmarshal(m, b) +} +func (m *ACL_Interfaces) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ACL_Interfaces.Marshal(b, m, deterministic) +} +func (dst *ACL_Interfaces) XXX_Merge(src proto.Message) { + xxx_messageInfo_ACL_Interfaces.Merge(dst, src) +} +func (m *ACL_Interfaces) XXX_Size() int { + return xxx_messageInfo_ACL_Interfaces.Size(m) +} +func (m *ACL_Interfaces) XXX_DiscardUnknown() { + xxx_messageInfo_ACL_Interfaces.DiscardUnknown(m) +} + +var xxx_messageInfo_ACL_Interfaces proto.InternalMessageInfo + +func (m *ACL_Interfaces) GetEgress() []string { + if m != nil { + return m.Egress + } + return 
nil +} + +func (m *ACL_Interfaces) GetIngress() []string { + if m != nil { + return m.Ingress + } + return nil +} + +func (*ACL_Interfaces) XXX_MessageName() string { + return "vpp.acl.ACL.Interfaces" +} +func init() { + proto.RegisterType((*ACL)(nil), "vpp.acl.ACL") + proto.RegisterType((*ACL_Rule)(nil), "vpp.acl.ACL.Rule") + proto.RegisterType((*ACL_Rule_IpRule)(nil), "vpp.acl.ACL.Rule.IpRule") + proto.RegisterType((*ACL_Rule_IpRule_Ip)(nil), "vpp.acl.ACL.Rule.IpRule.Ip") + proto.RegisterType((*ACL_Rule_IpRule_Icmp)(nil), "vpp.acl.ACL.Rule.IpRule.Icmp") + proto.RegisterType((*ACL_Rule_IpRule_Icmp_Range)(nil), "vpp.acl.ACL.Rule.IpRule.Icmp.Range") + proto.RegisterType((*ACL_Rule_IpRule_PortRange)(nil), "vpp.acl.ACL.Rule.IpRule.PortRange") + proto.RegisterType((*ACL_Rule_IpRule_Tcp)(nil), "vpp.acl.ACL.Rule.IpRule.Tcp") + proto.RegisterType((*ACL_Rule_IpRule_Udp)(nil), "vpp.acl.ACL.Rule.IpRule.Udp") + proto.RegisterType((*ACL_Rule_MacIpRule)(nil), "vpp.acl.ACL.Rule.MacIpRule") + proto.RegisterType((*ACL_Interfaces)(nil), "vpp.acl.ACL.Interfaces") + proto.RegisterEnum("vpp.acl.ACL_Rule_Action", ACL_Rule_Action_name, ACL_Rule_Action_value) +} + +func init() { proto.RegisterFile("models/vpp/acl/acl.proto", fileDescriptor_acl_0795a1de06d41f69) } + +var fileDescriptor_acl_0795a1de06d41f69 = []byte{ + // 776 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x55, 0x5b, 0x6b, 0x3b, 0x45, + 0x14, 0x77, 0x77, 0xd3, 0xa4, 0x39, 0xf9, 0xa7, 0x4d, 0xa7, 0xb5, 0x5d, 0x56, 0x2b, 0xa5, 0xde, + 0x02, 0xb5, 0x1b, 0x1b, 0xb1, 0x82, 0x8a, 0x10, 0x63, 0x0a, 0xc1, 0xa6, 0x94, 0x21, 0x15, 0x15, + 0x61, 0x99, 0xce, 0x4e, 0xd6, 0xa5, 0x7b, 0x19, 0xf6, 0x92, 0xda, 0x8f, 0xe1, 0x27, 0xf1, 0xcd, + 0xcf, 0x20, 0xf8, 0x31, 0x7c, 0xf4, 0xd5, 0x57, 0x41, 0xe6, 0xb2, 0xb9, 0xd8, 0xd6, 0xcb, 0xe3, + 0xff, 0x21, 0x64, 0xce, 0xf9, 0x5d, 0xe6, 0x9c, 0x3d, 0x3b, 0x3b, 0x60, 0xc7, 0xa9, 0xcf, 0xa2, + 0xbc, 0x37, 0xe7, 0xbc, 0x47, 0x68, 0x24, 0x7e, 
0x2e, 0xcf, 0xd2, 0x22, 0x45, 0x8d, 0x39, 0xe7, + 0x2e, 0xa1, 0x91, 0x73, 0x1a, 0x84, 0xc5, 0xf7, 0xe5, 0xad, 0x4b, 0xd3, 0xb8, 0x17, 0xa4, 0x41, + 0xda, 0x93, 0xf8, 0x6d, 0x39, 0x93, 0x91, 0x0c, 0xe4, 0x4a, 0xe9, 0x8e, 0xff, 0x78, 0x01, 0xd6, + 0x60, 0x78, 0x89, 0x10, 0xd4, 0x12, 0x12, 0x33, 0xdb, 0x38, 0x32, 0xba, 0x4d, 0x2c, 0xd7, 0xe8, + 0x5d, 0xd8, 0xc8, 0xca, 0x88, 0xe5, 0xb6, 0x79, 0x64, 0x75, 0x5b, 0xfd, 0x1d, 0x57, 0xef, 0xe1, + 0x0e, 0x86, 0x97, 0x2e, 0x2e, 0x23, 0x86, 0x15, 0x8e, 0x3e, 0x02, 0x08, 0x93, 0x82, 0x65, 0x33, + 0x42, 0x59, 0x6e, 0x5b, 0x47, 0x46, 0xb7, 0xd5, 0x3f, 0x58, 0x63, 0x8f, 0x17, 0x30, 0x5e, 0xa1, + 0x3a, 0x3f, 0xb7, 0xa0, 0x26, 0x8c, 0xd0, 0xfb, 0x50, 0x27, 0xb4, 0x08, 0xd3, 0x44, 0x16, 0xb0, + 0xd5, 0xb7, 0x1f, 0xed, 0xe5, 0x0e, 0x24, 0x8e, 0x35, 0x0f, 0x9d, 0x41, 0x23, 0xe4, 0x9e, 0xd8, + 0xdf, 0x36, 0xe5, 0x86, 0x4f, 0x48, 0xc6, 0x5c, 0x56, 0x59, 0x0f, 0xe5, 0x3f, 0xfa, 0x18, 0x20, + 0x26, 0xb4, 0x52, 0xa9, 0x32, 0x5f, 0x7b, 0xac, 0x9a, 0x10, 0xaa, 0x85, 0x4d, 0x49, 0x17, 0x4b, + 0xe7, 0xc7, 0x4d, 0xa8, 0xab, 0x2c, 0x3a, 0x01, 0x33, 0xe4, 0xb2, 0xce, 0x27, 0xe5, 0x8a, 0x25, + 0xfe, 0xcc, 0x90, 0xa3, 0x33, 0xa8, 0x85, 0x34, 0xe6, 0xba, 0xc6, 0xc3, 0xe7, 0xe9, 0x34, 0xe6, + 0x58, 0x52, 0x91, 0x0b, 0x56, 0x41, 0xb9, 0xae, 0xef, 0xf5, 0x67, 0x15, 0x53, 0xca, 0xb1, 0x20, + 0x0a, 0x7e, 0xe9, 0x73, 0xbb, 0xf6, 0x2f, 0xfc, 0x1b, 0x9f, 0x63, 0x41, 0x74, 0xbe, 0x03, 0x73, + 0xcc, 0x51, 0x0f, 0x76, 0x7d, 0x96, 0x17, 0x61, 0x42, 0xc4, 0xe3, 0xf4, 0x12, 0x56, 0xdc, 0xa7, + 0xd9, 0x9d, 0x9e, 0x3f, 0x5a, 0x81, 0xae, 0x14, 0x82, 0xde, 0x86, 0xad, 0x3c, 0x2d, 0x33, 0xca, + 0x16, 0x5c, 0x53, 0x72, 0xdb, 0x2a, 0xab, 0x69, 0xce, 0xef, 0x06, 0xd4, 0x44, 0x33, 0x68, 0x1f, + 0xea, 0xa2, 0x9d, 0xf9, 0xb9, 0xf4, 0xdc, 0xc4, 0x3a, 0x42, 0x5f, 0xc2, 0xb6, 0x58, 0x79, 0x34, + 0xf5, 0x99, 0x97, 0x91, 0x24, 0xa8, 0x06, 0xf8, 0xe6, 0x3f, 0x3e, 0x1c, 0x17, 0x0b, 0x2a, 0x6e, + 0x0b, 0xed, 0x30, 0xf5, 0x99, 0x0c, 0x17, 0x66, 0xc5, 0x03, 0xaf, 0xcc, 0xac, 0xff, 
0x69, 0x36, + 0x7d, 0xe0, 0xca, 0xcc, 0x39, 0x83, 0x0d, 0xe5, 0xba, 0x07, 0x1b, 0xb3, 0x30, 0xcb, 0x0b, 0x59, + 0x79, 0x1b, 0xab, 0x40, 0x1c, 0x91, 0x88, 0xe4, 0x85, 0xac, 0xb6, 0x8d, 0xe5, 0xda, 0x19, 0x43, + 0xf3, 0x3a, 0xcd, 0x0a, 0x25, 0x3b, 0x04, 0x88, 0xd2, 0x7b, 0x96, 0x79, 0x3c, 0xcd, 0x2a, 0x6d, + 0x53, 0x66, 0x04, 0x47, 0xc0, 0x25, 0xe7, 0x15, 0xac, 0x5c, 0x9a, 0x32, 0x23, 0x60, 0xe7, 0x4f, + 0x03, 0xac, 0x29, 0xe5, 0xe8, 0x6b, 0xd8, 0x5f, 0x1d, 0x8c, 0x20, 0xeb, 0xce, 0xd4, 0x2b, 0x77, + 0xfc, 0x6c, 0x67, 0x8b, 0x4a, 0xf0, 0xde, 0x8a, 0xc3, 0xb2, 0xbe, 0x2b, 0xd8, 0xd1, 0x13, 0x5c, + 0x31, 0x35, 0xff, 0xb3, 0xe9, 0xb6, 0x12, 0x2f, 0xfd, 0xde, 0x82, 0xad, 0x82, 0x72, 0x6f, 0x16, + 0x91, 0x20, 0xf7, 0x62, 0x92, 0xdf, 0xc9, 0x67, 0xdf, 0xc6, 0x2f, 0x0a, 0xca, 0x2f, 0x44, 0x72, + 0x42, 0xf2, 0x3b, 0xf4, 0x0e, 0x6c, 0x2f, 0x59, 0x73, 0x12, 0x95, 0x4c, 0xbe, 0xaa, 0x6d, 0xdc, + 0xae, 0x68, 0x5f, 0x89, 0xa4, 0xf3, 0x93, 0x01, 0xd6, 0x8d, 0xff, 0x12, 0xf5, 0xef, 0xfc, 0x6a, + 0x40, 0x73, 0xf1, 0xb1, 0x58, 0x39, 0x1f, 0xc4, 0xf7, 0x33, 0x96, 0xe7, 0xfa, 0x2c, 0xe9, 0xf3, + 0x31, 0x50, 0x49, 0xd4, 0x87, 0x57, 0xd7, 0x69, 0x1e, 0xcf, 0xd8, 0x2c, 0xfc, 0x41, 0xbf, 0x10, + 0xbb, 0x6b, 0xec, 0x6b, 0x09, 0xa1, 0xf7, 0x00, 0x69, 0x4d, 0x4c, 0xe8, 0xc2, 0xde, 0x92, 0xf6, + 0x1d, 0x85, 0x4c, 0x08, 0xad, 0x76, 0xf8, 0x10, 0x0e, 0x1e, 0xb3, 0xd5, 0x7c, 0x6a, 0x52, 0xb2, + 0xf7, 0x77, 0x89, 0x98, 0xd3, 0xf1, 0x09, 0xd4, 0xd5, 0x27, 0x16, 0x6d, 0x42, 0xed, 0x8b, 0xd1, + 0xd5, 0x37, 0x9d, 0x57, 0x10, 0x40, 0xfd, 0x7a, 0x84, 0x27, 0xe3, 0x69, 0xc7, 0x40, 0x2d, 0x68, + 0xe0, 0xd1, 0xc5, 0xe5, 0x68, 0x38, 0xed, 0x98, 0xce, 0x67, 0x00, 0xcb, 0x4f, 0xba, 0x38, 0xea, + 0x2c, 0xd0, 0x2d, 0x5b, 0xdd, 0x26, 0xd6, 0x11, 0xb2, 0xa1, 0x11, 0x26, 0x0a, 0x30, 0x25, 0x50, + 0x85, 0x9f, 0x7f, 0xfa, 0xcb, 0x6f, 0x6f, 0x18, 0xdf, 0x9e, 0xaf, 0xdc, 0x55, 0x51, 0x18, 0x90, + 0x22, 0x15, 0x37, 0xdb, 0x29, 0x09, 0x58, 0x52, 0xf4, 0x08, 0x0f, 0x7b, 0xeb, 0xd7, 0xdd, 0x27, + 0x73, 0xce, 0x3d, 0x42, 
0xa3, 0xdb, 0xba, 0xbc, 0xbb, 0x3e, 0xf8, 0x2b, 0x00, 0x00, 0xff, 0xff, + 0x70, 0x59, 0x94, 0x62, 0x0f, 0x07, 0x00, 0x00, +} diff --git a/api/models/vpp/acl/acl.proto b/api/models/vpp/acl/acl.proto new file mode 100644 index 0000000000..ade9863376 --- /dev/null +++ b/api/models/vpp/acl/acl.proto @@ -0,0 +1,115 @@ +syntax = "proto3"; + +package vpp.acl; + +option go_package = "github.com/ligato/vpp-agent/api/models/vpp/acl;vpp_acl"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option (gogoproto.messagename_all) = true; + +// Access Control List (ACL) +message ACL { + // The name of an access list. A device MAY restrict the length + // and value of this name, possibly spaces and special + // characters are not allowed. + string name = 1; + + // List of access list entries (Rules). Each Access Control Rule has + // a list of match criteria and a list of actions. + // Access List entry that can define: + // - IPv4/IPv6 src ip prefix + // - src MAC address mask + // - src MAC address value + // - can be used only for static ACLs. + message Rule { + enum Action { + DENY = 0; + PERMIT = 1; + REFLECT = 2; + }; + Action action = 1; + + // Access List entry that can define: + // - IPv4/IPv6 src/dst IP prefix + // - Internet Protocol number + // - selected L4 headers: + // * ICMP (type range) + // * UDP (port range) + // * TCP (port range, flags mask, flags value) + + message IpRule { + // IP used in this Access List Entry. + message Ip { + // Destination IPv4/IPv6 network address (/) + string destination_network = 1; + // Destination IPv4/IPv6 network address (/) + string source_network = 2; + } + Ip ip = 1; + + message Icmp { + // ICMPv6 flag, if false ICMPv4 will be used + bool icmpv6 = 1; + message Range { + uint32 first = 1; + uint32 last = 2; + } + // Inclusive range representing icmp codes to be used. + Range icmp_code_range = 2; + Range icmp_type_range = 3; + } + Icmp icmp = 2; + + // Inclusive range representing destination ports to be used. 
When + // only lower-port is present, it represents a single port. + message PortRange { + uint32 lower_port = 1; + // If upper port is set, it must + // be greater or equal to lower port + uint32 upper_port = 2; + } + + message Tcp { + PortRange destination_port_range = 1; + PortRange source_port_range = 2; + // Binary mask for tcp flags to match. MSB order (FIN at position 0). + // Applied as logical AND to tcp flags field of the packet being matched, + // before it is compared with tcp-flags-value. + uint32 tcp_flags_mask = 3; + // Binary value for tcp flags to match. MSB order (FIN at position 0). + // Before tcp-flags-value is compared with tcp flags field of the packet being matched, + // tcp-flags-mask is applied to packet field value. + uint32 tcp_flags_value = 4; + } + Tcp tcp = 3; + + message Udp { + PortRange destination_port_range = 1; + PortRange source_port_range = 2; + } + Udp udp = 4; + } + IpRule ip_rule = 2; + + message MacIpRule { + string source_address = 1; + uint32 source_address_prefix = 2; + // Before source-mac-address is compared with source mac address field of the packet + // being matched, source-mac-address-mask is applied to packet field value. + string source_mac_address = 3; + // Source MAC address mask. + // Applied as logical AND with source mac address field of the packet being matched, + // before it is compared with source-mac-address. + string source_mac_address_mask = 4; + } + MacIpRule macip_rule = 3; + } + repeated Rule rules = 2; + + // The set of interfaces that has assigned this ACL on ingres or egress. + message Interfaces { + repeated string egress = 1; + repeated string ingress = 2; + } + Interfaces interfaces = 3; +} diff --git a/api/models/vpp/acl/keys.go b/api/models/vpp/acl/keys.go new file mode 100644 index 0000000000..7a2a681810 --- /dev/null +++ b/api/models/vpp/acl/keys.go @@ -0,0 +1,87 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vpp_acl + +import ( + "strings" + + "github.com/ligato/vpp-agent/pkg/models" +) + +// ModuleName is the module name used for models. +const ModuleName = "vpp.acls" + +var ( + ModelACL = models.Register(&ACL{}, models.Spec{ + Module: ModuleName, + Version: "v2", + Type: "acl", + }) +) + +// Key returns the prefix used in ETCD to store vpp ACL config +// of a particular ACL in selected vpp instance. +func Key(aclName string) string { + return models.Key(&ACL{ + Name: aclName, + }) +} + +const ( + aclToInterfaceTemplate = "vpp/acl/{acl}/interface/{flow}/{iface}" + + // IngressFlow represents ingress packet flow + IngressFlow = "ingress" + // EgressFlow represents egress packet flow + EgressFlow = "egress" +) + +const ( + // InvalidKeyPart is used in key for parts which are invalid + InvalidKeyPart = "" +) + +// ToInterfaceKey returns key for ACL to interface +func ToInterfaceKey(acl, iface, flow string) string { + if acl == "" { + acl = InvalidKeyPart + } + if iface == "" { + iface = InvalidKeyPart + } + if flow != IngressFlow && flow != EgressFlow { + flow = InvalidKeyPart + } + key := aclToInterfaceTemplate + key = strings.Replace(key, "{acl}", acl, 1) + key = strings.Replace(key, "{flow}", flow, 1) + key = strings.Replace(key, "{iface}", iface, 1) + return key +} + +// ParseACLToInterfaceKey parses ACL to interface key +func ParseACLToInterfaceKey(key string) (acl, iface, flow string, 
isACLToInterface bool) { + parts := strings.Split(key, "/") + if len(parts) >= 6 && + parts[0] == "vpp" && parts[1] == "acl" && parts[3] == "interface" && + (parts[4] == IngressFlow || parts[4] == EgressFlow || parts[4] == InvalidKeyPart) { + acl = parts[2] + iface = strings.Join(parts[5:], "/") + if iface != "" && acl != "" { + return acl, iface, parts[4], true + } + } + return "", "", "", false +} diff --git a/api/models/vpp/acl/keys_test.go b/api/models/vpp/acl/keys_test.go new file mode 100644 index 0000000000..5be6e361b7 --- /dev/null +++ b/api/models/vpp/acl/keys_test.go @@ -0,0 +1,280 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package vpp_acl + +import ( + "testing" +) + +/*func TestACLKey(t *testing.T) { + tests := []struct { + name string + aclName string + expectedKey string + }{ + { + name: "valid ACL name", + aclName: "acl1", + expectedKey: "vpp/config/v2/acl/acl1", + }, + { + name: "invalid ACL name", + aclName: "", + expectedKey: "vpp/config/v2/acl/", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := Key(test.aclName) + if key != test.expectedKey { + t.Errorf("failed for: aclName=%s\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.aclName, test.expectedKey, key) + } + }) + } +} + +func TestParseNameFromKey(t *testing.T) { + tests := []struct { + name string + key string + expectedACLName string + expectedIsACLKey bool + }{ + { + name: "valid ACL name", + key: "vpp/config/v2/acl/acl1", + expectedACLName: "acl1", + expectedIsACLKey: true, + }, + { + name: "invalid ACL name", + key: "vpp/config/v2/acl/", + expectedACLName: "", + expectedIsACLKey: true, + }, + { + name: "not an ACL key", + key: "vpp/config/v2/bd/bd1", + expectedACLName: "", + expectedIsACLKey: false, + }, + { + name: "not an ACL key (empty name)", + key: "vpp/config/v2/acl/", + expectedACLName: "", + expectedIsACLKey: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + aclName, isACLKey := models.Model(&Acl{}).ParseKey(test.key) + if isACLKey != test.expectedIsACLKey { + t.Errorf("expected isACLKey: %v\tgot: %v", test.expectedIsACLKey, isACLKey) + } + if aclName != test.expectedACLName { + t.Errorf("expected aclName: %s\tgot: %s", test.expectedACLName, aclName) + } + }) + } +}*/ + +func TestACLToInterfaceKey(t *testing.T) { + tests := []struct { + name string + aclName string + iface string + flow string + expectedKey string + }{ + { + name: "ingress interface", + aclName: "acl1", + iface: "tap0", + flow: "ingress", + expectedKey: "vpp/acl/acl1/interface/ingress/tap0", + }, + { + name: "egress interface", + aclName: "acl2", + iface: 
"memif0", + flow: "egress", + expectedKey: "vpp/acl/acl2/interface/egress/memif0", + }, + { + name: "Gbe interface", + aclName: "acl1", + iface: "GigabitEthernet0/8/0", + flow: "ingress", + expectedKey: "vpp/acl/acl1/interface/ingress/GigabitEthernet0/8/0", + }, + { + name: "empty acl name", + aclName: "", + iface: "memif0", + flow: "egress", + expectedKey: "vpp/acl//interface/egress/memif0", + }, + { + name: "invalid flow", + aclName: "acl2", + iface: "memif0", + flow: "invalid-value", + expectedKey: "vpp/acl/acl2/interface//memif0", + }, + { + name: "empty interface", + aclName: "acl2", + iface: "", + flow: "egress", + expectedKey: "vpp/acl/acl2/interface/egress/", + }, + { + name: "empty parameters", + aclName: "", + iface: "", + flow: "", + expectedKey: "vpp/acl//interface//", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := ToInterfaceKey(test.aclName, test.iface, test.flow) + if key != test.expectedKey { + t.Errorf("failed for: aclName=%s iface=%s flow=%s\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.aclName, test.iface, test.flow, test.expectedKey, key) + } + }) + } +} + +func TestParseACLToInterfaceKey(t *testing.T) { + tests := []struct { + name string + key string + expectedACLName string + expectedIface string + expectedFlow string + expectedIsACLIfaceKey bool + }{ + { + name: "ingress interface", + key: "vpp/acl/acl1/interface/ingress/tap0", + expectedACLName: "acl1", + expectedIface: "tap0", + expectedFlow: IngressFlow, + expectedIsACLIfaceKey: true, + }, + { + name: "egress interface", + key: "vpp/acl/acl1/interface/egress/tap0", + expectedACLName: "acl1", + expectedIface: "tap0", + expectedFlow: EgressFlow, + expectedIsACLIfaceKey: true, + }, + { + name: "Gbe interface", + key: "vpp/acl/acl1/interface/ingress/GigabitEthernet0/8/0", + expectedACLName: "acl1", + expectedIface: "GigabitEthernet0/8/0", + expectedFlow: IngressFlow, + expectedIsACLIfaceKey: true, + }, + { + name: "invalid acl name", + key: 
"vpp/acl//interface/egress/tap0", + expectedACLName: "", + expectedIface: "tap0", + expectedFlow: EgressFlow, + expectedIsACLIfaceKey: true, + }, + { + name: "invalid flow", + key: "vpp/acl/acl1/interface//tap0", + expectedACLName: "acl1", + expectedIface: "tap0", + expectedFlow: "", + expectedIsACLIfaceKey: true, + }, + { + name: "invalid interface", + key: "vpp/acl/acl1/interface/ingress/", + expectedACLName: "acl1", + expectedIface: "", + expectedFlow: IngressFlow, + expectedIsACLIfaceKey: true, + }, + { + name: "all parameters invalid", + key: "vpp/acl//interface//", + expectedACLName: "", + expectedIface: "", + expectedFlow: "", + expectedIsACLIfaceKey: true, + }, + { + name: "not ACLToInterface key", + key: "vpp/config/v2/acl/acl1", + expectedACLName: "", + expectedIface: "", + expectedFlow: "", + expectedIsACLIfaceKey: false, + }, + { + name: "not ACLToInterface key (cut after interface)", + key: "vpp/acl/acl1/interface/", + expectedACLName: "", + expectedIface: "", + expectedFlow: "", + expectedIsACLIfaceKey: false, + }, + { + name: "not ACLToInterface key (cut after flow)", + key: "vpp/acl/acl1/interface/ingress", + expectedACLName: "", + expectedIface: "", + expectedFlow: "", + expectedIsACLIfaceKey: false, + }, + { + name: "empty key", + key: "", + expectedACLName: "", + expectedIface: "", + expectedFlow: "", + expectedIsACLIfaceKey: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + aclName, iface, flow, isACLIfaceKey := ParseACLToInterfaceKey(test.key) + if isACLIfaceKey != test.expectedIsACLIfaceKey { + t.Errorf("expected isACLKey: %v\tgot: %v", test.expectedIsACLIfaceKey, isACLIfaceKey) + } + if aclName != test.expectedACLName { + t.Errorf("expected aclName: %s\tgot: %s", test.expectedACLName, aclName) + } + if iface != test.expectedIface { + t.Errorf("expected iface: %s\tgot: %s", test.expectedIface, iface) + } + if flow != test.expectedFlow { + t.Errorf("expected flow: %s\tgot: %s", test.expectedFlow, flow) + } 
+ }) + } +} diff --git a/api/models/vpp/interfaces/dhcp.pb.go b/api/models/vpp/interfaces/dhcp.pb.go new file mode 100644 index 0000000000..4c9a52ce11 --- /dev/null +++ b/api/models/vpp/interfaces/dhcp.pb.go @@ -0,0 +1,132 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: models/vpp/interfaces/dhcp.proto + +package vpp_interfaces // import "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// DHCPLease is a notification, i.e. 
flows from SB upwards +type DHCPLease struct { + InterfaceName string `protobuf:"bytes,1,opt,name=interface_name,json=interfaceName,proto3" json:"interface_name,omitempty"` + HostName string `protobuf:"bytes,2,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` + IsIpv6 bool `protobuf:"varint,3,opt,name=is_ipv6,json=isIpv6,proto3" json:"is_ipv6,omitempty"` + HostPhysAddress string `protobuf:"bytes,4,opt,name=host_phys_address,json=hostPhysAddress,proto3" json:"host_phys_address,omitempty"` + HostIpAddress string `protobuf:"bytes,5,opt,name=host_ip_address,json=hostIpAddress,proto3" json:"host_ip_address,omitempty"` + RouterIpAddress string `protobuf:"bytes,6,opt,name=router_ip_address,json=routerIpAddress,proto3" json:"router_ip_address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DHCPLease) Reset() { *m = DHCPLease{} } +func (m *DHCPLease) String() string { return proto.CompactTextString(m) } +func (*DHCPLease) ProtoMessage() {} +func (*DHCPLease) Descriptor() ([]byte, []int) { + return fileDescriptor_dhcp_fd468439f257354b, []int{0} +} +func (m *DHCPLease) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DHCPLease.Unmarshal(m, b) +} +func (m *DHCPLease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DHCPLease.Marshal(b, m, deterministic) +} +func (dst *DHCPLease) XXX_Merge(src proto.Message) { + xxx_messageInfo_DHCPLease.Merge(dst, src) +} +func (m *DHCPLease) XXX_Size() int { + return xxx_messageInfo_DHCPLease.Size(m) +} +func (m *DHCPLease) XXX_DiscardUnknown() { + xxx_messageInfo_DHCPLease.DiscardUnknown(m) +} + +var xxx_messageInfo_DHCPLease proto.InternalMessageInfo + +func (m *DHCPLease) GetInterfaceName() string { + if m != nil { + return m.InterfaceName + } + return "" +} + +func (m *DHCPLease) GetHostName() string { + if m != nil { + return m.HostName + } + return "" +} + +func (m *DHCPLease) 
GetIsIpv6() bool { + if m != nil { + return m.IsIpv6 + } + return false +} + +func (m *DHCPLease) GetHostPhysAddress() string { + if m != nil { + return m.HostPhysAddress + } + return "" +} + +func (m *DHCPLease) GetHostIpAddress() string { + if m != nil { + return m.HostIpAddress + } + return "" +} + +func (m *DHCPLease) GetRouterIpAddress() string { + if m != nil { + return m.RouterIpAddress + } + return "" +} + +func (*DHCPLease) XXX_MessageName() string { + return "vpp.interfaces.DHCPLease" +} +func init() { + proto.RegisterType((*DHCPLease)(nil), "vpp.interfaces.DHCPLease") +} + +func init() { + proto.RegisterFile("models/vpp/interfaces/dhcp.proto", fileDescriptor_dhcp_fd468439f257354b) +} + +var fileDescriptor_dhcp_fd468439f257354b = []byte{ + // 284 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4f, 0x4b, 0xc3, 0x40, + 0x14, 0xc4, 0x89, 0x7f, 0x6a, 0xb3, 0x60, 0xa5, 0xb9, 0x18, 0x14, 0x24, 0x08, 0x4a, 0x29, 0x34, + 0x7b, 0x10, 0x7a, 0xf1, 0xa4, 0xf6, 0x60, 0x45, 0xa4, 0xf4, 0xe8, 0x25, 0x6c, 0x92, 0x6d, 0xb2, + 0xd0, 0x64, 0x1f, 0x79, 0x9b, 0x85, 0x7e, 0x43, 0xbf, 0x87, 0x17, 0x3f, 0x86, 0xe4, 0x2d, 0xa6, + 0x3d, 0x78, 0xdb, 0x99, 0xf9, 0xcd, 0x63, 0x19, 0x16, 0x55, 0x3a, 0x97, 0x5b, 0xe4, 0x16, 0x80, + 0xab, 0xda, 0xc8, 0x66, 0x23, 0x32, 0x89, 0x3c, 0x2f, 0x33, 0x88, 0xa1, 0xd1, 0x46, 0x07, 0x23, + 0x0b, 0x10, 0xef, 0xa3, 0xab, 0x59, 0xa1, 0x4c, 0xd9, 0xa6, 0x71, 0xa6, 0x2b, 0x5e, 0xe8, 0x42, + 0x73, 0xc2, 0xd2, 0x76, 0x43, 0x8a, 0x04, 0xbd, 0x5c, 0xfd, 0xf6, 0xc7, 0x63, 0xfe, 0xe2, 0xf5, + 0x65, 0xf5, 0x2e, 0x05, 0xca, 0xe0, 0x8e, 0x8d, 0xfa, 0x53, 0x49, 0x2d, 0x2a, 0x19, 0x7a, 0x91, + 0x37, 0xf1, 0xd7, 0xe7, 0xbd, 0xfb, 0x21, 0x2a, 0x19, 0x5c, 0x33, 0xbf, 0xd4, 0x68, 0x1c, 0x71, + 0x44, 0xc4, 0xb0, 0x33, 0x28, 0xbc, 0x64, 0x67, 0x0a, 0x13, 0x05, 0x76, 0x1e, 0x1e, 0x47, 0xde, + 0x64, 0xb8, 0x1e, 0x28, 0x5c, 0x82, 0x9d, 0x07, 0x53, 0x36, 0xa6, 0x16, 0x94, 0x3b, 0x4c, 0x44, + 0x9e, 0x37, 
0x12, 0x31, 0x3c, 0xa1, 0xf6, 0x45, 0x17, 0xac, 0xca, 0x1d, 0x3e, 0x39, 0x3b, 0xb8, + 0x67, 0x64, 0x25, 0x0a, 0x7a, 0xf2, 0xd4, 0xfd, 0xa4, 0xb3, 0x97, 0xf0, 0xc7, 0x4d, 0xd9, 0xb8, + 0xd1, 0xad, 0x91, 0xcd, 0x21, 0x39, 0x70, 0x37, 0x5d, 0xd0, 0xb3, 0xcf, 0x6f, 0x5f, 0xdf, 0x37, + 0xde, 0xe7, 0xe2, 0x60, 0x9f, 0xad, 0x2a, 0x84, 0xd1, 0xdd, 0xb8, 0x33, 0x51, 0xc8, 0xda, 0x70, + 0x01, 0x8a, 0xff, 0xbb, 0xf8, 0xa3, 0x05, 0x48, 0xf6, 0x32, 0x1d, 0xd0, 0x7a, 0x0f, 0xbf, 0x01, + 0x00, 0x00, 0xff, 0xff, 0xc5, 0xdd, 0x8e, 0xa5, 0xa0, 0x01, 0x00, 0x00, +} diff --git a/api/models/vpp/interfaces/dhcp.proto b/api/models/vpp/interfaces/dhcp.proto new file mode 100644 index 0000000000..a6ec277e4e --- /dev/null +++ b/api/models/vpp/interfaces/dhcp.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +package vpp.interfaces; + +option go_package = "github.com/ligato/vpp-agent/api/models/vpp/interfaces;vpp_interfaces"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option (gogoproto.messagename_all) = true; + +/* DHCPLease is a notification, i.e. flows from SB upwards */ +message DHCPLease { + string interface_name = 1; + string host_name = 2; + bool is_ipv6 = 3; + string host_phys_address = 4; + string host_ip_address = 5; /* IP addresses in the format / */ + string router_ip_address = 6; /* IP addresses in the format / */ +} diff --git a/api/models/vpp/interfaces/interface.pb.go b/api/models/vpp/interfaces/interface.pb.go new file mode 100644 index 0000000000..6c36b223c4 --- /dev/null +++ b/api/models/vpp/interfaces/interface.pb.go @@ -0,0 +1,1404 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: models/vpp/interfaces/interface.proto + +package vpp_interfaces // import "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Interface_Type int32 + +const ( + Interface_UNDEFINED_TYPE Interface_Type = 0 + Interface_SUB_INTERFACE Interface_Type = 1 + Interface_SOFTWARE_LOOPBACK Interface_Type = 2 + Interface_DPDK Interface_Type = 3 + Interface_MEMIF Interface_Type = 4 + Interface_TAP Interface_Type = 5 + Interface_AF_PACKET Interface_Type = 6 + Interface_VXLAN_TUNNEL Interface_Type = 7 + Interface_IPSEC_TUNNEL Interface_Type = 8 + Interface_VMXNET3_INTERFACE Interface_Type = 9 +) + +var Interface_Type_name = map[int32]string{ + 0: "UNDEFINED_TYPE", + 1: "SUB_INTERFACE", + 2: "SOFTWARE_LOOPBACK", + 3: "DPDK", + 4: "MEMIF", + 5: "TAP", + 6: "AF_PACKET", + 7: "VXLAN_TUNNEL", + 8: "IPSEC_TUNNEL", + 9: "VMXNET3_INTERFACE", +} +var Interface_Type_value = map[string]int32{ + "UNDEFINED_TYPE": 0, + "SUB_INTERFACE": 1, + "SOFTWARE_LOOPBACK": 2, + "DPDK": 3, + "MEMIF": 4, + "TAP": 5, + "AF_PACKET": 6, + "VXLAN_TUNNEL": 7, + "IPSEC_TUNNEL": 8, + "VMXNET3_INTERFACE": 9, +} + +func (x Interface_Type) String() string { + return proto.EnumName(Interface_Type_name, int32(x)) +} +func (Interface_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_interface_db722b704b5287c1, []int{0, 0} +} + +// from vpp/build-root/install-vpp-native/vpp/include/vnet/interface.h +type Interface_RxModeSettings_RxModeType int32 + +const ( + Interface_RxModeSettings_UNKNOWN Interface_RxModeSettings_RxModeType = 0 + Interface_RxModeSettings_POLLING Interface_RxModeSettings_RxModeType = 1 + Interface_RxModeSettings_INTERRUPT Interface_RxModeSettings_RxModeType = 2 + Interface_RxModeSettings_ADAPTIVE 
Interface_RxModeSettings_RxModeType = 3 + Interface_RxModeSettings_DEFAULT Interface_RxModeSettings_RxModeType = 4 +) + +var Interface_RxModeSettings_RxModeType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "POLLING", + 2: "INTERRUPT", + 3: "ADAPTIVE", + 4: "DEFAULT", +} +var Interface_RxModeSettings_RxModeType_value = map[string]int32{ + "UNKNOWN": 0, + "POLLING": 1, + "INTERRUPT": 2, + "ADAPTIVE": 3, + "DEFAULT": 4, +} + +func (x Interface_RxModeSettings_RxModeType) String() string { + return proto.EnumName(Interface_RxModeSettings_RxModeType_name, int32(x)) +} +func (Interface_RxModeSettings_RxModeType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_interface_db722b704b5287c1, []int{0, 1, 0} +} + +type MemifLink_MemifMode int32 + +const ( + MemifLink_ETHERNET MemifLink_MemifMode = 0 + MemifLink_IP MemifLink_MemifMode = 1 + MemifLink_PUNT_INJECT MemifLink_MemifMode = 2 +) + +var MemifLink_MemifMode_name = map[int32]string{ + 0: "ETHERNET", + 1: "IP", + 2: "PUNT_INJECT", +} +var MemifLink_MemifMode_value = map[string]int32{ + "ETHERNET": 0, + "IP": 1, + "PUNT_INJECT": 2, +} + +func (x MemifLink_MemifMode) String() string { + return proto.EnumName(MemifLink_MemifMode_name, int32(x)) +} +func (MemifLink_MemifMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_interface_db722b704b5287c1, []int{2, 0} +} + +type IPSecLink_CryptoAlg int32 + +const ( + IPSecLink_NONE_CRYPTO IPSecLink_CryptoAlg = 0 + IPSecLink_AES_CBC_128 IPSecLink_CryptoAlg = 1 + IPSecLink_AES_CBC_192 IPSecLink_CryptoAlg = 2 + IPSecLink_AES_CBC_256 IPSecLink_CryptoAlg = 3 +) + +var IPSecLink_CryptoAlg_name = map[int32]string{ + 0: "NONE_CRYPTO", + 1: "AES_CBC_128", + 2: "AES_CBC_192", + 3: "AES_CBC_256", +} +var IPSecLink_CryptoAlg_value = map[string]int32{ + "NONE_CRYPTO": 0, + "AES_CBC_128": 1, + "AES_CBC_192": 2, + "AES_CBC_256": 3, +} + +func (x IPSecLink_CryptoAlg) String() string { + return proto.EnumName(IPSecLink_CryptoAlg_name, int32(x)) +} +func (IPSecLink_CryptoAlg) 
EnumDescriptor() ([]byte, []int) { + return fileDescriptor_interface_db722b704b5287c1, []int{6, 0} +} + +type IPSecLink_IntegAlg int32 + +const ( + IPSecLink_NONE_INTEG IPSecLink_IntegAlg = 0 + IPSecLink_MD5_96 IPSecLink_IntegAlg = 1 + IPSecLink_SHA1_96 IPSecLink_IntegAlg = 2 + IPSecLink_SHA_256_96 IPSecLink_IntegAlg = 3 + IPSecLink_SHA_256_128 IPSecLink_IntegAlg = 4 + IPSecLink_SHA_384_192 IPSecLink_IntegAlg = 5 + IPSecLink_SHA_512_256 IPSecLink_IntegAlg = 6 +) + +var IPSecLink_IntegAlg_name = map[int32]string{ + 0: "NONE_INTEG", + 1: "MD5_96", + 2: "SHA1_96", + 3: "SHA_256_96", + 4: "SHA_256_128", + 5: "SHA_384_192", + 6: "SHA_512_256", +} +var IPSecLink_IntegAlg_value = map[string]int32{ + "NONE_INTEG": 0, + "MD5_96": 1, + "SHA1_96": 2, + "SHA_256_96": 3, + "SHA_256_128": 4, + "SHA_384_192": 5, + "SHA_512_256": 6, +} + +func (x IPSecLink_IntegAlg) String() string { + return proto.EnumName(IPSecLink_IntegAlg_name, int32(x)) +} +func (IPSecLink_IntegAlg) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_interface_db722b704b5287c1, []int{6, 1} +} + +type Interface struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type Interface_Type `protobuf:"varint,2,opt,name=type,proto3,enum=vpp.interfaces.Interface_Type" json:"type,omitempty"` + Enabled bool `protobuf:"varint,3,opt,name=enabled,proto3" json:"enabled,omitempty"` + PhysAddress string `protobuf:"bytes,4,opt,name=phys_address,json=physAddress,proto3" json:"phys_address,omitempty"` + IpAddresses []string `protobuf:"bytes,5,rep,name=ip_addresses,json=ipAddresses,proto3" json:"ip_addresses,omitempty"` + Vrf uint32 `protobuf:"varint,6,opt,name=vrf,proto3" json:"vrf,omitempty"` + SetDhcpClient bool `protobuf:"varint,7,opt,name=set_dhcp_client,json=setDhcpClient,proto3" json:"set_dhcp_client,omitempty"` + Mtu uint32 `protobuf:"varint,8,opt,name=mtu,proto3" json:"mtu,omitempty"` + Unnumbered *Interface_Unnumbered `protobuf:"bytes,9,opt,name=unnumbered,proto3" 
json:"unnumbered,omitempty"` + RxModeSettings *Interface_RxModeSettings `protobuf:"bytes,10,opt,name=rx_mode_settings,json=rxModeSettings,proto3" json:"rx_mode_settings,omitempty"` + RxPlacementSettings *Interface_RxPlacementSettings `protobuf:"bytes,11,opt,name=rx_placement_settings,json=rxPlacementSettings,proto3" json:"rx_placement_settings,omitempty"` + // Types that are valid to be assigned to Link: + // *Interface_Sub + // *Interface_Memif + // *Interface_Afpacket + // *Interface_Tap + // *Interface_Vxlan + // *Interface_Ipsec + // *Interface_VmxNet3 + Link isInterface_Link `protobuf_oneof:"link"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Interface) Reset() { *m = Interface{} } +func (m *Interface) String() string { return proto.CompactTextString(m) } +func (*Interface) ProtoMessage() {} +func (*Interface) Descriptor() ([]byte, []int) { + return fileDescriptor_interface_db722b704b5287c1, []int{0} +} +func (m *Interface) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Interface.Unmarshal(m, b) +} +func (m *Interface) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Interface.Marshal(b, m, deterministic) +} +func (dst *Interface) XXX_Merge(src proto.Message) { + xxx_messageInfo_Interface.Merge(dst, src) +} +func (m *Interface) XXX_Size() int { + return xxx_messageInfo_Interface.Size(m) +} +func (m *Interface) XXX_DiscardUnknown() { + xxx_messageInfo_Interface.DiscardUnknown(m) +} + +var xxx_messageInfo_Interface proto.InternalMessageInfo + +type isInterface_Link interface { + isInterface_Link() +} + +type Interface_Sub struct { + Sub *SubInterface `protobuf:"bytes,100,opt,name=sub,proto3,oneof"` +} +type Interface_Memif struct { + Memif *MemifLink `protobuf:"bytes,101,opt,name=memif,proto3,oneof"` +} +type Interface_Afpacket struct { + Afpacket *AfpacketLink `protobuf:"bytes,102,opt,name=afpacket,proto3,oneof"` +} +type 
Interface_Tap struct { + Tap *TapLink `protobuf:"bytes,103,opt,name=tap,proto3,oneof"` +} +type Interface_Vxlan struct { + Vxlan *VxlanLink `protobuf:"bytes,104,opt,name=vxlan,proto3,oneof"` +} +type Interface_Ipsec struct { + Ipsec *IPSecLink `protobuf:"bytes,105,opt,name=ipsec,proto3,oneof"` +} +type Interface_VmxNet3 struct { + VmxNet3 *VmxNet3Link `protobuf:"bytes,106,opt,name=vmx_net3,json=vmxNet3,proto3,oneof"` +} + +func (*Interface_Sub) isInterface_Link() {} +func (*Interface_Memif) isInterface_Link() {} +func (*Interface_Afpacket) isInterface_Link() {} +func (*Interface_Tap) isInterface_Link() {} +func (*Interface_Vxlan) isInterface_Link() {} +func (*Interface_Ipsec) isInterface_Link() {} +func (*Interface_VmxNet3) isInterface_Link() {} + +func (m *Interface) GetLink() isInterface_Link { + if m != nil { + return m.Link + } + return nil +} + +func (m *Interface) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Interface) GetType() Interface_Type { + if m != nil { + return m.Type + } + return Interface_UNDEFINED_TYPE +} + +func (m *Interface) GetEnabled() bool { + if m != nil { + return m.Enabled + } + return false +} + +func (m *Interface) GetPhysAddress() string { + if m != nil { + return m.PhysAddress + } + return "" +} + +func (m *Interface) GetIpAddresses() []string { + if m != nil { + return m.IpAddresses + } + return nil +} + +func (m *Interface) GetVrf() uint32 { + if m != nil { + return m.Vrf + } + return 0 +} + +func (m *Interface) GetSetDhcpClient() bool { + if m != nil { + return m.SetDhcpClient + } + return false +} + +func (m *Interface) GetMtu() uint32 { + if m != nil { + return m.Mtu + } + return 0 +} + +func (m *Interface) GetUnnumbered() *Interface_Unnumbered { + if m != nil { + return m.Unnumbered + } + return nil +} + +func (m *Interface) GetRxModeSettings() *Interface_RxModeSettings { + if m != nil { + return m.RxModeSettings + } + return nil +} + +func (m *Interface) GetRxPlacementSettings() 
*Interface_RxPlacementSettings { + if m != nil { + return m.RxPlacementSettings + } + return nil +} + +func (m *Interface) GetSub() *SubInterface { + if x, ok := m.GetLink().(*Interface_Sub); ok { + return x.Sub + } + return nil +} + +func (m *Interface) GetMemif() *MemifLink { + if x, ok := m.GetLink().(*Interface_Memif); ok { + return x.Memif + } + return nil +} + +func (m *Interface) GetAfpacket() *AfpacketLink { + if x, ok := m.GetLink().(*Interface_Afpacket); ok { + return x.Afpacket + } + return nil +} + +func (m *Interface) GetTap() *TapLink { + if x, ok := m.GetLink().(*Interface_Tap); ok { + return x.Tap + } + return nil +} + +func (m *Interface) GetVxlan() *VxlanLink { + if x, ok := m.GetLink().(*Interface_Vxlan); ok { + return x.Vxlan + } + return nil +} + +func (m *Interface) GetIpsec() *IPSecLink { + if x, ok := m.GetLink().(*Interface_Ipsec); ok { + return x.Ipsec + } + return nil +} + +func (m *Interface) GetVmxNet3() *VmxNet3Link { + if x, ok := m.GetLink().(*Interface_VmxNet3); ok { + return x.VmxNet3 + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Interface) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Interface_OneofMarshaler, _Interface_OneofUnmarshaler, _Interface_OneofSizer, []interface{}{ + (*Interface_Sub)(nil), + (*Interface_Memif)(nil), + (*Interface_Afpacket)(nil), + (*Interface_Tap)(nil), + (*Interface_Vxlan)(nil), + (*Interface_Ipsec)(nil), + (*Interface_VmxNet3)(nil), + } +} + +func _Interface_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Interface) + // link + switch x := m.Link.(type) { + case *Interface_Sub: + _ = b.EncodeVarint(100<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Sub); err != nil { + return err + } + case *Interface_Memif: + _ = b.EncodeVarint(101<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Memif); err != nil { + return err + } + case *Interface_Afpacket: + _ = b.EncodeVarint(102<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Afpacket); err != nil { + return err + } + case *Interface_Tap: + _ = b.EncodeVarint(103<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Tap); err != nil { + return err + } + case *Interface_Vxlan: + _ = b.EncodeVarint(104<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Vxlan); err != nil { + return err + } + case *Interface_Ipsec: + _ = b.EncodeVarint(105<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Ipsec); err != nil { + return err + } + case *Interface_VmxNet3: + _ = b.EncodeVarint(106<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.VmxNet3); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Interface.Link has unexpected type %T", x) + } + return nil +} + +func _Interface_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Interface) + switch tag { + case 100: // link.sub + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := 
new(SubInterface) + err := b.DecodeMessage(msg) + m.Link = &Interface_Sub{msg} + return true, err + case 101: // link.memif + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(MemifLink) + err := b.DecodeMessage(msg) + m.Link = &Interface_Memif{msg} + return true, err + case 102: // link.afpacket + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AfpacketLink) + err := b.DecodeMessage(msg) + m.Link = &Interface_Afpacket{msg} + return true, err + case 103: // link.tap + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TapLink) + err := b.DecodeMessage(msg) + m.Link = &Interface_Tap{msg} + return true, err + case 104: // link.vxlan + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(VxlanLink) + err := b.DecodeMessage(msg) + m.Link = &Interface_Vxlan{msg} + return true, err + case 105: // link.ipsec + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(IPSecLink) + err := b.DecodeMessage(msg) + m.Link = &Interface_Ipsec{msg} + return true, err + case 106: // link.vmx_net3 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(VmxNet3Link) + err := b.DecodeMessage(msg) + m.Link = &Interface_VmxNet3{msg} + return true, err + default: + return false, nil + } +} + +func _Interface_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Interface) + // link + switch x := m.Link.(type) { + case *Interface_Sub: + s := proto.Size(x.Sub) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Interface_Memif: + s := proto.Size(x.Memif) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Interface_Afpacket: + s := proto.Size(x.Afpacket) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Interface_Tap: + s := proto.Size(x.Tap) + n += 2 // tag and wire + n += 
proto.SizeVarint(uint64(s)) + n += s + case *Interface_Vxlan: + s := proto.Size(x.Vxlan) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Interface_Ipsec: + s := proto.Size(x.Ipsec) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Interface_VmxNet3: + s := proto.Size(x.VmxNet3) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func (*Interface) XXX_MessageName() string { + return "vpp.interfaces.Interface" +} + +type Interface_Unnumbered struct { + InterfaceWithIp string `protobuf:"bytes,1,opt,name=interface_with_ip,json=interfaceWithIp,proto3" json:"interface_with_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Interface_Unnumbered) Reset() { *m = Interface_Unnumbered{} } +func (m *Interface_Unnumbered) String() string { return proto.CompactTextString(m) } +func (*Interface_Unnumbered) ProtoMessage() {} +func (*Interface_Unnumbered) Descriptor() ([]byte, []int) { + return fileDescriptor_interface_db722b704b5287c1, []int{0, 0} +} +func (m *Interface_Unnumbered) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Interface_Unnumbered.Unmarshal(m, b) +} +func (m *Interface_Unnumbered) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Interface_Unnumbered.Marshal(b, m, deterministic) +} +func (dst *Interface_Unnumbered) XXX_Merge(src proto.Message) { + xxx_messageInfo_Interface_Unnumbered.Merge(dst, src) +} +func (m *Interface_Unnumbered) XXX_Size() int { + return xxx_messageInfo_Interface_Unnumbered.Size(m) +} +func (m *Interface_Unnumbered) XXX_DiscardUnknown() { + xxx_messageInfo_Interface_Unnumbered.DiscardUnknown(m) +} + +var xxx_messageInfo_Interface_Unnumbered proto.InternalMessageInfo + +func (m *Interface_Unnumbered) GetInterfaceWithIp() string { + 
if m != nil { + return m.InterfaceWithIp + } + return "" +} + +func (*Interface_Unnumbered) XXX_MessageName() string { + return "vpp.interfaces.Interface.Unnumbered" +} + +type Interface_RxModeSettings struct { + RxMode Interface_RxModeSettings_RxModeType `protobuf:"varint,1,opt,name=rx_mode,json=rxMode,proto3,enum=vpp.interfaces.Interface_RxModeSettings_RxModeType" json:"rx_mode,omitempty"` + QueueId uint32 `protobuf:"varint,2,opt,name=queue_id,json=queueId,proto3" json:"queue_id,omitempty"` + QueueIdValid uint32 `protobuf:"varint,3,opt,name=queue_id_valid,json=queueIdValid,proto3" json:"queue_id_valid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Interface_RxModeSettings) Reset() { *m = Interface_RxModeSettings{} } +func (m *Interface_RxModeSettings) String() string { return proto.CompactTextString(m) } +func (*Interface_RxModeSettings) ProtoMessage() {} +func (*Interface_RxModeSettings) Descriptor() ([]byte, []int) { + return fileDescriptor_interface_db722b704b5287c1, []int{0, 1} +} +func (m *Interface_RxModeSettings) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Interface_RxModeSettings.Unmarshal(m, b) +} +func (m *Interface_RxModeSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Interface_RxModeSettings.Marshal(b, m, deterministic) +} +func (dst *Interface_RxModeSettings) XXX_Merge(src proto.Message) { + xxx_messageInfo_Interface_RxModeSettings.Merge(dst, src) +} +func (m *Interface_RxModeSettings) XXX_Size() int { + return xxx_messageInfo_Interface_RxModeSettings.Size(m) +} +func (m *Interface_RxModeSettings) XXX_DiscardUnknown() { + xxx_messageInfo_Interface_RxModeSettings.DiscardUnknown(m) +} + +var xxx_messageInfo_Interface_RxModeSettings proto.InternalMessageInfo + +func (m *Interface_RxModeSettings) GetRxMode() Interface_RxModeSettings_RxModeType { + if m != nil { + return m.RxMode + } + return 
Interface_RxModeSettings_UNKNOWN +} + +func (m *Interface_RxModeSettings) GetQueueId() uint32 { + if m != nil { + return m.QueueId + } + return 0 +} + +func (m *Interface_RxModeSettings) GetQueueIdValid() uint32 { + if m != nil { + return m.QueueIdValid + } + return 0 +} + +func (*Interface_RxModeSettings) XXX_MessageName() string { + return "vpp.interfaces.Interface.RxModeSettings" +} + +type Interface_RxPlacementSettings struct { + Queue uint32 `protobuf:"varint,1,opt,name=queue,proto3" json:"queue,omitempty"` + Worker uint32 `protobuf:"varint,2,opt,name=worker,proto3" json:"worker,omitempty"` + IsMain bool `protobuf:"varint,3,opt,name=is_main,json=isMain,proto3" json:"is_main,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Interface_RxPlacementSettings) Reset() { *m = Interface_RxPlacementSettings{} } +func (m *Interface_RxPlacementSettings) String() string { return proto.CompactTextString(m) } +func (*Interface_RxPlacementSettings) ProtoMessage() {} +func (*Interface_RxPlacementSettings) Descriptor() ([]byte, []int) { + return fileDescriptor_interface_db722b704b5287c1, []int{0, 2} +} +func (m *Interface_RxPlacementSettings) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Interface_RxPlacementSettings.Unmarshal(m, b) +} +func (m *Interface_RxPlacementSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Interface_RxPlacementSettings.Marshal(b, m, deterministic) +} +func (dst *Interface_RxPlacementSettings) XXX_Merge(src proto.Message) { + xxx_messageInfo_Interface_RxPlacementSettings.Merge(dst, src) +} +func (m *Interface_RxPlacementSettings) XXX_Size() int { + return xxx_messageInfo_Interface_RxPlacementSettings.Size(m) +} +func (m *Interface_RxPlacementSettings) XXX_DiscardUnknown() { + xxx_messageInfo_Interface_RxPlacementSettings.DiscardUnknown(m) +} + +var xxx_messageInfo_Interface_RxPlacementSettings 
proto.InternalMessageInfo + +func (m *Interface_RxPlacementSettings) GetQueue() uint32 { + if m != nil { + return m.Queue + } + return 0 +} + +func (m *Interface_RxPlacementSettings) GetWorker() uint32 { + if m != nil { + return m.Worker + } + return 0 +} + +func (m *Interface_RxPlacementSettings) GetIsMain() bool { + if m != nil { + return m.IsMain + } + return false +} + +func (*Interface_RxPlacementSettings) XXX_MessageName() string { + return "vpp.interfaces.Interface.RxPlacementSettings" +} + +type SubInterface struct { + ParentName string `protobuf:"bytes,1,opt,name=parent_name,json=parentName,proto3" json:"parent_name,omitempty"` + SubId uint32 `protobuf:"varint,2,opt,name=sub_id,json=subId,proto3" json:"sub_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SubInterface) Reset() { *m = SubInterface{} } +func (m *SubInterface) String() string { return proto.CompactTextString(m) } +func (*SubInterface) ProtoMessage() {} +func (*SubInterface) Descriptor() ([]byte, []int) { + return fileDescriptor_interface_db722b704b5287c1, []int{1} +} +func (m *SubInterface) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SubInterface.Unmarshal(m, b) +} +func (m *SubInterface) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SubInterface.Marshal(b, m, deterministic) +} +func (dst *SubInterface) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubInterface.Merge(dst, src) +} +func (m *SubInterface) XXX_Size() int { + return xxx_messageInfo_SubInterface.Size(m) +} +func (m *SubInterface) XXX_DiscardUnknown() { + xxx_messageInfo_SubInterface.DiscardUnknown(m) +} + +var xxx_messageInfo_SubInterface proto.InternalMessageInfo + +func (m *SubInterface) GetParentName() string { + if m != nil { + return m.ParentName + } + return "" +} + +func (m *SubInterface) GetSubId() uint32 { + if m != nil { + return m.SubId + } + return 0 +} + +func 
(*SubInterface) XXX_MessageName() string { + return "vpp.interfaces.SubInterface" +} + +type MemifLink struct { + Mode MemifLink_MemifMode `protobuf:"varint,1,opt,name=mode,proto3,enum=vpp.interfaces.MemifLink_MemifMode" json:"mode,omitempty"` + Master bool `protobuf:"varint,2,opt,name=master,proto3" json:"master,omitempty"` + Id uint32 `protobuf:"varint,3,opt,name=id,proto3" json:"id,omitempty"` + SocketFilename string `protobuf:"bytes,4,opt,name=socket_filename,json=socketFilename,proto3" json:"socket_filename,omitempty"` + Secret string `protobuf:"bytes,5,opt,name=secret,proto3" json:"secret,omitempty"` + RingSize uint32 `protobuf:"varint,6,opt,name=ring_size,json=ringSize,proto3" json:"ring_size,omitempty"` + BufferSize uint32 `protobuf:"varint,7,opt,name=buffer_size,json=bufferSize,proto3" json:"buffer_size,omitempty"` + RxQueues uint32 `protobuf:"varint,8,opt,name=rx_queues,json=rxQueues,proto3" json:"rx_queues,omitempty"` + TxQueues uint32 `protobuf:"varint,9,opt,name=tx_queues,json=txQueues,proto3" json:"tx_queues,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemifLink) Reset() { *m = MemifLink{} } +func (m *MemifLink) String() string { return proto.CompactTextString(m) } +func (*MemifLink) ProtoMessage() {} +func (*MemifLink) Descriptor() ([]byte, []int) { + return fileDescriptor_interface_db722b704b5287c1, []int{2} +} +func (m *MemifLink) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MemifLink.Unmarshal(m, b) +} +func (m *MemifLink) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MemifLink.Marshal(b, m, deterministic) +} +func (dst *MemifLink) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemifLink.Merge(dst, src) +} +func (m *MemifLink) XXX_Size() int { + return xxx_messageInfo_MemifLink.Size(m) +} +func (m *MemifLink) XXX_DiscardUnknown() { + xxx_messageInfo_MemifLink.DiscardUnknown(m) +} + +var 
xxx_messageInfo_MemifLink proto.InternalMessageInfo + +func (m *MemifLink) GetMode() MemifLink_MemifMode { + if m != nil { + return m.Mode + } + return MemifLink_ETHERNET +} + +func (m *MemifLink) GetMaster() bool { + if m != nil { + return m.Master + } + return false +} + +func (m *MemifLink) GetId() uint32 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *MemifLink) GetSocketFilename() string { + if m != nil { + return m.SocketFilename + } + return "" +} + +func (m *MemifLink) GetSecret() string { + if m != nil { + return m.Secret + } + return "" +} + +func (m *MemifLink) GetRingSize() uint32 { + if m != nil { + return m.RingSize + } + return 0 +} + +func (m *MemifLink) GetBufferSize() uint32 { + if m != nil { + return m.BufferSize + } + return 0 +} + +func (m *MemifLink) GetRxQueues() uint32 { + if m != nil { + return m.RxQueues + } + return 0 +} + +func (m *MemifLink) GetTxQueues() uint32 { + if m != nil { + return m.TxQueues + } + return 0 +} + +func (*MemifLink) XXX_MessageName() string { + return "vpp.interfaces.MemifLink" +} + +type VxlanLink struct { + SrcAddress string `protobuf:"bytes,1,opt,name=src_address,json=srcAddress,proto3" json:"src_address,omitempty"` + DstAddress string `protobuf:"bytes,2,opt,name=dst_address,json=dstAddress,proto3" json:"dst_address,omitempty"` + Vni uint32 `protobuf:"varint,3,opt,name=vni,proto3" json:"vni,omitempty"` + Multicast string `protobuf:"bytes,4,opt,name=multicast,proto3" json:"multicast,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VxlanLink) Reset() { *m = VxlanLink{} } +func (m *VxlanLink) String() string { return proto.CompactTextString(m) } +func (*VxlanLink) ProtoMessage() {} +func (*VxlanLink) Descriptor() ([]byte, []int) { + return fileDescriptor_interface_db722b704b5287c1, []int{3} +} +func (m *VxlanLink) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VxlanLink.Unmarshal(m, b) +} +func (m 
*VxlanLink) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VxlanLink.Marshal(b, m, deterministic) +} +func (dst *VxlanLink) XXX_Merge(src proto.Message) { + xxx_messageInfo_VxlanLink.Merge(dst, src) +} +func (m *VxlanLink) XXX_Size() int { + return xxx_messageInfo_VxlanLink.Size(m) +} +func (m *VxlanLink) XXX_DiscardUnknown() { + xxx_messageInfo_VxlanLink.DiscardUnknown(m) +} + +var xxx_messageInfo_VxlanLink proto.InternalMessageInfo + +func (m *VxlanLink) GetSrcAddress() string { + if m != nil { + return m.SrcAddress + } + return "" +} + +func (m *VxlanLink) GetDstAddress() string { + if m != nil { + return m.DstAddress + } + return "" +} + +func (m *VxlanLink) GetVni() uint32 { + if m != nil { + return m.Vni + } + return 0 +} + +func (m *VxlanLink) GetMulticast() string { + if m != nil { + return m.Multicast + } + return "" +} + +func (*VxlanLink) XXX_MessageName() string { + return "vpp.interfaces.VxlanLink" +} + +type AfpacketLink struct { + HostIfName string `protobuf:"bytes,1,opt,name=host_if_name,json=hostIfName,proto3" json:"host_if_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AfpacketLink) Reset() { *m = AfpacketLink{} } +func (m *AfpacketLink) String() string { return proto.CompactTextString(m) } +func (*AfpacketLink) ProtoMessage() {} +func (*AfpacketLink) Descriptor() ([]byte, []int) { + return fileDescriptor_interface_db722b704b5287c1, []int{4} +} +func (m *AfpacketLink) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AfpacketLink.Unmarshal(m, b) +} +func (m *AfpacketLink) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AfpacketLink.Marshal(b, m, deterministic) +} +func (dst *AfpacketLink) XXX_Merge(src proto.Message) { + xxx_messageInfo_AfpacketLink.Merge(dst, src) +} +func (m *AfpacketLink) XXX_Size() int { + return xxx_messageInfo_AfpacketLink.Size(m) +} +func (m 
*AfpacketLink) XXX_DiscardUnknown() { + xxx_messageInfo_AfpacketLink.DiscardUnknown(m) +} + +var xxx_messageInfo_AfpacketLink proto.InternalMessageInfo + +func (m *AfpacketLink) GetHostIfName() string { + if m != nil { + return m.HostIfName + } + return "" +} + +func (*AfpacketLink) XXX_MessageName() string { + return "vpp.interfaces.AfpacketLink" +} + +type TapLink struct { + Version uint32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + HostIfName string `protobuf:"bytes,2,opt,name=host_if_name,json=hostIfName,proto3" json:"host_if_name,omitempty"` + ToMicroservice string `protobuf:"bytes,3,opt,name=to_microservice,json=toMicroservice,proto3" json:"to_microservice,omitempty"` + RxRingSize uint32 `protobuf:"varint,4,opt,name=rx_ring_size,json=rxRingSize,proto3" json:"rx_ring_size,omitempty"` + TxRingSize uint32 `protobuf:"varint,5,opt,name=tx_ring_size,json=txRingSize,proto3" json:"tx_ring_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TapLink) Reset() { *m = TapLink{} } +func (m *TapLink) String() string { return proto.CompactTextString(m) } +func (*TapLink) ProtoMessage() {} +func (*TapLink) Descriptor() ([]byte, []int) { + return fileDescriptor_interface_db722b704b5287c1, []int{5} +} +func (m *TapLink) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TapLink.Unmarshal(m, b) +} +func (m *TapLink) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TapLink.Marshal(b, m, deterministic) +} +func (dst *TapLink) XXX_Merge(src proto.Message) { + xxx_messageInfo_TapLink.Merge(dst, src) +} +func (m *TapLink) XXX_Size() int { + return xxx_messageInfo_TapLink.Size(m) +} +func (m *TapLink) XXX_DiscardUnknown() { + xxx_messageInfo_TapLink.DiscardUnknown(m) +} + +var xxx_messageInfo_TapLink proto.InternalMessageInfo + +func (m *TapLink) GetVersion() uint32 { + if m != nil { + return m.Version + } + return 0 +} + 
+func (m *TapLink) GetHostIfName() string { + if m != nil { + return m.HostIfName + } + return "" +} + +func (m *TapLink) GetToMicroservice() string { + if m != nil { + return m.ToMicroservice + } + return "" +} + +func (m *TapLink) GetRxRingSize() uint32 { + if m != nil { + return m.RxRingSize + } + return 0 +} + +func (m *TapLink) GetTxRingSize() uint32 { + if m != nil { + return m.TxRingSize + } + return 0 +} + +func (*TapLink) XXX_MessageName() string { + return "vpp.interfaces.TapLink" +} + +type IPSecLink struct { + Esn bool `protobuf:"varint,2,opt,name=esn,proto3" json:"esn,omitempty"` + AntiReplay bool `protobuf:"varint,3,opt,name=anti_replay,json=antiReplay,proto3" json:"anti_replay,omitempty"` + LocalIp string `protobuf:"bytes,4,opt,name=local_ip,json=localIp,proto3" json:"local_ip,omitempty"` + RemoteIp string `protobuf:"bytes,5,opt,name=remote_ip,json=remoteIp,proto3" json:"remote_ip,omitempty"` + LocalSpi uint32 `protobuf:"varint,6,opt,name=local_spi,json=localSpi,proto3" json:"local_spi,omitempty"` + RemoteSpi uint32 `protobuf:"varint,7,opt,name=remote_spi,json=remoteSpi,proto3" json:"remote_spi,omitempty"` + CryptoAlg IPSecLink_CryptoAlg `protobuf:"varint,8,opt,name=crypto_alg,json=cryptoAlg,proto3,enum=vpp.interfaces.IPSecLink_CryptoAlg" json:"crypto_alg,omitempty"` + LocalCryptoKey string `protobuf:"bytes,9,opt,name=local_crypto_key,json=localCryptoKey,proto3" json:"local_crypto_key,omitempty"` + RemoteCryptoKey string `protobuf:"bytes,10,opt,name=remote_crypto_key,json=remoteCryptoKey,proto3" json:"remote_crypto_key,omitempty"` + IntegAlg IPSecLink_IntegAlg `protobuf:"varint,11,opt,name=integ_alg,json=integAlg,proto3,enum=vpp.interfaces.IPSecLink_IntegAlg" json:"integ_alg,omitempty"` + LocalIntegKey string `protobuf:"bytes,12,opt,name=local_integ_key,json=localIntegKey,proto3" json:"local_integ_key,omitempty"` + RemoteIntegKey string `protobuf:"bytes,13,opt,name=remote_integ_key,json=remoteIntegKey,proto3" json:"remote_integ_key,omitempty"` + 
EnableUdpEncap bool `protobuf:"varint,14,opt,name=enable_udp_encap,json=enableUdpEncap,proto3" json:"enable_udp_encap,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IPSecLink) Reset() { *m = IPSecLink{} } +func (m *IPSecLink) String() string { return proto.CompactTextString(m) } +func (*IPSecLink) ProtoMessage() {} +func (*IPSecLink) Descriptor() ([]byte, []int) { + return fileDescriptor_interface_db722b704b5287c1, []int{6} +} +func (m *IPSecLink) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IPSecLink.Unmarshal(m, b) +} +func (m *IPSecLink) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IPSecLink.Marshal(b, m, deterministic) +} +func (dst *IPSecLink) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPSecLink.Merge(dst, src) +} +func (m *IPSecLink) XXX_Size() int { + return xxx_messageInfo_IPSecLink.Size(m) +} +func (m *IPSecLink) XXX_DiscardUnknown() { + xxx_messageInfo_IPSecLink.DiscardUnknown(m) +} + +var xxx_messageInfo_IPSecLink proto.InternalMessageInfo + +func (m *IPSecLink) GetEsn() bool { + if m != nil { + return m.Esn + } + return false +} + +func (m *IPSecLink) GetAntiReplay() bool { + if m != nil { + return m.AntiReplay + } + return false +} + +func (m *IPSecLink) GetLocalIp() string { + if m != nil { + return m.LocalIp + } + return "" +} + +func (m *IPSecLink) GetRemoteIp() string { + if m != nil { + return m.RemoteIp + } + return "" +} + +func (m *IPSecLink) GetLocalSpi() uint32 { + if m != nil { + return m.LocalSpi + } + return 0 +} + +func (m *IPSecLink) GetRemoteSpi() uint32 { + if m != nil { + return m.RemoteSpi + } + return 0 +} + +func (m *IPSecLink) GetCryptoAlg() IPSecLink_CryptoAlg { + if m != nil { + return m.CryptoAlg + } + return IPSecLink_NONE_CRYPTO +} + +func (m *IPSecLink) GetLocalCryptoKey() string { + if m != nil { + return m.LocalCryptoKey + } + return "" +} + +func (m *IPSecLink) 
GetRemoteCryptoKey() string { + if m != nil { + return m.RemoteCryptoKey + } + return "" +} + +func (m *IPSecLink) GetIntegAlg() IPSecLink_IntegAlg { + if m != nil { + return m.IntegAlg + } + return IPSecLink_NONE_INTEG +} + +func (m *IPSecLink) GetLocalIntegKey() string { + if m != nil { + return m.LocalIntegKey + } + return "" +} + +func (m *IPSecLink) GetRemoteIntegKey() string { + if m != nil { + return m.RemoteIntegKey + } + return "" +} + +func (m *IPSecLink) GetEnableUdpEncap() bool { + if m != nil { + return m.EnableUdpEncap + } + return false +} + +func (*IPSecLink) XXX_MessageName() string { + return "vpp.interfaces.IPSecLink" +} + +// PCI address (unsigned 32bit int) is derived from vmxnet3 interface name. It is expected that the interface +// name is in format "vmxnet3-///", where 'd' stands for domain (max ffff), 'b' is bus (max ff), +// 's' is slot (max 1f) and 'f is function' (max 7). All values are base 16 +type VmxNet3Link struct { + EnableElog bool `protobuf:"varint,2,opt,name=enable_elog,json=enableElog,proto3" json:"enable_elog,omitempty"` + RxqSize uint32 `protobuf:"varint,3,opt,name=rxq_size,json=rxqSize,proto3" json:"rxq_size,omitempty"` + TxqSize uint32 `protobuf:"varint,4,opt,name=txq_size,json=txqSize,proto3" json:"txq_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VmxNet3Link) Reset() { *m = VmxNet3Link{} } +func (m *VmxNet3Link) String() string { return proto.CompactTextString(m) } +func (*VmxNet3Link) ProtoMessage() {} +func (*VmxNet3Link) Descriptor() ([]byte, []int) { + return fileDescriptor_interface_db722b704b5287c1, []int{7} +} +func (m *VmxNet3Link) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VmxNet3Link.Unmarshal(m, b) +} +func (m *VmxNet3Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VmxNet3Link.Marshal(b, m, deterministic) +} +func (dst *VmxNet3Link) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_VmxNet3Link.Merge(dst, src) +} +func (m *VmxNet3Link) XXX_Size() int { + return xxx_messageInfo_VmxNet3Link.Size(m) +} +func (m *VmxNet3Link) XXX_DiscardUnknown() { + xxx_messageInfo_VmxNet3Link.DiscardUnknown(m) +} + +var xxx_messageInfo_VmxNet3Link proto.InternalMessageInfo + +func (m *VmxNet3Link) GetEnableElog() bool { + if m != nil { + return m.EnableElog + } + return false +} + +func (m *VmxNet3Link) GetRxqSize() uint32 { + if m != nil { + return m.RxqSize + } + return 0 +} + +func (m *VmxNet3Link) GetTxqSize() uint32 { + if m != nil { + return m.TxqSize + } + return 0 +} + +func (*VmxNet3Link) XXX_MessageName() string { + return "vpp.interfaces.VmxNet3Link" +} +func init() { + proto.RegisterType((*Interface)(nil), "vpp.interfaces.Interface") + proto.RegisterType((*Interface_Unnumbered)(nil), "vpp.interfaces.Interface.Unnumbered") + proto.RegisterType((*Interface_RxModeSettings)(nil), "vpp.interfaces.Interface.RxModeSettings") + proto.RegisterType((*Interface_RxPlacementSettings)(nil), "vpp.interfaces.Interface.RxPlacementSettings") + proto.RegisterType((*SubInterface)(nil), "vpp.interfaces.SubInterface") + proto.RegisterType((*MemifLink)(nil), "vpp.interfaces.MemifLink") + proto.RegisterType((*VxlanLink)(nil), "vpp.interfaces.VxlanLink") + proto.RegisterType((*AfpacketLink)(nil), "vpp.interfaces.AfpacketLink") + proto.RegisterType((*TapLink)(nil), "vpp.interfaces.TapLink") + proto.RegisterType((*IPSecLink)(nil), "vpp.interfaces.IPSecLink") + proto.RegisterType((*VmxNet3Link)(nil), "vpp.interfaces.VmxNet3Link") + proto.RegisterEnum("vpp.interfaces.Interface_Type", Interface_Type_name, Interface_Type_value) + proto.RegisterEnum("vpp.interfaces.Interface_RxModeSettings_RxModeType", Interface_RxModeSettings_RxModeType_name, Interface_RxModeSettings_RxModeType_value) + proto.RegisterEnum("vpp.interfaces.MemifLink_MemifMode", MemifLink_MemifMode_name, MemifLink_MemifMode_value) + 
proto.RegisterEnum("vpp.interfaces.IPSecLink_CryptoAlg", IPSecLink_CryptoAlg_name, IPSecLink_CryptoAlg_value) + proto.RegisterEnum("vpp.interfaces.IPSecLink_IntegAlg", IPSecLink_IntegAlg_name, IPSecLink_IntegAlg_value) +} + +func init() { + proto.RegisterFile("models/vpp/interfaces/interface.proto", fileDescriptor_interface_db722b704b5287c1) +} + +var fileDescriptor_interface_db722b704b5287c1 = []byte{ + // 1595 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x57, 0xdd, 0x72, 0xdb, 0xc6, + 0x15, 0x16, 0xff, 0x89, 0xc3, 0x1f, 0xc3, 0x9b, 0xba, 0x81, 0x9d, 0x34, 0x51, 0xd9, 0xb4, 0xd5, + 0xb4, 0x63, 0x29, 0xa6, 0xea, 0xd4, 0x69, 0x2f, 0x3a, 0x10, 0x09, 0x45, 0x8c, 0x24, 0x88, 0x05, + 0x41, 0x39, 0xe9, 0x74, 0x66, 0x07, 0x04, 0x96, 0xd4, 0x56, 0x20, 0x00, 0x03, 0x4b, 0x86, 0xca, + 0x4c, 0xdf, 0xa8, 0xbd, 0xe9, 0x53, 0xf4, 0xba, 0x77, 0xbd, 0xee, 0x13, 0xf4, 0x0d, 0x3a, 0xfb, + 0x03, 0x90, 0xb6, 0xa5, 0x4e, 0xee, 0xf6, 0x7c, 0xe7, 0x3b, 0x3f, 0x38, 0x7b, 0xce, 0x59, 0x12, + 0x7e, 0xbe, 0x8c, 0x03, 0x12, 0x66, 0x47, 0xeb, 0x24, 0x39, 0xa2, 0x11, 0x23, 0xe9, 0xdc, 0xf3, + 0x49, 0xb6, 0x3d, 0x1e, 0x26, 0x69, 0xcc, 0x62, 0xd4, 0x5d, 0x27, 0xc9, 0xe1, 0x56, 0xff, 0xec, + 0xf9, 0x82, 0xb2, 0x9b, 0xd5, 0xec, 0xd0, 0x8f, 0x97, 0x47, 0x8b, 0x78, 0x11, 0x1f, 0x09, 0xda, + 0x6c, 0x35, 0x17, 0x92, 0x10, 0xc4, 0x49, 0x9a, 0xf7, 0xfe, 0xde, 0x02, 0x6d, 0x94, 0x5b, 0x23, + 0x04, 0xd5, 0xc8, 0x5b, 0x12, 0xa3, 0xb4, 0x5f, 0x3a, 0xd0, 0x1c, 0x71, 0x46, 0x7d, 0xa8, 0xb2, + 0xbb, 0x84, 0x18, 0xe5, 0xfd, 0xd2, 0x41, 0xb7, 0xff, 0xc9, 0xe1, 0xdb, 0xf1, 0x0e, 0x0b, 0xe3, + 0x43, 0xf7, 0x2e, 0x21, 0x8e, 0xe0, 0x22, 0x03, 0x1a, 0x24, 0xf2, 0x66, 0x21, 0x09, 0x8c, 0xca, + 0x7e, 0xe9, 0xa0, 0xe9, 0xe4, 0x22, 0xfa, 0x29, 0xb4, 0x93, 0x9b, 0xbb, 0x0c, 0x7b, 0x41, 0x90, + 0x92, 0x2c, 0x33, 0xaa, 0x22, 0x52, 0x8b, 0x63, 0xa6, 0x84, 0x38, 0x85, 0x26, 0x39, 0x81, 0x64, + 0x46, 0x6d, 0xbf, 0xc2, 0x29, 0x34, 0x31, 0x73, 0x08, 0xe9, 0x50, 0x59, 
0xa7, 0x73, 0xa3, 0xbe, + 0x5f, 0x3a, 0xe8, 0x38, 0xfc, 0x88, 0x7e, 0x01, 0x8f, 0x32, 0xc2, 0x70, 0x70, 0xe3, 0x27, 0xd8, + 0x0f, 0x29, 0x89, 0x98, 0xd1, 0x10, 0x91, 0x3b, 0x19, 0x61, 0xc3, 0x1b, 0x3f, 0x19, 0x08, 0x90, + 0x5b, 0x2e, 0xd9, 0xca, 0x68, 0x4a, 0xcb, 0x25, 0x5b, 0xa1, 0x21, 0xc0, 0x2a, 0x8a, 0x56, 0xcb, + 0x19, 0x49, 0x49, 0x60, 0x68, 0xfb, 0xa5, 0x83, 0x56, 0xff, 0xb3, 0x87, 0xbf, 0x72, 0x5a, 0x70, + 0x9d, 0x1d, 0x3b, 0xe4, 0x80, 0x9e, 0x6e, 0x30, 0xbf, 0x32, 0x9c, 0x11, 0xc6, 0x68, 0xb4, 0xc8, + 0x0c, 0x10, 0xbe, 0x0e, 0x1e, 0xf6, 0xe5, 0x6c, 0x2e, 0xe3, 0x80, 0x4c, 0x14, 0xdf, 0xe9, 0xa6, + 0x6f, 0xc9, 0xc8, 0x83, 0x27, 0xe9, 0x06, 0x27, 0xa1, 0xe7, 0x93, 0x25, 0x89, 0xd8, 0xd6, 0x71, + 0x4b, 0x38, 0x7e, 0xfe, 0xff, 0x1c, 0x8f, 0x73, 0xab, 0xc2, 0xfb, 0x07, 0xe9, 0xfb, 0x20, 0xfa, + 0x1c, 0x2a, 0xd9, 0x6a, 0x66, 0x04, 0xc2, 0xe1, 0xc7, 0xef, 0x3a, 0x9c, 0xac, 0x66, 0x85, 0xcf, + 0xb3, 0x3d, 0x87, 0x53, 0xd1, 0x0b, 0xa8, 0x2d, 0xc9, 0x92, 0xce, 0x0d, 0x22, 0x6c, 0x9e, 0xbe, + 0x6b, 0x73, 0xc9, 0x95, 0x17, 0x34, 0xba, 0x3d, 0xdb, 0x73, 0x24, 0x13, 0xfd, 0x0e, 0x9a, 0xde, + 0x3c, 0xf1, 0xfc, 0x5b, 0xc2, 0x8c, 0xf9, 0xfd, 0x91, 0x4c, 0xa5, 0x57, 0x86, 0x05, 0x1f, 0xfd, + 0x1a, 0x2a, 0xcc, 0x4b, 0x8c, 0x85, 0x30, 0xfb, 0xf0, 0x5d, 0x33, 0xd7, 0x4b, 0x94, 0x05, 0x67, + 0xf1, 0xdc, 0xd6, 0x9b, 0xd0, 0x8b, 0x8c, 0x9b, 0xfb, 0x73, 0xbb, 0xe6, 0xca, 0x3c, 0x37, 0xc1, + 0xe4, 0x26, 0x34, 0xc9, 0x88, 0x6f, 0xd0, 0xfb, 0x4d, 0x46, 0xe3, 0x09, 0xf1, 0x73, 0x13, 0xc1, + 0x44, 0xaf, 0xa0, 0xb9, 0x5e, 0x6e, 0x70, 0x44, 0xd8, 0xb1, 0xf1, 0x17, 0x61, 0xf5, 0xd1, 0x7b, + 0x81, 0x96, 0x1b, 0x9b, 0xb0, 0x63, 0x65, 0xd7, 0x58, 0x4b, 0xf1, 0xd9, 0x2b, 0x80, 0x6d, 0xfb, + 0xa0, 0x5f, 0xc1, 0xe3, 0xc2, 0x04, 0x7f, 0x47, 0xd9, 0x0d, 0xa6, 0x89, 0x9a, 0xbc, 0x47, 0x85, + 0xe2, 0x35, 0x65, 0x37, 0xa3, 0xe4, 0xd9, 0x7f, 0x4b, 0xd0, 0x7d, 0xbb, 0x5b, 0xd0, 0x05, 0x34, + 0x54, 0xc7, 0x09, 0xa3, 0x6e, 0xff, 0xf8, 0x87, 0x36, 0x9a, 0x12, 0xc5, 0xbc, 0xd6, 0x65, 0xcf, + 0xa1, 0xa7, 
0xd0, 0x7c, 0xb3, 0x22, 0x2b, 0x82, 0x69, 0x20, 0x26, 0xbd, 0xe3, 0x34, 0x84, 0x3c, + 0x0a, 0xd0, 0x67, 0xd0, 0xcd, 0x55, 0x78, 0xed, 0x85, 0x54, 0xce, 0x74, 0xc7, 0x69, 0x2b, 0xc2, + 0x35, 0xc7, 0x7a, 0x63, 0x80, 0xad, 0x5b, 0xd4, 0x82, 0xc6, 0xd4, 0x3e, 0xb7, 0xaf, 0x5e, 0xdb, + 0xfa, 0x1e, 0x17, 0xc6, 0x57, 0x17, 0x17, 0x23, 0xfb, 0x2b, 0xbd, 0x84, 0x3a, 0xa0, 0x8d, 0x6c, + 0xd7, 0x72, 0x9c, 0xe9, 0xd8, 0xd5, 0xcb, 0xa8, 0x0d, 0x4d, 0x73, 0x68, 0x8e, 0xdd, 0xd1, 0xb5, + 0xa5, 0x57, 0x38, 0x73, 0x68, 0x9d, 0x9a, 0xd3, 0x0b, 0x57, 0xaf, 0x3e, 0xfb, 0x33, 0x7c, 0x70, + 0x4f, 0x1f, 0xa3, 0x1f, 0x41, 0x4d, 0x04, 0x16, 0x5f, 0xdd, 0x71, 0xa4, 0x80, 0x7e, 0x0c, 0xf5, + 0xef, 0xe2, 0xf4, 0x96, 0xa4, 0x2a, 0x7b, 0x25, 0xa1, 0x0f, 0xa1, 0x41, 0x33, 0xbc, 0xf4, 0x68, + 0xa4, 0x36, 0x51, 0x9d, 0x66, 0x97, 0x1e, 0x8d, 0x7a, 0x7f, 0x2b, 0x41, 0x55, 0xa4, 0x8a, 0xa0, + 0x3b, 0xb5, 0x87, 0xd6, 0xe9, 0xc8, 0xb6, 0x86, 0xd8, 0xfd, 0x76, 0x6c, 0xe9, 0x7b, 0xe8, 0x31, + 0x74, 0x26, 0xd3, 0x13, 0x2c, 0x12, 0x3d, 0x35, 0x07, 0x96, 0x5e, 0x42, 0x4f, 0xe0, 0xf1, 0xe4, + 0xea, 0xd4, 0x7d, 0x6d, 0x3a, 0x16, 0xbe, 0xb8, 0xba, 0x1a, 0x9f, 0x98, 0x83, 0x73, 0xbd, 0x8c, + 0x9a, 0x50, 0x1d, 0x8e, 0x87, 0xe7, 0x7a, 0x05, 0x69, 0x50, 0xbb, 0xb4, 0x2e, 0x47, 0xa7, 0x7a, + 0x15, 0x35, 0xa0, 0xe2, 0x9a, 0x63, 0xbd, 0xc6, 0x3f, 0xd6, 0x3c, 0xc5, 0x63, 0x73, 0x70, 0x6e, + 0xb9, 0x7a, 0x1d, 0xe9, 0xd0, 0xbe, 0xfe, 0xe6, 0xc2, 0xb4, 0xb1, 0x3b, 0xb5, 0x6d, 0xeb, 0x42, + 0x6f, 0x70, 0x64, 0x34, 0x9e, 0x58, 0x83, 0x1c, 0x69, 0xf2, 0x38, 0xd7, 0x97, 0xdf, 0xd8, 0x96, + 0x7b, 0xbc, 0x13, 0x5e, 0x3b, 0xa9, 0x43, 0x35, 0xa4, 0xd1, 0x6d, 0xef, 0x14, 0xda, 0xbb, 0x53, + 0x89, 0x3e, 0x85, 0x56, 0xe2, 0xa5, 0x7c, 0x3b, 0xec, 0x2c, 0x6e, 0x90, 0x90, 0xcd, 0xd7, 0xf7, + 0x13, 0xa8, 0x67, 0xab, 0xd9, 0xf6, 0x5a, 0x6b, 0xd9, 0x6a, 0x36, 0x0a, 0x7a, 0xff, 0x2e, 0x83, + 0x56, 0x8c, 0x2a, 0xfa, 0x2d, 0x54, 0x77, 0x1a, 0xe9, 0x67, 0x0f, 0xce, 0xb4, 0x3c, 0xf1, 0x5b, + 0x76, 0x84, 0x01, 0x2f, 0xfb, 0xd2, 0xcb, 0x98, 
0x2a, 0x7b, 0xd3, 0x51, 0x12, 0xea, 0x42, 0xb9, + 0xe8, 0x93, 0x32, 0x0d, 0xd0, 0x2f, 0xe1, 0x51, 0x16, 0xf3, 0x81, 0xc6, 0x73, 0x1a, 0x12, 0x91, + 0xaa, 0xdc, 0xfc, 0x5d, 0x09, 0x9f, 0x2a, 0x94, 0x3b, 0xcc, 0x88, 0x9f, 0x12, 0x66, 0xd4, 0x84, + 0x5e, 0x49, 0xe8, 0x23, 0xd0, 0x52, 0x1a, 0x2d, 0x70, 0x46, 0xbf, 0x27, 0x6a, 0xef, 0x37, 0x39, + 0x30, 0xa1, 0xdf, 0x8b, 0x22, 0xcc, 0x56, 0xf3, 0x39, 0x49, 0xa5, 0xba, 0x21, 0xd4, 0x20, 0x21, + 0x41, 0xe0, 0xd6, 0x1b, 0x2c, 0x3a, 0x25, 0x53, 0xbb, 0xbf, 0x99, 0x6e, 0xfe, 0x28, 0x64, 0xae, + 0x64, 0x85, 0x52, 0x93, 0x4a, 0xa6, 0x94, 0xbd, 0xbe, 0x2a, 0x93, 0x18, 0x92, 0x36, 0x34, 0x2d, + 0xf7, 0xcc, 0x72, 0x6c, 0xcb, 0xd5, 0xf7, 0x50, 0x1d, 0xca, 0xa3, 0xb1, 0x5e, 0x42, 0x8f, 0xa0, + 0x35, 0x9e, 0xda, 0x2e, 0x1e, 0xd9, 0x5f, 0x5b, 0x03, 0x57, 0x2f, 0xf7, 0xfe, 0x0a, 0x5a, 0xb1, + 0x69, 0x78, 0x6e, 0x59, 0xea, 0x17, 0xef, 0x9d, 0xba, 0xa0, 0x2c, 0xf5, 0xf3, 0xe7, 0xee, 0x53, + 0x68, 0x05, 0x19, 0x2b, 0x08, 0x65, 0x49, 0x08, 0x32, 0x96, 0x13, 0xf8, 0x63, 0x17, 0x51, 0x55, + 0x4c, 0x7e, 0x44, 0x1f, 0x83, 0xb6, 0x5c, 0x85, 0x8c, 0xfa, 0x5e, 0xc6, 0x54, 0x1d, 0xb7, 0x40, + 0xef, 0x73, 0x68, 0xef, 0xae, 0x53, 0xb4, 0x0f, 0xed, 0x9b, 0x38, 0x63, 0x98, 0xce, 0xdf, 0xea, + 0x11, 0x8e, 0x8d, 0xe6, 0xbc, 0x47, 0x7a, 0xff, 0x28, 0x41, 0x43, 0xad, 0x52, 0xfe, 0x74, 0xaf, + 0x49, 0x9a, 0xd1, 0x38, 0x52, 0x03, 0x96, 0x8b, 0xef, 0xf9, 0x29, 0xbf, 0xeb, 0x87, 0xdf, 0x32, + 0x8b, 0xf1, 0x92, 0xfa, 0x69, 0x9c, 0x91, 0x74, 0x4d, 0x7d, 0x22, 0xb2, 0xd6, 0x9c, 0x2e, 0x8b, + 0x2f, 0x77, 0x50, 0xee, 0x2a, 0xdd, 0xe0, 0xed, 0x85, 0x56, 0xe5, 0x8d, 0xa5, 0x1b, 0x27, 0xbf, + 0xd2, 0x7d, 0x68, 0xb3, 0x5d, 0x46, 0x4d, 0x32, 0x58, 0xc1, 0xe8, 0xfd, 0xab, 0x06, 0x5a, 0xb1, + 0x9d, 0x79, 0x91, 0x48, 0x16, 0xa9, 0x2e, 0xe4, 0x47, 0x5e, 0x57, 0x2f, 0x62, 0x14, 0xa7, 0x24, + 0x09, 0xbd, 0x3b, 0x35, 0xfd, 0xc0, 0x21, 0x47, 0x20, 0x7c, 0xe5, 0x85, 0xb1, 0xef, 0x85, 0x7c, + 0xed, 0xca, 0x22, 0x36, 0x84, 0x3c, 0x4a, 0x44, 0xbf, 0x90, 0x65, 0xcc, 0x08, 0xd7, 
0xc9, 0x46, + 0x6c, 0x4a, 0x40, 0x2a, 0xa5, 0x5d, 0x96, 0xd0, 0xbc, 0x15, 0x05, 0x30, 0x49, 0x28, 0xfa, 0x09, + 0x80, 0xb2, 0xe4, 0x5a, 0xd9, 0x89, 0xca, 0x17, 0x57, 0x9f, 0x00, 0xf8, 0xe9, 0x5d, 0xc2, 0x62, + 0xec, 0x85, 0x0b, 0xd1, 0x89, 0xf7, 0x8c, 0x5b, 0xf1, 0x55, 0x87, 0x03, 0xc1, 0x35, 0xc3, 0x85, + 0xa3, 0xf9, 0xf9, 0x11, 0x1d, 0x80, 0x2e, 0xe3, 0x2b, 0x4f, 0xb7, 0xe4, 0x4e, 0xb4, 0xad, 0xe6, + 0x74, 0x05, 0x2e, 0x8d, 0xce, 0xc9, 0x1d, 0x7f, 0x61, 0x54, 0x32, 0x3b, 0x54, 0x90, 0x2f, 0x8c, + 0x54, 0x6c, 0xb9, 0x7f, 0x00, 0x8d, 0xa7, 0xb0, 0x10, 0x89, 0xb5, 0x44, 0x62, 0xbd, 0x87, 0x13, + 0xe3, 0x0b, 0x68, 0xc1, 0xf3, 0x6a, 0x52, 0x75, 0xe2, 0xbf, 0xc0, 0x54, 0x39, 0x85, 0x1b, 0x1e, + 0xaa, 0x2d, 0x42, 0x75, 0x64, 0x55, 0x39, 0xca, 0x03, 0x1d, 0x80, 0x9e, 0xd7, 0xb6, 0x20, 0x76, + 0x64, 0xfa, 0xaa, 0xc4, 0x3b, 0x4c, 0xf9, 0xb3, 0x11, 0xaf, 0x82, 0x04, 0x93, 0xc8, 0xf7, 0x12, + 0xa3, 0x2b, 0xae, 0xb1, 0x2b, 0xf1, 0x69, 0x90, 0x58, 0x1c, 0xed, 0x5d, 0x81, 0x56, 0x94, 0x8a, + 0xcf, 0xa3, 0x7d, 0x65, 0x5b, 0x78, 0xe0, 0x7c, 0x3b, 0x76, 0xaf, 0xf4, 0x3d, 0x0e, 0x98, 0xd6, + 0x04, 0x0f, 0x4e, 0x06, 0xf8, 0x45, 0xff, 0x95, 0x9c, 0xd8, 0x02, 0xf8, 0xb2, 0xaf, 0x97, 0x77, + 0x81, 0xfe, 0xcb, 0x2f, 0xf4, 0x4a, 0x6f, 0x0d, 0xcd, 0xfc, 0x13, 0x51, 0x17, 0x40, 0xf8, 0xe3, + 0xeb, 0xf8, 0x2b, 0x7d, 0x0f, 0x01, 0xd4, 0x2f, 0x87, 0x2f, 0xf1, 0x97, 0x5f, 0xe8, 0x25, 0xfe, + 0x60, 0x4d, 0xce, 0xcc, 0x17, 0x5c, 0x28, 0x73, 0xe2, 0xe4, 0xcc, 0xe4, 0x1e, 0xb8, 0x5c, 0xe1, + 0x5e, 0x73, 0x99, 0xc7, 0xad, 0xe6, 0xc0, 0xf1, 0xab, 0xdf, 0x88, 0xb8, 0xb5, 0x1c, 0x78, 0xf9, + 0xa2, 0x2f, 0xe2, 0xd6, 0x7b, 0x01, 0xb4, 0x76, 0x7e, 0x3b, 0xf0, 0x1e, 0x56, 0x15, 0x20, 0x61, + 0xbc, 0x50, 0xdd, 0x0d, 0x12, 0xb2, 0xc2, 0x78, 0xc1, 0x7b, 0x38, 0xdd, 0xbc, 0x91, 0x23, 0x22, + 0x17, 0x44, 0x23, 0xdd, 0xbc, 0x11, 0x13, 0xf4, 0x14, 0x9a, 0x2c, 0x57, 0xc9, 0xf9, 0x6a, 0x30, + 0xa9, 0x3a, 0xf9, 0xfa, 0x9f, 0xff, 0xf9, 0xa4, 0xf4, 0xa7, 0xe1, 0xce, 0x3f, 0x85, 0x90, 0x2e, + 0x3c, 0x16, 0xf3, 0xff, 
0x1a, 0xcf, 0xbd, 0x05, 0x89, 0xd8, 0x91, 0x97, 0xd0, 0xa3, 0x7b, 0xff, + 0x80, 0xfc, 0x7e, 0x9d, 0x24, 0x78, 0x2b, 0xce, 0xea, 0xe2, 0x7f, 0xc4, 0xf1, 0xff, 0x02, 0x00, + 0x00, 0xff, 0xff, 0x01, 0xa5, 0xf3, 0x7b, 0xaf, 0x0c, 0x00, 0x00, +} diff --git a/api/models/vpp/interfaces/interface.proto b/api/models/vpp/interfaces/interface.proto new file mode 100644 index 0000000000..f749f86f7c --- /dev/null +++ b/api/models/vpp/interfaces/interface.proto @@ -0,0 +1,158 @@ +syntax = "proto3"; + +package vpp.interfaces; + +option go_package = "github.com/ligato/vpp-agent/api/models/vpp/interfaces;vpp_interfaces"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option (gogoproto.messagename_all) = true; + +message Interface { + enum Type { + UNDEFINED_TYPE = 0; + SUB_INTERFACE = 1; + SOFTWARE_LOOPBACK = 2; + DPDK = 3; + MEMIF = 4; + TAP = 5; + AF_PACKET = 6; + VXLAN_TUNNEL = 7; + IPSEC_TUNNEL = 8; + VMXNET3_INTERFACE = 9; + }; + + string name = 1; /* Logical interface name unique across all configured interfaces (mandatory) */ + Type type = 2; + bool enabled = 3; + string phys_address = 4; + repeated string ip_addresses = 5; /* IP addresses in the format / */ + uint32 vrf = 6; + bool set_dhcp_client = 7; + uint32 mtu = 8; + + message Unnumbered { + string interface_with_ip = 1; + } + Unnumbered unnumbered = 9; + + message RxModeSettings { + // from vpp/build-root/install-vpp-native/vpp/include/vnet/interface.h + enum RxModeType { + UNKNOWN = 0; + POLLING = 1; + INTERRUPT = 2; + ADAPTIVE = 3; + DEFAULT = 4; + }; + RxModeType rx_mode = 1; + uint32 queue_id = 2; + uint32 queue_id_valid = 3; + } + RxModeSettings rx_mode_settings = 10; + + message RxPlacementSettings { + uint32 queue = 1; + uint32 worker = 2; + bool is_main = 3; + } + RxPlacementSettings rx_placement_settings = 11; + + oneof link { + SubInterface sub = 100; /* sub-interface configuration */ + MemifLink memif = 101; /* memif-specific configuration */ + AfpacketLink afpacket = 102; /* AF-packet-specific 
configuration */ + TapLink tap = 103; /* TAP-specific configuration */ + VxlanLink vxlan = 104; /* VXLAN-specific configuration */ + IPSecLink ipsec = 105; /* IPSec tunnel-specific configuration */ + VmxNet3Link vmx_net3 = 106; /* VmxNet3-specific configuration */ + }; +}; + +message SubInterface { + string parent_name = 1; /* Name of the parent (super) interface */ + uint32 sub_id = 2; /* SubInterface ID, used as VLAN */ +} + +message MemifLink { + enum MemifMode { + ETHERNET = 0; + IP = 1; + PUNT_INJECT = 2; + } + MemifMode mode = 1; + bool master = 2; + uint32 id = 3; /* 32bit integer used to authenticate and match opposite sides of the connection */ + string socket_filename = 4; /* filename of the socket used for connection establishment */ + string secret = 5; + uint32 ring_size = 6; /* the number of entries of RX/TX rings */ + uint32 buffer_size = 7; /* size of the buffer allocated for each ring entry */ + uint32 rx_queues = 8; /* number of rx queues (only valid for slave) */ + uint32 tx_queues = 9; /* number of tx queues (only valid for slave) */ +} + +message VxlanLink { + string src_address = 1; /* source VTEP address */ + string dst_address = 2; /* destination VTEP address */ + uint32 vni = 3; /* VXLAN Network Identifier */ + string multicast = 4; /* multicast interface */ +} + +message AfpacketLink { + string host_if_name = 1; /* name of the host interface to bind to (mandatory) */ +} + +message TapLink { + uint32 version = 1; /* 1 / unset = use the original TAP interface; 2 = use a fast virtio-based TAP */ + string host_if_name = 2; /* name of the TAP interface in the host OS; + if empty, it will be auto-generated (suitable for combination with TAP_TO_VPP + interface from Linux ifplugin, because then this name is only temporary anyway) + */ + string to_microservice = 3; /* if TAP connects VPP with microservice, fill this parameter with the target + microservice name - should match with the namespace reference of the associated + TAP_TO_VPP interface (it 
is still moved to the namespace by Linux-ifplugin but + VPP-ifplugin needs to be aware of this dependency) */ + uint32 rx_ring_size = 4; /* Rx ring buffer size; must be power of 2; default is 256; only for TAP v.2 */ + uint32 tx_ring_size = 5; /* Tx ring buffer size; must be power of 2; default is 256; only for TAP v.2 */ +} + +message IPSecLink { + bool esn = 2; /* Extended sequence number */ + bool anti_replay = 3; /* Anti replay option */ + string local_ip = 4; /* Local IP address */ + string remote_ip = 5; /* Remote IP address */ + uint32 local_spi = 6; /* Local security parameter index */ + uint32 remote_spi = 7; /* Remote security parameter index */ + + enum CryptoAlg { + NONE_CRYPTO = 0; + AES_CBC_128 = 1; + AES_CBC_192 = 2; + AES_CBC_256 = 3; + } + CryptoAlg crypto_alg = 8; /* Cryptographic algorithm for encryption */ + string local_crypto_key = 9; + string remote_crypto_key = 10; + + enum IntegAlg { + NONE_INTEG = 0; + MD5_96 = 1; + SHA1_96 = 2; + SHA_256_96 = 3; + SHA_256_128 = 4; + SHA_384_192 = 5; + SHA_512_256 = 6; + } + IntegAlg integ_alg = 11; /* Cryptographic algorithm for authentication */ + string local_integ_key = 12; + string remote_integ_key = 13; + bool enable_udp_encap = 14; /* Enable UDP encapsulation */ +} + +/* PCI address (unsigned 32bit int) is derived from vmxnet3 interface name. It is expected that the interface + name is in format "vmxnet3-///", where 'd' stands for domain (max ffff), 'b' is bus (max ff), + 's' is slot (max 1f) and 'f is function' (max 7). 
All values are base 16 */ +message VmxNet3Link { + bool enable_elog = 2; /* turn on elog */ + uint32 rxq_size = 3; /* receive queue size (default is 1024) */ + uint32 txq_size = 4; /* transmit queue size (default is 1024) */ +} \ No newline at end of file diff --git a/api/models/vpp/interfaces/keys.go b/api/models/vpp/interfaces/keys.go new file mode 100644 index 0000000000..f015252042 --- /dev/null +++ b/api/models/vpp/interfaces/keys.go @@ -0,0 +1,231 @@ +// Copyright (c) 2017 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vpp_interfaces + +import ( + "net" + "strings" + + "github.com/gogo/protobuf/jsonpb" + + "github.com/ligato/vpp-agent/pkg/models" +) + +// ModuleName is the module name used for models. +const ModuleName = "vpp" + +var ( + ModelInterface = models.Register(&Interface{}, models.Spec{ + Module: ModuleName, + Version: "v2", + Type: "interfaces", + }) +) + +// InterfaceKey returns the key used in NB DB to store the configuration of the +// given vpp interface. +func InterfaceKey(name string) string { + return models.Key(&Interface{ + Name: name, + }) +} + +/* Interface State */ +const ( + // StatePrefix is a key prefix used in NB DB to store interface states. + StatePrefix = "vpp/status/v2/interface/" +) + +/* Interface Error */ +const ( + // ErrorPrefix is a key prefix used in NB DB to store interface errors. 
+ ErrorPrefix = "vpp/status/v2/interface/error/" +) + +/* Interface Address (derived) */ +const ( + // AddressKeyPrefix is used as a common prefix for keys derived from + // interfaces to represent assigned IP addresses. + AddressKeyPrefix = "vpp/interface/address/" + + // addressKeyTemplate is a template for (derived) key representing IP address + // (incl. mask) assigned to a VPP interface. + addressKeyTemplate = AddressKeyPrefix + "{iface}/{addr}/{mask}" +) + +/* Unnumbered interface (derived) */ +const ( + // UnnumberedKeyPrefix is used as a common prefix for keys derived from + // interfaces to represent unnumbered interfaces. + UnnumberedKeyPrefix = "vpp/interface/unnumbered/" +) + +/* DHCP (client - derived, lease - notification) */ +const ( + // DHCPClientKeyPrefix is used as a common prefix for keys derived from + // interfaces to represent enabled DHCP clients. + DHCPClientKeyPrefix = "vpp/interface/dhcp-client/" + + // DHCPLeaseKeyPrefix is used as a common prefix for keys representing + // notifications with DHCP leases. + DHCPLeaseKeyPrefix = "vpp/interface/dhcp-lease/" +) + +const ( + // InvalidKeyPart is used in key for parts which are invalid + InvalidKeyPart = "" +) + +/* Interface Error */ + +// InterfaceErrorKey returns the key used in NB DB to store the interface errors. +func InterfaceErrorKey(iface string) string { + if iface == "" { + iface = InvalidKeyPart + } + return ErrorPrefix + iface +} + +/* Interface State */ + +// InterfaceStateKey returns the key used in NB DB to store the state data of the +// given vpp interface. +func InterfaceStateKey(iface string) string { + if iface == "" { + iface = InvalidKeyPart + } + return StatePrefix + iface +} + +/* Interface Address (derived) */ + +// InterfaceAddressKey returns key representing IP address assigned to VPP interface. 
+func InterfaceAddressKey(iface string, address string) string { + if iface == "" { + iface = InvalidKeyPart + } + + // parse address + ipAddr, addrNet, err := net.ParseCIDR(address) + if err != nil { + address = InvalidKeyPart + "/" + InvalidKeyPart + } else { + addrNet.IP = ipAddr + address = addrNet.String() + } + + key := strings.Replace(addressKeyTemplate, "{iface}", iface, 1) + key = strings.Replace(key, "{addr}/{mask}", address, 1) + + return key +} + +// ParseInterfaceAddressKey parses interface address from key derived +// from interface by InterfaceAddressKey(). +func ParseInterfaceAddressKey(key string) (iface string, ipAddr net.IP, ipAddrNet *net.IPNet, isAddrKey bool) { + if suffix := strings.TrimPrefix(key, AddressKeyPrefix); suffix != key { + parts := strings.Split(suffix, "/") + + // beware: interface name may contain forward slashes (e.g. ETHERNET_CSMACD) + if len(parts) < 3 { + return "", nil, nil, false + } + + // parse IP address + lastIdx := len(parts) - 1 + var err error + ipAddr, ipAddrNet, err = net.ParseCIDR(parts[lastIdx-1] + "/" + parts[lastIdx]) + if err != nil { + return "", nil, nil, false + } + + // parse interface name + iface = strings.Join(parts[:lastIdx-1], "/") + if iface == "" { + return "", nil, nil, false + } + return iface, ipAddr, ipAddrNet, true + } + return +} + +/* Unnumbered interface (derived) */ + +// UnnumberedKey returns key representing unnumbered interface. +func UnnumberedKey(iface string) string { + if iface == "" { + iface = InvalidKeyPart + } + return UnnumberedKeyPrefix + iface +} + +// ParseNameFromUnnumberedKey returns suffix of the key. +func ParseNameFromUnnumberedKey(key string) (iface string, isUnnumberedKey bool) { + suffix := strings.TrimPrefix(key, UnnumberedKeyPrefix) + if suffix != key && suffix != "" { + return suffix, true + } + return +} + +/* DHCP (client - derived, lease - notification) */ + +// DHCPClientKey returns a (derived) key used to represent enabled DHCP lease. 
+func DHCPClientKey(iface string) string { + if iface == "" { + iface = InvalidKeyPart + } + return DHCPClientKeyPrefix + iface +} + +// ParseNameFromDHCPClientKey returns suffix of the key. +func ParseNameFromDHCPClientKey(key string) (iface string, isDHCPClientKey bool) { + if suffix := strings.TrimPrefix(key, DHCPClientKeyPrefix); suffix != key && suffix != "" { + return suffix, true + } + return +} + +// DHCPLeaseKey returns a key used to represent DHCP lease for the given interface. +func DHCPLeaseKey(iface string) string { + if iface == "" { + iface = InvalidKeyPart + } + return DHCPLeaseKeyPrefix + iface +} + +// ParseNameFromDHCPLeaseKey returns suffix of the key. +func ParseNameFromDHCPLeaseKey(key string) (iface string, isDHCPLeaseKey bool) { + if suffix := strings.TrimPrefix(key, DHCPLeaseKeyPrefix); suffix != key && suffix != "" { + return suffix, true + } + return +} + +// MarshalJSON ensures that field of type 'oneOf' is correctly marshaled +// by using gogo lib marshaller +func (m *Interface) MarshalJSON() ([]byte, error) { + marshaller := &jsonpb.Marshaler{} + str, err := marshaller.MarshalToString(m) + if err != nil { + return nil, err + } + return []byte(str), nil +} + +// UnmarshalJSON ensures that field of type 'oneOf' is correctly unmarshaled +func (m *Interface) UnmarshalJSON(data []byte) error { + return jsonpb.UnmarshalString(string(data), m) +} diff --git a/api/models/vpp/interfaces/keys_test.go b/api/models/vpp/interfaces/keys_test.go new file mode 100644 index 0000000000..520f527e06 --- /dev/null +++ b/api/models/vpp/interfaces/keys_test.go @@ -0,0 +1,475 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vpp_interfaces + +import ( + "testing" +) + +/*func TestInterfaceKey(t *testing.T) { + tests := []struct { + name string + iface string + expectedKey string + }{ + { + name: "valid interface name", + iface: "memif0", + expectedKey: "vpp/config/v2/interface/memif0", + }, + { + name: "invalid interface name", + iface: "", + expectedKey: "vpp/config/v2/interface/", + }, + { + name: "Gbe interface", + iface: "GigabitEthernet0/8/0", + expectedKey: "vpp/config/v2/interface/GigabitEthernet0/8/0", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := InterfaceKey(test.iface) + if key != test.expectedKey { + t.Errorf("failed for: iface=%s\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.iface, test.expectedKey, key) + } + }) + } +} + +func TestParseNameFromKey(t *testing.T) { + tests := []struct { + name string + key string + expectedIface string + expectedIsIfaceKey bool + }{ + { + name: "valid interface name", + key: "vpp/config/v2/interface/memif0", + expectedIface: "memif0", + expectedIsIfaceKey: true, + }, + { + name: "invalid interface name", + key: "vpp/config/v2/interface/", + expectedIface: "", + expectedIsIfaceKey: true, + }, + { + name: "Gbe interface", + key: "vpp/config/v2/interface/GigabitEthernet0/8/0", + expectedIface: "GigabitEthernet0/8/0", + expectedIsIfaceKey: true, + }, + { + name: "not an interface key", + key: "vpp/config/v2/bd/bd1", + expectedIface: "", + expectedIsIfaceKey: false, + }, + { + name: "not an interface key (empty interface)", + key: "vpp/config/v2/interface/", + 
expectedIface: "", + expectedIsIfaceKey: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + iface, isInterfaceKey := models.Model(&Interface{}).ParseKey(test.key) + if isInterfaceKey != test.expectedIsIfaceKey { + t.Errorf("expected isInterfaceKey: %v\tgot: %v", test.expectedIsIfaceKey, isInterfaceKey) + } + if iface != test.expectedIface { + t.Errorf("expected iface: %s\tgot: %s", test.expectedIface, iface) + } + }) + } +}*/ + +func TestInterfaceErrorKey(t *testing.T) { + tests := []struct { + name string + iface string + expectedKey string + }{ + { + name: "valid interface name", + iface: "memif0", + expectedKey: "vpp/status/v2/interface/error/memif0", + }, + { + name: "invalid interface name", + iface: "", + expectedKey: "vpp/status/v2/interface/error/", + }, + { + name: "Gbe interface", + iface: "GigabitEthernet0/8/0", + expectedKey: "vpp/status/v2/interface/error/GigabitEthernet0/8/0", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := InterfaceErrorKey(test.iface) + if key != test.expectedKey { + t.Errorf("failed for: iface=%s\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.iface, test.expectedKey, key) + } + }) + } +} + +func TestInterfaceStateKey(t *testing.T) { + tests := []struct { + name string + iface string + expectedKey string + }{ + { + name: "valid interface name", + iface: "memif0", + expectedKey: "vpp/status/v2/interface/memif0", + }, + { + name: "invalid interface name", + iface: "", + expectedKey: "vpp/status/v2/interface/", + }, + { + name: "Gbe interface", + iface: "GigabitEthernet0/8/0", + expectedKey: "vpp/status/v2/interface/GigabitEthernet0/8/0", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := InterfaceStateKey(test.iface) + if key != test.expectedKey { + t.Errorf("failed for: iface=%s\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.iface, test.expectedKey, key) + } + }) + } +} + +func TestInterfaceAddressKey(t 
*testing.T) { + tests := []struct { + name string + iface string + address string + expectedKey string + }{ + { + name: "IPv4 address", + iface: "memif0", + address: "192.168.1.12/24", + expectedKey: "vpp/interface/address/memif0/192.168.1.12/24", + }, + { + name: "IPv6 address", + iface: "memif0", + address: "2001:db8:0000:0000:0000:0000:0000:0000/32", + expectedKey: "vpp/interface/address/memif0/2001:db8::/32", + }, + { + name: "invalid interface", + iface: "", + address: "10.10.10.10/32", + expectedKey: "vpp/interface/address//10.10.10.10/32", + }, + { + name: "invalid address", + iface: "tap0", + address: "invalid-addr", + expectedKey: "vpp/interface/address/tap0//", + }, + { + name: "missing mask", + iface: "tap1", + address: "10.10.10.10", + expectedKey: "vpp/interface/address/tap1//", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := InterfaceAddressKey(test.iface, test.address) + if key != test.expectedKey { + t.Errorf("failed for: iface=%s address=%s\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.iface, test.address, test.expectedKey, key) + } + }) + } +} + +func TestParseInterfaceAddressKey(t *testing.T) { + tests := []struct { + name string + key string + expectedIface string + expectedIfaceAddr string + expectedIfaceAddrNet string + expectedIsAddrKey bool + }{ + { + name: "IPv4 address", + key: "vpp/interface/address/memif0/192.168.1.12/24", + expectedIface: "memif0", + expectedIfaceAddr: "192.168.1.12", + expectedIfaceAddrNet: "192.168.1.0/24", + expectedIsAddrKey: true, + }, + { + name: "IPv6 address", + key: "vpp/interface/address/tap1/2001:db8:85a3::8a2e:370:7334/48", + expectedIface: "tap1", + expectedIfaceAddr: "2001:db8:85a3::8a2e:370:7334", + expectedIfaceAddrNet: "2001:db8:85a3::/48", + expectedIsAddrKey: true, + }, + { + name: "invalid interface", + key: "vpp/interface/address//10.10.10.10/30", + expectedIface: "", + expectedIfaceAddr: "10.10.10.10", + expectedIfaceAddrNet: "10.10.10.8/30", + 
expectedIsAddrKey: true, + }, + { + name: "gbe interface", + key: "vpp/interface/address/GigabitEthernet0/8/0/192.168.5.5/16", + expectedIface: "GigabitEthernet0/8/0", + expectedIfaceAddr: "192.168.5.5", + expectedIfaceAddrNet: "192.168.0.0/16", + expectedIsAddrKey: true, + }, + { + name: "not valid key (missing interface)", + key: "vpp/interface/address//192.168.5.5/16", + expectedIface: "", + expectedIfaceAddr: "", + expectedIfaceAddrNet: "", + expectedIsAddrKey: false, + }, + { + name: "not valid key (missing mask)", + key: "vpp/interface/address/tap3/192.168.5.5", + expectedIface: "", + expectedIfaceAddr: "", + expectedIfaceAddrNet: "", + expectedIsAddrKey: false, + }, + { + name: "not valid key (missing address and mask)", + key: "vpp/interface/address/tap3", + expectedIface: "", + expectedIfaceAddr: "", + expectedIfaceAddrNet: "", + expectedIsAddrKey: false, + }, + { + name: "not interface address key", + key: "vpp/config/v2/interface/GigabitEthernet0/8/0", + expectedIface: "", + expectedIfaceAddr: "", + expectedIfaceAddrNet: "", + expectedIsAddrKey: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + iface, ipAddr, ipAddrNet, isAddrKey := ParseInterfaceAddressKey(test.key) + var ipAddrStr, ipAddrNetStr string + if ipAddr != nil { + ipAddrStr = ipAddr.String() + } + if ipAddrNet != nil { + ipAddrNetStr = ipAddrNet.String() + } + if isAddrKey != test.expectedIsAddrKey { + t.Errorf("expected isAddrKey: %v\tgot: %v", test.expectedIsAddrKey, isAddrKey) + } + if iface != test.expectedIface { + t.Errorf("expected iface: %s\tgot: %s", test.expectedIface, iface) + } + if ipAddrStr != test.expectedIfaceAddr { + t.Errorf("expected ipAddr: %s\tgot: %s", test.expectedIface, ipAddrStr) + } + if ipAddrNetStr != test.expectedIfaceAddrNet { + t.Errorf("expected ipAddrNet: %s\tgot: %s", test.expectedIfaceAddrNet, ipAddrNetStr) + } + }) + } +} + +func TestUnnumberedKey(t *testing.T) { + tests := []struct { + name string + iface string + 
expectedKey string + }{ + { + name: "valid interface name", + iface: "memif0", + expectedKey: "vpp/interface/unnumbered/memif0", + }, + { + name: "invalid interface name", + iface: "", + expectedKey: "vpp/interface/unnumbered/", + }, + { + name: "Gbe interface", + iface: "GigabitEthernet0/8/0", + expectedKey: "vpp/interface/unnumbered/GigabitEthernet0/8/0", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := UnnumberedKey(test.iface) + if key != test.expectedKey { + t.Errorf("failed for: iface=%s\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.iface, test.expectedKey, key) + } + }) + } +} + +func TestDHCPClientKey(t *testing.T) { + tests := []struct { + name string + iface string + expectedKey string + }{ + { + name: "valid interface name", + iface: "memif0", + expectedKey: "vpp/interface/dhcp-client/memif0", + }, + { + name: "invalid interface name", + iface: "", + expectedKey: "vpp/interface/dhcp-client/", + }, + { + name: "Gbe interface", + iface: "GigabitEthernet0/8/0", + expectedKey: "vpp/interface/dhcp-client/GigabitEthernet0/8/0", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := DHCPClientKey(test.iface) + if key != test.expectedKey { + t.Errorf("failed for: iface=%s\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.iface, test.expectedKey, key) + } + }) + } +} + +func TestParseNameFromDHCPClientKey(t *testing.T) { + tests := []struct { + name string + key string + expectedIface string + expectedIsDHCPClientKey bool + }{ + { + name: "valid interface name", + key: "vpp/interface/dhcp-client/memif0", + expectedIface: "memif0", + expectedIsDHCPClientKey: true, + }, + { + name: "invalid interface name", + key: "vpp/interface/dhcp-client/", + expectedIface: "", + expectedIsDHCPClientKey: true, + }, + { + name: "Gbe interface", + key: "vpp/interface/dhcp-client/GigabitEthernet0/8/0", + expectedIface: "GigabitEthernet0/8/0", + expectedIsDHCPClientKey: true, + }, + { + name: "not DHCP 
client key", + key: "vpp/config/v2/bd/bd1", + expectedIface: "", + expectedIsDHCPClientKey: false, + }, + { + name: "not DHCP client key (empty interface)", + key: "vpp/interface/dhcp-client/", + expectedIface: "", + expectedIsDHCPClientKey: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + iface, isDHCPClientKey := ParseNameFromDHCPClientKey(test.key) + if isDHCPClientKey != test.expectedIsDHCPClientKey { + t.Errorf("expected isInterfaceKey: %v\tgot: %v", test.expectedIsDHCPClientKey, isDHCPClientKey) + } + if iface != test.expectedIface { + t.Errorf("expected iface: %s\tgot: %s", test.expectedIface, iface) + } + }) + } +} + +func TestDHCPLeaseKey(t *testing.T) { + tests := []struct { + name string + iface string + expectedKey string + }{ + { + name: "valid interface name", + iface: "memif0", + expectedKey: "vpp/interface/dhcp-lease/memif0", + }, + { + name: "invalid interface name", + iface: "", + expectedKey: "vpp/interface/dhcp-lease/", + }, + { + name: "Gbe interface", + iface: "GigabitEthernet0/8/0", + expectedKey: "vpp/interface/dhcp-lease/GigabitEthernet0/8/0", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := DHCPLeaseKey(test.iface) + if key != test.expectedKey { + t.Errorf("failed for: iface=%s\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.iface, test.expectedKey, key) + } + }) + } +} diff --git a/api/models/vpp/interfaces/state.pb.go b/api/models/vpp/interfaces/state.pb.go new file mode 100644 index 0000000000..cd5ed4d329 --- /dev/null +++ b/api/models/vpp/interfaces/state.pb.go @@ -0,0 +1,474 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: models/vpp/interfaces/state.proto + +package vpp_interfaces // import "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type InterfaceState_Status int32 + +const ( + InterfaceState_UNKNOWN_STATUS InterfaceState_Status = 0 + InterfaceState_UP InterfaceState_Status = 1 + InterfaceState_DOWN InterfaceState_Status = 2 + InterfaceState_DELETED InterfaceState_Status = 3 +) + +var InterfaceState_Status_name = map[int32]string{ + 0: "UNKNOWN_STATUS", + 1: "UP", + 2: "DOWN", + 3: "DELETED", +} +var InterfaceState_Status_value = map[string]int32{ + "UNKNOWN_STATUS": 0, + "UP": 1, + "DOWN": 2, + "DELETED": 3, +} + +func (x InterfaceState_Status) String() string { + return proto.EnumName(InterfaceState_Status_name, int32(x)) +} +func (InterfaceState_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_state_bb2097df664231a9, []int{0, 0} +} + +type InterfaceState_Duplex int32 + +const ( + InterfaceState_UNKNOWN_DUPLEX InterfaceState_Duplex = 0 + InterfaceState_HALF InterfaceState_Duplex = 1 + InterfaceState_FULL InterfaceState_Duplex = 2 +) + +var InterfaceState_Duplex_name = map[int32]string{ + 0: "UNKNOWN_DUPLEX", + 1: "HALF", + 2: "FULL", +} +var InterfaceState_Duplex_value = map[string]int32{ + "UNKNOWN_DUPLEX": 0, + "HALF": 1, + "FULL": 2, +} + +func (x InterfaceState_Duplex) String() string { + return proto.EnumName(InterfaceState_Duplex_name, 
int32(x)) +} +func (InterfaceState_Duplex) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_state_bb2097df664231a9, []int{0, 1} +} + +type InterfaceNotification_NotifType int32 + +const ( + InterfaceNotification_UNKNOWN InterfaceNotification_NotifType = 0 + InterfaceNotification_UPDOWN InterfaceNotification_NotifType = 1 + InterfaceNotification_COUNTERS InterfaceNotification_NotifType = 2 +) + +var InterfaceNotification_NotifType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "UPDOWN", + 2: "COUNTERS", +} +var InterfaceNotification_NotifType_value = map[string]int32{ + "UNKNOWN": 0, + "UPDOWN": 1, + "COUNTERS": 2, +} + +func (x InterfaceNotification_NotifType) String() string { + return proto.EnumName(InterfaceNotification_NotifType_name, int32(x)) +} +func (InterfaceNotification_NotifType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_state_bb2097df664231a9, []int{1, 0} +} + +type InterfaceState struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + InternalName string `protobuf:"bytes,2,opt,name=internal_name,json=internalName,proto3" json:"internal_name,omitempty"` + Type Interface_Type `protobuf:"varint,3,opt,name=type,proto3,enum=vpp.interfaces.Interface_Type" json:"type,omitempty"` + IfIndex uint32 `protobuf:"varint,4,opt,name=if_index,json=ifIndex,proto3" json:"if_index,omitempty"` + AdminStatus InterfaceState_Status `protobuf:"varint,5,opt,name=admin_status,json=adminStatus,proto3,enum=vpp.interfaces.InterfaceState_Status" json:"admin_status,omitempty"` + OperStatus InterfaceState_Status `protobuf:"varint,6,opt,name=oper_status,json=operStatus,proto3,enum=vpp.interfaces.InterfaceState_Status" json:"oper_status,omitempty"` + LastChange int64 `protobuf:"varint,7,opt,name=last_change,json=lastChange,proto3" json:"last_change,omitempty"` + PhysAddress string `protobuf:"bytes,8,opt,name=phys_address,json=physAddress,proto3" json:"phys_address,omitempty"` + Speed uint64 
`protobuf:"varint,9,opt,name=speed,proto3" json:"speed,omitempty"` + Mtu uint32 `protobuf:"varint,10,opt,name=mtu,proto3" json:"mtu,omitempty"` + Duplex InterfaceState_Duplex `protobuf:"varint,11,opt,name=duplex,proto3,enum=vpp.interfaces.InterfaceState_Duplex" json:"duplex,omitempty"` + Statistics *InterfaceState_Statistics `protobuf:"bytes,100,opt,name=statistics,proto3" json:"statistics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InterfaceState) Reset() { *m = InterfaceState{} } +func (m *InterfaceState) String() string { return proto.CompactTextString(m) } +func (*InterfaceState) ProtoMessage() {} +func (*InterfaceState) Descriptor() ([]byte, []int) { + return fileDescriptor_state_bb2097df664231a9, []int{0} +} +func (m *InterfaceState) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InterfaceState.Unmarshal(m, b) +} +func (m *InterfaceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InterfaceState.Marshal(b, m, deterministic) +} +func (dst *InterfaceState) XXX_Merge(src proto.Message) { + xxx_messageInfo_InterfaceState.Merge(dst, src) +} +func (m *InterfaceState) XXX_Size() int { + return xxx_messageInfo_InterfaceState.Size(m) +} +func (m *InterfaceState) XXX_DiscardUnknown() { + xxx_messageInfo_InterfaceState.DiscardUnknown(m) +} + +var xxx_messageInfo_InterfaceState proto.InternalMessageInfo + +func (m *InterfaceState) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *InterfaceState) GetInternalName() string { + if m != nil { + return m.InternalName + } + return "" +} + +func (m *InterfaceState) GetType() Interface_Type { + if m != nil { + return m.Type + } + return Interface_UNDEFINED_TYPE +} + +func (m *InterfaceState) GetIfIndex() uint32 { + if m != nil { + return m.IfIndex + } + return 0 +} + +func (m *InterfaceState) GetAdminStatus() InterfaceState_Status { + if m != nil { + 
return m.AdminStatus + } + return InterfaceState_UNKNOWN_STATUS +} + +func (m *InterfaceState) GetOperStatus() InterfaceState_Status { + if m != nil { + return m.OperStatus + } + return InterfaceState_UNKNOWN_STATUS +} + +func (m *InterfaceState) GetLastChange() int64 { + if m != nil { + return m.LastChange + } + return 0 +} + +func (m *InterfaceState) GetPhysAddress() string { + if m != nil { + return m.PhysAddress + } + return "" +} + +func (m *InterfaceState) GetSpeed() uint64 { + if m != nil { + return m.Speed + } + return 0 +} + +func (m *InterfaceState) GetMtu() uint32 { + if m != nil { + return m.Mtu + } + return 0 +} + +func (m *InterfaceState) GetDuplex() InterfaceState_Duplex { + if m != nil { + return m.Duplex + } + return InterfaceState_UNKNOWN_DUPLEX +} + +func (m *InterfaceState) GetStatistics() *InterfaceState_Statistics { + if m != nil { + return m.Statistics + } + return nil +} + +func (*InterfaceState) XXX_MessageName() string { + return "vpp.interfaces.InterfaceState" +} + +type InterfaceState_Statistics struct { + InPackets uint64 `protobuf:"varint,1,opt,name=in_packets,json=inPackets,proto3" json:"in_packets,omitempty"` + InBytes uint64 `protobuf:"varint,2,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` + OutPackets uint64 `protobuf:"varint,3,opt,name=out_packets,json=outPackets,proto3" json:"out_packets,omitempty"` + OutBytes uint64 `protobuf:"varint,4,opt,name=out_bytes,json=outBytes,proto3" json:"out_bytes,omitempty"` + DropPackets uint64 `protobuf:"varint,5,opt,name=drop_packets,json=dropPackets,proto3" json:"drop_packets,omitempty"` + PuntPackets uint64 `protobuf:"varint,6,opt,name=punt_packets,json=puntPackets,proto3" json:"punt_packets,omitempty"` + Ipv4Packets uint64 `protobuf:"varint,7,opt,name=ipv4_packets,json=ipv4Packets,proto3" json:"ipv4_packets,omitempty"` + Ipv6Packets uint64 `protobuf:"varint,8,opt,name=ipv6_packets,json=ipv6Packets,proto3" json:"ipv6_packets,omitempty"` + InNobufPackets uint64 
`protobuf:"varint,9,opt,name=in_nobuf_packets,json=inNobufPackets,proto3" json:"in_nobuf_packets,omitempty"` + InMissPackets uint64 `protobuf:"varint,10,opt,name=in_miss_packets,json=inMissPackets,proto3" json:"in_miss_packets,omitempty"` + InErrorPackets uint64 `protobuf:"varint,11,opt,name=in_error_packets,json=inErrorPackets,proto3" json:"in_error_packets,omitempty"` + OutErrorPackets uint64 `protobuf:"varint,12,opt,name=out_error_packets,json=outErrorPackets,proto3" json:"out_error_packets,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InterfaceState_Statistics) Reset() { *m = InterfaceState_Statistics{} } +func (m *InterfaceState_Statistics) String() string { return proto.CompactTextString(m) } +func (*InterfaceState_Statistics) ProtoMessage() {} +func (*InterfaceState_Statistics) Descriptor() ([]byte, []int) { + return fileDescriptor_state_bb2097df664231a9, []int{0, 0} +} +func (m *InterfaceState_Statistics) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InterfaceState_Statistics.Unmarshal(m, b) +} +func (m *InterfaceState_Statistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InterfaceState_Statistics.Marshal(b, m, deterministic) +} +func (dst *InterfaceState_Statistics) XXX_Merge(src proto.Message) { + xxx_messageInfo_InterfaceState_Statistics.Merge(dst, src) +} +func (m *InterfaceState_Statistics) XXX_Size() int { + return xxx_messageInfo_InterfaceState_Statistics.Size(m) +} +func (m *InterfaceState_Statistics) XXX_DiscardUnknown() { + xxx_messageInfo_InterfaceState_Statistics.DiscardUnknown(m) +} + +var xxx_messageInfo_InterfaceState_Statistics proto.InternalMessageInfo + +func (m *InterfaceState_Statistics) GetInPackets() uint64 { + if m != nil { + return m.InPackets + } + return 0 +} + +func (m *InterfaceState_Statistics) GetInBytes() uint64 { + if m != nil { + return m.InBytes + } + return 0 +} + +func (m 
*InterfaceState_Statistics) GetOutPackets() uint64 { + if m != nil { + return m.OutPackets + } + return 0 +} + +func (m *InterfaceState_Statistics) GetOutBytes() uint64 { + if m != nil { + return m.OutBytes + } + return 0 +} + +func (m *InterfaceState_Statistics) GetDropPackets() uint64 { + if m != nil { + return m.DropPackets + } + return 0 +} + +func (m *InterfaceState_Statistics) GetPuntPackets() uint64 { + if m != nil { + return m.PuntPackets + } + return 0 +} + +func (m *InterfaceState_Statistics) GetIpv4Packets() uint64 { + if m != nil { + return m.Ipv4Packets + } + return 0 +} + +func (m *InterfaceState_Statistics) GetIpv6Packets() uint64 { + if m != nil { + return m.Ipv6Packets + } + return 0 +} + +func (m *InterfaceState_Statistics) GetInNobufPackets() uint64 { + if m != nil { + return m.InNobufPackets + } + return 0 +} + +func (m *InterfaceState_Statistics) GetInMissPackets() uint64 { + if m != nil { + return m.InMissPackets + } + return 0 +} + +func (m *InterfaceState_Statistics) GetInErrorPackets() uint64 { + if m != nil { + return m.InErrorPackets + } + return 0 +} + +func (m *InterfaceState_Statistics) GetOutErrorPackets() uint64 { + if m != nil { + return m.OutErrorPackets + } + return 0 +} + +func (*InterfaceState_Statistics) XXX_MessageName() string { + return "vpp.interfaces.InterfaceState.Statistics" +} + +type InterfaceNotification struct { + Type InterfaceNotification_NotifType `protobuf:"varint,1,opt,name=type,proto3,enum=vpp.interfaces.InterfaceNotification_NotifType" json:"type,omitempty"` + State *InterfaceState `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InterfaceNotification) Reset() { *m = InterfaceNotification{} } +func (m *InterfaceNotification) String() string { return proto.CompactTextString(m) } +func (*InterfaceNotification) ProtoMessage() {} +func (*InterfaceNotification) 
Descriptor() ([]byte, []int) { + return fileDescriptor_state_bb2097df664231a9, []int{1} +} +func (m *InterfaceNotification) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InterfaceNotification.Unmarshal(m, b) +} +func (m *InterfaceNotification) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InterfaceNotification.Marshal(b, m, deterministic) +} +func (dst *InterfaceNotification) XXX_Merge(src proto.Message) { + xxx_messageInfo_InterfaceNotification.Merge(dst, src) +} +func (m *InterfaceNotification) XXX_Size() int { + return xxx_messageInfo_InterfaceNotification.Size(m) +} +func (m *InterfaceNotification) XXX_DiscardUnknown() { + xxx_messageInfo_InterfaceNotification.DiscardUnknown(m) +} + +var xxx_messageInfo_InterfaceNotification proto.InternalMessageInfo + +func (m *InterfaceNotification) GetType() InterfaceNotification_NotifType { + if m != nil { + return m.Type + } + return InterfaceNotification_UNKNOWN +} + +func (m *InterfaceNotification) GetState() *InterfaceState { + if m != nil { + return m.State + } + return nil +} + +func (*InterfaceNotification) XXX_MessageName() string { + return "vpp.interfaces.InterfaceNotification" +} +func init() { + proto.RegisterType((*InterfaceState)(nil), "vpp.interfaces.InterfaceState") + proto.RegisterType((*InterfaceState_Statistics)(nil), "vpp.interfaces.InterfaceState.Statistics") + proto.RegisterType((*InterfaceNotification)(nil), "vpp.interfaces.InterfaceNotification") + proto.RegisterEnum("vpp.interfaces.InterfaceState_Status", InterfaceState_Status_name, InterfaceState_Status_value) + proto.RegisterEnum("vpp.interfaces.InterfaceState_Duplex", InterfaceState_Duplex_name, InterfaceState_Duplex_value) + proto.RegisterEnum("vpp.interfaces.InterfaceNotification_NotifType", InterfaceNotification_NotifType_name, InterfaceNotification_NotifType_value) +} + +func init() { + proto.RegisterFile("models/vpp/interfaces/state.proto", fileDescriptor_state_bb2097df664231a9) +} + +var 
fileDescriptor_state_bb2097df664231a9 = []byte{ + // 749 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0xe1, 0x6e, 0xe3, 0x44, + 0x10, 0xc7, 0xeb, 0xc4, 0x75, 0x92, 0x71, 0x9a, 0x33, 0x2b, 0x90, 0x4c, 0x11, 0x47, 0x1a, 0x74, + 0x28, 0x20, 0x9d, 0x8d, 0xca, 0xe9, 0xbe, 0x9c, 0xf8, 0xd0, 0x6b, 0x52, 0x5d, 0x21, 0xb8, 0x95, + 0x93, 0xe8, 0x10, 0x5f, 0x2c, 0x37, 0xde, 0xa4, 0x2b, 0x92, 0xdd, 0x95, 0x77, 0x5d, 0x5d, 0xdf, + 0x8c, 0x27, 0x40, 0x88, 0xd7, 0xe0, 0x45, 0xd0, 0x8e, 0x63, 0x27, 0x91, 0xee, 0xd0, 0xf1, 0x6d, + 0xe7, 0x3f, 0xbf, 0xff, 0x6c, 0x76, 0x3c, 0x19, 0x38, 0xdb, 0x88, 0x8c, 0xae, 0x55, 0xf8, 0x20, + 0x65, 0xc8, 0xb8, 0xa6, 0xf9, 0x32, 0x5d, 0x50, 0x15, 0x2a, 0x9d, 0x6a, 0x1a, 0xc8, 0x5c, 0x68, + 0x41, 0x7a, 0x0f, 0x52, 0x06, 0xbb, 0xdc, 0xe9, 0xf3, 0x15, 0xd3, 0xf7, 0xc5, 0x5d, 0xb0, 0x10, + 0x9b, 0x70, 0x25, 0x56, 0x22, 0x44, 0xec, 0xae, 0x58, 0x62, 0x84, 0x01, 0x9e, 0x4a, 0xfb, 0xe9, + 0xb3, 0xf7, 0xdf, 0x50, 0x1f, 0x4b, 0x6c, 0xf0, 0x77, 0x1b, 0x7a, 0xd7, 0x95, 0x36, 0x35, 0xd7, + 0x13, 0x02, 0x36, 0x4f, 0x37, 0xd4, 0xb7, 0xfa, 0xd6, 0xb0, 0x13, 0xe3, 0x99, 0x7c, 0x0d, 0x27, + 0xe8, 0xe4, 0xe9, 0x3a, 0xc1, 0x64, 0x03, 0x93, 0xdd, 0x4a, 0x8c, 0x0c, 0x74, 0x0e, 0xb6, 0x7e, + 0x94, 0xd4, 0x6f, 0xf6, 0xad, 0x61, 0xef, 0xfc, 0x69, 0x70, 0xf8, 0x80, 0xa0, 0xbe, 0x26, 0x98, + 0x3d, 0x4a, 0x1a, 0x23, 0x4b, 0x3e, 0x87, 0x36, 0x5b, 0x26, 0x8c, 0x67, 0xf4, 0x9d, 0x6f, 0xf7, + 0xad, 0xe1, 0x49, 0xdc, 0x62, 0xcb, 0x6b, 0x13, 0x92, 0x37, 0xd0, 0x4d, 0xb3, 0x0d, 0xe3, 0x89, + 0xe9, 0x4a, 0xa1, 0xfc, 0x63, 0x2c, 0xfb, 0xec, 0x83, 0x65, 0xf1, 0xd7, 0x07, 0x53, 0x84, 0x63, + 0x17, 0xad, 0x65, 0x40, 0xae, 0xc0, 0x15, 0x92, 0xe6, 0x55, 0x21, 0xe7, 0xff, 0x14, 0x02, 0xe3, + 0xdc, 0xd6, 0xf9, 0x0a, 0xdc, 0x75, 0xaa, 0x74, 0xb2, 0xb8, 0x4f, 0xf9, 0x8a, 0xfa, 0xad, 0xbe, + 0x35, 0x6c, 0xc6, 0x60, 0xa4, 0x4b, 0x54, 0xc8, 0x19, 0x74, 0xe5, 0xfd, 0xa3, 0x4a, 0xd2, 0x2c, + 0xcb, 0xa9, 0x52, 0x7e, 0x1b, 0xbb, 
0xe4, 0x1a, 0xed, 0xa2, 0x94, 0xc8, 0xa7, 0x70, 0xac, 0x24, + 0xa5, 0x99, 0xdf, 0xe9, 0x5b, 0x43, 0x3b, 0x2e, 0x03, 0xe2, 0x41, 0x73, 0xa3, 0x0b, 0x1f, 0xb0, + 0x03, 0xe6, 0x48, 0x7e, 0x04, 0x27, 0x2b, 0xe4, 0x9a, 0xbe, 0xf3, 0xdd, 0x8f, 0xfa, 0xb9, 0x23, + 0x84, 0xe3, 0xad, 0x89, 0x5c, 0x03, 0x98, 0xd7, 0x32, 0xa5, 0xd9, 0x42, 0xf9, 0x59, 0xdf, 0x1a, + 0xba, 0xe7, 0xdf, 0x7e, 0xc4, 0x8b, 0x4b, 0x43, 0xbc, 0x67, 0x3e, 0xfd, 0xa3, 0x09, 0xb0, 0x4b, + 0x91, 0x2f, 0x01, 0x18, 0x4f, 0x64, 0xba, 0xf8, 0x9d, 0x6a, 0x85, 0x43, 0x62, 0xc7, 0x1d, 0xc6, + 0x6f, 0x4b, 0x01, 0x3f, 0x28, 0x4f, 0xee, 0x1e, 0x35, 0x55, 0x38, 0x24, 0x76, 0xdc, 0x62, 0xfc, + 0xb5, 0x09, 0x4d, 0xfb, 0x44, 0xa1, 0x6b, 0x6b, 0x13, 0xb3, 0x20, 0x0a, 0x5d, 0x79, 0xbf, 0x80, + 0x8e, 0x01, 0x4a, 0xb3, 0x8d, 0xe9, 0xb6, 0x28, 0x74, 0xe9, 0x3e, 0x83, 0x6e, 0x96, 0x0b, 0x59, + 0xdb, 0x8f, 0x31, 0xef, 0x1a, 0xad, 0xf2, 0x9b, 0xf6, 0x17, 0x7c, 0x77, 0x83, 0x53, 0x22, 0x46, + 0xdb, 0x43, 0x98, 0x7c, 0x78, 0x51, 0x23, 0xad, 0x12, 0x31, 0xda, 0x21, 0xf2, 0xb2, 0x46, 0xda, + 0x35, 0xf2, 0xb2, 0x42, 0x86, 0xe0, 0x31, 0x9e, 0x70, 0xf3, 0xef, 0xab, 0xb1, 0xf2, 0x7b, 0xf6, + 0x18, 0x8f, 0x8c, 0x5c, 0x91, 0xdf, 0xc0, 0x13, 0xc6, 0x93, 0x0d, 0x53, 0xaa, 0x06, 0x01, 0xc1, + 0x13, 0xc6, 0x7f, 0x61, 0x4a, 0x1d, 0x56, 0xa4, 0x79, 0x2e, 0xf2, 0x1a, 0x74, 0xab, 0x8a, 0x63, + 0x23, 0x57, 0xe4, 0x77, 0xf0, 0x89, 0x69, 0xd2, 0x21, 0xda, 0x45, 0xf4, 0x89, 0x28, 0xf4, 0x3e, + 0x3b, 0x78, 0x05, 0xce, 0x76, 0x74, 0x09, 0xf4, 0xe6, 0xd1, 0xcf, 0xd1, 0xcd, 0xdb, 0x28, 0x99, + 0xce, 0x2e, 0x66, 0xf3, 0xa9, 0x77, 0x44, 0x1c, 0x68, 0xcc, 0x6f, 0x3d, 0x8b, 0xb4, 0xc1, 0x1e, + 0xdd, 0xbc, 0x8d, 0xbc, 0x06, 0x71, 0xa1, 0x35, 0x1a, 0x4f, 0xc6, 0xb3, 0xf1, 0xc8, 0x6b, 0x0e, + 0xbe, 0x07, 0xa7, 0x1c, 0xaa, 0x7d, 0xf3, 0x68, 0x7e, 0x3b, 0x19, 0xff, 0xea, 0x1d, 0x19, 0xd3, + 0x9b, 0x8b, 0xc9, 0x55, 0x69, 0xbf, 0x9a, 0x4f, 0x26, 0x5e, 0x63, 0xf0, 0xa7, 0x05, 0x9f, 0xd5, + 0x33, 0x15, 0x09, 0xcd, 0x96, 0x6c, 0x91, 0x6a, 0x26, 0x38, 0xb9, 0xdc, 
0xae, 0x06, 0x0b, 0x67, + 0x39, 0xfc, 0xe0, 0x20, 0xee, 0x9b, 0x02, 0x0c, 0xf6, 0x76, 0xc5, 0x0b, 0x38, 0xc6, 0x05, 0x89, + 0x73, 0xe5, 0xfe, 0xc7, 0x82, 0xc1, 0x71, 0x8e, 0x4b, 0x78, 0x70, 0x0e, 0x9d, 0xba, 0x90, 0x79, + 0xe0, 0xf6, 0x25, 0xde, 0x11, 0x01, 0x70, 0xe6, 0xb7, 0xf8, 0x72, 0x8b, 0x74, 0xa1, 0x7d, 0x79, + 0x33, 0x8f, 0x66, 0xe3, 0x78, 0xea, 0x35, 0x5e, 0xff, 0xf4, 0xd7, 0x3f, 0x4f, 0xad, 0xdf, 0x46, + 0x7b, 0x1b, 0x77, 0xcd, 0x56, 0xa9, 0x16, 0x66, 0x9b, 0x3e, 0x4f, 0x57, 0x94, 0xeb, 0x30, 0x95, + 0x2c, 0x7c, 0xef, 0x8a, 0x7d, 0xf5, 0x20, 0x65, 0xb2, 0x0b, 0xef, 0x1c, 0x5c, 0xb4, 0x3f, 0xfc, + 0x1b, 0x00, 0x00, 0xff, 0xff, 0x18, 0xae, 0x97, 0x14, 0xf3, 0x05, 0x00, 0x00, +} diff --git a/api/models/vpp/interfaces/state.proto b/api/models/vpp/interfaces/state.proto new file mode 100644 index 0000000000..4d0e163dc7 --- /dev/null +++ b/api/models/vpp/interfaces/state.proto @@ -0,0 +1,64 @@ +syntax = "proto3"; + +package vpp.interfaces; + +option go_package = "github.com/ligato/vpp-agent/api/models/vpp/interfaces;vpp_interfaces"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option (gogoproto.messagename_all) = true; + +import "models/vpp/interfaces/interface.proto"; + +message InterfaceState { + string name = 1; + string internal_name = 2; + interfaces.Interface.Type type = 3; + uint32 if_index = 4; + + enum Status { + UNKNOWN_STATUS = 0; + UP = 1; + DOWN = 2; + DELETED = 3; + }; + Status admin_status = 5; + Status oper_status = 6; + + int64 last_change = 7; + string phys_address = 8; + uint64 speed = 9; + uint32 mtu = 10; + + enum Duplex { + UNKNOWN_DUPLEX = 0; + HALF = 1; + FULL = 2; + }; + Duplex duplex = 11; + + message Statistics { + uint64 in_packets = 1; + uint64 in_bytes = 2; + uint64 out_packets = 3; + uint64 out_bytes = 4; + uint64 drop_packets = 5; + uint64 punt_packets = 6; + uint64 ipv4_packets = 7; + uint64 ipv6_packets = 8; + uint64 in_nobuf_packets = 9; + uint64 in_miss_packets = 10; + uint64 in_error_packets = 11; + uint64 
out_error_packets = 12; + } + Statistics statistics = 100; +} + +message InterfaceNotification { + enum NotifType { + UNKNOWN = 0; + UPDOWN = 1; + COUNTERS = 2; + } + NotifType type = 1; + InterfaceState state = 2; +} diff --git a/api/models/vpp/ipsec/ipsec.pb.go b/api/models/vpp/ipsec/ipsec.pb.go new file mode 100644 index 0000000000..9796fa5701 --- /dev/null +++ b/api/models/vpp/ipsec/ipsec.pb.go @@ -0,0 +1,580 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: models/vpp/ipsec/ipsec.proto + +package vpp_ipsec // import "github.com/ligato/vpp-agent/api/models/vpp/ipsec" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type SecurityPolicyDatabase_PolicyEntry_Action int32 + +const ( + SecurityPolicyDatabase_PolicyEntry_BYPASS SecurityPolicyDatabase_PolicyEntry_Action = 0 + SecurityPolicyDatabase_PolicyEntry_DISCARD SecurityPolicyDatabase_PolicyEntry_Action = 1 + SecurityPolicyDatabase_PolicyEntry_RESOLVE SecurityPolicyDatabase_PolicyEntry_Action = 2 + SecurityPolicyDatabase_PolicyEntry_PROTECT SecurityPolicyDatabase_PolicyEntry_Action = 3 +) + +var SecurityPolicyDatabase_PolicyEntry_Action_name = map[int32]string{ + 0: "BYPASS", + 1: "DISCARD", + 2: "RESOLVE", + 3: "PROTECT", +} +var SecurityPolicyDatabase_PolicyEntry_Action_value = map[string]int32{ + "BYPASS": 0, + "DISCARD": 1, + "RESOLVE": 2, + "PROTECT": 3, +} + +func (x SecurityPolicyDatabase_PolicyEntry_Action) String() string { + return proto.EnumName(SecurityPolicyDatabase_PolicyEntry_Action_name, int32(x)) +} +func (SecurityPolicyDatabase_PolicyEntry_Action) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_ipsec_5ddd9501648734e7, []int{0, 1, 0} +} + +type SecurityAssociation_IPSecProtocol int32 + +const ( + SecurityAssociation_AH SecurityAssociation_IPSecProtocol = 0 + SecurityAssociation_ESP SecurityAssociation_IPSecProtocol = 1 +) + +var SecurityAssociation_IPSecProtocol_name = map[int32]string{ + 0: "AH", + 1: "ESP", +} +var SecurityAssociation_IPSecProtocol_value = map[string]int32{ + "AH": 0, + "ESP": 1, +} + +func (x SecurityAssociation_IPSecProtocol) String() string { + return proto.EnumName(SecurityAssociation_IPSecProtocol_name, int32(x)) +} +func (SecurityAssociation_IPSecProtocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_ipsec_5ddd9501648734e7, []int{1, 0} +} + +type SecurityAssociation_CryptoAlg int32 + +const ( + SecurityAssociation_NONE_CRYPTO SecurityAssociation_CryptoAlg = 0 + SecurityAssociation_AES_CBC_128 SecurityAssociation_CryptoAlg = 1 + SecurityAssociation_AES_CBC_192 
SecurityAssociation_CryptoAlg = 2 + SecurityAssociation_AES_CBC_256 SecurityAssociation_CryptoAlg = 3 +) + +var SecurityAssociation_CryptoAlg_name = map[int32]string{ + 0: "NONE_CRYPTO", + 1: "AES_CBC_128", + 2: "AES_CBC_192", + 3: "AES_CBC_256", +} +var SecurityAssociation_CryptoAlg_value = map[string]int32{ + "NONE_CRYPTO": 0, + "AES_CBC_128": 1, + "AES_CBC_192": 2, + "AES_CBC_256": 3, +} + +func (x SecurityAssociation_CryptoAlg) String() string { + return proto.EnumName(SecurityAssociation_CryptoAlg_name, int32(x)) +} +func (SecurityAssociation_CryptoAlg) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_ipsec_5ddd9501648734e7, []int{1, 1} +} + +type SecurityAssociation_IntegAlg int32 + +const ( + SecurityAssociation_NONE_INTEG SecurityAssociation_IntegAlg = 0 + SecurityAssociation_MD5_96 SecurityAssociation_IntegAlg = 1 + SecurityAssociation_SHA1_96 SecurityAssociation_IntegAlg = 2 + SecurityAssociation_SHA_256_96 SecurityAssociation_IntegAlg = 3 + SecurityAssociation_SHA_256_128 SecurityAssociation_IntegAlg = 4 + SecurityAssociation_SHA_384_192 SecurityAssociation_IntegAlg = 5 + SecurityAssociation_SHA_512_256 SecurityAssociation_IntegAlg = 6 +) + +var SecurityAssociation_IntegAlg_name = map[int32]string{ + 0: "NONE_INTEG", + 1: "MD5_96", + 2: "SHA1_96", + 3: "SHA_256_96", + 4: "SHA_256_128", + 5: "SHA_384_192", + 6: "SHA_512_256", +} +var SecurityAssociation_IntegAlg_value = map[string]int32{ + "NONE_INTEG": 0, + "MD5_96": 1, + "SHA1_96": 2, + "SHA_256_96": 3, + "SHA_256_128": 4, + "SHA_384_192": 5, + "SHA_512_256": 6, +} + +func (x SecurityAssociation_IntegAlg) String() string { + return proto.EnumName(SecurityAssociation_IntegAlg_name, int32(x)) +} +func (SecurityAssociation_IntegAlg) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_ipsec_5ddd9501648734e7, []int{1, 2} +} + +// Security Policy Database (SPD) +type SecurityPolicyDatabase struct { + Index string `protobuf:"bytes,1,opt,name=index,proto3" json:"index,omitempty"` + Interfaces 
[]*SecurityPolicyDatabase_Interface `protobuf:"bytes,2,rep,name=interfaces,proto3" json:"interfaces,omitempty"` + PolicyEntries []*SecurityPolicyDatabase_PolicyEntry `protobuf:"bytes,3,rep,name=policy_entries,json=policyEntries,proto3" json:"policy_entries,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SecurityPolicyDatabase) Reset() { *m = SecurityPolicyDatabase{} } +func (m *SecurityPolicyDatabase) String() string { return proto.CompactTextString(m) } +func (*SecurityPolicyDatabase) ProtoMessage() {} +func (*SecurityPolicyDatabase) Descriptor() ([]byte, []int) { + return fileDescriptor_ipsec_5ddd9501648734e7, []int{0} +} +func (m *SecurityPolicyDatabase) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SecurityPolicyDatabase.Unmarshal(m, b) +} +func (m *SecurityPolicyDatabase) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SecurityPolicyDatabase.Marshal(b, m, deterministic) +} +func (dst *SecurityPolicyDatabase) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityPolicyDatabase.Merge(dst, src) +} +func (m *SecurityPolicyDatabase) XXX_Size() int { + return xxx_messageInfo_SecurityPolicyDatabase.Size(m) +} +func (m *SecurityPolicyDatabase) XXX_DiscardUnknown() { + xxx_messageInfo_SecurityPolicyDatabase.DiscardUnknown(m) +} + +var xxx_messageInfo_SecurityPolicyDatabase proto.InternalMessageInfo + +func (m *SecurityPolicyDatabase) GetIndex() string { + if m != nil { + return m.Index + } + return "" +} + +func (m *SecurityPolicyDatabase) GetInterfaces() []*SecurityPolicyDatabase_Interface { + if m != nil { + return m.Interfaces + } + return nil +} + +func (m *SecurityPolicyDatabase) GetPolicyEntries() []*SecurityPolicyDatabase_PolicyEntry { + if m != nil { + return m.PolicyEntries + } + return nil +} + +func (*SecurityPolicyDatabase) XXX_MessageName() string { + return "vpp.ipsec.SecurityPolicyDatabase" +} + +type 
SecurityPolicyDatabase_Interface struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SecurityPolicyDatabase_Interface) Reset() { *m = SecurityPolicyDatabase_Interface{} } +func (m *SecurityPolicyDatabase_Interface) String() string { return proto.CompactTextString(m) } +func (*SecurityPolicyDatabase_Interface) ProtoMessage() {} +func (*SecurityPolicyDatabase_Interface) Descriptor() ([]byte, []int) { + return fileDescriptor_ipsec_5ddd9501648734e7, []int{0, 0} +} +func (m *SecurityPolicyDatabase_Interface) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SecurityPolicyDatabase_Interface.Unmarshal(m, b) +} +func (m *SecurityPolicyDatabase_Interface) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SecurityPolicyDatabase_Interface.Marshal(b, m, deterministic) +} +func (dst *SecurityPolicyDatabase_Interface) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityPolicyDatabase_Interface.Merge(dst, src) +} +func (m *SecurityPolicyDatabase_Interface) XXX_Size() int { + return xxx_messageInfo_SecurityPolicyDatabase_Interface.Size(m) +} +func (m *SecurityPolicyDatabase_Interface) XXX_DiscardUnknown() { + xxx_messageInfo_SecurityPolicyDatabase_Interface.DiscardUnknown(m) +} + +var xxx_messageInfo_SecurityPolicyDatabase_Interface proto.InternalMessageInfo + +func (m *SecurityPolicyDatabase_Interface) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (*SecurityPolicyDatabase_Interface) XXX_MessageName() string { + return "vpp.ipsec.SecurityPolicyDatabase.Interface" +} + +type SecurityPolicyDatabase_PolicyEntry struct { + SaIndex string `protobuf:"bytes,1,opt,name=sa_index,json=saIndex,proto3" json:"sa_index,omitempty"` + Priority int32 `protobuf:"varint,2,opt,name=priority,proto3" json:"priority,omitempty"` + IsOutbound bool 
`protobuf:"varint,3,opt,name=is_outbound,json=isOutbound,proto3" json:"is_outbound,omitempty"` + RemoteAddrStart string `protobuf:"bytes,4,opt,name=remote_addr_start,json=remoteAddrStart,proto3" json:"remote_addr_start,omitempty"` + RemoteAddrStop string `protobuf:"bytes,5,opt,name=remote_addr_stop,json=remoteAddrStop,proto3" json:"remote_addr_stop,omitempty"` + LocalAddrStart string `protobuf:"bytes,6,opt,name=local_addr_start,json=localAddrStart,proto3" json:"local_addr_start,omitempty"` + LocalAddrStop string `protobuf:"bytes,7,opt,name=local_addr_stop,json=localAddrStop,proto3" json:"local_addr_stop,omitempty"` + Protocol uint32 `protobuf:"varint,8,opt,name=protocol,proto3" json:"protocol,omitempty"` + RemotePortStart uint32 `protobuf:"varint,9,opt,name=remote_port_start,json=remotePortStart,proto3" json:"remote_port_start,omitempty"` + RemotePortStop uint32 `protobuf:"varint,10,opt,name=remote_port_stop,json=remotePortStop,proto3" json:"remote_port_stop,omitempty"` + LocalPortStart uint32 `protobuf:"varint,11,opt,name=local_port_start,json=localPortStart,proto3" json:"local_port_start,omitempty"` + LocalPortStop uint32 `protobuf:"varint,12,opt,name=local_port_stop,json=localPortStop,proto3" json:"local_port_stop,omitempty"` + Action SecurityPolicyDatabase_PolicyEntry_Action `protobuf:"varint,13,opt,name=action,proto3,enum=vpp.ipsec.SecurityPolicyDatabase_PolicyEntry_Action" json:"action,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SecurityPolicyDatabase_PolicyEntry) Reset() { *m = SecurityPolicyDatabase_PolicyEntry{} } +func (m *SecurityPolicyDatabase_PolicyEntry) String() string { return proto.CompactTextString(m) } +func (*SecurityPolicyDatabase_PolicyEntry) ProtoMessage() {} +func (*SecurityPolicyDatabase_PolicyEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_ipsec_5ddd9501648734e7, []int{0, 1} +} +func (m *SecurityPolicyDatabase_PolicyEntry) 
XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SecurityPolicyDatabase_PolicyEntry.Unmarshal(m, b) +} +func (m *SecurityPolicyDatabase_PolicyEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SecurityPolicyDatabase_PolicyEntry.Marshal(b, m, deterministic) +} +func (dst *SecurityPolicyDatabase_PolicyEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityPolicyDatabase_PolicyEntry.Merge(dst, src) +} +func (m *SecurityPolicyDatabase_PolicyEntry) XXX_Size() int { + return xxx_messageInfo_SecurityPolicyDatabase_PolicyEntry.Size(m) +} +func (m *SecurityPolicyDatabase_PolicyEntry) XXX_DiscardUnknown() { + xxx_messageInfo_SecurityPolicyDatabase_PolicyEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_SecurityPolicyDatabase_PolicyEntry proto.InternalMessageInfo + +func (m *SecurityPolicyDatabase_PolicyEntry) GetSaIndex() string { + if m != nil { + return m.SaIndex + } + return "" +} + +func (m *SecurityPolicyDatabase_PolicyEntry) GetPriority() int32 { + if m != nil { + return m.Priority + } + return 0 +} + +func (m *SecurityPolicyDatabase_PolicyEntry) GetIsOutbound() bool { + if m != nil { + return m.IsOutbound + } + return false +} + +func (m *SecurityPolicyDatabase_PolicyEntry) GetRemoteAddrStart() string { + if m != nil { + return m.RemoteAddrStart + } + return "" +} + +func (m *SecurityPolicyDatabase_PolicyEntry) GetRemoteAddrStop() string { + if m != nil { + return m.RemoteAddrStop + } + return "" +} + +func (m *SecurityPolicyDatabase_PolicyEntry) GetLocalAddrStart() string { + if m != nil { + return m.LocalAddrStart + } + return "" +} + +func (m *SecurityPolicyDatabase_PolicyEntry) GetLocalAddrStop() string { + if m != nil { + return m.LocalAddrStop + } + return "" +} + +func (m *SecurityPolicyDatabase_PolicyEntry) GetProtocol() uint32 { + if m != nil { + return m.Protocol + } + return 0 +} + +func (m *SecurityPolicyDatabase_PolicyEntry) GetRemotePortStart() uint32 { + if m != nil { + return m.RemotePortStart + 
} + return 0 +} + +func (m *SecurityPolicyDatabase_PolicyEntry) GetRemotePortStop() uint32 { + if m != nil { + return m.RemotePortStop + } + return 0 +} + +func (m *SecurityPolicyDatabase_PolicyEntry) GetLocalPortStart() uint32 { + if m != nil { + return m.LocalPortStart + } + return 0 +} + +func (m *SecurityPolicyDatabase_PolicyEntry) GetLocalPortStop() uint32 { + if m != nil { + return m.LocalPortStop + } + return 0 +} + +func (m *SecurityPolicyDatabase_PolicyEntry) GetAction() SecurityPolicyDatabase_PolicyEntry_Action { + if m != nil { + return m.Action + } + return SecurityPolicyDatabase_PolicyEntry_BYPASS +} + +func (*SecurityPolicyDatabase_PolicyEntry) XXX_MessageName() string { + return "vpp.ipsec.SecurityPolicyDatabase.PolicyEntry" +} + +// Security Association (SA) +type SecurityAssociation struct { + Index string `protobuf:"bytes,1,opt,name=index,proto3" json:"index,omitempty"` + Spi uint32 `protobuf:"varint,2,opt,name=spi,proto3" json:"spi,omitempty"` + Protocol SecurityAssociation_IPSecProtocol `protobuf:"varint,3,opt,name=protocol,proto3,enum=vpp.ipsec.SecurityAssociation_IPSecProtocol" json:"protocol,omitempty"` + CryptoAlg SecurityAssociation_CryptoAlg `protobuf:"varint,4,opt,name=crypto_alg,json=cryptoAlg,proto3,enum=vpp.ipsec.SecurityAssociation_CryptoAlg" json:"crypto_alg,omitempty"` + CryptoKey string `protobuf:"bytes,5,opt,name=crypto_key,json=cryptoKey,proto3" json:"crypto_key,omitempty"` + IntegAlg SecurityAssociation_IntegAlg `protobuf:"varint,6,opt,name=integ_alg,json=integAlg,proto3,enum=vpp.ipsec.SecurityAssociation_IntegAlg" json:"integ_alg,omitempty"` + IntegKey string `protobuf:"bytes,7,opt,name=integ_key,json=integKey,proto3" json:"integ_key,omitempty"` + UseEsn bool `protobuf:"varint,8,opt,name=use_esn,json=useEsn,proto3" json:"use_esn,omitempty"` + UseAntiReplay bool `protobuf:"varint,9,opt,name=use_anti_replay,json=useAntiReplay,proto3" json:"use_anti_replay,omitempty"` + TunnelSrcAddr string 
`protobuf:"bytes,10,opt,name=tunnel_src_addr,json=tunnelSrcAddr,proto3" json:"tunnel_src_addr,omitempty"` + TunnelDstAddr string `protobuf:"bytes,11,opt,name=tunnel_dst_addr,json=tunnelDstAddr,proto3" json:"tunnel_dst_addr,omitempty"` + EnableUdpEncap bool `protobuf:"varint,12,opt,name=enable_udp_encap,json=enableUdpEncap,proto3" json:"enable_udp_encap,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SecurityAssociation) Reset() { *m = SecurityAssociation{} } +func (m *SecurityAssociation) String() string { return proto.CompactTextString(m) } +func (*SecurityAssociation) ProtoMessage() {} +func (*SecurityAssociation) Descriptor() ([]byte, []int) { + return fileDescriptor_ipsec_5ddd9501648734e7, []int{1} +} +func (m *SecurityAssociation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SecurityAssociation.Unmarshal(m, b) +} +func (m *SecurityAssociation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SecurityAssociation.Marshal(b, m, deterministic) +} +func (dst *SecurityAssociation) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityAssociation.Merge(dst, src) +} +func (m *SecurityAssociation) XXX_Size() int { + return xxx_messageInfo_SecurityAssociation.Size(m) +} +func (m *SecurityAssociation) XXX_DiscardUnknown() { + xxx_messageInfo_SecurityAssociation.DiscardUnknown(m) +} + +var xxx_messageInfo_SecurityAssociation proto.InternalMessageInfo + +func (m *SecurityAssociation) GetIndex() string { + if m != nil { + return m.Index + } + return "" +} + +func (m *SecurityAssociation) GetSpi() uint32 { + if m != nil { + return m.Spi + } + return 0 +} + +func (m *SecurityAssociation) GetProtocol() SecurityAssociation_IPSecProtocol { + if m != nil { + return m.Protocol + } + return SecurityAssociation_AH +} + +func (m *SecurityAssociation) GetCryptoAlg() SecurityAssociation_CryptoAlg { + if m != nil { + return m.CryptoAlg + } + 
return SecurityAssociation_NONE_CRYPTO +} + +func (m *SecurityAssociation) GetCryptoKey() string { + if m != nil { + return m.CryptoKey + } + return "" +} + +func (m *SecurityAssociation) GetIntegAlg() SecurityAssociation_IntegAlg { + if m != nil { + return m.IntegAlg + } + return SecurityAssociation_NONE_INTEG +} + +func (m *SecurityAssociation) GetIntegKey() string { + if m != nil { + return m.IntegKey + } + return "" +} + +func (m *SecurityAssociation) GetUseEsn() bool { + if m != nil { + return m.UseEsn + } + return false +} + +func (m *SecurityAssociation) GetUseAntiReplay() bool { + if m != nil { + return m.UseAntiReplay + } + return false +} + +func (m *SecurityAssociation) GetTunnelSrcAddr() string { + if m != nil { + return m.TunnelSrcAddr + } + return "" +} + +func (m *SecurityAssociation) GetTunnelDstAddr() string { + if m != nil { + return m.TunnelDstAddr + } + return "" +} + +func (m *SecurityAssociation) GetEnableUdpEncap() bool { + if m != nil { + return m.EnableUdpEncap + } + return false +} + +func (*SecurityAssociation) XXX_MessageName() string { + return "vpp.ipsec.SecurityAssociation" +} +func init() { + proto.RegisterType((*SecurityPolicyDatabase)(nil), "vpp.ipsec.SecurityPolicyDatabase") + proto.RegisterType((*SecurityPolicyDatabase_Interface)(nil), "vpp.ipsec.SecurityPolicyDatabase.Interface") + proto.RegisterType((*SecurityPolicyDatabase_PolicyEntry)(nil), "vpp.ipsec.SecurityPolicyDatabase.PolicyEntry") + proto.RegisterType((*SecurityAssociation)(nil), "vpp.ipsec.SecurityAssociation") + proto.RegisterEnum("vpp.ipsec.SecurityPolicyDatabase_PolicyEntry_Action", SecurityPolicyDatabase_PolicyEntry_Action_name, SecurityPolicyDatabase_PolicyEntry_Action_value) + proto.RegisterEnum("vpp.ipsec.SecurityAssociation_IPSecProtocol", SecurityAssociation_IPSecProtocol_name, SecurityAssociation_IPSecProtocol_value) + proto.RegisterEnum("vpp.ipsec.SecurityAssociation_CryptoAlg", SecurityAssociation_CryptoAlg_name, SecurityAssociation_CryptoAlg_value) + 
proto.RegisterEnum("vpp.ipsec.SecurityAssociation_IntegAlg", SecurityAssociation_IntegAlg_name, SecurityAssociation_IntegAlg_value) +} + +func init() { proto.RegisterFile("models/vpp/ipsec/ipsec.proto", fileDescriptor_ipsec_5ddd9501648734e7) } + +var fileDescriptor_ipsec_5ddd9501648734e7 = []byte{ + // 874 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xae, 0x93, 0xc6, 0x49, 0x5e, 0x48, 0xd6, 0x0c, 0x08, 0x4c, 0xf9, 0xb1, 0x51, 0x0e, 0x60, + 0x01, 0x4d, 0xd4, 0xec, 0x76, 0xb5, 0xcb, 0x5e, 0x70, 0x13, 0x6b, 0x1b, 0xed, 0xd2, 0x44, 0x76, + 0x40, 0x5a, 0x2e, 0xd6, 0xc4, 0x9e, 0x0d, 0x23, 0x5c, 0xcf, 0xc8, 0x33, 0xae, 0xc8, 0x7f, 0xc8, + 0x95, 0x3b, 0x37, 0xfe, 0x09, 0x8e, 0x68, 0xc6, 0x4e, 0x3a, 0x5d, 0x55, 0xaa, 0xb8, 0x44, 0xef, + 0x7d, 0xf3, 0xbd, 0xef, 0xcb, 0xb3, 0xbf, 0x91, 0xe1, 0x8b, 0x6b, 0x96, 0x92, 0x4c, 0x4c, 0x6e, + 0x38, 0x9f, 0x50, 0x2e, 0x48, 0x52, 0xfd, 0x8e, 0x79, 0xc1, 0x24, 0x43, 0xdd, 0x1b, 0xce, 0xc7, + 0x1a, 0x38, 0x39, 0xdd, 0x52, 0xf9, 0x5b, 0xb9, 0x19, 0x27, 0xec, 0x7a, 0xb2, 0x65, 0x5b, 0x36, + 0xd1, 0x8c, 0x4d, 0xf9, 0x4e, 0x77, 0xba, 0xd1, 0x55, 0x35, 0x39, 0xfa, 0xcb, 0x86, 0x4f, 0x22, + 0x92, 0x94, 0x05, 0x95, 0xbb, 0x15, 0xcb, 0x68, 0xb2, 0x9b, 0x63, 0x89, 0x37, 0x58, 0x10, 0xf4, + 0x31, 0xb4, 0x68, 0x9e, 0x92, 0x3f, 0x5c, 0x6b, 0x68, 0x79, 0xdd, 0xb0, 0x6a, 0xd0, 0x6b, 0x00, + 0x9a, 0x4b, 0x52, 0xbc, 0xc3, 0x09, 0x11, 0x6e, 0x63, 0xd8, 0xf4, 0x7a, 0xd3, 0xef, 0xc6, 0x07, + 0xff, 0xf1, 0xfd, 0x62, 0xe3, 0xc5, 0x7e, 0x26, 0x34, 0xc6, 0xd1, 0x1a, 0x06, 0x5c, 0xf3, 0x62, + 0x92, 0xcb, 0x82, 0x12, 0xe1, 0x36, 0xb5, 0xe0, 0xe9, 0xc3, 0x82, 0x55, 0x1b, 0xe4, 0xb2, 0xd8, + 0x85, 0x7d, 0x7e, 0x68, 0x28, 0x11, 0x27, 0x8f, 0xa1, 0x7b, 0xb0, 0x43, 0x08, 0x8e, 0x73, 0x7c, + 0x4d, 0xea, 0x25, 0x74, 0x7d, 0xf2, 0xf7, 0x31, 0xf4, 0x8c, 0x79, 0xf4, 0x19, 0x74, 0x04, 0x8e, + 0xcd, 0x65, 0xdb, 0x02, 0x2f, 0xf4, 0xba, 0x27, 0xd0, 0xe1, 0x05, 0x65, 0xea, 
0x0f, 0xb8, 0x8d, + 0xa1, 0xe5, 0xb5, 0xc2, 0x43, 0x8f, 0x1e, 0x43, 0x8f, 0x8a, 0x98, 0x95, 0x72, 0xc3, 0xca, 0x3c, + 0x75, 0x9b, 0x43, 0xcb, 0xeb, 0x84, 0x40, 0xc5, 0xb2, 0x46, 0xd0, 0xb7, 0xf0, 0x61, 0x41, 0xae, + 0x99, 0x24, 0x31, 0x4e, 0xd3, 0x22, 0x16, 0x12, 0x17, 0xd2, 0x3d, 0xd6, 0x06, 0x8f, 0xaa, 0x03, + 0x3f, 0x4d, 0x8b, 0x48, 0xc1, 0xc8, 0x03, 0xe7, 0x2e, 0x97, 0x71, 0xb7, 0xa5, 0xa9, 0x03, 0x93, + 0xca, 0xb8, 0x62, 0x66, 0x2c, 0xc1, 0x99, 0x29, 0x6a, 0x57, 0x4c, 0x8d, 0xdf, 0x6a, 0x7e, 0x0d, + 0x8f, 0xee, 0x30, 0x19, 0x77, 0xdb, 0x9a, 0xd8, 0x37, 0x88, 0x8c, 0x57, 0x4b, 0x32, 0xc9, 0x12, + 0x96, 0xb9, 0x9d, 0xa1, 0xe5, 0xf5, 0xc3, 0x43, 0x6f, 0xec, 0xc0, 0x59, 0x21, 0x6b, 0xbb, 0xae, + 0x26, 0xd5, 0x3b, 0xac, 0x58, 0x21, 0xdf, 0xdf, 0xa1, 0xe6, 0x32, 0xee, 0x82, 0xa6, 0x0e, 0x4c, + 0xaa, 0xb9, 0x83, 0x21, 0xda, 0xab, 0x98, 0x1a, 0xbf, 0xd5, 0x3c, 0xec, 0x70, 0x2b, 0xf9, 0x81, + 0x26, 0xf6, 0x0d, 0x22, 0xe3, 0xe8, 0x0d, 0xd8, 0x38, 0x91, 0x94, 0xe5, 0x6e, 0x7f, 0x68, 0x79, + 0x83, 0xe9, 0xd3, 0xff, 0x15, 0xa1, 0xb1, 0xaf, 0x67, 0xc3, 0x5a, 0x63, 0xf4, 0x12, 0xec, 0x0a, + 0x41, 0x00, 0xf6, 0xc5, 0xdb, 0x95, 0x1f, 0x45, 0xce, 0x11, 0xea, 0x41, 0x7b, 0xbe, 0x88, 0x66, + 0x7e, 0x38, 0x77, 0x2c, 0xd5, 0x84, 0x41, 0xb4, 0x7c, 0xf3, 0x4b, 0xe0, 0x34, 0x54, 0xb3, 0x0a, + 0x97, 0xeb, 0x60, 0xb6, 0x76, 0x9a, 0xa3, 0x7f, 0x5b, 0xf0, 0xd1, 0xde, 0xd2, 0x17, 0x82, 0x25, + 0x14, 0x6b, 0xa9, 0xfb, 0x2f, 0x94, 0x03, 0x4d, 0xc1, 0xa9, 0x0e, 0x57, 0x3f, 0x54, 0x25, 0xba, + 0x34, 0x5e, 0x47, 0x53, 0x2f, 0xf3, 0xfd, 0x3d, 0xcb, 0x18, 0xca, 0xe3, 0xc5, 0x2a, 0x22, 0xc9, + 0xaa, 0x9e, 0x31, 0x5e, 0xde, 0x2b, 0x80, 0xa4, 0xd8, 0x71, 0xc9, 0x62, 0x9c, 0x6d, 0x75, 0xf2, + 0x06, 0x53, 0xef, 0x01, 0xad, 0x99, 0x1e, 0xf0, 0xb3, 0x6d, 0xd8, 0x4d, 0xf6, 0x25, 0xfa, 0xf2, + 0x20, 0xf4, 0x3b, 0xd9, 0xd5, 0xb9, 0xac, 0x8f, 0x5f, 0x93, 0x1d, 0x9a, 0x43, 0x57, 0xdd, 0xea, + 0xad, 0xb6, 0xb1, 0xb5, 0xcd, 0x37, 0x0f, 0xfd, 0x65, 0xc5, 0x57, 0x2e, 0x1d, 0x5a, 0x57, 0xe8, + 0xf3, 0xbd, 0x8a, 
0xf2, 0xa8, 0x82, 0x5a, 0x1d, 0x2a, 0x8b, 0x4f, 0xa1, 0x5d, 0x0a, 0x12, 0x13, + 0x91, 0xeb, 0x88, 0x76, 0x42, 0xbb, 0x14, 0x24, 0x10, 0xb9, 0x0a, 0x88, 0x3a, 0xc0, 0xb9, 0xa4, + 0x71, 0x41, 0x78, 0x86, 0x77, 0x3a, 0x9e, 0x9d, 0xb0, 0x5f, 0x0a, 0xe2, 0xe7, 0x92, 0x86, 0x1a, + 0x54, 0x3c, 0x59, 0xe6, 0x39, 0xc9, 0x62, 0x51, 0x24, 0xfa, 0x46, 0xe8, 0x6c, 0x76, 0xc3, 0x7e, + 0x05, 0x47, 0x45, 0xa2, 0x2e, 0x84, 0xc1, 0x4b, 0x85, 0xac, 0x78, 0x3d, 0x93, 0x37, 0x17, 0x52, + 0xf3, 0x3c, 0x70, 0x48, 0x8e, 0x37, 0x19, 0x89, 0xcb, 0x94, 0xc7, 0x24, 0x4f, 0x70, 0x95, 0xcc, + 0x4e, 0x38, 0xa8, 0xf0, 0x9f, 0x53, 0x1e, 0x28, 0x74, 0x34, 0x84, 0xfe, 0x9d, 0x17, 0x84, 0x6c, + 0x68, 0xf8, 0x97, 0xce, 0x11, 0x6a, 0x43, 0x33, 0x88, 0x56, 0x8e, 0x35, 0x5a, 0x42, 0xf7, 0xf0, + 0xd8, 0xd1, 0x23, 0xe8, 0x5d, 0x2d, 0xaf, 0x82, 0x78, 0x16, 0xbe, 0x5d, 0xad, 0x97, 0xce, 0x91, + 0x02, 0xfc, 0x20, 0x8a, 0x67, 0x17, 0xb3, 0xf8, 0x6c, 0xfa, 0xdc, 0xb1, 0xee, 0x00, 0x2f, 0xa6, + 0x4e, 0xc3, 0x04, 0xa6, 0xe7, 0xcf, 0x9c, 0xe6, 0xe8, 0x06, 0x3a, 0xfb, 0x07, 0x8c, 0x06, 0x00, + 0x5a, 0x6f, 0x71, 0xb5, 0x0e, 0x5e, 0x39, 0x47, 0x2a, 0xd1, 0x3f, 0xcd, 0xcf, 0xe3, 0x17, 0xcf, + 0xaa, 0x10, 0x47, 0x97, 0xfe, 0x99, 0x6a, 0x1a, 0x8a, 0x18, 0x5d, 0xfa, 0x4a, 0x41, 0xf5, 0x4d, + 0xa5, 0xba, 0xef, 0x95, 0xef, 0xf1, 0x1e, 0x78, 0xf2, 0xfc, 0xa9, 0xf6, 0x6d, 0xed, 0x81, 0xf3, + 0xb3, 0xa9, 0xf6, 0xb5, 0x2f, 0x7e, 0xfc, 0xf3, 0x9f, 0xaf, 0xac, 0x5f, 0x7f, 0x30, 0xbe, 0x41, + 0x19, 0xdd, 0x62, 0xc9, 0xd4, 0x77, 0xeb, 0x14, 0x6f, 0x49, 0x2e, 0x27, 0x98, 0xd3, 0xc9, 0xfb, + 0x1f, 0xb3, 0x97, 0x37, 0x9c, 0xc7, 0xba, 0xda, 0xd8, 0x3a, 0xbc, 0x4f, 0xfe, 0x0b, 0x00, 0x00, + 0xff, 0xff, 0x90, 0xaa, 0x2e, 0x39, 0xf1, 0x06, 0x00, 0x00, +} diff --git a/api/models/vpp/ipsec/ipsec.proto b/api/models/vpp/ipsec/ipsec.proto new file mode 100644 index 0000000000..ada2339c4b --- /dev/null +++ b/api/models/vpp/ipsec/ipsec.proto @@ -0,0 +1,87 @@ +syntax = "proto3"; + +package vpp.ipsec; + +option go_package = 
"github.com/ligato/vpp-agent/api/models/vpp/ipsec;vpp_ipsec"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option (gogoproto.messagename_all) = true; + +/* Security Policy Database (SPD) */ +message SecurityPolicyDatabase { + string index = 1; /* Numerical security policy database index, serves as a unique identifier */ + + message Interface { + string name = 1; /* Name of the related interface */ + } + repeated Interface interfaces = 2; /* List of interfaces belonging to this SPD */ + + message PolicyEntry { + string sa_index = 1; /* Security association index */ + + int32 priority = 2; + bool is_outbound = 3; + + string remote_addr_start = 4; + string remote_addr_stop = 5; + string local_addr_start = 6; + string local_addr_stop = 7; + + uint32 protocol = 8; + + uint32 remote_port_start = 9; + uint32 remote_port_stop = 10; + uint32 local_port_start = 11; + uint32 local_port_stop = 12; + + enum Action { + BYPASS = 0; + DISCARD = 1; + RESOLVE = 2; /* Note: this particular action is unused in VPP */ + PROTECT = 3; + } + Action action = 13; + } + repeated PolicyEntry policy_entries = 3; /* List of policy entries belonging to this SPD */ +} + +/* Security Association (SA) */ +message SecurityAssociation { + string index = 1; /* Numerical security association index, serves as a unique identifier */ + uint32 spi = 2; /* Security parameter index */ + + enum IPSecProtocol { + AH = 0; /* Authentication Header, provides a mechanism for authentication only */ + ESP = 1; /* Encapsulating Security Payload is for data confidentiality and authentication */ + } + IPSecProtocol protocol = 3; + + enum CryptoAlg { + NONE_CRYPTO = 0; + AES_CBC_128 = 1; + AES_CBC_192 = 2; + AES_CBC_256 = 3; + } + CryptoAlg crypto_alg = 4; /* Cryptographic algorithm for encryption */ + string crypto_key = 5; + + enum IntegAlg { + NONE_INTEG = 0; + MD5_96 = 1; + SHA1_96 = 2; + SHA_256_96 = 3; + SHA_256_128 = 4; + SHA_384_192 = 5; + SHA_512_256 = 6; + } + IntegAlg integ_alg = 6; /* 
Cryptographic algorithm for authentication */ + string integ_key = 7; + + bool use_esn = 8; /* Use extended sequence number */ + bool use_anti_replay = 9; /* Use anti replay */ + + string tunnel_src_addr = 10; + string tunnel_dst_addr = 11; + + bool enable_udp_encap = 12; /* Enable UDP encapsulation for NAT traversal */ +} diff --git a/api/models/vpp/ipsec/keys.go b/api/models/vpp/ipsec/keys.go new file mode 100644 index 0000000000..99005ff9a6 --- /dev/null +++ b/api/models/vpp/ipsec/keys.go @@ -0,0 +1,137 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vpp_ipsec + +import ( + "strconv" + "strings" + + "github.com/ligato/vpp-agent/pkg/models" +) + +// ModuleName is the module name used for models. +const ModuleName = "vpp.ipsec" + +var ( + ModelSecurityPolicyDatabase = models.Register(&SecurityPolicyDatabase{}, models.Spec{ + Module: ModuleName, + Version: "v2", + Type: "spd", + }, models.WithNameTemplate("{{.Index}}")) + + ModelSecurityAssociation = models.Register(&SecurityAssociation{}, models.Spec{ + Module: ModuleName, + Version: "v2", + Type: "sa", + }, models.WithNameTemplate("{{.Index}}")) +) + +// SPDKey returns the key used in NB DB to store the configuration of the +// given security policy database configuration. 
+func SPDKey(index string) string { + return models.Key(&SecurityPolicyDatabase{ + Index: index, + }) +} + +// SAKey returns the key used in NB DB to store the configuration of the +// given security association configuration. +func SAKey(index string) string { + return models.Key(&SecurityAssociation{ + Index: index, + }) +} + +/* SPD <-> interface binding (derived) */ +const ( + // spdInterfaceKeyTemplate is a template for (derived) key representing binding + // between interface and a security policy database. + spdInterfaceKeyTemplate = "vpp/spd/{spd}/interface/{iface}" +) + +/* SPD <-> policy binding (derived) */ +const ( + // spdPolicyKeyTemplate is a template for (derived) key representing binding + // between policy (security association) and a security policy database. + spdPolicyKeyTemplate = "vpp/spd/{spd}/sa/{sa}" +) + +const ( + // InvalidKeyPart is used in key for parts which are invalid + InvalidKeyPart = "" +) + +/* SPD <-> interface binding (derived) */ + +// SPDInterfaceKey returns the key used to represent binding between the given interface +// and the security policy database. 
+func SPDInterfaceKey(spdIndex string, ifName string) string { + if spdIndex == "" { + spdIndex = InvalidKeyPart + } + if _, err := strconv.Atoi(spdIndex); err != nil { + spdIndex = InvalidKeyPart + } + if ifName == "" { + ifName = InvalidKeyPart + } + key := strings.Replace(spdInterfaceKeyTemplate, "{spd}", spdIndex, 1) + key = strings.Replace(key, "{iface}", ifName, 1) + return key +} + +// ParseSPDInterfaceKey parses key representing binding between interface and a security +// policy database +func ParseSPDInterfaceKey(key string) (spdIndex string, iface string, isSPDIfaceKey bool) { + keyComps := strings.Split(key, "/") + if len(keyComps) >= 5 && keyComps[0] == "vpp" && keyComps[1] == "spd" && keyComps[3] == "interface" { + iface = strings.Join(keyComps[4:], "/") + return keyComps[2], iface, true + } + return "", "", false +} + +/* SPD <-> policy binding (derived) */ + +// SPDPolicyKey returns the key used to represent binding between the given policy +// (security association) and the security policy database. 
+func SPDPolicyKey(spdIndex string, saIndex string) string {
+	if spdIndex == "" {
+		spdIndex = InvalidKeyPart
+	}
+	if _, err := strconv.Atoi(spdIndex); err != nil {
+		spdIndex = InvalidKeyPart
+	}
+	if saIndex == "" {
+		saIndex = InvalidKeyPart
+	}
+	if _, err := strconv.Atoi(saIndex); err != nil {
+		saIndex = InvalidKeyPart
+	}
+	key := strings.Replace(spdPolicyKeyTemplate, "{spd}", spdIndex, 1)
+	key = strings.Replace(key, "{sa}", saIndex, 1)
+	return key
+}
+
+// ParseSPDPolicyKey parses key representing binding between policy (security
+// association) and a security policy database
+func ParseSPDPolicyKey(key string) (spdIndex string, saIndex string, isSPDPolicyKey bool) { // renamed result: was isSPDIfaceKey (copy-paste from interface parser)
+	keyComps := strings.Split(key, "/")
+	if len(keyComps) >= 5 && keyComps[0] == "vpp" && keyComps[1] == "spd" && keyComps[3] == "sa" {
+		saIndex = strings.Join(keyComps[4:], "/")
+		return keyComps[2], saIndex, true
+	}
+	return "", "", false
+}
diff --git a/api/models/vpp/ipsec/keys_test.go b/api/models/vpp/ipsec/keys_test.go
new file mode 100644
index 0000000000..cdd9c7d532
--- /dev/null
+++ b/api/models/vpp/ipsec/keys_test.go
@@ -0,0 +1,398 @@
+// Copyright (c) 2018 Cisco and/or its affiliates.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at:
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package vpp_ipsec_test + +import ( + "testing" + + ipsec "github.com/ligato/vpp-agent/api/models/vpp/ipsec" +) + +/*func TestIPSecSPDKey(t *testing.T) { + tests := []struct { + name string + spdIndex string + expectedKey string + }{ + { + name: "valid SPD index", + spdIndex: "1", + expectedKey: "vpp/config/v2/ipsec/spd/1", + }, + { + name: "empty SPD index", + spdIndex: "", + expectedKey: "vpp/config/v2/ipsec/spd/", + }, + { + name: "invalid SPD index", + spdIndex: "spd1", + expectedKey: "vpp/config/v2/ipsec/spd/", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := ipsec.SPDKey(test.spdIndex) + if key != test.expectedKey { + t.Errorf("failed for: spdName=%s\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.name, test.expectedKey, key) + } + }) + } +} + +func TestParseIPSecSPDNameFromKey(t *testing.T) { + tests := []struct { + name string + key string + expectedSPDIndex string + expectedIsSPDKey bool + }{ + { + name: "valid SPD index", + key: "vpp/config/v2/ipsec/spd/1", + expectedSPDIndex: "1", + expectedIsSPDKey: true, + }, + { + name: "empty SPD index", + key: "vpp/config/v2/ipsec/spd/", + expectedSPDIndex: "", + expectedIsSPDKey: true, + }, + { + name: "invalid SPD index", + key: "vpp/config/v2/ipsec/spd/spd1", + expectedSPDIndex: "", + expectedIsSPDKey: true, + }, + { + name: "not SPD key", + key: "vpp/config/v2/ipsec/sa/spd1", + expectedSPDIndex: "", + expectedIsSPDKey: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + spdName, isSPDKey := models.Model(&ipsec.SecurityPolicyDatabase{}).ParseKey(test.key) + if isSPDKey != test.expectedIsSPDKey { + t.Errorf("expected isSPDKey: %v\tgot: %v", test.expectedIsSPDKey, isSPDKey) + } + if spdName != test.expectedSPDIndex { + t.Errorf("expected spdName: %s\tgot: %s", test.expectedSPDIndex, spdName) + } + }) + } +}*/ + +func TestSPDInterfaceKey(t *testing.T) { + tests := []struct { + name string + spdIndex string + ifName string + 
expectedKey string + }{ + { + name: "valid SPD index & iface name", + spdIndex: "1", + ifName: "if1", + expectedKey: "vpp/spd/1/interface/if1", + }, + { + name: "empty SPD & valid interface", + spdIndex: "", + ifName: "if1", + expectedKey: "vpp/spd//interface/if1", + }, + { + name: "invalid SPD but valid interface", + spdIndex: "spd1", + ifName: "if1", + expectedKey: "vpp/spd//interface/if1", + }, + { + name: "valid SPD but invalid interface", + spdIndex: "1", + ifName: "", + expectedKey: "vpp/spd/1/interface/", + }, + { + name: "invalid parameters", + spdIndex: "", + ifName: "", + expectedKey: "vpp/spd//interface/", + }, + { + name: "Gbe interface", + spdIndex: "1", + ifName: "GigabitEthernet0/a/0", + expectedKey: "vpp/spd/1/interface/GigabitEthernet0/a/0", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := ipsec.SPDInterfaceKey(test.spdIndex, test.ifName) + if key != test.expectedKey { + t.Errorf("failed for: spdIdx=%s idName=%s\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.spdIndex, test.ifName, test.expectedKey, key) + } + }) + } +} + +func TestParseSPDInterfaceKey(t *testing.T) { + tests := []struct { + name string + key string + expectedSPDIndex string + expectedIfName string + expectedIsSAIfaceKey bool + }{ + { + name: "valid SPD & iface name", + key: "vpp/spd/1/interface/if1", + expectedSPDIndex: "1", + expectedIfName: "if1", + expectedIsSAIfaceKey: true, + }, + { + name: "invalid SPD but valid interface", + key: "vpp/spd//interface/if1", + expectedSPDIndex: "", + expectedIfName: "if1", + expectedIsSAIfaceKey: true, + }, + { + name: "valid SPD but invalid interface", + key: "vpp/spd/1/interface/", + expectedSPDIndex: "1", + expectedIfName: "", + expectedIsSAIfaceKey: true, + }, + { + name: "Gbe interface", + key: "vpp/spd/1/interface/GigabitEthernet0/8/0", + expectedSPDIndex: "1", + expectedIfName: "GigabitEthernet0/8/0", + expectedIsSAIfaceKey: true, + }, + { + name: "not SPD-interface key", + key: 
"vpp/config/v2/ipsec/spd/1", + expectedSPDIndex: "", + expectedIfName: "", + expectedIsSAIfaceKey: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + spdIdx, ifName, isSPDIfaceKey := ipsec.ParseSPDInterfaceKey(test.key) + if isSPDIfaceKey != test.expectedIsSAIfaceKey { + t.Errorf("expected isSPDIfaceKey: %v\tgot: %v", test.expectedIsSAIfaceKey, isSPDIfaceKey) + } + if spdIdx != test.expectedSPDIndex { + t.Errorf("expected spdIdx: %s\tgot: %s", test.expectedSPDIndex, spdIdx) + } + if ifName != test.expectedIfName { + t.Errorf("expected ifName: %s\tgot: %s", test.expectedIfName, ifName) + } + }) + } +} + +func TestSPDPolicyKey(t *testing.T) { + tests := []struct { + name string + spdIndex string + saIndex string + expectedKey string + }{ + { + name: "valid SPD & SA index", + spdIndex: "1", + saIndex: "2", + expectedKey: "vpp/spd/1/sa/2", + }, + { + name: "empty SPD & valid SA", + spdIndex: "", + saIndex: "2", + expectedKey: "vpp/spd//sa/2", + }, + { + name: "invalid SPD and empty SA", + spdIndex: "spd1", + saIndex: "", + expectedKey: "vpp/spd//sa/", + }, + { + name: "valid SPD but invalid SA", + spdIndex: "1", + saIndex: "sa2", + expectedKey: "vpp/spd/1/sa/", + }, + { + name: "invalid parameters", + spdIndex: "", + saIndex: "", + expectedKey: "vpp/spd//sa/", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := ipsec.SPDPolicyKey(test.spdIndex, test.saIndex) + if key != test.expectedKey { + t.Errorf("failed for: spdIdx=%s saIdx=%s\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.spdIndex, test.saIndex, test.expectedKey, key) + } + }) + } +} + +func TestParseSPDPolicyKey(t *testing.T) { + tests := []struct { + name string + key string + expectedSPDIndex string + expectedSAIndex string + expectedIsSPDPolicyKey bool + }{ + { + name: "valid SPD & SA index", + key: "vpp/spd/1/interface/2", + expectedSPDIndex: "1", + expectedSAIndex: "2", + expectedIsSPDPolicyKey: true, + }, + { + name: "invalid 
SPD but valid SA", + key: "vpp/spd//interface/2", + expectedSPDIndex: "", + expectedSAIndex: "2", + expectedIsSPDPolicyKey: true, + }, + { + name: "valid SPD but invalid SA", + key: "vpp/spd/1/interface/", + expectedSPDIndex: "1", + expectedSAIndex: "", + expectedIsSPDPolicyKey: true, + }, + { + name: "not SPD-policy key", + key: "vpp/config/v2/ipsec/sa/1", + expectedSPDIndex: "", + expectedSAIndex: "", + expectedIsSPDPolicyKey: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + spdIdx, saIdx, isSPDPolicyKey := ipsec.ParseSPDInterfaceKey(test.key) + if isSPDPolicyKey != test.expectedIsSPDPolicyKey { + t.Errorf("expected isSPDIfaceKey: %v\tgot: %v", test.expectedIsSPDPolicyKey, isSPDPolicyKey) + } + if spdIdx != test.expectedSPDIndex { + t.Errorf("expected spdIdx: %s\tgot: %s", test.expectedSPDIndex, spdIdx) + } + if saIdx != test.expectedSAIndex { + t.Errorf("expected saIdx: %s\tgot: %s", test.expectedSAIndex, saIdx) + } + }) + } +} + +/*func TestIPSecSAKey(t *testing.T) { + tests := []struct { + name string + saIndex string + expectedKey string + }{ + { + name: "valid SA index", + saIndex: "1", + expectedKey: "vpp/config/v2/ipsec/sa/1", + }, + { + name: "empty SA index", + saIndex: "", + expectedKey: "vpp/config/v2/ipsec/sa/", + }, + { + name: "invalid SA index", + saIndex: "sa1", + expectedKey: "vpp/config/v2/ipsec/sa/", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := ipsec.SAKey(test.saIndex) + if key != test.expectedKey { + t.Errorf("failed for: saName=%s\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.name, test.expectedKey, key) + } + }) + } +} + +func TestParseIPSecSANameFromKey(t *testing.T) { + tests := []struct { + name string + key string + expectedSAIndex string + expectedIsSAKey bool + }{ + { + name: "valid SA index", + key: "vpp/config/v2/ipsec/sa/1", + expectedSAIndex: "1", + expectedIsSAKey: true, + }, + { + name: "empty SA index", + key: "vpp/config/v2/ipsec/sa/", + 
expectedSAIndex: "", + expectedIsSAKey: true, + }, + { + name: "invalid SPD index", + key: "vpp/config/v2/ipsec/sa/sa1", + expectedSAIndex: "", + expectedIsSAKey: true, + }, + { + name: "not SA key", + key: "vpp/config/v2/ipsec/tunnel/sa1", + expectedSAIndex: "", + expectedIsSAKey: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + saName, isSAKey := models.Model(&ipsec.SecurityAssociation{}).ParseKey(test.key) + if isSAKey != test.expectedIsSAKey { + t.Errorf("expected isSAKey: %v\tgot: %v", test.expectedIsSAKey, isSAKey) + } + if saName != test.expectedSAIndex { + t.Errorf("expected saName: %s\tgot: %s", test.expectedSAIndex, saName) + } + }) + } +}*/ diff --git a/api/models/vpp/l2/bridge-domain.pb.go b/api/models/vpp/l2/bridge-domain.pb.go new file mode 100644 index 0000000000..5a0894bff4 --- /dev/null +++ b/api/models/vpp/l2/bridge-domain.pb.go @@ -0,0 +1,276 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: models/vpp/l2/bridge-domain.proto + +package vpp_l2 // import "github.com/ligato/vpp-agent/api/models/vpp/l2" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type BridgeDomain struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Flood bool `protobuf:"varint,2,opt,name=flood,proto3" json:"flood,omitempty"` + UnknownUnicastFlood bool `protobuf:"varint,3,opt,name=unknown_unicast_flood,json=unknownUnicastFlood,proto3" json:"unknown_unicast_flood,omitempty"` + Forward bool `protobuf:"varint,4,opt,name=forward,proto3" json:"forward,omitempty"` + Learn bool `protobuf:"varint,5,opt,name=learn,proto3" json:"learn,omitempty"` + ArpTermination bool `protobuf:"varint,6,opt,name=arp_termination,json=arpTermination,proto3" json:"arp_termination,omitempty"` + MacAge uint32 `protobuf:"varint,7,opt,name=mac_age,json=macAge,proto3" json:"mac_age,omitempty"` + Interfaces []*BridgeDomain_Interface `protobuf:"bytes,100,rep,name=interfaces,proto3" json:"interfaces,omitempty"` + ArpTerminationTable []*BridgeDomain_ArpTerminationEntry `protobuf:"bytes,102,rep,name=arp_termination_table,json=arpTerminationTable,proto3" json:"arp_termination_table,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BridgeDomain) Reset() { *m = BridgeDomain{} } +func (m *BridgeDomain) String() string { return proto.CompactTextString(m) } +func (*BridgeDomain) ProtoMessage() {} +func (*BridgeDomain) Descriptor() ([]byte, []int) { + return fileDescriptor_bridge_domain_7168cf77536c0bfe, []int{0} +} +func (m *BridgeDomain) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BridgeDomain.Unmarshal(m, b) +} +func (m *BridgeDomain) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BridgeDomain.Marshal(b, m, deterministic) +} +func (dst *BridgeDomain) XXX_Merge(src proto.Message) { + xxx_messageInfo_BridgeDomain.Merge(dst, src) +} +func (m *BridgeDomain) XXX_Size() int { + return xxx_messageInfo_BridgeDomain.Size(m) +} +func (m 
*BridgeDomain) XXX_DiscardUnknown() { + xxx_messageInfo_BridgeDomain.DiscardUnknown(m) +} + +var xxx_messageInfo_BridgeDomain proto.InternalMessageInfo + +func (m *BridgeDomain) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BridgeDomain) GetFlood() bool { + if m != nil { + return m.Flood + } + return false +} + +func (m *BridgeDomain) GetUnknownUnicastFlood() bool { + if m != nil { + return m.UnknownUnicastFlood + } + return false +} + +func (m *BridgeDomain) GetForward() bool { + if m != nil { + return m.Forward + } + return false +} + +func (m *BridgeDomain) GetLearn() bool { + if m != nil { + return m.Learn + } + return false +} + +func (m *BridgeDomain) GetArpTermination() bool { + if m != nil { + return m.ArpTermination + } + return false +} + +func (m *BridgeDomain) GetMacAge() uint32 { + if m != nil { + return m.MacAge + } + return 0 +} + +func (m *BridgeDomain) GetInterfaces() []*BridgeDomain_Interface { + if m != nil { + return m.Interfaces + } + return nil +} + +func (m *BridgeDomain) GetArpTerminationTable() []*BridgeDomain_ArpTerminationEntry { + if m != nil { + return m.ArpTerminationTable + } + return nil +} + +func (*BridgeDomain) XXX_MessageName() string { + return "vpp.l2.BridgeDomain" +} + +type BridgeDomain_Interface struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + BridgedVirtualInterface bool `protobuf:"varint,2,opt,name=bridged_virtual_interface,json=bridgedVirtualInterface,proto3" json:"bridged_virtual_interface,omitempty"` + SplitHorizonGroup uint32 `protobuf:"varint,3,opt,name=split_horizon_group,json=splitHorizonGroup,proto3" json:"split_horizon_group,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BridgeDomain_Interface) Reset() { *m = BridgeDomain_Interface{} } +func (m *BridgeDomain_Interface) String() string { return proto.CompactTextString(m) } +func 
(*BridgeDomain_Interface) ProtoMessage() {} +func (*BridgeDomain_Interface) Descriptor() ([]byte, []int) { + return fileDescriptor_bridge_domain_7168cf77536c0bfe, []int{0, 0} +} +func (m *BridgeDomain_Interface) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BridgeDomain_Interface.Unmarshal(m, b) +} +func (m *BridgeDomain_Interface) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BridgeDomain_Interface.Marshal(b, m, deterministic) +} +func (dst *BridgeDomain_Interface) XXX_Merge(src proto.Message) { + xxx_messageInfo_BridgeDomain_Interface.Merge(dst, src) +} +func (m *BridgeDomain_Interface) XXX_Size() int { + return xxx_messageInfo_BridgeDomain_Interface.Size(m) +} +func (m *BridgeDomain_Interface) XXX_DiscardUnknown() { + xxx_messageInfo_BridgeDomain_Interface.DiscardUnknown(m) +} + +var xxx_messageInfo_BridgeDomain_Interface proto.InternalMessageInfo + +func (m *BridgeDomain_Interface) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BridgeDomain_Interface) GetBridgedVirtualInterface() bool { + if m != nil { + return m.BridgedVirtualInterface + } + return false +} + +func (m *BridgeDomain_Interface) GetSplitHorizonGroup() uint32 { + if m != nil { + return m.SplitHorizonGroup + } + return 0 +} + +func (*BridgeDomain_Interface) XXX_MessageName() string { + return "vpp.l2.BridgeDomain.Interface" +} + +type BridgeDomain_ArpTerminationEntry struct { + IpAddress string `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + PhysAddress string `protobuf:"bytes,2,opt,name=phys_address,json=physAddress,proto3" json:"phys_address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BridgeDomain_ArpTerminationEntry) Reset() { *m = BridgeDomain_ArpTerminationEntry{} } +func (m *BridgeDomain_ArpTerminationEntry) String() string { return proto.CompactTextString(m) } +func 
(*BridgeDomain_ArpTerminationEntry) ProtoMessage() {} +func (*BridgeDomain_ArpTerminationEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_bridge_domain_7168cf77536c0bfe, []int{0, 1} +} +func (m *BridgeDomain_ArpTerminationEntry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BridgeDomain_ArpTerminationEntry.Unmarshal(m, b) +} +func (m *BridgeDomain_ArpTerminationEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BridgeDomain_ArpTerminationEntry.Marshal(b, m, deterministic) +} +func (dst *BridgeDomain_ArpTerminationEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_BridgeDomain_ArpTerminationEntry.Merge(dst, src) +} +func (m *BridgeDomain_ArpTerminationEntry) XXX_Size() int { + return xxx_messageInfo_BridgeDomain_ArpTerminationEntry.Size(m) +} +func (m *BridgeDomain_ArpTerminationEntry) XXX_DiscardUnknown() { + xxx_messageInfo_BridgeDomain_ArpTerminationEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_BridgeDomain_ArpTerminationEntry proto.InternalMessageInfo + +func (m *BridgeDomain_ArpTerminationEntry) GetIpAddress() string { + if m != nil { + return m.IpAddress + } + return "" +} + +func (m *BridgeDomain_ArpTerminationEntry) GetPhysAddress() string { + if m != nil { + return m.PhysAddress + } + return "" +} + +func (*BridgeDomain_ArpTerminationEntry) XXX_MessageName() string { + return "vpp.l2.BridgeDomain.ArpTerminationEntry" +} +func init() { + proto.RegisterType((*BridgeDomain)(nil), "vpp.l2.BridgeDomain") + proto.RegisterType((*BridgeDomain_Interface)(nil), "vpp.l2.BridgeDomain.Interface") + proto.RegisterType((*BridgeDomain_ArpTerminationEntry)(nil), "vpp.l2.BridgeDomain.ArpTerminationEntry") +} + +func init() { + proto.RegisterFile("models/vpp/l2/bridge-domain.proto", fileDescriptor_bridge_domain_7168cf77536c0bfe) +} + +var fileDescriptor_bridge_domain_7168cf77536c0bfe = []byte{ + // 455 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 
0x6c, 0x52, 0x5d, 0x6f, 0xd3, 0x30, + 0x14, 0x55, 0xb6, 0xae, 0xa5, 0xde, 0x06, 0xc2, 0x65, 0x5a, 0xa8, 0xc4, 0xd4, 0xf1, 0x42, 0x5e, + 0x9a, 0x48, 0x81, 0xa7, 0x21, 0x21, 0x75, 0xe2, 0xf3, 0x35, 0x1a, 0x20, 0x21, 0x24, 0xeb, 0x26, + 0x71, 0x5c, 0x0b, 0xc7, 0xb6, 0x1c, 0xa7, 0xd3, 0xf8, 0x0b, 0xfc, 0x31, 0xfe, 0xc7, 0xfe, 0x08, + 0x8a, 0x9d, 0xf5, 0x03, 0xf5, 0x2d, 0xe7, 0x9e, 0x73, 0xcf, 0x3d, 0xf1, 0xbd, 0xe8, 0xb2, 0x56, + 0x25, 0x15, 0x4d, 0xb2, 0xd2, 0x3a, 0x11, 0x69, 0x92, 0x1b, 0x5e, 0x32, 0x3a, 0x2f, 0x55, 0x0d, + 0x5c, 0xc6, 0xda, 0x28, 0xab, 0xf0, 0x70, 0xa5, 0x75, 0x2c, 0xd2, 0xe9, 0x9c, 0x71, 0xbb, 0x6c, + 0xf3, 0xb8, 0x50, 0x75, 0xc2, 0x14, 0x53, 0x89, 0xa3, 0xf3, 0xb6, 0x72, 0xc8, 0x01, 0xf7, 0xe5, + 0xdb, 0x5e, 0xde, 0x0f, 0xd0, 0xc9, 0xb5, 0xb3, 0x7b, 0xef, 0xdc, 0x30, 0x46, 0x03, 0x09, 0x35, + 0x0d, 0x83, 0x59, 0x10, 0x8d, 0x33, 0xf7, 0x8d, 0x9f, 0xa1, 0xa3, 0x4a, 0x28, 0x55, 0x86, 0x07, + 0xb3, 0x20, 0x7a, 0x94, 0x79, 0x80, 0x53, 0x74, 0xd6, 0xca, 0x5f, 0x52, 0xdd, 0x4a, 0xd2, 0x4a, + 0x5e, 0x40, 0x63, 0x89, 0x57, 0x1d, 0x3a, 0xd5, 0xa4, 0x27, 0xbf, 0x7a, 0xee, 0xa3, 0xeb, 0x09, + 0xd1, 0xa8, 0x52, 0xe6, 0x16, 0x4c, 0x19, 0x0e, 0x9c, 0xea, 0x01, 0x76, 0x33, 0x04, 0x05, 0x23, + 0xc3, 0x23, 0x3f, 0xc3, 0x01, 0xfc, 0x0a, 0x3d, 0x01, 0xa3, 0x89, 0xa5, 0xa6, 0xe6, 0x12, 0x2c, + 0x57, 0x32, 0x1c, 0x3a, 0xfe, 0x31, 0x18, 0x7d, 0xb3, 0xa9, 0xe2, 0x73, 0x34, 0xaa, 0xa1, 0x20, + 0xc0, 0x68, 0x38, 0x9a, 0x05, 0xd1, 0x69, 0x36, 0xac, 0xa1, 0x58, 0x30, 0x8a, 0xdf, 0x21, 0xc4, + 0xa5, 0xa5, 0xa6, 0x82, 0x82, 0x36, 0x61, 0x39, 0x3b, 0x8c, 0x8e, 0xd3, 0x8b, 0xd8, 0x3f, 0x56, + 0xbc, 0xfd, 0xe7, 0xf1, 0x97, 0x07, 0x59, 0xb6, 0xd5, 0x81, 0x7f, 0xa2, 0xb3, 0xff, 0x12, 0x10, + 0x0b, 0xb9, 0xa0, 0x61, 0xe5, 0xac, 0xa2, 0xbd, 0x56, 0x8b, 0x9d, 0x70, 0x1f, 0xa4, 0x35, 0x77, + 0xd9, 0x64, 0x37, 0xf1, 0x4d, 0x67, 0x32, 0xfd, 0x13, 0xa0, 0xf1, 0x7a, 0xee, 0xde, 0xb7, 0xbf, + 0x42, 0xcf, 0xfd, 0xba, 0x4b, 0xb2, 0xe2, 0xc6, 0xb6, 0x20, 0xc8, 0x3a, 0x5d, 0xbf, 0x8f, 0xf3, + 
0x5e, 0xf0, 0xcd, 0xf3, 0x1b, 0xbf, 0x18, 0x4d, 0x1a, 0x2d, 0xb8, 0x25, 0x4b, 0x65, 0xf8, 0x6f, + 0x25, 0x09, 0x33, 0xaa, 0xd5, 0x6e, 0x3f, 0xa7, 0xd9, 0x53, 0x47, 0x7d, 0xf6, 0xcc, 0xa7, 0x8e, + 0x98, 0x7e, 0x47, 0x93, 0x3d, 0xc9, 0xf1, 0x0b, 0x84, 0xb8, 0x26, 0x50, 0x96, 0x86, 0x36, 0x4d, + 0x1f, 0x6e, 0xcc, 0xf5, 0xc2, 0x17, 0xf0, 0x25, 0x3a, 0xd1, 0xcb, 0xbb, 0x66, 0x2d, 0x38, 0x70, + 0x82, 0xe3, 0xae, 0xd6, 0x4b, 0xae, 0xaf, 0xfe, 0xde, 0x5f, 0x04, 0x3f, 0xde, 0x6c, 0x9d, 0xa6, + 0xe0, 0x0c, 0xac, 0xea, 0x0e, 0x7a, 0x0e, 0x8c, 0x4a, 0x9b, 0x80, 0xe6, 0xc9, 0xce, 0x95, 0xbf, + 0x5d, 0x69, 0x4d, 0x44, 0x9a, 0x0f, 0xdd, 0xa1, 0xbe, 0xfe, 0x17, 0x00, 0x00, 0xff, 0xff, 0xc5, + 0xb4, 0x9e, 0x01, 0x04, 0x03, 0x00, 0x00, +} diff --git a/api/models/vpp/l2/bridge-domain.proto b/api/models/vpp/l2/bridge-domain.proto new file mode 100644 index 0000000000..7d9e0a57e8 --- /dev/null +++ b/api/models/vpp/l2/bridge-domain.proto @@ -0,0 +1,31 @@ +syntax = "proto3"; + +package vpp.l2; + +option go_package = "github.com/ligato/vpp-agent/api/models/vpp/l2;vpp_l2"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option (gogoproto.messagename_all) = true; + +message BridgeDomain { + string name = 1; /* bridge domain name (can be any string) */ + bool flood = 2; /* enable/disable broadcast/multicast flooding in the BD */ + bool unknown_unicast_flood = 3; /* enable/disable unknown unicast flood in the BD */ + bool forward = 4; /* enable/disable forwarding on all interfaces in the BD */ + bool learn = 5; /* enable/disable learning on all interfaces in the BD */ + bool arp_termination = 6; /* enable/disable ARP termination in the BD */ + uint32 mac_age = 7; /* MAC aging time in min, 0 for disabled aging */ + + message Interface { + string name = 1; /* interface name belonging to this bridge domain */ + bool bridged_virtual_interface = 2; /* true if this is a BVI interface */ + uint32 split_horizon_group = 3; /* VXLANs in the same BD need the same non-zero SHG */ + } + 
repeated Interface interfaces = 100; /* list of interfaces */ + + message ArpTerminationEntry { + string ip_address = 1; /* IP address */ + string phys_address = 2; /* MAC address matching to the IP */ + } + repeated ArpTerminationEntry arp_termination_table = 102; /* list of ARP termination entries */ +} diff --git a/api/models/vpp/l2/fib.pb.go b/api/models/vpp/l2/fib.pb.go new file mode 100644 index 0000000000..2fe1f713f5 --- /dev/null +++ b/api/models/vpp/l2/fib.pb.go @@ -0,0 +1,156 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: models/vpp/l2/fib.proto + +package vpp_l2 // import "github.com/ligato/vpp-agent/api/models/vpp/l2" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type FIBEntry_Action int32 + +const ( + FIBEntry_FORWARD FIBEntry_Action = 0 + FIBEntry_DROP FIBEntry_Action = 1 +) + +var FIBEntry_Action_name = map[int32]string{ + 0: "FORWARD", + 1: "DROP", +} +var FIBEntry_Action_value = map[string]int32{ + "FORWARD": 0, + "DROP": 1, +} + +func (x FIBEntry_Action) String() string { + return proto.EnumName(FIBEntry_Action_name, int32(x)) +} +func (FIBEntry_Action) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_fib_aa3dfca3809353aa, []int{0, 0} +} + +type FIBEntry struct { + PhysAddress string `protobuf:"bytes,1,opt,name=phys_address,json=physAddress,proto3" json:"phys_address,omitempty"` + BridgeDomain string `protobuf:"bytes,2,opt,name=bridge_domain,json=bridgeDomain,proto3" json:"bridge_domain,omitempty"` + Action FIBEntry_Action `protobuf:"varint,3,opt,name=action,proto3,enum=vpp.l2.FIBEntry_Action" json:"action,omitempty"` + OutgoingInterface string `protobuf:"bytes,4,opt,name=outgoing_interface,json=outgoingInterface,proto3" json:"outgoing_interface,omitempty"` + StaticConfig bool `protobuf:"varint,5,opt,name=static_config,json=staticConfig,proto3" json:"static_config,omitempty"` + BridgedVirtualInterface bool `protobuf:"varint,6,opt,name=bridged_virtual_interface,json=bridgedVirtualInterface,proto3" json:"bridged_virtual_interface,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FIBEntry) Reset() { *m = FIBEntry{} } +func (m *FIBEntry) String() string { return proto.CompactTextString(m) } +func (*FIBEntry) ProtoMessage() {} +func (*FIBEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_fib_aa3dfca3809353aa, []int{0} +} +func (m *FIBEntry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FIBEntry.Unmarshal(m, b) +} +func (m *FIBEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_FIBEntry.Marshal(b, m, deterministic) +} +func (dst *FIBEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_FIBEntry.Merge(dst, src) +} +func (m *FIBEntry) XXX_Size() int { + return xxx_messageInfo_FIBEntry.Size(m) +} +func (m *FIBEntry) XXX_DiscardUnknown() { + xxx_messageInfo_FIBEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_FIBEntry proto.InternalMessageInfo + +func (m *FIBEntry) GetPhysAddress() string { + if m != nil { + return m.PhysAddress + } + return "" +} + +func (m *FIBEntry) GetBridgeDomain() string { + if m != nil { + return m.BridgeDomain + } + return "" +} + +func (m *FIBEntry) GetAction() FIBEntry_Action { + if m != nil { + return m.Action + } + return FIBEntry_FORWARD +} + +func (m *FIBEntry) GetOutgoingInterface() string { + if m != nil { + return m.OutgoingInterface + } + return "" +} + +func (m *FIBEntry) GetStaticConfig() bool { + if m != nil { + return m.StaticConfig + } + return false +} + +func (m *FIBEntry) GetBridgedVirtualInterface() bool { + if m != nil { + return m.BridgedVirtualInterface + } + return false +} + +func (*FIBEntry) XXX_MessageName() string { + return "vpp.l2.FIBEntry" +} +func init() { + proto.RegisterType((*FIBEntry)(nil), "vpp.l2.FIBEntry") + proto.RegisterEnum("vpp.l2.FIBEntry_Action", FIBEntry_Action_name, FIBEntry_Action_value) +} + +func init() { proto.RegisterFile("models/vpp/l2/fib.proto", fileDescriptor_fib_aa3dfca3809353aa) } + +var fileDescriptor_fib_aa3dfca3809353aa = []byte{ + // 334 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x91, 0xcf, 0x4e, 0x32, 0x31, + 0x14, 0xc5, 0xbf, 0xe1, 0xc3, 0x11, 0x0b, 0x1a, 0xec, 0x86, 0xd1, 0x85, 0x22, 0x6e, 0xd8, 0x30, + 0x4d, 0x46, 0x57, 0xb8, 0x02, 0x91, 0x84, 0x15, 0x66, 0x16, 0x9a, 0xb8, 0x99, 0x74, 0xfe, 0x95, + 0x26, 0x43, 0x6f, 0xd3, 0xe9, 0x4c, 0xc2, 0x7b, 0xf9, 0x10, 0xbe, 0x87, 0x2f, 0x62, 0x68, 0x19, + 0x83, 0xbb, 0xf6, 0x77, 0xce, 0xbd, 0xf7, 0xe4, 0x5e, 0x34, 0xd8, 0x42, 0x9a, 
0x15, 0x25, 0xa9, + 0xa5, 0x24, 0x45, 0x40, 0x72, 0x1e, 0xfb, 0x52, 0x81, 0x06, 0xec, 0xd6, 0x52, 0xfa, 0x45, 0x70, + 0x3d, 0x61, 0x5c, 0x6f, 0xaa, 0xd8, 0x4f, 0x60, 0x4b, 0x18, 0x30, 0x20, 0x46, 0x8e, 0xab, 0xdc, + 0xfc, 0xcc, 0xc7, 0xbc, 0x6c, 0xd9, 0xe8, 0xb3, 0x85, 0x3a, 0xcb, 0xd5, 0xfc, 0x45, 0x68, 0xb5, + 0xc3, 0x77, 0xa8, 0x27, 0x37, 0xbb, 0x32, 0xa2, 0x69, 0xaa, 0xb2, 0xb2, 0xf4, 0x9c, 0xa1, 0x33, + 0x3e, 0x0b, 0xbb, 0x7b, 0x36, 0xb3, 0x08, 0xdf, 0xa3, 0xf3, 0x58, 0xf1, 0x94, 0x65, 0x51, 0x0a, + 0x5b, 0xca, 0x85, 0xd7, 0x32, 0x9e, 0x9e, 0x85, 0x0b, 0xc3, 0x30, 0x41, 0x2e, 0x4d, 0x34, 0x07, + 0xe1, 0xfd, 0x1f, 0x3a, 0xe3, 0x8b, 0x60, 0xe0, 0xdb, 0x70, 0x7e, 0x33, 0xc9, 0x9f, 0x19, 0x39, + 0x3c, 0xd8, 0xf0, 0x04, 0x61, 0xa8, 0x34, 0x03, 0x2e, 0x58, 0xc4, 0x85, 0xce, 0x54, 0x4e, 0x93, + 0xcc, 0x6b, 0x9b, 0xd6, 0x97, 0x8d, 0xb2, 0x6a, 0x84, 0x7d, 0x88, 0x52, 0x53, 0xcd, 0x93, 0x28, + 0x01, 0x91, 0x73, 0xe6, 0x9d, 0x0c, 0x9d, 0x71, 0x27, 0xec, 0x59, 0xf8, 0x6c, 0x18, 0x9e, 0xa2, + 0x2b, 0x1b, 0x2a, 0x8d, 0x6a, 0xae, 0x74, 0x45, 0x8b, 0xa3, 0xd6, 0xae, 0x29, 0x18, 0x1c, 0x0c, + 0x6f, 0x56, 0xff, 0x1d, 0x30, 0xba, 0x45, 0xae, 0x4d, 0x88, 0xbb, 0xe8, 0x74, 0xb9, 0x0e, 0xdf, + 0x67, 0xe1, 0xa2, 0xff, 0x0f, 0x77, 0x50, 0x7b, 0x11, 0xae, 0x5f, 0xfb, 0xce, 0x7c, 0xfa, 0xf5, + 0x7d, 0xe3, 0x7c, 0x3c, 0x1e, 0xed, 0xba, 0xe0, 0x8c, 0x6a, 0xd8, 0xdf, 0x65, 0x42, 0x59, 0x26, + 0x34, 0xa1, 0x92, 0x93, 0x3f, 0xc7, 0x7a, 0xaa, 0xa5, 0x8c, 0x8a, 0x20, 0x76, 0xcd, 0xe6, 0x1f, + 0x7e, 0x02, 0x00, 0x00, 0xff, 0xff, 0x5b, 0x90, 0xdd, 0x2e, 0xcb, 0x01, 0x00, 0x00, +} diff --git a/api/models/vpp/l2/fib.proto b/api/models/vpp/l2/fib.proto new file mode 100644 index 0000000000..f3d470b531 --- /dev/null +++ b/api/models/vpp/l2/fib.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package vpp.l2; + +option go_package = "github.com/ligato/vpp-agent/api/models/vpp/l2;vpp_l2"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option (gogoproto.messagename_all) = true; + +message 
FIBEntry { + string phys_address = 1; /* unique destination MAC address */ + string bridge_domain = 2; /* name of bridge domain this FIB table entry belongs to */ + enum Action { + FORWARD = 0; /* forward the matching frame */ + DROP = 1; /* drop the matching frame */ + }; + Action action = 3; /* action to tke on matching frames */ + string outgoing_interface = 4; /* outgoing interface for matching frames */ + bool static_config = 5; /* true if this is a statically configured FIB entry */ + bool bridged_virtual_interface = 6; /* the MAC address is a bridge virtual interface MAC */ +} diff --git a/api/models/vpp/l2/keys.go b/api/models/vpp/l2/keys.go new file mode 100644 index 0000000000..c069b9c304 --- /dev/null +++ b/api/models/vpp/l2/keys.go @@ -0,0 +1,108 @@ +// Copyright (c) 2017 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vpp_l2 + +import ( + "strings" + + "github.com/ligato/vpp-agent/pkg/models" +) + +// ModuleName is the module name used for models. 
+const ModuleName = "vpp.l2" + +var ( + ModelBridgeDomain = models.Register(&BridgeDomain{}, models.Spec{ + Module: ModuleName, + Type: "bridge-domain", + Version: "v2", + }) + + ModelFIBEntry = models.Register(&FIBEntry{}, models.Spec{ + Module: ModuleName, + Type: "fib", + Version: "v2", + }, models.WithNameTemplate("{{.BridgeDomain}}/mac/{{.PhysAddress}}")) + + ModelXConnectPair = models.Register(&XConnectPair{}, models.Spec{ + Module: ModuleName, + Type: "xconnect", + Version: "v2", + }, models.WithNameTemplate("{{.ReceiveInterface}}")) +) + +// BridgeDomainKey returns the key used in NB DB to store the configuration of the +// given bridge domain. +func BridgeDomainKey(bdName string) string { + return models.Key(&BridgeDomain{ + Name: bdName, + }) +} + +// FIBKey returns the key used in NB DB to store the configuration of the +// given L2 FIB entry. +func FIBKey(bdName string, fibMac string) string { + return models.Key(&FIBEntry{ + BridgeDomain: bdName, + PhysAddress: fibMac, + }) +} + +// XConnectKey returns the key used in NB DB to store the configuration of the +// given xConnect (identified by RX interface). +func XConnectKey(rxIface string) string { + return models.Key(&XConnectPair{ + ReceiveInterface: rxIface, + }) +} + +/* BD <-> interface binding (derived) */ +const ( + // bdInterfaceKeyTemplate is a template for (derived) key representing binding + // between interface and a bridge domain. + bdInterfaceKeyTemplate = "vpp/bd/{bd}/interface/{iface}" +) + +const ( + // InvalidKeyPart is used in key for parts which are invalid + InvalidKeyPart = "" +) + +/* BD <-> interface binding (derived) */ + +// BDInterfaceKey returns the key used to represent binding between the given interface +// and the bridge domain. 
+func BDInterfaceKey(bdName string, iface string) string { + if bdName == "" { + bdName = InvalidKeyPart + } + if iface == "" { + iface = InvalidKeyPart + } + key := strings.Replace(bdInterfaceKeyTemplate, "{bd}", bdName, 1) + key = strings.Replace(key, "{iface}", iface, 1) + return key +} + +// ParseBDInterfaceKey parses key representing binding between interface and a bridge +// domain. +func ParseBDInterfaceKey(key string) (bdName string, iface string, isBDIfaceKey bool) { + keyComps := strings.Split(key, "/") + if len(keyComps) >= 5 && keyComps[0] == "vpp" && keyComps[1] == "bd" && keyComps[3] == "interface" { + iface = strings.Join(keyComps[4:], "/") + return keyComps[2], iface, true + } + return "", "", false +} diff --git a/api/models/vpp/l2/keys_test.go b/api/models/vpp/l2/keys_test.go new file mode 100644 index 0000000000..abd1f02d14 --- /dev/null +++ b/api/models/vpp/l2/keys_test.go @@ -0,0 +1,353 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package vpp_l2 + +import ( + "testing" +) + +/*func TestBridgeDomainKey(t *testing.T) { + tests := []struct { + name string + bdName string + expectedKey string + }{ + { + name: "valid BD name", + bdName: "bd1", + expectedKey: "vpp/config/v2/bd/bd1", + }, + { + name: "invalid BD name", + bdName: "", + expectedKey: "vpp/config/v2/bd/", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := BridgeDomainKey(test.bdName) + if key != test.expectedKey { + t.Errorf("failed for: bdName=%s\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.bdName, test.expectedKey, key) + } + }) + } +} + +func TestParseBDNameFromKey(t *testing.T) { + tests := []struct { + name string + key string + expectedBDName string + expectedIsBDKey bool + }{ + { + name: "valid BD name", + key: "vpp/config/v2/bd/bd1", + expectedBDName: "bd1", + expectedIsBDKey: true, + }, + { + name: "invalid BD name", + key: "vpp/config/v2/bd/", + expectedBDName: "", + expectedIsBDKey: true, + }, + { + name: "not BD key", + key: "vpp/config/v2/bd/bd1/fib/aa:aa:aa:aa:aa:aa", + expectedBDName: "", + expectedIsBDKey: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + bdName, isBDKey := models.Model(&BridgeDomain{}).ParseKey(test.key) + if isBDKey != test.expectedIsBDKey { + t.Errorf("expected isBDKey: %v\tgot: %v", test.expectedIsBDKey, isBDKey) + } + if bdName != test.expectedBDName { + t.Errorf("expected bdName: %s\tgot: %s", test.expectedBDName, bdName) + } + }) + } +}*/ + +func TestBDInterfaceKey(t *testing.T) { + tests := []struct { + name string + bdName string + iface string + expectedKey string + }{ + { + name: "valid BD & iface names", + bdName: "bd1", + iface: "tap0", + expectedKey: "vpp/bd/bd1/interface/tap0", + }, + { + name: "invalid BD but valid interface", + bdName: "", + iface: "tap1", + expectedKey: "vpp/bd//interface/tap1", + }, + { + name: "invalid BD but valid interface", + bdName: "", + iface: "tap1", + expectedKey: 
"vpp/bd//interface/tap1", + }, + { + name: "valid BD but invalid interface", + bdName: "bd2", + iface: "", + expectedKey: "vpp/bd/bd2/interface/", + }, + { + name: "invalid parameters", + bdName: "", + iface: "", + expectedKey: "vpp/bd//interface/", + }, + { + name: "Gbe interface", + bdName: "bd5", + iface: "GigabitEthernet0/8/0", + expectedKey: "vpp/bd/bd5/interface/GigabitEthernet0/8/0", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := BDInterfaceKey(test.bdName, test.iface) + if key != test.expectedKey { + t.Errorf("failed for: bdName=%s iface=%s\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.bdName, test.iface, test.expectedKey, key) + } + }) + } +} + +func TestParseBDInterfaceKey(t *testing.T) { + tests := []struct { + name string + key string + expectedBDName string + expectedIface string + expectedIsBDIfaceKey bool + }{ + { + name: "valid BD & iface names", + key: "vpp/bd/bd1/interface/tap0", + expectedBDName: "bd1", + expectedIface: "tap0", + expectedIsBDIfaceKey: true, + }, + { + name: "invalid BD but valid interface", + key: "vpp/bd//interface/tap1", + expectedBDName: "", + expectedIface: "tap1", + expectedIsBDIfaceKey: true, + }, + { + name: "valid BD but invalid interface", + key: "vpp/bd/bd2/interface/", + expectedBDName: "bd2", + expectedIface: "", + expectedIsBDIfaceKey: true, + }, + { + name: "Gbe interface", + key: "vpp/bd/bd4/interface/GigabitEthernet0/8/0", + expectedBDName: "bd4", + expectedIface: "GigabitEthernet0/8/0", + expectedIsBDIfaceKey: true, + }, + { + name: "not BD-interface key", + key: "vpp/config/v2/bd/bd1", + expectedBDName: "", + expectedIface: "", + expectedIsBDIfaceKey: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + bdName, iface, isBDIfaceKey := ParseBDInterfaceKey(test.key) + if isBDIfaceKey != test.expectedIsBDIfaceKey { + t.Errorf("expected isBDIfaceKey: %v\tgot: %v", test.expectedIsBDIfaceKey, isBDIfaceKey) + } + if bdName != 
test.expectedBDName { + t.Errorf("expected bdName: %s\tgot: %s", test.expectedBDName, bdName) + } + if iface != test.expectedIface { + t.Errorf("expected iface: %s\tgot: %s", test.expectedIface, iface) + } + }) + } +} + +/*func TestFIBKey(t *testing.T) { + tests := []struct { + name string + bdName string + fibMac string + expectedKey string + }{ + { + name: "valid parameters", + bdName: "bd1", + fibMac: "12:34:56:78:9a:bc", + expectedKey: "vpp/config/v2/bd/bd1/fib/12:34:56:78:9a:bc", + }, + { + name: "invalid bd", + bdName: "", + fibMac: "aa:aa:aa:bb:bb:bb", + expectedKey: "vpp/config/v2/bd//fib/aa:aa:aa:bb:bb:bb", + }, + { + name: "invalid hw address", + bdName: "bd2", + fibMac: "in:va:li:d", + expectedKey: "vpp/config/v2/bd/bd2/fib/", + }, + { + name: "invalid parameters", + bdName: "", + fibMac: "192.168.1.1", + expectedKey: "vpp/config/v2/bd//fib/", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := FIBKey(test.bdName, test.fibMac) + if key != test.expectedKey { + t.Errorf("failed for: bdName=%s fibMac=%s\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.bdName, test.fibMac, test.expectedKey, key) + } + }) + } +} + +func TestParseFIBKey(t *testing.T) { + tests := []struct { + name string + key string + expectedBDName string + expectedfibMac string + expectedIsFIBKey bool + }{ + { + name: "valid FIB key", + key: "vpp/config/v2/bd/bd1/fib/12:34:56:78:9a:bc", + expectedBDName: "bd1", + expectedfibMac: "12:34:56:78:9a:bc", + expectedIsFIBKey: true, + }, + { + name: "invalid bd", + key: "vpp/config/v2/bd//fib/aa:bb:cc:dd:ee:ff", + expectedBDName: "", + expectedfibMac: "aa:bb:cc:dd:ee:ff", + expectedIsFIBKey: true, + }, + { + name: "invalid fib", + key: "vpp/config/v2/bd/bd2/fib/", + expectedBDName: "bd2", + expectedfibMac: "", + expectedIsFIBKey: true, + }, + { + name: "invalid params", + key: "vpp/config/v2/bd//fib/", + expectedBDName: "", + expectedfibMac: "", + expectedIsFIBKey: true, + }, + { + name: "not FIB key", + key: 
"vpp/bd/bd1/interface/tap0", + expectedBDName: "", + expectedfibMac: "", + expectedIsFIBKey: false, + }, + { + name: "not FIB key", + key: "vpp/config/v2/bd/bd1", + expectedBDName: "", + expectedfibMac: "", + expectedIsFIBKey: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + name, isFIBKey := models.Model(&FIBEntry{}).ParseKey(test.key) + nameParts := strings.Split(name, "/") + if len(nameParts) != 3 { + t.Fatalf("invalid name: %q", name) + } + bdName, fibMac := nameParts[0], nameParts[2] + if isFIBKey != test.expectedIsFIBKey { + t.Errorf("expected isFIBKey: %v\tgot: %v", test.expectedIsFIBKey, isFIBKey) + } + if bdName != test.expectedBDName { + t.Errorf("expected bdName: %s\tgot: %s", test.expectedBDName, bdName) + } + if fibMac != test.expectedfibMac { + t.Errorf("expected iface: %s\tgot: %s", test.expectedfibMac, fibMac) + } + }) + } +}*/ + +/*func TestXConnectKey(t *testing.T) { + tests := []struct { + name string + rxIface string + expectedKey string + }{ + { + name: "valid interface", + rxIface: "memif0", + expectedKey: "vpp/config/v2/xconnect/memif0", + }, + { + name: "invalid interface", + rxIface: "", + expectedKey: "vpp/config/v2/xconnect/", + }, + { + name: "gbe", + rxIface: "GigabitEthernet0/8/0", + expectedKey: "vpp/config/v2/xconnect/GigabitEthernet0/8/0", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := XConnectKey(test.rxIface) + if key != test.expectedKey { + t.Errorf("failed for: rxIface=%s\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.rxIface, test.expectedKey, key) + } + }) + } +}*/ diff --git a/api/models/vpp/l2/xconnect.pb.go b/api/models/vpp/l2/xconnect.pb.go new file mode 100644 index 0000000000..d9fd550dcb --- /dev/null +++ b/api/models/vpp/l2/xconnect.pb.go @@ -0,0 +1,94 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: models/vpp/l2/xconnect.proto + +package vpp_l2 // import "github.com/ligato/vpp-agent/api/models/vpp/l2" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type XConnectPair struct { + ReceiveInterface string `protobuf:"bytes,1,opt,name=receive_interface,json=receiveInterface,proto3" json:"receive_interface,omitempty"` + TransmitInterface string `protobuf:"bytes,2,opt,name=transmit_interface,json=transmitInterface,proto3" json:"transmit_interface,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *XConnectPair) Reset() { *m = XConnectPair{} } +func (m *XConnectPair) String() string { return proto.CompactTextString(m) } +func (*XConnectPair) ProtoMessage() {} +func (*XConnectPair) Descriptor() ([]byte, []int) { + return fileDescriptor_xconnect_8821f5bfda282a42, []int{0} +} +func (m *XConnectPair) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_XConnectPair.Unmarshal(m, b) +} +func (m *XConnectPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_XConnectPair.Marshal(b, m, deterministic) +} +func (dst *XConnectPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_XConnectPair.Merge(dst, src) +} +func (m *XConnectPair) XXX_Size() int { + return xxx_messageInfo_XConnectPair.Size(m) +} +func (m *XConnectPair) XXX_DiscardUnknown() { + xxx_messageInfo_XConnectPair.DiscardUnknown(m) +} 
+ +var xxx_messageInfo_XConnectPair proto.InternalMessageInfo + +func (m *XConnectPair) GetReceiveInterface() string { + if m != nil { + return m.ReceiveInterface + } + return "" +} + +func (m *XConnectPair) GetTransmitInterface() string { + if m != nil { + return m.TransmitInterface + } + return "" +} + +func (*XConnectPair) XXX_MessageName() string { + return "vpp.l2.XConnectPair" +} +func init() { + proto.RegisterType((*XConnectPair)(nil), "vpp.l2.XConnectPair") +} + +func init() { + proto.RegisterFile("models/vpp/l2/xconnect.proto", fileDescriptor_xconnect_8821f5bfda282a42) +} + +var fileDescriptor_xconnect_8821f5bfda282a42 = []byte{ + // 205 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xc9, 0xcd, 0x4f, 0x49, + 0xcd, 0x29, 0xd6, 0x2f, 0x2b, 0x28, 0xd0, 0xcf, 0x31, 0xd2, 0xaf, 0x48, 0xce, 0xcf, 0xcb, 0x4b, + 0x4d, 0x2e, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, 0x2b, 0x28, 0xd0, 0xcb, 0x31, + 0x92, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, + 0xcf, 0xd7, 0x07, 0x4b, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0xa6, 0x94, + 0xc5, 0xc5, 0x13, 0xe1, 0x0c, 0x31, 0x28, 0x20, 0x31, 0xb3, 0x48, 0x48, 0x9b, 0x4b, 0xb0, 0x28, + 0x35, 0x39, 0x35, 0xb3, 0x2c, 0x35, 0x3e, 0x33, 0xaf, 0x24, 0xb5, 0x28, 0x2d, 0x31, 0x39, 0x55, + 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x48, 0x00, 0x2a, 0xe1, 0x09, 0x13, 0x17, 0xd2, 0xe5, 0x12, + 0x2a, 0x29, 0x4a, 0xcc, 0x2b, 0xce, 0xcd, 0x2c, 0x41, 0x52, 0xcd, 0x04, 0x56, 0x2d, 0x08, 0x93, + 0x81, 0x2b, 0x77, 0xb2, 0x3a, 0xf1, 0x58, 0x8e, 0x31, 0xca, 0x04, 0xc9, 0x81, 0x39, 0x99, 0xe9, + 0x89, 0x25, 0xf9, 0x20, 0x1f, 0xe9, 0x26, 0xa6, 0xa7, 0xe6, 0x95, 0xe8, 0x27, 0x16, 0x64, 0xea, + 0xa3, 0x78, 0xd3, 0xba, 0xac, 0xa0, 0x20, 0x3e, 0xc7, 0x28, 0x89, 0x0d, 0xec, 0x5c, 0x63, 0x40, + 0x00, 0x00, 0x00, 0xff, 0xff, 0xc9, 0x1e, 0xc5, 0xe4, 0x05, 0x01, 0x00, 0x00, +} diff --git a/api/models/vpp/l2/xconnect.proto 
b/api/models/vpp/l2/xconnect.proto new file mode 100644 index 0000000000..ea880eff4b --- /dev/null +++ b/api/models/vpp/l2/xconnect.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package vpp.l2; + +option go_package = "github.com/ligato/vpp-agent/api/models/vpp/l2;vpp_l2"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option (gogoproto.messagename_all) = true; + +message XConnectPair { + string receive_interface = 1; + string transmit_interface = 2; +} diff --git a/api/models/vpp/l3/arp.pb.go b/api/models/vpp/l3/arp.pb.go new file mode 100644 index 0000000000..8e0c5fc6f1 --- /dev/null +++ b/api/models/vpp/l3/arp.pb.go @@ -0,0 +1,109 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: models/vpp/l3/arp.proto + +package vpp_l3 // import "github.com/ligato/vpp-agent/api/models/vpp/l3" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type ARPEntry struct { + Interface string `protobuf:"bytes,1,opt,name=interface,proto3" json:"interface,omitempty"` + IpAddress string `protobuf:"bytes,2,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + PhysAddress string `protobuf:"bytes,3,opt,name=phys_address,json=physAddress,proto3" json:"phys_address,omitempty"` + Static bool `protobuf:"varint,4,opt,name=static,proto3" json:"static,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ARPEntry) Reset() { *m = ARPEntry{} } +func (m *ARPEntry) String() string { return proto.CompactTextString(m) } +func (*ARPEntry) ProtoMessage() {} +func (*ARPEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_arp_c5efab006836d39b, []int{0} +} +func (m *ARPEntry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ARPEntry.Unmarshal(m, b) +} +func (m *ARPEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ARPEntry.Marshal(b, m, deterministic) +} +func (dst *ARPEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ARPEntry.Merge(dst, src) +} +func (m *ARPEntry) XXX_Size() int { + return xxx_messageInfo_ARPEntry.Size(m) +} +func (m *ARPEntry) XXX_DiscardUnknown() { + xxx_messageInfo_ARPEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_ARPEntry proto.InternalMessageInfo + +func (m *ARPEntry) GetInterface() string { + if m != nil { + return m.Interface + } + return "" +} + +func (m *ARPEntry) GetIpAddress() string { + if m != nil { + return m.IpAddress + } + return "" +} + +func (m *ARPEntry) GetPhysAddress() string { + if m != nil { + return m.PhysAddress + } + return "" +} + +func (m *ARPEntry) GetStatic() bool { + if m != nil { + return m.Static + } + return false +} + +func (*ARPEntry) XXX_MessageName() string { + return "vpp.l3.ARPEntry" +} +func init() { + 
proto.RegisterType((*ARPEntry)(nil), "vpp.l3.ARPEntry") +} + +func init() { proto.RegisterFile("models/vpp/l3/arp.proto", fileDescriptor_arp_c5efab006836d39b) } + +var fileDescriptor_arp_c5efab006836d39b = []byte{ + // 224 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcf, 0xcd, 0x4f, 0x49, + 0xcd, 0x29, 0xd6, 0x2f, 0x2b, 0x28, 0xd0, 0xcf, 0x31, 0xd6, 0x4f, 0x2c, 0x2a, 0xd0, 0x2b, 0x28, + 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, 0x2b, 0x28, 0xd0, 0xcb, 0x31, 0x96, 0xd2, 0x4d, 0xcf, 0x2c, + 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, 0xcf, 0xd7, 0x07, 0x4b, 0x27, + 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0xa6, 0xd4, 0xc4, 0xc8, 0xc5, 0xe1, 0x18, + 0x14, 0xe0, 0x9a, 0x57, 0x52, 0x54, 0x29, 0x24, 0xc3, 0xc5, 0x99, 0x99, 0x57, 0x92, 0x5a, 0x94, + 0x96, 0x98, 0x9c, 0x2a, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x84, 0x10, 0x10, 0x92, 0xe5, 0xe2, + 0xca, 0x2c, 0x88, 0x4f, 0x4c, 0x49, 0x29, 0x4a, 0x2d, 0x2e, 0x96, 0x60, 0x82, 0x4a, 0x17, 0x38, + 0x42, 0x04, 0x84, 0x14, 0xb9, 0x78, 0x0a, 0x32, 0x2a, 0x8b, 0xe1, 0x0a, 0x98, 0xc1, 0x0a, 0xb8, + 0x41, 0x62, 0x30, 0x25, 0x62, 0x5c, 0x6c, 0xc5, 0x25, 0x89, 0x25, 0x99, 0xc9, 0x12, 0x2c, 0x0a, + 0x8c, 0x1a, 0x1c, 0x41, 0x50, 0x9e, 0x93, 0xd5, 0x89, 0xc7, 0x72, 0x8c, 0x51, 0x26, 0x48, 0x2e, + 0xcf, 0xc9, 0x4c, 0x4f, 0x2c, 0xc9, 0x07, 0xf9, 0x52, 0x37, 0x31, 0x3d, 0x35, 0xaf, 0x44, 0x3f, + 0xb1, 0x20, 0x53, 0x1f, 0xc5, 0xeb, 0xd6, 0x65, 0x05, 0x05, 0xf1, 0x39, 0xc6, 0x49, 0x6c, 0x60, + 0x7f, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x35, 0x78, 0x04, 0xc0, 0x19, 0x01, 0x00, 0x00, +} diff --git a/api/models/vpp/l3/arp.proto b/api/models/vpp/l3/arp.proto new file mode 100644 index 0000000000..f2e18c5cdd --- /dev/null +++ b/api/models/vpp/l3/arp.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package vpp.l3; + +option go_package = "github.com/ligato/vpp-agent/api/models/vpp/l3;vpp_l3"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; 
+option (gogoproto.messagename_all) = true; + +message ARPEntry { + string interface = 1; + string ip_address = 2; + string phys_address = 3; + bool static = 4; +} diff --git a/api/models/vpp/l3/keys.go b/api/models/vpp/l3/keys.go new file mode 100644 index 0000000000..e3bb19837e --- /dev/null +++ b/api/models/vpp/l3/keys.go @@ -0,0 +1,102 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vpp_l3 + +import ( + "strings" + + "github.com/ligato/vpp-agent/pkg/models" +) + +// ModuleName is the module name used for models. 
+const ModuleName = "vpp" + +var ( + ModelARPEntry = models.Register(&ARPEntry{}, models.Spec{ + Module: ModuleName, + Type: "arp", + Version: "v2", + }, models.WithNameTemplate( + "{{.Interface}}/{{.IpAddress}}", + )) + + ModelRoute = models.Register(&Route{}, models.Spec{ + Module: ModuleName, + Type: "route", + Version: "v2", + }, models.WithNameTemplate( + `vrf/{{.VrfId}}/dst/{{with ipnet .DstNetwork}}{{printf "%s/%d" .IP .MaskSize}}{{end}}/gw/{{.NextHopAddr}}`, + )) + + ModelProxyARP = models.Register(&ProxyARP{}, models.Spec{ + Module: ModuleName, + Type: "proxyarp-global", + Version: "v2", + }, models.WithNameTemplate("settings")) + + ModelIPScanNeighbor = models.Register(&IPScanNeighbor{}, models.Spec{ + Module: ModuleName, + Type: "ipscanneigh-global", + Version: "v2", + }, models.WithNameTemplate("settings")) +) + +// ProxyARPKey is key for global proxy arp +func ProxyARPKey() string { + return models.Key(&ProxyARP{}) +} + +// ProxyARPKey is key for global ip scan neighbor +func IPScanNeighborKey() string { + return models.Key(&IPScanNeighbor{}) +} + +// RouteKey returns the key used in ETCD to store vpp route for vpp instance. +func RouteKey(vrf uint32, dstNet string, nextHopAddr string) string { + return models.Key(&Route{ + VrfId: vrf, + DstNetwork: dstNet, + NextHopAddr: nextHopAddr, + }) +} + +// ArpEntryKey returns the key to store ARP entry +func ArpEntryKey(iface, ipAddr string) string { + return models.Key(&ARPEntry{ + Interface: iface, + IpAddress: ipAddr, + }) +} + +const ( + proxyARPInterfacePrefix = "vpp/proxyarp/interface/" + proxyARPInterfaceTemplate = proxyARPInterfacePrefix + "{iface}" +) + +// ProxyARPInterfaceKey returns the key used to represent binding for interface with enabled proxy ARP. 
+func ProxyARPInterfaceKey(iface string) string { + key := proxyARPInterfaceTemplate + key = strings.Replace(key, "{iface}", iface, 1) + return key +} + +// ParseProxyARPInterfaceKey parses key representing binding for interface with enabled proxy ARP. +func ParseProxyARPInterfaceKey(key string) (iface string, isProxyARPInterfaceKey bool) { + suffix := strings.TrimPrefix(key, proxyARPInterfacePrefix) + if suffix != key && suffix != "" { + return suffix, true + } + return "", false +} diff --git a/api/models/vpp/l3/keys_test.go b/api/models/vpp/l3/keys_test.go new file mode 100644 index 0000000000..bb37367c1e --- /dev/null +++ b/api/models/vpp/l3/keys_test.go @@ -0,0 +1,157 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package vpp_l3 + +/*func TestRouteKey(t *testing.T) { + tests := []struct { + name string + vrf uint32 + dstNet string + nextHopAddr string + expectedKey string + }{ + { + name: "route-ipv4", + vrf: 0, + dstNet: "10.10.0.0/24", + nextHopAddr: "", + expectedKey: "vpp/config/v2/route/vrf/0/dst/10.10.0.0/24/gw/0.0.0.0", + }, + { + name: "dst-network-address", + vrf: 0, + dstNet: "10.10.0.255/24", + nextHopAddr: "", + expectedKey: "vpp/config/v2/route/vrf/0/dst/10.10.0.0/24/gw/0.0.0.0", + }, + { + name: "zero-next-hop", + vrf: 0, + dstNet: "10.10.0.1/24", + nextHopAddr: "0.0.0.0", + expectedKey: "vpp/config/v2/route/vrf/0/dst/10.10.0.0/24/gw/0.0.0.0", + }, + { + name: "non-zero-vrf", + vrf: 1, + dstNet: "10.10.0.1/24", + nextHopAddr: "0.0.0.0", + expectedKey: "vpp/config/v2/route/vrf/1/dst/10.10.0.0/24/gw/0.0.0.0", + }, + { + name: "invalid-dst-net-empty-gw", + dstNet: "INVALID", + expectedKey: "vpp/config/v2/route/vrf/0/dst///gw/", + }, + { + name: "invalid-next-hop", + dstNet: "10.10.0.1/24", + nextHopAddr: "INVALID", + expectedKey: "vpp/config/v2/route/vrf/0/dst/10.10.0.0/24/gw/", + }, + { + name: "invalid-dst-net-valid-gw", + dstNet: "INVALID", + nextHopAddr: "1.2.3.4", + expectedKey: "vpp/config/v2/route/vrf/0/dst///gw/1.2.3.4", + }, + { + name: "route-ipv6", + dstNet: "2001:DB8::0001/32", + nextHopAddr: "", + expectedKey: "vpp/config/v2/route/vrf/0/dst/2001:db8::/32/gw/::", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := RouteKey(test.vrf, test.dstNet, test.nextHopAddr) + if key != test.expectedKey { + t.Errorf("failed for: vrf=%d dstNet=%q nextHop=%q\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.vrf, test.dstNet, test.nextHopAddr, test.expectedKey, key) + } + }) + } +} + +func TestParseRouteKey(t *testing.T) { + tests := []struct { + name string + routeKey string + expectedIsRouteKey bool + expectedVrfIndex string + expectedDstNetAddr string + expectedDstNetMask int + expectedNextHopAddr string + }{ + { + name: 
"route-ipv4", + routeKey: "vpp/config/v2/route/vrf/0/dst/10.10.0.0/16/gw/0.0.0.0", + expectedIsRouteKey: true, + expectedVrfIndex: "0", + expectedDstNetAddr: "10.10.0.0", + expectedDstNetMask: 16, + expectedNextHopAddr: "0.0.0.0", + }, + { + name: "route-ipv6", + routeKey: "vpp/config/v2/route/vrf/0/dst/2001:db8::/32/gw/::", + expectedIsRouteKey: true, + expectedVrfIndex: "0", + expectedDstNetAddr: "2001:db8::", + expectedDstNetMask: 32, + expectedNextHopAddr: "::", + }, + { + name: "invalid-key", + routeKey: "vpp/config/v2/route/vrf/0/dst/2001:db8::/32/", + expectedIsRouteKey: false, + }, + { + name: "invalid-key-missing-dst", + routeKey: "vpp/config/v2/route/vrf/0/10.10.0.0/16/gw/0.0.0.0", + expectedIsRouteKey: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + name, isRouteKey := models.Model(&Route{}).ParseKey(test.routeKey) + nameParts := strings.Split(name, "/") + if len(nameParts) != 7 { + t.Fatalf("invalid name: %q", name) + } + vrfIndex, dstNetAddr, nextHopAddr := nameParts[1], nameParts[3], nameParts[6] + dstNetMask, err := strconv.Atoi(nameParts[4]) + if err != nil { + t.Fatalf("invalid mask: %v", dstNetMask) + } + if isRouteKey != test.expectedIsRouteKey { + t.Errorf("expected isRouteKey: %v\tgot: %v", test.expectedIsRouteKey, isRouteKey) + } + if vrfIndex != test.expectedVrfIndex { + t.Errorf("expected vrfIndex: %q\tgot: %q", test.expectedVrfIndex, vrfIndex) + } + if dstNetAddr != test.expectedDstNetAddr { + t.Errorf("expected dstNetAddr: %q\tgot: %q", test.expectedDstNetAddr, dstNetAddr) + } + if dstNetMask != test.expectedDstNetMask { + t.Errorf("expected dstNetMask: %v\tgot: %v", test.expectedDstNetMask, dstNetMask) + } + if nextHopAddr != test.expectedNextHopAddr { + t.Errorf("expected nextHopAddr: %q\tgot: %q", test.expectedNextHopAddr, nextHopAddr) + } + }) + } +}*/ diff --git a/api/models/vpp/l3/l3.pb.go b/api/models/vpp/l3/l3.pb.go new file mode 100644 index 0000000000..06924e0289 --- /dev/null +++ 
b/api/models/vpp/l3/l3.pb.go @@ -0,0 +1,315 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: models/vpp/l3/l3.proto + +package vpp_l3 // import "github.com/ligato/vpp-agent/api/models/vpp/l3" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type IPScanNeighbor_Mode int32 + +const ( + IPScanNeighbor_DISABLED IPScanNeighbor_Mode = 0 + IPScanNeighbor_IPv4 IPScanNeighbor_Mode = 1 + IPScanNeighbor_IPv6 IPScanNeighbor_Mode = 2 + IPScanNeighbor_BOTH IPScanNeighbor_Mode = 3 +) + +var IPScanNeighbor_Mode_name = map[int32]string{ + 0: "DISABLED", + 1: "IPv4", + 2: "IPv6", + 3: "BOTH", +} +var IPScanNeighbor_Mode_value = map[string]int32{ + "DISABLED": 0, + "IPv4": 1, + "IPv6": 2, + "BOTH": 3, +} + +func (x IPScanNeighbor_Mode) String() string { + return proto.EnumName(IPScanNeighbor_Mode_name, int32(x)) +} +func (IPScanNeighbor_Mode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_l3_0d071b38be421a84, []int{1, 0} +} + +type ProxyARP struct { + Interfaces []*ProxyARP_Interface `protobuf:"bytes,1,rep,name=interfaces,proto3" json:"interfaces,omitempty"` + Ranges []*ProxyARP_Range `protobuf:"bytes,2,rep,name=ranges,proto3" json:"ranges,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProxyARP) Reset() { *m = ProxyARP{} } +func (m *ProxyARP) String() string { return proto.CompactTextString(m) } +func (*ProxyARP) 
ProtoMessage() {} +func (*ProxyARP) Descriptor() ([]byte, []int) { + return fileDescriptor_l3_0d071b38be421a84, []int{0} +} +func (m *ProxyARP) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProxyARP.Unmarshal(m, b) +} +func (m *ProxyARP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProxyARP.Marshal(b, m, deterministic) +} +func (dst *ProxyARP) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProxyARP.Merge(dst, src) +} +func (m *ProxyARP) XXX_Size() int { + return xxx_messageInfo_ProxyARP.Size(m) +} +func (m *ProxyARP) XXX_DiscardUnknown() { + xxx_messageInfo_ProxyARP.DiscardUnknown(m) +} + +var xxx_messageInfo_ProxyARP proto.InternalMessageInfo + +func (m *ProxyARP) GetInterfaces() []*ProxyARP_Interface { + if m != nil { + return m.Interfaces + } + return nil +} + +func (m *ProxyARP) GetRanges() []*ProxyARP_Range { + if m != nil { + return m.Ranges + } + return nil +} + +func (*ProxyARP) XXX_MessageName() string { + return "vpp.l3.ProxyARP" +} + +type ProxyARP_Interface struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProxyARP_Interface) Reset() { *m = ProxyARP_Interface{} } +func (m *ProxyARP_Interface) String() string { return proto.CompactTextString(m) } +func (*ProxyARP_Interface) ProtoMessage() {} +func (*ProxyARP_Interface) Descriptor() ([]byte, []int) { + return fileDescriptor_l3_0d071b38be421a84, []int{0, 0} +} +func (m *ProxyARP_Interface) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProxyARP_Interface.Unmarshal(m, b) +} +func (m *ProxyARP_Interface) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProxyARP_Interface.Marshal(b, m, deterministic) +} +func (dst *ProxyARP_Interface) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProxyARP_Interface.Merge(dst, src) +} +func (m *ProxyARP_Interface) 
XXX_Size() int { + return xxx_messageInfo_ProxyARP_Interface.Size(m) +} +func (m *ProxyARP_Interface) XXX_DiscardUnknown() { + xxx_messageInfo_ProxyARP_Interface.DiscardUnknown(m) +} + +var xxx_messageInfo_ProxyARP_Interface proto.InternalMessageInfo + +func (m *ProxyARP_Interface) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (*ProxyARP_Interface) XXX_MessageName() string { + return "vpp.l3.ProxyARP.Interface" +} + +type ProxyARP_Range struct { + FirstIpAddr string `protobuf:"bytes,1,opt,name=first_ip_addr,json=firstIpAddr,proto3" json:"first_ip_addr,omitempty"` + LastIpAddr string `protobuf:"bytes,2,opt,name=last_ip_addr,json=lastIpAddr,proto3" json:"last_ip_addr,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProxyARP_Range) Reset() { *m = ProxyARP_Range{} } +func (m *ProxyARP_Range) String() string { return proto.CompactTextString(m) } +func (*ProxyARP_Range) ProtoMessage() {} +func (*ProxyARP_Range) Descriptor() ([]byte, []int) { + return fileDescriptor_l3_0d071b38be421a84, []int{0, 1} +} +func (m *ProxyARP_Range) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProxyARP_Range.Unmarshal(m, b) +} +func (m *ProxyARP_Range) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProxyARP_Range.Marshal(b, m, deterministic) +} +func (dst *ProxyARP_Range) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProxyARP_Range.Merge(dst, src) +} +func (m *ProxyARP_Range) XXX_Size() int { + return xxx_messageInfo_ProxyARP_Range.Size(m) +} +func (m *ProxyARP_Range) XXX_DiscardUnknown() { + xxx_messageInfo_ProxyARP_Range.DiscardUnknown(m) +} + +var xxx_messageInfo_ProxyARP_Range proto.InternalMessageInfo + +func (m *ProxyARP_Range) GetFirstIpAddr() string { + if m != nil { + return m.FirstIpAddr + } + return "" +} + +func (m *ProxyARP_Range) GetLastIpAddr() string { + if m != nil { + return m.LastIpAddr + } + return "" 
+} + +func (*ProxyARP_Range) XXX_MessageName() string { + return "vpp.l3.ProxyARP.Range" +} + +type IPScanNeighbor struct { + Mode IPScanNeighbor_Mode `protobuf:"varint,1,opt,name=mode,proto3,enum=vpp.l3.IPScanNeighbor_Mode" json:"mode,omitempty"` + ScanInterval uint32 `protobuf:"varint,2,opt,name=scan_interval,json=scanInterval,proto3" json:"scan_interval,omitempty"` + MaxProcTime uint32 `protobuf:"varint,3,opt,name=max_proc_time,json=maxProcTime,proto3" json:"max_proc_time,omitempty"` + MaxUpdate uint32 `protobuf:"varint,4,opt,name=max_update,json=maxUpdate,proto3" json:"max_update,omitempty"` + ScanIntDelay uint32 `protobuf:"varint,5,opt,name=scan_int_delay,json=scanIntDelay,proto3" json:"scan_int_delay,omitempty"` + StaleThreshold uint32 `protobuf:"varint,6,opt,name=stale_threshold,json=staleThreshold,proto3" json:"stale_threshold,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IPScanNeighbor) Reset() { *m = IPScanNeighbor{} } +func (m *IPScanNeighbor) String() string { return proto.CompactTextString(m) } +func (*IPScanNeighbor) ProtoMessage() {} +func (*IPScanNeighbor) Descriptor() ([]byte, []int) { + return fileDescriptor_l3_0d071b38be421a84, []int{1} +} +func (m *IPScanNeighbor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IPScanNeighbor.Unmarshal(m, b) +} +func (m *IPScanNeighbor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IPScanNeighbor.Marshal(b, m, deterministic) +} +func (dst *IPScanNeighbor) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPScanNeighbor.Merge(dst, src) +} +func (m *IPScanNeighbor) XXX_Size() int { + return xxx_messageInfo_IPScanNeighbor.Size(m) +} +func (m *IPScanNeighbor) XXX_DiscardUnknown() { + xxx_messageInfo_IPScanNeighbor.DiscardUnknown(m) +} + +var xxx_messageInfo_IPScanNeighbor proto.InternalMessageInfo + +func (m *IPScanNeighbor) GetMode() IPScanNeighbor_Mode { + if m != nil { + 
return m.Mode + } + return IPScanNeighbor_DISABLED +} + +func (m *IPScanNeighbor) GetScanInterval() uint32 { + if m != nil { + return m.ScanInterval + } + return 0 +} + +func (m *IPScanNeighbor) GetMaxProcTime() uint32 { + if m != nil { + return m.MaxProcTime + } + return 0 +} + +func (m *IPScanNeighbor) GetMaxUpdate() uint32 { + if m != nil { + return m.MaxUpdate + } + return 0 +} + +func (m *IPScanNeighbor) GetScanIntDelay() uint32 { + if m != nil { + return m.ScanIntDelay + } + return 0 +} + +func (m *IPScanNeighbor) GetStaleThreshold() uint32 { + if m != nil { + return m.StaleThreshold + } + return 0 +} + +func (*IPScanNeighbor) XXX_MessageName() string { + return "vpp.l3.IPScanNeighbor" +} +func init() { + proto.RegisterType((*ProxyARP)(nil), "vpp.l3.ProxyARP") + proto.RegisterType((*ProxyARP_Interface)(nil), "vpp.l3.ProxyARP.Interface") + proto.RegisterType((*ProxyARP_Range)(nil), "vpp.l3.ProxyARP.Range") + proto.RegisterType((*IPScanNeighbor)(nil), "vpp.l3.IPScanNeighbor") + proto.RegisterEnum("vpp.l3.IPScanNeighbor_Mode", IPScanNeighbor_Mode_name, IPScanNeighbor_Mode_value) +} + +func init() { proto.RegisterFile("models/vpp/l3/l3.proto", fileDescriptor_l3_0d071b38be421a84) } + +var fileDescriptor_l3_0d071b38be421a84 = []byte{ + // 449 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xcd, 0x8a, 0xdb, 0x30, + 0x10, 0x80, 0xeb, 0xc4, 0x1b, 0x92, 0xc9, 0x4f, 0x83, 0x0e, 0x8b, 0x49, 0x69, 0x1b, 0xd2, 0x42, + 0xf7, 0xb2, 0x36, 0xc4, 0x4b, 0x0f, 0xdb, 0x53, 0x42, 0x0a, 0x35, 0x74, 0x5b, 0xe3, 0x4d, 0x2f, + 0xbd, 0x18, 0xc5, 0x56, 0x1c, 0x83, 0x6c, 0x09, 0x59, 0x31, 0xd9, 0x77, 0xea, 0x83, 0xf4, 0x3d, + 0x0a, 0x7d, 0x8e, 0xa2, 0x49, 0x5c, 0x12, 0xf6, 0x36, 0xfa, 0xf4, 0xcd, 0x8c, 0x46, 0x0c, 0x5c, + 0x17, 0x22, 0x65, 0xbc, 0xf2, 0x6a, 0x29, 0x3d, 0xee, 0x7b, 0xdc, 0x77, 0xa5, 0x12, 0x5a, 0x90, + 0x4e, 0x2d, 0xa5, 0xcb, 0xfd, 0xc9, 0x6d, 0x96, 0xeb, 0xdd, 0x7e, 0xe3, 0x26, 0xa2, 0xf0, 0x32, + 0x91, 
0x09, 0x0f, 0xaf, 0x37, 0xfb, 0x2d, 0x9e, 0xf0, 0x80, 0xd1, 0x31, 0x6d, 0xf6, 0xd7, 0x82, + 0x6e, 0xa8, 0xc4, 0xe1, 0x69, 0x11, 0x85, 0xe4, 0x1e, 0x20, 0x2f, 0x35, 0x53, 0x5b, 0x9a, 0xb0, + 0xca, 0xb1, 0xa6, 0xed, 0x9b, 0xfe, 0x7c, 0xe2, 0x1e, 0x0b, 0xbb, 0x8d, 0xe5, 0x06, 0x8d, 0x12, + 0x9d, 0xd9, 0xc4, 0x85, 0x8e, 0xa2, 0x65, 0xc6, 0x2a, 0xa7, 0x85, 0x79, 0xd7, 0xcf, 0xf2, 0x22, + 0x73, 0x1d, 0x9d, 0xac, 0xc9, 0x5b, 0xe8, 0xfd, 0x2f, 0x44, 0x08, 0xd8, 0x25, 0x2d, 0x98, 0x63, + 0x4d, 0xad, 0x9b, 0x5e, 0x84, 0xf1, 0xe4, 0x01, 0xae, 0x30, 0x83, 0xcc, 0x60, 0xb8, 0xcd, 0x55, + 0xa5, 0xe3, 0x5c, 0xc6, 0x34, 0x4d, 0xd5, 0xc9, 0xea, 0x23, 0x0c, 0xe4, 0x22, 0x4d, 0x15, 0x99, + 0xc2, 0x80, 0xd3, 0x33, 0xa5, 0x85, 0x0a, 0x18, 0x76, 0x34, 0x66, 0xbf, 0x5a, 0x30, 0x0a, 0xc2, + 0xc7, 0x84, 0x96, 0xdf, 0x58, 0x9e, 0xed, 0x36, 0x42, 0x11, 0x0f, 0x6c, 0xf3, 0x99, 0x58, 0x6f, + 0x34, 0x7f, 0xd5, 0x3c, 0xf8, 0xd2, 0x72, 0x1f, 0x44, 0xca, 0x22, 0x14, 0xc9, 0x3b, 0x18, 0x56, + 0x09, 0x2d, 0x63, 0x1c, 0xbb, 0xa6, 0x1c, 0xdb, 0x0c, 0xa3, 0x81, 0x81, 0xc1, 0x89, 0x99, 0xe7, + 0x16, 0xf4, 0x10, 0x4b, 0x25, 0x92, 0x58, 0xe7, 0x05, 0x73, 0xda, 0x28, 0xf5, 0x0b, 0x7a, 0x08, + 0x95, 0x48, 0xd6, 0x79, 0xc1, 0xc8, 0x6b, 0x00, 0xe3, 0xec, 0x65, 0x4a, 0x35, 0x73, 0x6c, 0x14, + 0x7a, 0x05, 0x3d, 0xfc, 0x40, 0x40, 0xde, 0xc3, 0xa8, 0xe9, 0x13, 0xa7, 0x8c, 0xd3, 0x27, 0xe7, + 0xea, 0xa2, 0xd1, 0xca, 0x30, 0xf2, 0x01, 0x5e, 0x56, 0x9a, 0x72, 0x16, 0xeb, 0x9d, 0x62, 0xd5, + 0x4e, 0xf0, 0xd4, 0xe9, 0xa0, 0x36, 0x42, 0xbc, 0x6e, 0xe8, 0x6c, 0x0e, 0xb6, 0x19, 0x82, 0x0c, + 0xa0, 0xbb, 0x0a, 0x1e, 0x17, 0xcb, 0xaf, 0x9f, 0x57, 0xe3, 0x17, 0xa4, 0x0b, 0x76, 0x10, 0xd6, + 0x77, 0x63, 0xeb, 0x14, 0x7d, 0x1c, 0xb7, 0x4c, 0xb4, 0xfc, 0xbe, 0xfe, 0x32, 0x6e, 0x2f, 0xef, + 0x7f, 0xff, 0x79, 0x63, 0xfd, 0xbc, 0x3b, 0x5b, 0x26, 0x9e, 0x67, 0x54, 0x0b, 0xb3, 0x77, 0xb7, + 0x34, 0x63, 0xa5, 0xf6, 0xa8, 0xcc, 0xbd, 0x8b, 0x65, 0xfc, 0x54, 0x4b, 0x19, 0x73, 0x7f, 0xd3, + 0xc1, 0xd5, 0xf2, 0xff, 0x05, 0x00, 0x00, 
0xff, 0xff, 0xe6, 0x5c, 0x0c, 0x36, 0xab, 0x02, 0x00, + 0x00, +} diff --git a/api/models/vpp/l3/l3.proto b/api/models/vpp/l3/l3.proto new file mode 100644 index 0000000000..4d52ab9c78 --- /dev/null +++ b/api/models/vpp/l3/l3.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; + +package vpp.l3; + +option go_package = "github.com/ligato/vpp-agent/api/models/vpp/l3;vpp_l3"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option (gogoproto.messagename_all) = true; + +message ProxyARP { + message Interface { + string name = 1; + } + repeated Interface interfaces = 1; + + message Range { + string first_ip_addr = 1; + string last_ip_addr = 2; + } + repeated Range ranges = 2; +} + +message IPScanNeighbor { + enum Mode { + DISABLED = 0; + IPv4 = 1; + IPv6 = 2; + BOTH = 3; + } + Mode mode = 1; + + uint32 scan_interval = 2; + uint32 max_proc_time = 3; + uint32 max_update = 4; + uint32 scan_int_delay = 5; + uint32 stale_threshold = 6; +} diff --git a/api/models/vpp/l3/route.pb.go b/api/models/vpp/l3/route.pb.go new file mode 100644 index 0000000000..db9d29cba8 --- /dev/null +++ b/api/models/vpp/l3/route.pb.go @@ -0,0 +1,178 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: models/vpp/l3/route.proto + +package vpp_l3 // import "github.com/ligato/vpp-agent/api/models/vpp/l3" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Route_RouteType int32 + +const ( + Route_INTRA_VRF Route_RouteType = 0 + Route_INTER_VRF Route_RouteType = 1 + Route_DROP Route_RouteType = 2 +) + +var Route_RouteType_name = map[int32]string{ + 0: "INTRA_VRF", + 1: "INTER_VRF", + 2: "DROP", +} +var Route_RouteType_value = map[string]int32{ + "INTRA_VRF": 0, + "INTER_VRF": 1, + "DROP": 2, +} + +func (x Route_RouteType) String() string { + return proto.EnumName(Route_RouteType_name, int32(x)) +} +func (Route_RouteType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_route_dcfac60fd0ffdb2f, []int{0, 0} +} + +type Route struct { + Type Route_RouteType `protobuf:"varint,10,opt,name=type,proto3,enum=vpp.l3.Route_RouteType" json:"type,omitempty"` + VrfId uint32 `protobuf:"varint,1,opt,name=vrf_id,json=vrfId,proto3" json:"vrf_id,omitempty"` + DstNetwork string `protobuf:"bytes,3,opt,name=dst_network,json=dstNetwork,proto3" json:"dst_network,omitempty"` + NextHopAddr string `protobuf:"bytes,4,opt,name=next_hop_addr,json=nextHopAddr,proto3" json:"next_hop_addr,omitempty"` + OutgoingInterface string `protobuf:"bytes,5,opt,name=outgoing_interface,json=outgoingInterface,proto3" json:"outgoing_interface,omitempty"` + Weight uint32 `protobuf:"varint,6,opt,name=weight,proto3" json:"weight,omitempty"` + Preference uint32 `protobuf:"varint,7,opt,name=preference,proto3" json:"preference,omitempty"` + // (a poor man's primary and backup) + ViaVrfId uint32 `protobuf:"varint,8,opt,name=via_vrf_id,json=viaVrfId,proto3" json:"via_vrf_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Route) Reset() { *m = Route{} } +func (m *Route) String() string { return proto.CompactTextString(m) } +func (*Route) ProtoMessage() {} +func (*Route) Descriptor() ([]byte, []int) { + return fileDescriptor_route_dcfac60fd0ffdb2f, []int{0} +} +func (m *Route) 
XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Route.Unmarshal(m, b) +} +func (m *Route) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Route.Marshal(b, m, deterministic) +} +func (dst *Route) XXX_Merge(src proto.Message) { + xxx_messageInfo_Route.Merge(dst, src) +} +func (m *Route) XXX_Size() int { + return xxx_messageInfo_Route.Size(m) +} +func (m *Route) XXX_DiscardUnknown() { + xxx_messageInfo_Route.DiscardUnknown(m) +} + +var xxx_messageInfo_Route proto.InternalMessageInfo + +func (m *Route) GetType() Route_RouteType { + if m != nil { + return m.Type + } + return Route_INTRA_VRF +} + +func (m *Route) GetVrfId() uint32 { + if m != nil { + return m.VrfId + } + return 0 +} + +func (m *Route) GetDstNetwork() string { + if m != nil { + return m.DstNetwork + } + return "" +} + +func (m *Route) GetNextHopAddr() string { + if m != nil { + return m.NextHopAddr + } + return "" +} + +func (m *Route) GetOutgoingInterface() string { + if m != nil { + return m.OutgoingInterface + } + return "" +} + +func (m *Route) GetWeight() uint32 { + if m != nil { + return m.Weight + } + return 0 +} + +func (m *Route) GetPreference() uint32 { + if m != nil { + return m.Preference + } + return 0 +} + +func (m *Route) GetViaVrfId() uint32 { + if m != nil { + return m.ViaVrfId + } + return 0 +} + +func (*Route) XXX_MessageName() string { + return "vpp.l3.Route" +} +func init() { + proto.RegisterType((*Route)(nil), "vpp.l3.Route") + proto.RegisterEnum("vpp.l3.Route_RouteType", Route_RouteType_name, Route_RouteType_value) +} + +func init() { proto.RegisterFile("models/vpp/l3/route.proto", fileDescriptor_route_dcfac60fd0ffdb2f) } + +var fileDescriptor_route_dcfac60fd0ffdb2f = []byte{ + // 356 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x91, 0x41, 0x4f, 0xfa, 0x30, + 0x18, 0xc6, 0xff, 0xe3, 0x0f, 0x13, 0x5e, 0x82, 0xc1, 0x26, 0xea, 0x34, 0x06, 0x09, 0x27, 0x12, + 0xc3, 0x96, 0x38, 
0x4f, 0x7a, 0xc2, 0xa8, 0x91, 0x0b, 0x9a, 0x86, 0x70, 0xf0, 0xd2, 0x0c, 0xda, + 0x95, 0xc6, 0xb1, 0x36, 0xa5, 0x1b, 0xf2, 0x0d, 0x8d, 0x5f, 0xc3, 0x2f, 0x62, 0xd6, 0x81, 0xc1, + 0x4b, 0xd3, 0xe7, 0xfd, 0x3d, 0x7d, 0xd3, 0x27, 0x0f, 0x9c, 0x2d, 0x25, 0x65, 0xc9, 0x2a, 0xc8, + 0x95, 0x0a, 0x92, 0x30, 0xd0, 0x32, 0x33, 0xcc, 0x57, 0x5a, 0x1a, 0x89, 0xdc, 0x5c, 0x29, 0x3f, + 0x09, 0xcf, 0x07, 0x5c, 0x98, 0x45, 0x36, 0xf3, 0xe7, 0x72, 0x19, 0x70, 0xc9, 0x65, 0x60, 0xf1, + 0x2c, 0x8b, 0xad, 0xb2, 0xc2, 0xde, 0xca, 0x67, 0xbd, 0xaf, 0x0a, 0xd4, 0x70, 0xb1, 0x06, 0x5d, + 0x41, 0xd5, 0x6c, 0x14, 0xf3, 0xa0, 0xeb, 0xf4, 0x0f, 0xaf, 0x4f, 0xfd, 0x72, 0x9f, 0x6f, 0x61, + 0x79, 0x4e, 0x36, 0x8a, 0x61, 0x6b, 0x42, 0xc7, 0xe0, 0xe6, 0x3a, 0x26, 0x82, 0x7a, 0x4e, 0xd7, + 0xe9, 0xb7, 0x70, 0x2d, 0xd7, 0xf1, 0x88, 0xa2, 0x4b, 0x68, 0xd2, 0x95, 0x21, 0x29, 0x33, 0x6b, + 0xa9, 0xdf, 0xbd, 0xff, 0x5d, 0xa7, 0xdf, 0xc0, 0x40, 0x57, 0x66, 0x5c, 0x4e, 0x50, 0x0f, 0x5a, + 0x29, 0xfb, 0x30, 0x64, 0x21, 0x15, 0x89, 0x28, 0xd5, 0x5e, 0xd5, 0x5a, 0x9a, 0xc5, 0xf0, 0x59, + 0xaa, 0x21, 0xa5, 0x1a, 0x0d, 0x00, 0xc9, 0xcc, 0x70, 0x29, 0x52, 0x4e, 0x44, 0x6a, 0x98, 0x8e, + 0xa3, 0x39, 0xf3, 0x6a, 0xd6, 0x78, 0xb4, 0x23, 0xa3, 0x1d, 0x40, 0x27, 0xe0, 0xae, 0x99, 0xe0, + 0x0b, 0xe3, 0xb9, 0xf6, 0x2b, 0x5b, 0x85, 0x3a, 0x00, 0x4a, 0xb3, 0x98, 0x69, 0x96, 0xce, 0x99, + 0x77, 0x60, 0xd9, 0xde, 0x04, 0x5d, 0x00, 0xe4, 0x22, 0x22, 0xdb, 0x18, 0x75, 0xcb, 0xeb, 0xb9, + 0x88, 0xa6, 0x45, 0x92, 0x5e, 0x08, 0x8d, 0xdf, 0xcc, 0xa8, 0x05, 0x8d, 0xd1, 0x78, 0x82, 0x87, + 0x64, 0x8a, 0x9f, 0xda, 0xff, 0xb6, 0xf2, 0x11, 0x5b, 0xe9, 0xa0, 0x3a, 0x54, 0x1f, 0xf0, 0xcb, + 0x6b, 0xbb, 0x72, 0x7f, 0xfb, 0xf9, 0xdd, 0x71, 0xde, 0x6e, 0xf6, 0x1a, 0x48, 0x04, 0x8f, 0x8c, + 0x2c, 0xfa, 0x1a, 0x44, 0x9c, 0xa5, 0x26, 0x88, 0x94, 0x08, 0xfe, 0x94, 0x78, 0x97, 0x2b, 0x45, + 0x92, 0x70, 0xe6, 0xda, 0x3e, 0xc2, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xab, 0x8a, 0x88, 0x2f, + 0xe3, 0x01, 0x00, 0x00, +} diff --git 
a/api/models/vpp/l3/route.proto b/api/models/vpp/l3/route.proto new file mode 100644 index 0000000000..db921950cd --- /dev/null +++ b/api/models/vpp/l3/route.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; + +package vpp.l3; + +option go_package = "github.com/ligato/vpp-agent/api/models/vpp/l3;vpp_l3"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option (gogoproto.messagename_all) = true; + +message Route { + enum RouteType { + INTRA_VRF = 0; /* Forwarding is being done in the specified vrf_id only, or according to + the specified outgoing interface. */ + INTER_VRF = 1; /* Forwarding is being done by lookup into a different VRF, + specified as via_vrf_id field. In case of these routes, the outgoing + interface should not be specified. The next hop IP address + does not have to be specified either, in that case VPP does full + recursive lookup in the via_vrf_id VRF. */ + DROP = 2; /* Drops the network communication designated for specific IP address */ + } + RouteType type = 10; + uint32 vrf_id = 1; /* VRF identifier, field required for remote client. This value should be + consistent with VRF ID in static route key. If it is not, value from + key will be preferred and this field will be overridden. */ + string dst_network = 3; /* ip address + prefix in format
/ */ + string next_hop_addr = 4; /* next hop address */ + string outgoing_interface = 5; /* outgoing interface name */ + uint32 weight = 6; /* weight (used for unequal cost load balancing) */ + uint32 preference = 7; /* The preference of the path. Lowest preference is preferred. */ + /* Only paths with the best preference contribute to forwarding. */ + /* (a poor man's primary and backup) */ + uint32 via_vrf_id = 8; /* Specifies VRF ID for the next hop lookup / recursive lookup */ +} diff --git a/api/models/vpp/nat/keys.go b/api/models/vpp/nat/keys.go new file mode 100644 index 0000000000..b27736f5bd --- /dev/null +++ b/api/models/vpp/nat/keys.go @@ -0,0 +1,105 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vpp_nat + +import ( + "strings" + + "github.com/ligato/vpp-agent/pkg/models" +) + +// ModuleName is the module name used for models. +const ModuleName = "vpp.nat" + +var ( + ModelNat44Global = models.Register(&Nat44Global{}, models.Spec{ + Module: ModuleName, + Type: "nat44-global", + Version: "v2", + }, models.WithNameTemplate("settings")) + ModelDNat44 = models.Register(&DNat44{}, models.Spec{ + Module: ModuleName, + Type: "dnat44", + Version: "v2", + }, models.WithNameTemplate("{{.Label}}")) +) + +// GlobalNAT44Key returns key for Nat44Global. 
+func GlobalNAT44Key() string { + return models.Key(&Nat44Global{}) +} + +// DNAT44Key returns the key used in NB DB to store the configuration of the +// given DNAT-44 configuration. +func DNAT44Key(label string) string { + return models.Key(&DNat44{ + Label: label, + }) +} + +/* NAT44 interface */ +const ( + // interfaceNAT44KeyPrefix is a common prefix for (derived) keys each representing + // NAT44 configuration for a single interface. + interfaceNAT44KeyPrefix = "vpp/nat44/interface/" + + // interfaceNAT44KeyTemplate is a template for (derived) key representing + // NAT44 configuration for a single interface. + interfaceNAT44KeyTemplate = interfaceNAT44KeyPrefix + "{iface}/feature/{feature}" + + // NAT interface features + inFeature = "in" + outFeature = "out" +) + +const ( + // InvalidKeyPart is used in key for parts which are invalid + InvalidKeyPart = "" +) + +/* NAT44 interface */ + +// InterfaceNAT44Key returns (derived) key representing NAT44 configuration +// for a given interface. +func InterfaceNAT44Key(iface string, isInside bool) string { + if iface == "" { + iface = InvalidKeyPart + } + key := strings.Replace(interfaceNAT44KeyTemplate, "{iface}", iface, 1) + feature := inFeature + if !isInside { + feature = outFeature + } + key = strings.Replace(key, "{feature}", feature, 1) + return key +} + +// ParseInterfaceNAT44Key parses interface name and the assigned NAT44 feature +// from Interface-NAT44 key. 
+func ParseInterfaceNAT44Key(key string) (iface string, isInside bool, isInterfaceNAT44Key bool) { + trim := strings.TrimPrefix(key, interfaceNAT44KeyPrefix) + if trim != key && trim != "" { + fibComps := strings.Split(trim, "/") + if len(fibComps) >= 3 && fibComps[len(fibComps)-2] == "feature" { + isInside := true + if fibComps[len(fibComps)-1] == outFeature { + isInside = false + } + iface := strings.Join(fibComps[:len(fibComps)-2], "/") + return iface, isInside, true + } + } + return "", false, false +} diff --git a/api/models/vpp/nat/keys_test.go b/api/models/vpp/nat/keys_test.go new file mode 100644 index 0000000000..5e100459d4 --- /dev/null +++ b/api/models/vpp/nat/keys_test.go @@ -0,0 +1,178 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package vpp_nat + +import ( + "testing" +) + +/*func TestDNAT44Key(t *testing.T) { + tests := []struct { + name string + label string + expectedKey string + }{ + { + name: "valid DNAT44 label", + label: "dnat1", + expectedKey: "vpp/config/v2/nat44/dnat/dnat1", + }, + { + name: "invalid DNAT44 label", + label: "", + expectedKey: "vpp/config/v2/nat44/dnat/", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := DNAT44Key(test.label) + if key != test.expectedKey { + t.Errorf("failed for: label=%s\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.label, test.expectedKey, key) + } + }) + } +}*/ + +func TestInterfaceNAT44Key(t *testing.T) { + tests := []struct { + name string + iface string + isInside bool + expectedKey string + }{ + { + name: "interface-with-IN-feature", + iface: "tap0", + isInside: true, + expectedKey: "vpp/nat44/interface/tap0/feature/in", + }, + { + name: "interface-with-OUT-feature", + iface: "tap1", + isInside: false, + expectedKey: "vpp/nat44/interface/tap1/feature/out", + }, + { + name: "gbe-interface-OUT", + iface: "GigabitEthernet0/8/0", + isInside: false, + expectedKey: "vpp/nat44/interface/GigabitEthernet0/8/0/feature/out", + }, + { + name: "gbe-interface-IN", + iface: "GigabitEthernet0/8/0", + isInside: true, + expectedKey: "vpp/nat44/interface/GigabitEthernet0/8/0/feature/in", + }, + { + name: "invalid-interface-with-IN-feature", + iface: "", + isInside: true, + expectedKey: "vpp/nat44/interface//feature/in", + }, + { + name: "invalid-interface-with-OUT-feature", + iface: "", + isInside: false, + expectedKey: "vpp/nat44/interface//feature/out", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := InterfaceNAT44Key(test.iface, test.isInside) + if key != test.expectedKey { + t.Errorf("failed for: iface=%s isInside=%t\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.iface, test.isInside, test.expectedKey, key) + } + }) + } +} + +func TestParseInterfaceNAT44Key(t 
*testing.T) { + tests := []struct { + name string + key string + expectedIface string + expectedIsInside bool + expectedIsInterfaceNAT44Key bool + }{ + { + name: "interface-with-IN-feature", + key: "vpp/nat44/interface/tap0/feature/in", + expectedIface: "tap0", + expectedIsInside: true, + expectedIsInterfaceNAT44Key: true, + }, + { + name: "interface-with-OUT-feature", + key: "vpp/nat44/interface/tap1/feature/out", + expectedIface: "tap1", + expectedIsInside: false, + expectedIsInterfaceNAT44Key: true, + }, + { + name: "gbe-interface-OUT", + key: "vpp/nat44/interface/GigabitEthernet0/8/0/feature/out", + expectedIface: "GigabitEthernet0/8/0", + expectedIsInside: false, + expectedIsInterfaceNAT44Key: true, + }, + { + name: "gbe-interface-IN", + key: "vpp/nat44/interface/GigabitEthernet0/8/0/feature/in", + expectedIface: "GigabitEthernet0/8/0", + expectedIsInside: true, + expectedIsInterfaceNAT44Key: true, + }, + { + name: "invalid-interface", + key: "vpp/nat44/interface//feature/in", + expectedIface: "", + expectedIsInside: true, + expectedIsInterfaceNAT44Key: true, + }, + { + name: "not interface key 1", + key: "vpp/nat44/address/192.168.1.1", + expectedIface: "", + expectedIsInside: false, + expectedIsInterfaceNAT44Key: false, + }, + { + name: "not interface key 2", + key: "vpp/config/v2/nat44/dnat/dnat1", + expectedIface: "", + expectedIsInside: false, + expectedIsInterfaceNAT44Key: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + iface, isInside, isInterfaceNAT44Key := ParseInterfaceNAT44Key(test.key) + if isInterfaceNAT44Key != test.expectedIsInterfaceNAT44Key { + t.Errorf("expected isInterfaceNAT44Key: %v\tgot: %v", test.expectedIsInterfaceNAT44Key, isInterfaceNAT44Key) + } + if iface != test.expectedIface { + t.Errorf("expected iface: %s\tgot: %s", test.expectedIface, iface) + } + if isInside != test.expectedIsInside { + t.Errorf("expected isInside: %t\tgot: %t", test.expectedIsInside, isInside) + } + }) + } +} diff 
--git a/api/models/vpp/nat/nat.pb.go b/api/models/vpp/nat/nat.pb.go new file mode 100644 index 0000000000..572a90c625 --- /dev/null +++ b/api/models/vpp/nat/nat.pb.go @@ -0,0 +1,678 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: models/vpp/nat/nat.proto + +package vpp_nat // import "github.com/ligato/vpp-agent/api/models/vpp/nat" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type DNat44_Protocol int32 + +const ( + DNat44_TCP DNat44_Protocol = 0 + DNat44_UDP DNat44_Protocol = 1 + DNat44_ICMP DNat44_Protocol = 2 +) + +var DNat44_Protocol_name = map[int32]string{ + 0: "TCP", + 1: "UDP", + 2: "ICMP", +} +var DNat44_Protocol_value = map[string]int32{ + "TCP": 0, + "UDP": 1, + "ICMP": 2, +} + +func (x DNat44_Protocol) String() string { + return proto.EnumName(DNat44_Protocol_name, int32(x)) +} +func (DNat44_Protocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_nat_f475b98be573ded8, []int{1, 0} +} + +type DNat44_StaticMapping_TwiceNatMode int32 + +const ( + DNat44_StaticMapping_DISABLED DNat44_StaticMapping_TwiceNatMode = 0 + DNat44_StaticMapping_ENABLED DNat44_StaticMapping_TwiceNatMode = 1 + DNat44_StaticMapping_SELF DNat44_StaticMapping_TwiceNatMode = 2 +) + +var DNat44_StaticMapping_TwiceNatMode_name = map[int32]string{ + 0: "DISABLED", + 1: "ENABLED", + 2: "SELF", +} +var DNat44_StaticMapping_TwiceNatMode_value = map[string]int32{ + "DISABLED": 0, + "ENABLED": 1, + "SELF": 2, +} + 
+func (x DNat44_StaticMapping_TwiceNatMode) String() string { + return proto.EnumName(DNat44_StaticMapping_TwiceNatMode_name, int32(x)) +} +func (DNat44_StaticMapping_TwiceNatMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_nat_f475b98be573ded8, []int{1, 0, 0} +} + +type Nat44Global struct { + Forwarding bool `protobuf:"varint,1,opt,name=forwarding,proto3" json:"forwarding,omitempty"` + NatInterfaces []*Nat44Global_Interface `protobuf:"bytes,2,rep,name=nat_interfaces,json=natInterfaces,proto3" json:"nat_interfaces,omitempty"` + AddressPool []*Nat44Global_Address `protobuf:"bytes,3,rep,name=address_pool,json=addressPool,proto3" json:"address_pool,omitempty"` + VirtualReassembly *VirtualReassembly `protobuf:"bytes,4,opt,name=virtual_reassembly,json=virtualReassembly,proto3" json:"virtual_reassembly,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Nat44Global) Reset() { *m = Nat44Global{} } +func (m *Nat44Global) String() string { return proto.CompactTextString(m) } +func (*Nat44Global) ProtoMessage() {} +func (*Nat44Global) Descriptor() ([]byte, []int) { + return fileDescriptor_nat_f475b98be573ded8, []int{0} +} +func (m *Nat44Global) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Nat44Global.Unmarshal(m, b) +} +func (m *Nat44Global) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Nat44Global.Marshal(b, m, deterministic) +} +func (dst *Nat44Global) XXX_Merge(src proto.Message) { + xxx_messageInfo_Nat44Global.Merge(dst, src) +} +func (m *Nat44Global) XXX_Size() int { + return xxx_messageInfo_Nat44Global.Size(m) +} +func (m *Nat44Global) XXX_DiscardUnknown() { + xxx_messageInfo_Nat44Global.DiscardUnknown(m) +} + +var xxx_messageInfo_Nat44Global proto.InternalMessageInfo + +func (m *Nat44Global) GetForwarding() bool { + if m != nil { + return m.Forwarding + } + return false +} + +func (m *Nat44Global) GetNatInterfaces() 
[]*Nat44Global_Interface { + if m != nil { + return m.NatInterfaces + } + return nil +} + +func (m *Nat44Global) GetAddressPool() []*Nat44Global_Address { + if m != nil { + return m.AddressPool + } + return nil +} + +func (m *Nat44Global) GetVirtualReassembly() *VirtualReassembly { + if m != nil { + return m.VirtualReassembly + } + return nil +} + +func (*Nat44Global) XXX_MessageName() string { + return "vpp.nat.Nat44Global" +} + +type Nat44Global_Interface struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + IsInside bool `protobuf:"varint,2,opt,name=is_inside,json=isInside,proto3" json:"is_inside,omitempty"` + OutputFeature bool `protobuf:"varint,3,opt,name=output_feature,json=outputFeature,proto3" json:"output_feature,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Nat44Global_Interface) Reset() { *m = Nat44Global_Interface{} } +func (m *Nat44Global_Interface) String() string { return proto.CompactTextString(m) } +func (*Nat44Global_Interface) ProtoMessage() {} +func (*Nat44Global_Interface) Descriptor() ([]byte, []int) { + return fileDescriptor_nat_f475b98be573ded8, []int{0, 0} +} +func (m *Nat44Global_Interface) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Nat44Global_Interface.Unmarshal(m, b) +} +func (m *Nat44Global_Interface) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Nat44Global_Interface.Marshal(b, m, deterministic) +} +func (dst *Nat44Global_Interface) XXX_Merge(src proto.Message) { + xxx_messageInfo_Nat44Global_Interface.Merge(dst, src) +} +func (m *Nat44Global_Interface) XXX_Size() int { + return xxx_messageInfo_Nat44Global_Interface.Size(m) +} +func (m *Nat44Global_Interface) XXX_DiscardUnknown() { + xxx_messageInfo_Nat44Global_Interface.DiscardUnknown(m) +} + +var xxx_messageInfo_Nat44Global_Interface proto.InternalMessageInfo + +func (m *Nat44Global_Interface) GetName() 
string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Nat44Global_Interface) GetIsInside() bool { + if m != nil { + return m.IsInside + } + return false +} + +func (m *Nat44Global_Interface) GetOutputFeature() bool { + if m != nil { + return m.OutputFeature + } + return false +} + +func (*Nat44Global_Interface) XXX_MessageName() string { + return "vpp.nat.Nat44Global.Interface" +} + +type Nat44Global_Address struct { + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + VrfId uint32 `protobuf:"varint,2,opt,name=vrf_id,json=vrfId,proto3" json:"vrf_id,omitempty"` + TwiceNat bool `protobuf:"varint,3,opt,name=twice_nat,json=twiceNat,proto3" json:"twice_nat,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Nat44Global_Address) Reset() { *m = Nat44Global_Address{} } +func (m *Nat44Global_Address) String() string { return proto.CompactTextString(m) } +func (*Nat44Global_Address) ProtoMessage() {} +func (*Nat44Global_Address) Descriptor() ([]byte, []int) { + return fileDescriptor_nat_f475b98be573ded8, []int{0, 1} +} +func (m *Nat44Global_Address) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Nat44Global_Address.Unmarshal(m, b) +} +func (m *Nat44Global_Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Nat44Global_Address.Marshal(b, m, deterministic) +} +func (dst *Nat44Global_Address) XXX_Merge(src proto.Message) { + xxx_messageInfo_Nat44Global_Address.Merge(dst, src) +} +func (m *Nat44Global_Address) XXX_Size() int { + return xxx_messageInfo_Nat44Global_Address.Size(m) +} +func (m *Nat44Global_Address) XXX_DiscardUnknown() { + xxx_messageInfo_Nat44Global_Address.DiscardUnknown(m) +} + +var xxx_messageInfo_Nat44Global_Address proto.InternalMessageInfo + +func (m *Nat44Global_Address) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m 
*Nat44Global_Address) GetVrfId() uint32 { + if m != nil { + return m.VrfId + } + return 0 +} + +func (m *Nat44Global_Address) GetTwiceNat() bool { + if m != nil { + return m.TwiceNat + } + return false +} + +func (*Nat44Global_Address) XXX_MessageName() string { + return "vpp.nat.Nat44Global.Address" +} + +type DNat44 struct { + Label string `protobuf:"bytes,1,opt,name=label,proto3" json:"label,omitempty"` + StMappings []*DNat44_StaticMapping `protobuf:"bytes,2,rep,name=st_mappings,json=stMappings,proto3" json:"st_mappings,omitempty"` + IdMappings []*DNat44_IdentityMapping `protobuf:"bytes,3,rep,name=id_mappings,json=idMappings,proto3" json:"id_mappings,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DNat44) Reset() { *m = DNat44{} } +func (m *DNat44) String() string { return proto.CompactTextString(m) } +func (*DNat44) ProtoMessage() {} +func (*DNat44) Descriptor() ([]byte, []int) { + return fileDescriptor_nat_f475b98be573ded8, []int{1} +} +func (m *DNat44) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DNat44.Unmarshal(m, b) +} +func (m *DNat44) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DNat44.Marshal(b, m, deterministic) +} +func (dst *DNat44) XXX_Merge(src proto.Message) { + xxx_messageInfo_DNat44.Merge(dst, src) +} +func (m *DNat44) XXX_Size() int { + return xxx_messageInfo_DNat44.Size(m) +} +func (m *DNat44) XXX_DiscardUnknown() { + xxx_messageInfo_DNat44.DiscardUnknown(m) +} + +var xxx_messageInfo_DNat44 proto.InternalMessageInfo + +func (m *DNat44) GetLabel() string { + if m != nil { + return m.Label + } + return "" +} + +func (m *DNat44) GetStMappings() []*DNat44_StaticMapping { + if m != nil { + return m.StMappings + } + return nil +} + +func (m *DNat44) GetIdMappings() []*DNat44_IdentityMapping { + if m != nil { + return m.IdMappings + } + return nil +} + +func (*DNat44) XXX_MessageName() string { + return 
"vpp.nat.DNat44" +} + +type DNat44_StaticMapping struct { + ExternalInterface string `protobuf:"bytes,1,opt,name=external_interface,json=externalInterface,proto3" json:"external_interface,omitempty"` + ExternalIp string `protobuf:"bytes,2,opt,name=external_ip,json=externalIp,proto3" json:"external_ip,omitempty"` + ExternalPort uint32 `protobuf:"varint,3,opt,name=external_port,json=externalPort,proto3" json:"external_port,omitempty"` + LocalIps []*DNat44_StaticMapping_LocalIP `protobuf:"bytes,4,rep,name=local_ips,json=localIps,proto3" json:"local_ips,omitempty"` + Protocol DNat44_Protocol `protobuf:"varint,5,opt,name=protocol,proto3,enum=vpp.nat.DNat44_Protocol" json:"protocol,omitempty"` + TwiceNat DNat44_StaticMapping_TwiceNatMode `protobuf:"varint,6,opt,name=twice_nat,json=twiceNat,proto3,enum=vpp.nat.DNat44_StaticMapping_TwiceNatMode" json:"twice_nat,omitempty"` + SessionAffinity uint32 `protobuf:"varint,7,opt,name=session_affinity,json=sessionAffinity,proto3" json:"session_affinity,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DNat44_StaticMapping) Reset() { *m = DNat44_StaticMapping{} } +func (m *DNat44_StaticMapping) String() string { return proto.CompactTextString(m) } +func (*DNat44_StaticMapping) ProtoMessage() {} +func (*DNat44_StaticMapping) Descriptor() ([]byte, []int) { + return fileDescriptor_nat_f475b98be573ded8, []int{1, 0} +} +func (m *DNat44_StaticMapping) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DNat44_StaticMapping.Unmarshal(m, b) +} +func (m *DNat44_StaticMapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DNat44_StaticMapping.Marshal(b, m, deterministic) +} +func (dst *DNat44_StaticMapping) XXX_Merge(src proto.Message) { + xxx_messageInfo_DNat44_StaticMapping.Merge(dst, src) +} +func (m *DNat44_StaticMapping) XXX_Size() int { + return xxx_messageInfo_DNat44_StaticMapping.Size(m) +} +func (m 
*DNat44_StaticMapping) XXX_DiscardUnknown() { + xxx_messageInfo_DNat44_StaticMapping.DiscardUnknown(m) +} + +var xxx_messageInfo_DNat44_StaticMapping proto.InternalMessageInfo + +func (m *DNat44_StaticMapping) GetExternalInterface() string { + if m != nil { + return m.ExternalInterface + } + return "" +} + +func (m *DNat44_StaticMapping) GetExternalIp() string { + if m != nil { + return m.ExternalIp + } + return "" +} + +func (m *DNat44_StaticMapping) GetExternalPort() uint32 { + if m != nil { + return m.ExternalPort + } + return 0 +} + +func (m *DNat44_StaticMapping) GetLocalIps() []*DNat44_StaticMapping_LocalIP { + if m != nil { + return m.LocalIps + } + return nil +} + +func (m *DNat44_StaticMapping) GetProtocol() DNat44_Protocol { + if m != nil { + return m.Protocol + } + return DNat44_TCP +} + +func (m *DNat44_StaticMapping) GetTwiceNat() DNat44_StaticMapping_TwiceNatMode { + if m != nil { + return m.TwiceNat + } + return DNat44_StaticMapping_DISABLED +} + +func (m *DNat44_StaticMapping) GetSessionAffinity() uint32 { + if m != nil { + return m.SessionAffinity + } + return 0 +} + +func (*DNat44_StaticMapping) XXX_MessageName() string { + return "vpp.nat.DNat44.StaticMapping" +} + +type DNat44_StaticMapping_LocalIP struct { + VrfId uint32 `protobuf:"varint,1,opt,name=vrf_id,json=vrfId,proto3" json:"vrf_id,omitempty"` + LocalIp string `protobuf:"bytes,2,opt,name=local_ip,json=localIp,proto3" json:"local_ip,omitempty"` + LocalPort uint32 `protobuf:"varint,3,opt,name=local_port,json=localPort,proto3" json:"local_port,omitempty"` + Probability uint32 `protobuf:"varint,4,opt,name=probability,proto3" json:"probability,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DNat44_StaticMapping_LocalIP) Reset() { *m = DNat44_StaticMapping_LocalIP{} } +func (m *DNat44_StaticMapping_LocalIP) String() string { return proto.CompactTextString(m) } +func (*DNat44_StaticMapping_LocalIP) 
ProtoMessage() {} +func (*DNat44_StaticMapping_LocalIP) Descriptor() ([]byte, []int) { + return fileDescriptor_nat_f475b98be573ded8, []int{1, 0, 0} +} +func (m *DNat44_StaticMapping_LocalIP) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DNat44_StaticMapping_LocalIP.Unmarshal(m, b) +} +func (m *DNat44_StaticMapping_LocalIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DNat44_StaticMapping_LocalIP.Marshal(b, m, deterministic) +} +func (dst *DNat44_StaticMapping_LocalIP) XXX_Merge(src proto.Message) { + xxx_messageInfo_DNat44_StaticMapping_LocalIP.Merge(dst, src) +} +func (m *DNat44_StaticMapping_LocalIP) XXX_Size() int { + return xxx_messageInfo_DNat44_StaticMapping_LocalIP.Size(m) +} +func (m *DNat44_StaticMapping_LocalIP) XXX_DiscardUnknown() { + xxx_messageInfo_DNat44_StaticMapping_LocalIP.DiscardUnknown(m) +} + +var xxx_messageInfo_DNat44_StaticMapping_LocalIP proto.InternalMessageInfo + +func (m *DNat44_StaticMapping_LocalIP) GetVrfId() uint32 { + if m != nil { + return m.VrfId + } + return 0 +} + +func (m *DNat44_StaticMapping_LocalIP) GetLocalIp() string { + if m != nil { + return m.LocalIp + } + return "" +} + +func (m *DNat44_StaticMapping_LocalIP) GetLocalPort() uint32 { + if m != nil { + return m.LocalPort + } + return 0 +} + +func (m *DNat44_StaticMapping_LocalIP) GetProbability() uint32 { + if m != nil { + return m.Probability + } + return 0 +} + +func (*DNat44_StaticMapping_LocalIP) XXX_MessageName() string { + return "vpp.nat.DNat44.StaticMapping.LocalIP" +} + +type DNat44_IdentityMapping struct { + VrfId uint32 `protobuf:"varint,1,opt,name=vrf_id,json=vrfId,proto3" json:"vrf_id,omitempty"` + Interface string `protobuf:"bytes,2,opt,name=interface,proto3" json:"interface,omitempty"` + IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + Port uint32 `protobuf:"varint,4,opt,name=port,proto3" json:"port,omitempty"` + Protocol DNat44_Protocol 
`protobuf:"varint,5,opt,name=protocol,proto3,enum=vpp.nat.DNat44_Protocol" json:"protocol,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DNat44_IdentityMapping) Reset() { *m = DNat44_IdentityMapping{} } +func (m *DNat44_IdentityMapping) String() string { return proto.CompactTextString(m) } +func (*DNat44_IdentityMapping) ProtoMessage() {} +func (*DNat44_IdentityMapping) Descriptor() ([]byte, []int) { + return fileDescriptor_nat_f475b98be573ded8, []int{1, 1} +} +func (m *DNat44_IdentityMapping) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DNat44_IdentityMapping.Unmarshal(m, b) +} +func (m *DNat44_IdentityMapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DNat44_IdentityMapping.Marshal(b, m, deterministic) +} +func (dst *DNat44_IdentityMapping) XXX_Merge(src proto.Message) { + xxx_messageInfo_DNat44_IdentityMapping.Merge(dst, src) +} +func (m *DNat44_IdentityMapping) XXX_Size() int { + return xxx_messageInfo_DNat44_IdentityMapping.Size(m) +} +func (m *DNat44_IdentityMapping) XXX_DiscardUnknown() { + xxx_messageInfo_DNat44_IdentityMapping.DiscardUnknown(m) +} + +var xxx_messageInfo_DNat44_IdentityMapping proto.InternalMessageInfo + +func (m *DNat44_IdentityMapping) GetVrfId() uint32 { + if m != nil { + return m.VrfId + } + return 0 +} + +func (m *DNat44_IdentityMapping) GetInterface() string { + if m != nil { + return m.Interface + } + return "" +} + +func (m *DNat44_IdentityMapping) GetIpAddress() string { + if m != nil { + return m.IpAddress + } + return "" +} + +func (m *DNat44_IdentityMapping) GetPort() uint32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *DNat44_IdentityMapping) GetProtocol() DNat44_Protocol { + if m != nil { + return m.Protocol + } + return DNat44_TCP +} + +func (*DNat44_IdentityMapping) XXX_MessageName() string { + return "vpp.nat.DNat44.IdentityMapping" +} + +type VirtualReassembly 
struct { + Timeout uint32 `protobuf:"varint,1,opt,name=timeout,proto3" json:"timeout,omitempty"` + MaxReassemblies uint32 `protobuf:"varint,2,opt,name=max_reassemblies,json=maxReassemblies,proto3" json:"max_reassemblies,omitempty"` + MaxFragments uint32 `protobuf:"varint,3,opt,name=max_fragments,json=maxFragments,proto3" json:"max_fragments,omitempty"` + DropFragments bool `protobuf:"varint,4,opt,name=drop_fragments,json=dropFragments,proto3" json:"drop_fragments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VirtualReassembly) Reset() { *m = VirtualReassembly{} } +func (m *VirtualReassembly) String() string { return proto.CompactTextString(m) } +func (*VirtualReassembly) ProtoMessage() {} +func (*VirtualReassembly) Descriptor() ([]byte, []int) { + return fileDescriptor_nat_f475b98be573ded8, []int{2} +} +func (m *VirtualReassembly) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VirtualReassembly.Unmarshal(m, b) +} +func (m *VirtualReassembly) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VirtualReassembly.Marshal(b, m, deterministic) +} +func (dst *VirtualReassembly) XXX_Merge(src proto.Message) { + xxx_messageInfo_VirtualReassembly.Merge(dst, src) +} +func (m *VirtualReassembly) XXX_Size() int { + return xxx_messageInfo_VirtualReassembly.Size(m) +} +func (m *VirtualReassembly) XXX_DiscardUnknown() { + xxx_messageInfo_VirtualReassembly.DiscardUnknown(m) +} + +var xxx_messageInfo_VirtualReassembly proto.InternalMessageInfo + +func (m *VirtualReassembly) GetTimeout() uint32 { + if m != nil { + return m.Timeout + } + return 0 +} + +func (m *VirtualReassembly) GetMaxReassemblies() uint32 { + if m != nil { + return m.MaxReassemblies + } + return 0 +} + +func (m *VirtualReassembly) GetMaxFragments() uint32 { + if m != nil { + return m.MaxFragments + } + return 0 +} + +func (m *VirtualReassembly) GetDropFragments() bool { + if m != 
nil { + return m.DropFragments + } + return false +} + +func (*VirtualReassembly) XXX_MessageName() string { + return "vpp.nat.VirtualReassembly" +} +func init() { + proto.RegisterType((*Nat44Global)(nil), "vpp.nat.Nat44Global") + proto.RegisterType((*Nat44Global_Interface)(nil), "vpp.nat.Nat44Global.Interface") + proto.RegisterType((*Nat44Global_Address)(nil), "vpp.nat.Nat44Global.Address") + proto.RegisterType((*DNat44)(nil), "vpp.nat.DNat44") + proto.RegisterType((*DNat44_StaticMapping)(nil), "vpp.nat.DNat44.StaticMapping") + proto.RegisterType((*DNat44_StaticMapping_LocalIP)(nil), "vpp.nat.DNat44.StaticMapping.LocalIP") + proto.RegisterType((*DNat44_IdentityMapping)(nil), "vpp.nat.DNat44.IdentityMapping") + proto.RegisterType((*VirtualReassembly)(nil), "vpp.nat.VirtualReassembly") + proto.RegisterEnum("vpp.nat.DNat44_Protocol", DNat44_Protocol_name, DNat44_Protocol_value) + proto.RegisterEnum("vpp.nat.DNat44_StaticMapping_TwiceNatMode", DNat44_StaticMapping_TwiceNatMode_name, DNat44_StaticMapping_TwiceNatMode_value) +} + +func init() { proto.RegisterFile("models/vpp/nat/nat.proto", fileDescriptor_nat_f475b98be573ded8) } + +var fileDescriptor_nat_f475b98be573ded8 = []byte{ + // 831 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x51, 0x6f, 0xe4, 0x34, + 0x10, 0xbe, 0x74, 0xb7, 0x4d, 0x76, 0xd2, 0xed, 0x6d, 0x2d, 0x90, 0xc2, 0x72, 0xd7, 0x5b, 0x2d, + 0x3a, 0x54, 0x90, 0xba, 0x2b, 0xf5, 0x2a, 0x5e, 0x40, 0x40, 0x7b, 0x6d, 0x4f, 0x91, 0xda, 0x6a, + 0x95, 0x1e, 0x20, 0xf1, 0x12, 0x79, 0x37, 0xce, 0x62, 0x29, 0xb1, 0x2d, 0xdb, 0xbb, 0xd7, 0x4a, + 0xfc, 0x1a, 0x04, 0xcf, 0xfc, 0x03, 0x9e, 0xf9, 0x1f, 0xfc, 0x11, 0x14, 0xdb, 0xc9, 0xe6, 0x0a, + 0xf4, 0x81, 0x87, 0x48, 0x9e, 0xcf, 0xdf, 0x8c, 0x67, 0xbe, 0x99, 0xd8, 0x10, 0x95, 0x3c, 0x23, + 0x85, 0x9a, 0xae, 0x85, 0x98, 0x32, 0xac, 0xab, 0x6f, 0x22, 0x24, 0xd7, 0x1c, 0xf9, 0x6b, 0x21, + 0x26, 0x0c, 0xeb, 0xe1, 0xd1, 0x92, 0xea, 0x9f, 0x56, 0xf3, 0xc9, 
0x82, 0x97, 0xd3, 0x25, 0x5f, + 0xf2, 0xa9, 0xd9, 0x9f, 0xaf, 0x72, 0x63, 0x19, 0xc3, 0xac, 0xac, 0xdf, 0xf8, 0x8f, 0x0e, 0x84, + 0x37, 0x58, 0x9f, 0x9c, 0xbc, 0x29, 0xf8, 0x1c, 0x17, 0xe8, 0x00, 0x20, 0xe7, 0xf2, 0x1d, 0x96, + 0x19, 0x65, 0xcb, 0xc8, 0x1b, 0x79, 0x87, 0x41, 0xd2, 0x42, 0xd0, 0x05, 0xec, 0x31, 0xac, 0x53, + 0xca, 0x34, 0x91, 0x39, 0x5e, 0x10, 0x15, 0x6d, 0x8d, 0x3a, 0x87, 0xe1, 0xf1, 0xc1, 0xc4, 0x25, + 0x30, 0x69, 0x45, 0x9b, 0xc4, 0x35, 0x2d, 0xe9, 0x33, 0xac, 0x1b, 0x4b, 0xa1, 0x6f, 0x60, 0x17, + 0x67, 0x99, 0x24, 0x4a, 0xa5, 0x82, 0xf3, 0x22, 0xea, 0x98, 0x20, 0xcf, 0xfe, 0x35, 0xc8, 0xa9, + 0x25, 0x26, 0xa1, 0xf3, 0x98, 0x71, 0x5e, 0xa0, 0x18, 0xd0, 0x9a, 0x4a, 0xbd, 0xc2, 0x45, 0x2a, + 0x09, 0x56, 0x8a, 0x94, 0xf3, 0xe2, 0x3e, 0xea, 0x8e, 0xbc, 0xc3, 0xf0, 0x78, 0xd8, 0x84, 0xf9, + 0xde, 0x52, 0x92, 0x86, 0x91, 0xec, 0xaf, 0x1f, 0x42, 0xc3, 0x05, 0xf4, 0x9a, 0xcc, 0x10, 0x82, + 0x2e, 0xc3, 0x25, 0x31, 0x95, 0xf7, 0x12, 0xb3, 0x46, 0x1f, 0x43, 0x8f, 0xaa, 0x94, 0x32, 0x45, + 0x33, 0x12, 0x6d, 0x19, 0x49, 0x02, 0xaa, 0x62, 0x63, 0xa3, 0x97, 0xb0, 0xc7, 0x57, 0x5a, 0xac, + 0x74, 0x9a, 0x13, 0xac, 0x57, 0x92, 0x44, 0x1d, 0xc3, 0xe8, 0x5b, 0xf4, 0xd2, 0x82, 0xc3, 0x1f, + 0xc0, 0x77, 0x75, 0xa0, 0x08, 0x7c, 0x57, 0x89, 0x3b, 0xa5, 0x36, 0xd1, 0x87, 0xb0, 0xb3, 0x96, + 0x79, 0x4a, 0x33, 0x73, 0x4a, 0x3f, 0xd9, 0x5e, 0xcb, 0x3c, 0xce, 0xaa, 0xf3, 0xf5, 0x3b, 0xba, + 0x20, 0x29, 0xc3, 0xda, 0x45, 0x0f, 0x0c, 0x70, 0x83, 0xf5, 0xf8, 0x17, 0x1f, 0x76, 0xce, 0x8d, + 0x5c, 0xe8, 0x03, 0xd8, 0x2e, 0xf0, 0x9c, 0x14, 0x2e, 0xac, 0x35, 0xd0, 0xd7, 0x10, 0x2a, 0x9d, + 0x96, 0x58, 0x08, 0xca, 0x96, 0x75, 0xbb, 0x9e, 0x37, 0x12, 0x59, 0xdf, 0xc9, 0xad, 0xc6, 0x9a, + 0x2e, 0xae, 0x2d, 0x2b, 0x01, 0xa5, 0xdd, 0x52, 0xa1, 0x6f, 0x21, 0xa4, 0xd9, 0xc6, 0xdf, 0x76, + 0xea, 0xc5, 0x43, 0xff, 0x38, 0x23, 0x4c, 0x53, 0x7d, 0xdf, 0x44, 0xa0, 0x59, 0x1d, 0x61, 0xf8, + 0x5b, 0x17, 0xfa, 0xef, 0xc5, 0x47, 0x47, 0x80, 0xc8, 0x9d, 0x26, 0x92, 0xe1, 0x62, 0x33, 0x4a, + 0x2e, 
0xed, 0xfd, 0x7a, 0x67, 0xd3, 0x94, 0x17, 0x10, 0x6e, 0xe8, 0xc2, 0x88, 0xd3, 0x4b, 0xa0, + 0xe1, 0x09, 0xf4, 0x09, 0xf4, 0x1b, 0x82, 0xe0, 0xd2, 0xaa, 0xd4, 0x4f, 0x76, 0x6b, 0x70, 0xc6, + 0xa5, 0x46, 0x67, 0xd0, 0x2b, 0xf8, 0xc2, 0x84, 0x50, 0x51, 0xd7, 0x94, 0xf1, 0xf2, 0x51, 0x19, + 0x26, 0x57, 0x15, 0x3d, 0x9e, 0x25, 0x81, 0xf1, 0x8b, 0x85, 0x42, 0x27, 0x10, 0x98, 0xff, 0x66, + 0xc1, 0x8b, 0x68, 0x7b, 0xe4, 0x1d, 0xee, 0x1d, 0x47, 0x0f, 0x43, 0xcc, 0xdc, 0x7e, 0xd2, 0x30, + 0xd1, 0x9b, 0x76, 0x03, 0x77, 0x8c, 0xdb, 0xe7, 0x8f, 0x9f, 0xfc, 0xd6, 0xb5, 0xf7, 0x9a, 0x67, + 0x64, 0xd3, 0x6c, 0xf4, 0x19, 0x0c, 0x14, 0x51, 0x8a, 0x72, 0x96, 0xe2, 0x3c, 0xa7, 0x8c, 0xea, + 0xfb, 0xc8, 0x37, 0xa5, 0x3e, 0x75, 0xf8, 0xa9, 0x83, 0x87, 0x3f, 0x83, 0xef, 0xd2, 0x6f, 0x8d, + 0x95, 0xd7, 0x1e, 0xab, 0x8f, 0x20, 0xa8, 0xf5, 0x70, 0x92, 0xfa, 0xae, 0x4e, 0xf4, 0x1c, 0xc0, + 0x6e, 0xb5, 0xc4, 0xb4, 0xe2, 0x19, 0x25, 0x47, 0x10, 0x0a, 0xc9, 0xe7, 0x78, 0x4e, 0x8b, 0x2a, + 0x83, 0xae, 0xd9, 0x6f, 0x43, 0xe3, 0x57, 0xb0, 0xdb, 0x2e, 0x01, 0xed, 0x42, 0x70, 0x1e, 0xdf, + 0x9e, 0x9e, 0x5d, 0x5d, 0x9c, 0x0f, 0x9e, 0xa0, 0x10, 0xfc, 0x8b, 0x1b, 0x6b, 0x78, 0x28, 0x80, + 0xee, 0xed, 0xc5, 0xd5, 0xe5, 0x60, 0x6b, 0xf8, 0xbb, 0x07, 0x4f, 0x1f, 0xcc, 0xd1, 0x7f, 0xe5, + 0xfe, 0x0c, 0x7a, 0x9b, 0xb9, 0xb1, 0xc9, 0x6f, 0x80, 0x2a, 0x7d, 0x2a, 0xd2, 0xfa, 0x27, 0xeb, + 0xb8, 0x6d, 0x51, 0xff, 0x80, 0x08, 0xba, 0xa6, 0x2e, 0x9b, 0xb7, 0x59, 0xff, 0xbf, 0xc6, 0x8e, + 0x3f, 0x85, 0xa0, 0x46, 0x91, 0x0f, 0x9d, 0xb7, 0xaf, 0x67, 0x83, 0x27, 0xd5, 0xe2, 0xbb, 0xf3, + 0x99, 0xad, 0x2c, 0x7e, 0x7d, 0x3d, 0x1b, 0x6c, 0x8d, 0x7f, 0xf5, 0x60, 0xff, 0x1f, 0x77, 0x51, + 0x75, 0x11, 0x68, 0x5a, 0x12, 0xbe, 0xd2, 0xae, 0xb8, 0xda, 0xac, 0xfa, 0x5c, 0xe2, 0xbb, 0xcd, + 0xcd, 0x46, 0xcd, 0x3d, 0x6b, 0xfa, 0x5c, 0xe2, 0xbb, 0xa4, 0x05, 0x57, 0xa3, 0x5f, 0x51, 0x73, + 0x89, 0x97, 0x25, 0x61, 0x5a, 0xd5, 0xa3, 0x5f, 0xe2, 0xbb, 0xcb, 0x1a, 0xab, 0x2e, 0xa9, 0x4c, + 0x72, 0xd1, 0x62, 0x75, 0xed, 0x25, 0x55, 
0xa1, 0x0d, 0xed, 0xec, 0xab, 0x3f, 0xff, 0x3a, 0xf0, + 0x7e, 0xfc, 0xa2, 0xf5, 0x82, 0x14, 0x74, 0x89, 0x35, 0xaf, 0xde, 0x9b, 0x23, 0xbc, 0x24, 0x4c, + 0x4f, 0xb1, 0xa0, 0xd3, 0xf7, 0x1f, 0xa1, 0x2f, 0xd7, 0x42, 0x54, 0x83, 0x3d, 0xdf, 0x31, 0xb2, + 0xbc, 0xfa, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x7a, 0xcb, 0xd0, 0x42, 0xa5, 0x06, 0x00, 0x00, +} diff --git a/api/models/vpp/nat/nat.proto b/api/models/vpp/nat/nat.proto new file mode 100644 index 0000000000..91df16548a --- /dev/null +++ b/api/models/vpp/nat/nat.proto @@ -0,0 +1,81 @@ +syntax = "proto3"; + +package vpp.nat; + +option go_package = "github.com/ligato/vpp-agent/api/models/vpp/nat;vpp_nat"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option (gogoproto.messagename_all) = true; + +message Nat44Global { + bool forwarding = 1; /* Enable/disable forwarding. */ + + message Interface { /* Local network interfaces enabled for NAT44. */ + string name = 1; /* (logical) Interface name. */ + bool is_inside = 2; /* Distinguish between inside/outside interface. */ + bool output_feature = 3; /* Enable/disable output feature. */ + } + repeated Interface nat_interfaces = 2; + + message Address { + string address = 1; /* IPv4 address. */ + uint32 vrf_id = 2; /* VRF (table) ID. */ + bool twice_nat = 3; /* Enable/disable twice NAT. */ + } + repeated Address address_pool = 3; + + VirtualReassembly virtual_reassembly = 4; /* Virtual reassembly for IPv4 */ +} + +message DNat44 { + string label = 1; /* Unique identifier for the DNAT configuration. */ + + enum Protocol { /* Available protocols. */ + TCP = 0; + UDP = 1; + ICMP = 2; /* ICMP is not permitted for load balanced entries. */ + }; + + message StaticMapping { /* A list of static mappings in DNAT. */ + string external_interface = 1; /* Interface to use external IP from; preferred over external_ip. */ + string external_ip = 2; /* External address. */ + uint32 external_port = 3; /* Port (do not set for address mapping). 
*/ + + message LocalIP { + uint32 vrf_id = 1; /* VRF (table) ID. */ + string local_ip = 2; /* Local IP address). */ + uint32 local_port = 3; /* port (do not set for address mapping). */ + uint32 probability = 4; /* Probability mode. */ + } + repeated LocalIP local_ips = 4; /* List of local IP addresses. If there is more than one entry, + Load ballancer is enabled. */ + Protocol protocol = 5; /* Protocol used for static mapping. */ + + enum TwiceNatMode { /* Available twice-NAT modes */ + DISABLED = 0; + ENABLED = 1; + SELF = 2; + }; + TwiceNatMode twice_nat = 6; /* Enable/disable (self-)twice NAT. */ + uint32 session_affinity = 7; /* Session affinity if 0 disabled, otherwise client + IP affinity sticky time in seconds */ + } + repeated StaticMapping st_mappings = 2; + + message IdentityMapping { /* A list of identity mappings in DNAT. */ + uint32 vrf_id = 1; /* VRF (table) ID. */ + string interface = 2; /* Name of the interface to use address from; preferred over ip_address. */ + string ip_address = 3; /* IP address. */ + uint32 port = 4; /* Port (do not set for address mapping). */ + Protocol protocol = 5; /* Protocol used for identity mapping. */ + + } + repeated IdentityMapping id_mappings = 3; +} + +message VirtualReassembly { /* NAT virtual reassembly */ + uint32 timeout = 1; /* Reassembly timeout */ + uint32 max_reassemblies = 2; /* Maximum number of concurrent reassemblies */ + uint32 max_fragments = 3; /* Maximum number of fragments per reassembly */ + bool drop_fragments = 4; /* If set to true fragments are dropped, translated otherwise*/ +} diff --git a/api/models/vpp/punt/keys.go b/api/models/vpp/punt/keys.go new file mode 100644 index 0000000000..4adb0ee71b --- /dev/null +++ b/api/models/vpp/punt/keys.go @@ -0,0 +1,56 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vpp_punt + +import ( + "github.com/ligato/vpp-agent/pkg/models" +) + +// ModuleName is the module name used for models. +const ModuleName = "vpp" + +var ( + ModelToHost = models.Register(&ToHost{}, models.Spec{ + Module: ModuleName, + Type: "tohost", + Version: "v2", + }, models.WithNameTemplate( + "l3/{{.L3Protocol}}/l4/{{.L4Protocol}}/port/{{.Port}}", + )) + ModelIPRedirect = models.Register(&IPRedirect{}, models.Spec{ + Module: ModuleName, + Type: "ipredirect", + Version: "v2", + }, models.WithNameTemplate( + "l3/{{.L3Protocol}}/tx/{{.TxInterface}}", + )) +) + +// ToHostKey returns key representing punt to host/socket configuration. +func ToHostKey(l3Proto L3Protocol, l4Proto L4Protocol, port uint32) string { + return models.Key(&ToHost{ + L3Protocol: l3Proto, + L4Protocol: l4Proto, + Port: port, + }) +} + +// IPRedirectKey returns key representing IP punt redirect configuration. +func IPRedirectKey(l3Proto L3Protocol, txIf string) string { + return models.Key(&IPRedirect{ + L3Protocol: l3Proto, + TxInterface: txIf, + }) +} diff --git a/api/models/vpp/punt/keys_test.go b/api/models/vpp/punt/keys_test.go new file mode 100644 index 0000000000..af1e956059 --- /dev/null +++ b/api/models/vpp/punt/keys_test.go @@ -0,0 +1,232 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vpp_punt + +/*func TestPuntToHostKey(t *testing.T) { + tests := []struct { + name string + l3Protocol L3Protocol + l4Protocol L4Protocol + port uint32 + expectedKey string + }{ + { + name: "valid Punt case (IPv4/UDP)", + l3Protocol: L3Protocol_IPv4, + l4Protocol: L4Protocol_UDP, + port: 9000, + expectedKey: "vpp/config/v2/punt/tohost/l3/IPv4/l4/UDP/port/9000", + }, + { + name: "valid Punt case (IPv4/TCP)", + l3Protocol: L3Protocol_IPv4, + l4Protocol: L4Protocol_TCP, + port: 9000, + expectedKey: "vpp/config/v2/punt/tohost/l3/IPv4/l4/TCP/port/9000", + }, + { + name: "valid Punt case (IPv6/UDP)", + l3Protocol: L3Protocol_IPv6, + l4Protocol: L4Protocol_UDP, + port: 9000, + expectedKey: "vpp/config/v2/punt/tohost/l3/IPv6/l4/UDP/port/9000", + }, + { + name: "valid Punt case (IPv6/TCP)", + l3Protocol: L3Protocol_IPv6, + l4Protocol: L4Protocol_TCP, + port: 0, + expectedKey: "vpp/config/v2/punt/tohost/l3/IPv6/l4/TCP/port/", + }, + { + name: "invalid Punt case (zero port)", + l3Protocol: L3Protocol_IPv4, + l4Protocol: L4Protocol_UDP, + port: 0, + expectedKey: "vpp/config/v2/punt/tohost/l3/IPv4/l4/UDP/port/", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := ToHostKey(test.l3Protocol, test.l4Protocol, test.port) + if key != test.expectedKey { + t.Errorf("failed for: puntName=%s\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.name, test.expectedKey, key) + } + }) + } +}*/ + +/*func TestParsePuntToHostKey(t *testing.T) { + tests := []struct { + name string + key string + expectedL3 L3Protocol + expectedL4 
L4Protocol + expectedPort uint32 + isPuntToHostKey bool + }{ + { + name: "valid Punt key", + key: "vpp/config/v2/punt/tohost/l3/IPv4/l4/TCP/port/9000", + expectedL3: L3Protocol(4), + expectedL4: L4Protocol(6), + expectedPort: 9000, + isPuntToHostKey: true, + }, + { + name: "invalid Punt L3", + key: "vpp/config/v2/punt/tohost/l3/4/l4/TCP/port/9000", + expectedL3: L3Protocol(0), + expectedL4: L4Protocol(6), + expectedPort: 9000, + isPuntToHostKey: true, + }, + { + name: "invalid Punt L3 and L4", + key: "vpp/config/v2/punt/tohost/l3/4/l4/6/port/9000", + expectedL3: L3Protocol(0), + expectedL4: L4Protocol(0), + expectedPort: 9000, + isPuntToHostKey: true, + }, + { + name: "invalid Punt L4 and port", + key: "vpp/config/v2/punt/tohost/l3/IPv6/l4/17/port/port1", + expectedL3: L3Protocol(6), + expectedL4: L4Protocol(0), + expectedPort: 0, + isPuntToHostKey: true, + }, + { + name: "invalid all", + key: "vpp/config/v2/punt/tohost/l3/4/l4/17/port/port1", + expectedL3: L3Protocol(0), + expectedL4: L4Protocol(0), + expectedPort: 0, + isPuntToHostKey: true, + }, + { + name: "not a Punt to host key", + key: "vpp/config/v2/punt/ipredirect/l3/IPv6/tx/if1", + expectedL3: L3Protocol(0), + expectedL4: L4Protocol(0), + expectedPort: 0, + isPuntToHostKey: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + l3Proto, l4Proto, port, isPuntToHostKey := ParsePuntToHostKey(test.key) + if l3Proto != test.expectedL3 { + t.Errorf("expected l3PuntKey: %v\tgot: %v", test.expectedL3, l3Proto) + } + if l4Proto != test.expectedL4 { + t.Errorf("expected l4PuntKey: %v\tgot: %v", test.expectedL4, l4Proto) + } + if port != test.expectedPort { + t.Errorf("expected portPuntKey: %v\tgot: %v", test.expectedPort, port) + } + if isPuntToHostKey != test.isPuntToHostKey { + t.Errorf("expected isPuntKey: %v\tgot: %v", test.isPuntToHostKey, isPuntToHostKey) + } + }) + } +}*/ + +/*func TestIPredirectKey(t *testing.T) { + tests := []struct { + name string + l3Protocol L3Protocol 
+ txInterface string + expectedKey string + }{ + { + name: "valid IP redirect case (IPv4)", + l3Protocol: L3Protocol_IPv4, + txInterface: "if1", + expectedKey: "vpp/config/v2/punt/ipredirect/l3/IPv4/tx/if1", + }, + { + name: "valid IP redirect case (IPv6)", + l3Protocol: L3Protocol_IPv6, + txInterface: "if1", + expectedKey: "vpp/config/v2/punt/ipredirect/l3/IPv6/tx/if1", + }, + { + name: "invalid IP redirect case (undefined interface)", + l3Protocol: L3Protocol_IPv4, + txInterface: "", + expectedKey: "vpp/config/v2/punt/ipredirect/l3/IPv4/tx/", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := IPRedirectKey(test.l3Protocol, test.txInterface) + if key != test.expectedKey { + t.Errorf("failed for: puntName=%s\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.name, test.expectedKey, key) + } + }) + } +}*/ + +/*func TestParseIPRedirectKey(t *testing.T) { + tests := []struct { + name string + key string + expectedL3 L3Protocol + expectedIf string + isIPRedirectKey bool + }{ + { + name: "valid IP redirect key (IPv4)", + key: "vpp/config/v2/punt/ipredirect/l3/IPv4/tx/if1", + expectedL3: L3Protocol(4), + expectedIf: "if1", + isIPRedirectKey: true, + }, + { + name: "valid IP redirect key (IPv6)", + key: "vpp/config/v2/punt/ipredirect/l3/IPv6/tx/if1", + expectedL3: L3Protocol(6), + expectedIf: "if1", + isIPRedirectKey: true, + }, + { + name: "invalid IP redirect key (invalid interface)", + key: "vpp/config/v2/punt/ipredirect/l3/IPv4/tx/", + expectedL3: L3Protocol(4), + expectedIf: "", + isIPRedirectKey: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + l3Proto, ifName, isIPRedirectKey := ParseIPRedirectKey(test.key) + if l3Proto != test.expectedL3 { + t.Errorf("expected l3IPRedirectKey L3: %v\tgot: %v", test.expectedL3, l3Proto) + } + if ifName != test.expectedIf { + t.Errorf("expected l3IPRedirectKey ifName: %v\tgot: %v", test.expectedIf, ifName) + } + if isIPRedirectKey != test.isIPRedirectKey { 
+ t.Errorf("expected isIPRedirectKey: %v\tgot: %v", test.isIPRedirectKey, isIPRedirectKey) + } + }) + } +}*/ diff --git a/api/models/vpp/punt/punt.pb.go b/api/models/vpp/punt/punt.pb.go new file mode 100644 index 0000000000..a6c6e684fd --- /dev/null +++ b/api/models/vpp/punt/punt.pb.go @@ -0,0 +1,247 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: models/vpp/punt/punt.proto + +package vpp_punt // import "github.com/ligato/vpp-agent/api/models/vpp/punt" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type L3Protocol int32 + +const ( + L3Protocol_UNDEFINED_L3 L3Protocol = 0 + L3Protocol_IPv4 L3Protocol = 4 + L3Protocol_IPv6 L3Protocol = 6 + L3Protocol_ALL L3Protocol = 10 +) + +var L3Protocol_name = map[int32]string{ + 0: "UNDEFINED_L3", + 4: "IPv4", + 6: "IPv6", + 10: "ALL", +} +var L3Protocol_value = map[string]int32{ + "UNDEFINED_L3": 0, + "IPv4": 4, + "IPv6": 6, + "ALL": 10, +} + +func (x L3Protocol) String() string { + return proto.EnumName(L3Protocol_name, int32(x)) +} +func (L3Protocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_punt_2718300ec8712d86, []int{0} +} + +type L4Protocol int32 + +const ( + L4Protocol_UNDEFINED_L4 L4Protocol = 0 + L4Protocol_TCP L4Protocol = 6 + L4Protocol_UDP L4Protocol = 17 +) + +var L4Protocol_name = map[int32]string{ + 0: "UNDEFINED_L4", + 6: "TCP", + 17: "UDP", +} +var L4Protocol_value = map[string]int32{ + "UNDEFINED_L4": 0, + "TCP": 6, + "UDP": 17, 
+} + +func (x L4Protocol) String() string { + return proto.EnumName(L4Protocol_name, int32(x)) +} +func (L4Protocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_punt_2718300ec8712d86, []int{1} +} + +// IPRedirect allows otherwise dropped packet which destination IP address matching some of the VPP addresses +// to redirect to the defined next hop address via the TX interface +type IPRedirect struct { + L3Protocol L3Protocol `protobuf:"varint,1,opt,name=l3_protocol,json=l3Protocol,proto3,enum=vpp.punt.L3Protocol" json:"l3_protocol,omitempty"` + RxInterface string `protobuf:"bytes,2,opt,name=rx_interface,json=rxInterface,proto3" json:"rx_interface,omitempty"` + TxInterface string `protobuf:"bytes,3,opt,name=tx_interface,json=txInterface,proto3" json:"tx_interface,omitempty"` + NextHop string `protobuf:"bytes,4,opt,name=next_hop,json=nextHop,proto3" json:"next_hop,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IPRedirect) Reset() { *m = IPRedirect{} } +func (m *IPRedirect) String() string { return proto.CompactTextString(m) } +func (*IPRedirect) ProtoMessage() {} +func (*IPRedirect) Descriptor() ([]byte, []int) { + return fileDescriptor_punt_2718300ec8712d86, []int{0} +} +func (m *IPRedirect) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IPRedirect.Unmarshal(m, b) +} +func (m *IPRedirect) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IPRedirect.Marshal(b, m, deterministic) +} +func (dst *IPRedirect) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPRedirect.Merge(dst, src) +} +func (m *IPRedirect) XXX_Size() int { + return xxx_messageInfo_IPRedirect.Size(m) +} +func (m *IPRedirect) XXX_DiscardUnknown() { + xxx_messageInfo_IPRedirect.DiscardUnknown(m) +} + +var xxx_messageInfo_IPRedirect proto.InternalMessageInfo + +func (m *IPRedirect) GetL3Protocol() L3Protocol { + if m != nil { + return m.L3Protocol + } + 
return L3Protocol_UNDEFINED_L3 +} + +func (m *IPRedirect) GetRxInterface() string { + if m != nil { + return m.RxInterface + } + return "" +} + +func (m *IPRedirect) GetTxInterface() string { + if m != nil { + return m.TxInterface + } + return "" +} + +func (m *IPRedirect) GetNextHop() string { + if m != nil { + return m.NextHop + } + return "" +} + +func (*IPRedirect) XXX_MessageName() string { + return "vpp.punt.IPRedirect" +} + +// allows otherwise dropped packet which destination IP address matching some of the VPP interface IP addresses to be +// punted to the host. L3 and L4 protocols can be used for filtering +type ToHost struct { + L3Protocol L3Protocol `protobuf:"varint,2,opt,name=l3_protocol,json=l3Protocol,proto3,enum=vpp.punt.L3Protocol" json:"l3_protocol,omitempty"` + L4Protocol L4Protocol `protobuf:"varint,3,opt,name=l4_protocol,json=l4Protocol,proto3,enum=vpp.punt.L4Protocol" json:"l4_protocol,omitempty"` + Port uint32 `protobuf:"varint,4,opt,name=port,proto3" json:"port,omitempty"` + SocketPath string `protobuf:"bytes,5,opt,name=socket_path,json=socketPath,proto3" json:"socket_path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ToHost) Reset() { *m = ToHost{} } +func (m *ToHost) String() string { return proto.CompactTextString(m) } +func (*ToHost) ProtoMessage() {} +func (*ToHost) Descriptor() ([]byte, []int) { + return fileDescriptor_punt_2718300ec8712d86, []int{1} +} +func (m *ToHost) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ToHost.Unmarshal(m, b) +} +func (m *ToHost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ToHost.Marshal(b, m, deterministic) +} +func (dst *ToHost) XXX_Merge(src proto.Message) { + xxx_messageInfo_ToHost.Merge(dst, src) +} +func (m *ToHost) XXX_Size() int { + return xxx_messageInfo_ToHost.Size(m) +} +func (m *ToHost) XXX_DiscardUnknown() { + 
xxx_messageInfo_ToHost.DiscardUnknown(m) +} + +var xxx_messageInfo_ToHost proto.InternalMessageInfo + +func (m *ToHost) GetL3Protocol() L3Protocol { + if m != nil { + return m.L3Protocol + } + return L3Protocol_UNDEFINED_L3 +} + +func (m *ToHost) GetL4Protocol() L4Protocol { + if m != nil { + return m.L4Protocol + } + return L4Protocol_UNDEFINED_L4 +} + +func (m *ToHost) GetPort() uint32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *ToHost) GetSocketPath() string { + if m != nil { + return m.SocketPath + } + return "" +} + +func (*ToHost) XXX_MessageName() string { + return "vpp.punt.ToHost" +} +func init() { + proto.RegisterType((*IPRedirect)(nil), "vpp.punt.IPRedirect") + proto.RegisterType((*ToHost)(nil), "vpp.punt.ToHost") + proto.RegisterEnum("vpp.punt.L3Protocol", L3Protocol_name, L3Protocol_value) + proto.RegisterEnum("vpp.punt.L4Protocol", L4Protocol_name, L4Protocol_value) +} + +func init() { proto.RegisterFile("models/vpp/punt/punt.proto", fileDescriptor_punt_2718300ec8712d86) } + +var fileDescriptor_punt_2718300ec8712d86 = []byte{ + // 372 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xcf, 0x6e, 0xa2, 0x50, + 0x14, 0xc6, 0x45, 0x19, 0x75, 0x8e, 0xce, 0x84, 0xb9, 0x99, 0x05, 0xe3, 0x62, 0x6a, 0x5d, 0x19, + 0x13, 0xa1, 0x29, 0xb4, 0x69, 0x62, 0xd2, 0xa4, 0xad, 0x36, 0x92, 0x10, 0x43, 0x88, 0x6e, 0xba, + 0x21, 0x88, 0x57, 0x20, 0x45, 0xee, 0x0d, 0x5e, 0x89, 0x0f, 0xd4, 0x5d, 0x5f, 0xa4, 0xef, 0xd1, + 0x17, 0x69, 0xb8, 0xf8, 0x87, 0xe8, 0xa6, 0x1b, 0xf2, 0x7d, 0xe7, 0x7c, 0x3f, 0xf2, 0x05, 0x0e, + 0xb4, 0x56, 0x64, 0x81, 0xa3, 0xb5, 0x9a, 0x52, 0xaa, 0xd2, 0x4d, 0xcc, 0xf8, 0x43, 0xa1, 0x09, + 0x61, 0x04, 0xd5, 0x53, 0x4a, 0x95, 0xcc, 0xb7, 0xfa, 0x7e, 0xc8, 0x82, 0xcd, 0x5c, 0xf1, 0xc8, + 0x4a, 0xf5, 0x89, 0x4f, 0x54, 0x1e, 0x98, 0x6f, 0x96, 0xdc, 0x71, 0xc3, 0x55, 0x0e, 0x76, 0xde, + 0x04, 0x00, 0xc3, 0xb2, 0xf1, 0x22, 0x4c, 0xb0, 0xc7, 0xd0, 0x0d, 0x34, 0x22, 0xcd, 0xe1, 
0x2b, + 0x8f, 0x44, 0xb2, 0xd0, 0x16, 0xba, 0xbf, 0xaf, 0xff, 0x2a, 0xfb, 0xb7, 0x2b, 0xa6, 0x66, 0xed, + 0x76, 0x36, 0x44, 0x07, 0x8d, 0x2e, 0xa1, 0x99, 0x6c, 0x9d, 0x30, 0x66, 0x38, 0x59, 0xba, 0x1e, + 0x96, 0xcb, 0x6d, 0xa1, 0xfb, 0xd3, 0x6e, 0x24, 0x5b, 0x63, 0x3f, 0xca, 0x22, 0xac, 0x18, 0xa9, + 0xe4, 0x11, 0x56, 0x88, 0xfc, 0x83, 0x7a, 0x8c, 0xb7, 0xcc, 0x09, 0x08, 0x95, 0x45, 0xbe, 0xae, + 0x65, 0x7e, 0x4c, 0x68, 0xe7, 0x5d, 0x80, 0xea, 0x94, 0x8c, 0xc9, 0xfa, 0xac, 0x62, 0xf9, 0x9b, + 0x15, 0x33, 0x4c, 0x3f, 0x62, 0x95, 0x33, 0x4c, 0x2f, 0x60, 0x07, 0x8d, 0x10, 0x88, 0x94, 0x24, + 0x8c, 0xf7, 0xf9, 0x65, 0x73, 0x8d, 0x2e, 0xa0, 0xb1, 0x26, 0xde, 0x2b, 0x66, 0x0e, 0x75, 0x59, + 0x20, 0xff, 0xe0, 0x55, 0x21, 0x1f, 0x59, 0x2e, 0x0b, 0x7a, 0x03, 0x80, 0x63, 0x0b, 0x24, 0x41, + 0x73, 0x36, 0x19, 0x8e, 0x9e, 0x8d, 0xc9, 0x68, 0xe8, 0x98, 0x9a, 0x54, 0x42, 0x75, 0x10, 0x0d, + 0x2b, 0xd5, 0x25, 0x71, 0xa7, 0x6e, 0xa5, 0x2a, 0xaa, 0x41, 0xe5, 0xc1, 0x34, 0x25, 0xe8, 0x5d, + 0x01, 0x1c, 0xbb, 0x9c, 0xc0, 0xba, 0x54, 0xca, 0x82, 0xd3, 0x27, 0x2b, 0x27, 0x66, 0x43, 0x4b, + 0xfa, 0xf3, 0x78, 0xff, 0xf1, 0xf9, 0x5f, 0x78, 0xb9, 0x2b, 0xfc, 0xf8, 0x28, 0xf4, 0x5d, 0x46, + 0xb2, 0x4b, 0xe9, 0xbb, 0x3e, 0x8e, 0x99, 0xea, 0xd2, 0x50, 0x3d, 0x39, 0x9f, 0x41, 0x4a, 0xa9, + 0x93, 0x89, 0x79, 0x95, 0x7f, 0x15, 0xed, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x11, 0xab, 0x16, 0x1e, + 0x61, 0x02, 0x00, 0x00, +} diff --git a/api/models/vpp/punt/punt.proto b/api/models/vpp/punt/punt.proto new file mode 100644 index 0000000000..44f47c7ee6 --- /dev/null +++ b/api/models/vpp/punt/punt.proto @@ -0,0 +1,40 @@ +syntax = "proto3"; + +package vpp.punt; + +option go_package = "github.com/ligato/vpp-agent/api/models/vpp/punt;vpp_punt"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option (gogoproto.messagename_all) = true; + +enum L3Protocol { + UNDEFINED_L3 = 0; + IPv4 = 4; + IPv6 = 6; + ALL = 10; +} + +enum L4Protocol { + UNDEFINED_L4 = 0; + TCP = 6; + UDP = 17; +} + +/* IPRedirect allows 
otherwise dropped packet which destination IP address matching some of the VPP addresses +to redirect to the defined next hop address via the TX interface */ +message IPRedirect { + L3Protocol l3_protocol = 1; /* L3 protocol */ + string rx_interface = 2; /* Receive interface name. Optional, only redirect traffic incoming from this interface */ + string tx_interface = 3; /* Transmit interface name */ + string next_hop = 4; /* Next hop IP where the traffic is redirected */ +} + +/* allows otherwise dropped packet which destination IP address matching some of the VPP interface IP addresses to be +punted to the host. L3 and L4 protocols can be used for filtering */ +message ToHost { + L3Protocol l3_protocol = 2; /* L3 destination protocol a packet has to match in order to be punted */ + L4Protocol l4_protocol = 3; /* L4 destination protocol a packet has to match. Currently VPP only supports UDP */ + uint32 port = 4; /* Destination port */ + + string socket_path = 5; /* Optional, use unix domain socket to punt packets to the host. */ +} diff --git a/api/models/vpp/stn/keys.go b/api/models/vpp/stn/keys.go new file mode 100644 index 0000000000..b9e89a1c90 --- /dev/null +++ b/api/models/vpp/stn/keys.go @@ -0,0 +1,39 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vpp_stn + +import ( + "github.com/ligato/vpp-agent/pkg/models" +) + +// ModuleName is the module name used for models. 
+const ModuleName = "vpp.stn" + +var ( + ModelRule = models.Register(&Rule{}, models.Spec{ + Module: ModuleName, + Type: "rule", + Version: "v2", + }, models.WithNameTemplate("{{.Interface}}/ip/{{.IpAddress}}")) +) + +// Key returns the prefix used in the ETCD to store a VPP STN config +// of a particular STN rule in selected VPP instance. +func Key(ifName, ipAddr string) string { + return models.Key(&Rule{ + Interface: ifName, + IpAddress: ipAddr, + }) +} diff --git a/api/models/vpp/stn/keys_test.go b/api/models/vpp/stn/keys_test.go new file mode 100644 index 0000000000..1e16090a0a --- /dev/null +++ b/api/models/vpp/stn/keys_test.go @@ -0,0 +1,119 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package vpp_stn_test + +/*func TestSTNKey(t *testing.T) { + tests := []struct { + name string + stnInterface string + stnIP string + expectedKey string + }{ + { + name: "valid STN case", + stnInterface: "if1", + stnIP: "10.0.0.1", + expectedKey: "vpp/config/v2/stn/rule/if1/ip/10.0.0.1", + }, + { + name: "invalid STN case (undefined interface)", + stnInterface: "", + stnIP: "10.0.0.1", + expectedKey: "vpp/config/v2/stn/rule//ip/10.0.0.1", + }, + { + name: "invalid STN case (undefined address)", + stnInterface: "if1", + stnIP: "", + expectedKey: "vpp/config/v2/stn/rule/if1/ip/", + }, + { + name: "invalid STN case (IP address with mask provided)", + stnInterface: "if1", + stnIP: "10.0.0.1/24", + expectedKey: "vpp/config/v2/stn/rule/if1/ip/", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := stn.Key(test.stnInterface, test.stnIP) + if key != test.expectedKey { + t.Errorf("failed for: stnName=%s\n"+ + "expected key:\n\t%q\ngot key:\n\t%q", + test.name, test.expectedKey, key) + } + }) + } +}*/ + +/*func TestParseSTNKey(t *testing.T) { + tests := []struct { + name string + key string + expectedIfName string + expectedIP string + expectedIsSTNKey bool + }{ + { + name: "valid STN key", + key: "vpp/config/v2/stn/rule/if1/ip/10.0.0.1", + expectedIfName: "if1", + expectedIP: "10.0.0.1", + expectedIsSTNKey: true, + }, + { + name: "invalid if", + key: "vpp/config/v2/stn/rule//ip/10.0.0.1", + expectedIfName: "", + expectedIP: "10.0.0.1", + expectedIsSTNKey: true, + }, + { + name: "invalid STN", + key: "vpp/config/v2/stn/rule/if1/ip/", + expectedIfName: "if1", + expectedIP: "", + expectedIsSTNKey: true, + }, + { + name: "invalid all", + key: "vpp/config/v2/stn/rule//ip/", + expectedIfName: "", + expectedIP: "", + expectedIsSTNKey: true, + }, + { + name: "not STN key", + key: "vpp/config/v2/bd/bd1", + expectedIfName: "", + expectedIP: "", + expectedIsSTNKey: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t 
*testing.T) { + ifName, ip, isSTNKey := stn.ParseKey(test.key) + if isSTNKey != test.expectedIsSTNKey { + t.Errorf("expected isFIBKey: %v\tgot: %v", test.expectedIsSTNKey, isSTNKey) + } + if ifName != test.expectedIfName { + t.Errorf("expected ifName: %s\tgot: %s", test.expectedIfName, ifName) + } + if ip != test.expectedIP { + t.Errorf("expected IP: %s\tgot: %s", test.expectedIP, ip) + } + }) + } +}*/ diff --git a/api/models/vpp/stn/stn.pb.go b/api/models/vpp/stn/stn.pb.go new file mode 100644 index 0000000000..07a682a063 --- /dev/null +++ b/api/models/vpp/stn/stn.pb.go @@ -0,0 +1,91 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: models/vpp/stn/stn.proto + +package vpp_stn // import "github.com/ligato/vpp-agent/api/models/vpp/stn" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Rule struct { + IpAddress string `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + Interface string `protobuf:"bytes,2,opt,name=interface,proto3" json:"interface,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Rule) Reset() { *m = Rule{} } +func (m *Rule) String() string { return proto.CompactTextString(m) } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { + return fileDescriptor_stn_c27ff3c5a845be7c, []int{0} +} +func (m *Rule) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Rule.Unmarshal(m, b) +} +func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Rule.Marshal(b, m, deterministic) +} +func (dst *Rule) XXX_Merge(src proto.Message) { + xxx_messageInfo_Rule.Merge(dst, src) +} +func (m *Rule) XXX_Size() int { + return xxx_messageInfo_Rule.Size(m) +} +func (m *Rule) XXX_DiscardUnknown() { + xxx_messageInfo_Rule.DiscardUnknown(m) +} + +var xxx_messageInfo_Rule proto.InternalMessageInfo + +func (m *Rule) GetIpAddress() string { + if m != nil { + return m.IpAddress + } + return "" +} + +func (m *Rule) GetInterface() string { + if m != nil { + return m.Interface + } + return "" +} + +func (*Rule) XXX_MessageName() string { + return "vpp.stn.Rule" +} +func init() { + proto.RegisterType((*Rule)(nil), "vpp.stn.Rule") +} + +func init() { proto.RegisterFile("models/vpp/stn/stn.proto", fileDescriptor_stn_c27ff3c5a845be7c) } + +var fileDescriptor_stn_c27ff3c5a845be7c = []byte{ + // 186 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xc8, 0xcd, 0x4f, 0x49, + 0xcd, 0x29, 0xd6, 0x2f, 0x2b, 0x28, 0xd0, 0x2f, 0x2e, 0xc9, 0x03, 0x61, 0xbd, 0x82, 0xa2, 0xfc, + 0x92, 0x7c, 0x21, 0xf6, 0xb2, 0x82, 0x02, 0xbd, 0xe2, 
0x92, 0x3c, 0x29, 0xdd, 0xf4, 0xcc, 0x92, + 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0xfd, 0xf4, 0xfc, 0xf4, 0x7c, 0x7d, 0xb0, 0x7c, 0x52, + 0x69, 0x1a, 0x98, 0x07, 0xe6, 0x80, 0x59, 0x10, 0x7d, 0x4a, 0xce, 0x5c, 0x2c, 0x41, 0xa5, 0x39, + 0xa9, 0x42, 0xb2, 0x5c, 0x5c, 0x99, 0x05, 0xf1, 0x89, 0x29, 0x29, 0x45, 0xa9, 0xc5, 0xc5, 0x12, + 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x9c, 0x99, 0x05, 0x8e, 0x10, 0x01, 0x21, 0x19, 0x2e, 0xce, + 0xcc, 0xbc, 0x92, 0xd4, 0xa2, 0xb4, 0xc4, 0xe4, 0x54, 0x09, 0x26, 0xa8, 0x2c, 0x4c, 0xc0, 0xc9, + 0xe6, 0xc4, 0x63, 0x39, 0xc6, 0x28, 0x33, 0x24, 0x9b, 0x73, 0x32, 0xd3, 0x13, 0x4b, 0xf2, 0x41, + 0xee, 0xd4, 0x4d, 0x4c, 0x4f, 0xcd, 0x2b, 0xd1, 0x4f, 0x2c, 0xc8, 0xd4, 0x47, 0x75, 0xbc, 0x75, + 0x59, 0x41, 0x41, 0x7c, 0x71, 0x49, 0x5e, 0x12, 0x1b, 0xd8, 0x25, 0xc6, 0x80, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xeb, 0xdf, 0x94, 0x44, 0xdd, 0x00, 0x00, 0x00, +} diff --git a/api/models/vpp/stn/stn.proto b/api/models/vpp/stn/stn.proto new file mode 100644 index 0000000000..3139b1bebe --- /dev/null +++ b/api/models/vpp/stn/stn.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package vpp.stn; + +option go_package = "github.com/ligato/vpp-agent/api/models/vpp/stn;vpp_stn"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option (gogoproto.messagename_all) = true; + +message Rule { + string ip_address = 1; + string interface = 2; +} diff --git a/api/models/vpp/vpp.pb.go b/api/models/vpp/vpp.pb.go new file mode 100644 index 0000000000..3794b25ddd --- /dev/null +++ b/api/models/vpp/vpp.pb.go @@ -0,0 +1,266 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: models/vpp/vpp.proto + +package vpp // import "github.com/ligato/vpp-agent/api/models/vpp" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import acl "github.com/ligato/vpp-agent/api/models/vpp/acl" +import interfaces "github.com/ligato/vpp-agent/api/models/vpp/interfaces" +import ipsec "github.com/ligato/vpp-agent/api/models/vpp/ipsec" +import l2 "github.com/ligato/vpp-agent/api/models/vpp/l2" +import l3 "github.com/ligato/vpp-agent/api/models/vpp/l3" +import nat "github.com/ligato/vpp-agent/api/models/vpp/nat" +import punt "github.com/ligato/vpp-agent/api/models/vpp/punt" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type ConfigData struct { + Interfaces []*interfaces.Interface `protobuf:"bytes,10,rep,name=interfaces,proto3" json:"interfaces,omitempty"` + Acls []*acl.ACL `protobuf:"bytes,20,rep,name=acls,proto3" json:"acls,omitempty"` + BridgeDomains []*l2.BridgeDomain `protobuf:"bytes,30,rep,name=bridge_domains,json=bridgeDomains,proto3" json:"bridge_domains,omitempty"` + Fibs []*l2.FIBEntry `protobuf:"bytes,31,rep,name=fibs,proto3" json:"fibs,omitempty"` + XconnectPairs []*l2.XConnectPair `protobuf:"bytes,32,rep,name=xconnect_pairs,json=xconnectPairs,proto3" json:"xconnect_pairs,omitempty"` + Routes []*l3.Route `protobuf:"bytes,40,rep,name=routes,proto3" json:"routes,omitempty"` + Arps []*l3.ARPEntry `protobuf:"bytes,41,rep,name=arps,proto3" json:"arps,omitempty"` + ProxyArp *l3.ProxyARP `protobuf:"bytes,42,opt,name=proxy_arp,json=proxyArp,proto3" json:"proxy_arp,omitempty"` + IpscanNeighbor *l3.IPScanNeighbor `protobuf:"bytes,43,opt,name=ipscan_neighbor,json=ipscanNeighbor,proto3" json:"ipscan_neighbor,omitempty"` + Nat44Global *nat.Nat44Global `protobuf:"bytes,50,opt,name=nat44_global,json=nat44Global,proto3" json:"nat44_global,omitempty"` + Dnat44S []*nat.DNat44 `protobuf:"bytes,51,rep,name=dnat44s,proto3" json:"dnat44s,omitempty"` + IpsecSpds []*ipsec.SecurityPolicyDatabase `protobuf:"bytes,60,rep,name=ipsec_spds,json=ipsecSpds,proto3" json:"ipsec_spds,omitempty"` + IpsecSas []*ipsec.SecurityAssociation `protobuf:"bytes,61,rep,name=ipsec_sas,json=ipsecSas,proto3" json:"ipsec_sas,omitempty"` + PuntIpredirects []*punt.IPRedirect `protobuf:"bytes,70,rep,name=punt_ipredirects,json=puntIpredirects,proto3" json:"punt_ipredirects,omitempty"` + PuntTohosts []*punt.ToHost `protobuf:"bytes,71,rep,name=punt_tohosts,json=puntTohosts,proto3" json:"punt_tohosts,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m 
*ConfigData) Reset() { *m = ConfigData{} } +func (m *ConfigData) String() string { return proto.CompactTextString(m) } +func (*ConfigData) ProtoMessage() {} +func (*ConfigData) Descriptor() ([]byte, []int) { + return fileDescriptor_vpp_fc4bcc4ffdd9fa6c, []int{0} +} +func (m *ConfigData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConfigData.Unmarshal(m, b) +} +func (m *ConfigData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConfigData.Marshal(b, m, deterministic) +} +func (dst *ConfigData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigData.Merge(dst, src) +} +func (m *ConfigData) XXX_Size() int { + return xxx_messageInfo_ConfigData.Size(m) +} +func (m *ConfigData) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigData.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigData proto.InternalMessageInfo + +func (m *ConfigData) GetInterfaces() []*interfaces.Interface { + if m != nil { + return m.Interfaces + } + return nil +} + +func (m *ConfigData) GetAcls() []*acl.ACL { + if m != nil { + return m.Acls + } + return nil +} + +func (m *ConfigData) GetBridgeDomains() []*l2.BridgeDomain { + if m != nil { + return m.BridgeDomains + } + return nil +} + +func (m *ConfigData) GetFibs() []*l2.FIBEntry { + if m != nil { + return m.Fibs + } + return nil +} + +func (m *ConfigData) GetXconnectPairs() []*l2.XConnectPair { + if m != nil { + return m.XconnectPairs + } + return nil +} + +func (m *ConfigData) GetRoutes() []*l3.Route { + if m != nil { + return m.Routes + } + return nil +} + +func (m *ConfigData) GetArps() []*l3.ARPEntry { + if m != nil { + return m.Arps + } + return nil +} + +func (m *ConfigData) GetProxyArp() *l3.ProxyARP { + if m != nil { + return m.ProxyArp + } + return nil +} + +func (m *ConfigData) GetIpscanNeighbor() *l3.IPScanNeighbor { + if m != nil { + return m.IpscanNeighbor + } + return nil +} + +func (m *ConfigData) GetNat44Global() *nat.Nat44Global { + if m != nil { + return m.Nat44Global + } + return 
nil +} + +func (m *ConfigData) GetDnat44S() []*nat.DNat44 { + if m != nil { + return m.Dnat44S + } + return nil +} + +func (m *ConfigData) GetIpsecSpds() []*ipsec.SecurityPolicyDatabase { + if m != nil { + return m.IpsecSpds + } + return nil +} + +func (m *ConfigData) GetIpsecSas() []*ipsec.SecurityAssociation { + if m != nil { + return m.IpsecSas + } + return nil +} + +func (m *ConfigData) GetPuntIpredirects() []*punt.IPRedirect { + if m != nil { + return m.PuntIpredirects + } + return nil +} + +func (m *ConfigData) GetPuntTohosts() []*punt.ToHost { + if m != nil { + return m.PuntTohosts + } + return nil +} + +type Notification struct { + Interface *interfaces.InterfaceNotification `protobuf:"bytes,1,opt,name=interface,proto3" json:"interface,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Notification) Reset() { *m = Notification{} } +func (m *Notification) String() string { return proto.CompactTextString(m) } +func (*Notification) ProtoMessage() {} +func (*Notification) Descriptor() ([]byte, []int) { + return fileDescriptor_vpp_fc4bcc4ffdd9fa6c, []int{1} +} +func (m *Notification) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Notification.Unmarshal(m, b) +} +func (m *Notification) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Notification.Marshal(b, m, deterministic) +} +func (dst *Notification) XXX_Merge(src proto.Message) { + xxx_messageInfo_Notification.Merge(dst, src) +} +func (m *Notification) XXX_Size() int { + return xxx_messageInfo_Notification.Size(m) +} +func (m *Notification) XXX_DiscardUnknown() { + xxx_messageInfo_Notification.DiscardUnknown(m) +} + +var xxx_messageInfo_Notification proto.InternalMessageInfo + +func (m *Notification) GetInterface() *interfaces.InterfaceNotification { + if m != nil { + return m.Interface + } + return nil +} + +func init() { + proto.RegisterType((*ConfigData)(nil), 
"vpp.ConfigData") + proto.RegisterType((*Notification)(nil), "vpp.Notification") +} + +func init() { proto.RegisterFile("models/vpp/vpp.proto", fileDescriptor_vpp_fc4bcc4ffdd9fa6c) } + +var fileDescriptor_vpp_fc4bcc4ffdd9fa6c = []byte{ + // 646 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x93, 0x5f, 0x6b, 0xdb, 0x3c, + 0x14, 0xc6, 0x29, 0x2d, 0x7d, 0x1b, 0x35, 0xfd, 0x83, 0x28, 0x7d, 0xd5, 0x30, 0xba, 0xb4, 0xac, + 0xd0, 0x6e, 0xd4, 0x19, 0x49, 0x61, 0x8c, 0x6e, 0x74, 0x69, 0xba, 0x76, 0x81, 0x51, 0x8c, 0xd2, + 0x8b, 0xb1, 0x1b, 0x23, 0xcb, 0x8e, 0x2b, 0x70, 0x25, 0x21, 0x29, 0xa5, 0xf9, 0x34, 0xfb, 0xaa, + 0x43, 0xc7, 0x76, 0xed, 0x40, 0x76, 0x61, 0xe3, 0xa3, 0xdf, 0xf3, 0x1c, 0x4b, 0x47, 0xe7, 0xa0, + 0xbd, 0x27, 0x95, 0xa4, 0xb9, 0xed, 0x3d, 0x6b, 0xed, 0x9f, 0x40, 0x1b, 0xe5, 0x14, 0x5e, 0x7d, + 0xd6, 0xba, 0x43, 0x1a, 0x88, 0xf1, 0xdc, 0x3f, 0x05, 0xee, 0x9c, 0x34, 0x88, 0x90, 0x2e, 0x35, + 0x53, 0xc6, 0x53, 0x5b, 0x7f, 0x96, 0xb2, 0xa3, 0xe5, 0x32, 0xeb, 0x98, 0xab, 0x24, 0x6f, 0x9a, + 0x12, 0x6d, 0x53, 0x5e, 0xbc, 0x97, 0x24, 0xc8, 0xfb, 0xbd, 0xd8, 0x88, 0x24, 0x4b, 0xcf, 0x13, + 0xf5, 0xc4, 0x84, 0x2c, 0x25, 0xff, 0x2f, 0x4a, 0xa6, 0x22, 0x5e, 0x92, 0x39, 0xef, 0xf7, 0x5e, + 0xb8, 0x92, 0x32, 0xe5, 0x6e, 0x99, 0x6d, 0xd0, 0x63, 0xa6, 0x3c, 0x79, 0x67, 0x7f, 0x11, 0xe4, + 0x83, 0x72, 0xfd, 0x60, 0x71, 0xdd, 0xa8, 0xd9, 0xeb, 0x19, 0x9a, 0x75, 0x92, 0xcc, 0xf9, 0xa7, + 0x24, 0x9d, 0x06, 0xd1, 0x33, 0xe9, 0xe0, 0x55, 0xb0, 0xe3, 0x3f, 0xeb, 0x08, 0x8d, 0x94, 0x9c, + 0x8a, 0xec, 0x86, 0x39, 0x86, 0x3f, 0x23, 0x54, 0x97, 0x88, 0xa0, 0xee, 0xea, 0xe9, 0x66, 0xff, + 0x20, 0xf0, 0x37, 0x52, 0x2f, 0x07, 0xe3, 0xea, 0x93, 0x36, 0xc4, 0xb8, 0x8b, 0xd6, 0x18, 0xcf, + 0x2d, 0xd9, 0x03, 0x53, 0x1b, 0x4c, 0xfe, 0xae, 0x86, 0xa3, 0x9f, 0x14, 0x08, 0xbe, 0x44, 0xdb, + 0x45, 0xed, 0xa2, 0xa2, 0x76, 0x96, 0x1c, 0x82, 0x76, 0x0f, 0xb4, 0x79, 0x3f, 0xb8, 0x06, 0x7a, + 0x03, 0x90, 0x6e, 0xc5, 0x8d, 0xc8, 0xe2, 0x77, 
0x68, 0x6d, 0x2a, 0x62, 0x4b, 0xde, 0x82, 0x65, + 0xb7, 0xb2, 0xdc, 0x8e, 0xaf, 0xbf, 0x4b, 0x67, 0xe6, 0x14, 0xa8, 0xff, 0x45, 0x55, 0xe2, 0x48, + 0x33, 0x61, 0x2c, 0xe9, 0x2e, 0xfe, 0xe2, 0xd7, 0xa8, 0xa0, 0x21, 0x13, 0x86, 0x6e, 0x55, 0x5a, + 0x1f, 0x59, 0x7c, 0x82, 0xd6, 0xa1, 0xa0, 0x96, 0x9c, 0x82, 0x69, 0xab, 0x30, 0x0d, 0x02, 0xea, + 0x57, 0x69, 0x09, 0xfd, 0x4e, 0x98, 0xd1, 0x96, 0x9c, 0x35, 0x77, 0x32, 0x08, 0x86, 0x34, 0x2c, + 0x77, 0xe2, 0x29, 0x3e, 0x47, 0x2d, 0x6d, 0xd4, 0xcb, 0x3c, 0x62, 0x46, 0x93, 0xf7, 0xdd, 0x95, + 0xa6, 0x34, 0xf4, 0x60, 0x48, 0x43, 0xba, 0x01, 0x92, 0xa1, 0xd1, 0xf8, 0x0a, 0xed, 0x08, 0x6d, + 0x39, 0x93, 0x91, 0x4c, 0x45, 0xf6, 0x18, 0x2b, 0x43, 0x3e, 0x80, 0x69, 0xbf, 0x32, 0x8d, 0xc3, + 0x09, 0x67, 0xf2, 0xbe, 0xa4, 0x74, 0xbb, 0x90, 0x57, 0x31, 0xfe, 0x84, 0xda, 0x92, 0xb9, 0x8b, + 0x8b, 0x28, 0xcb, 0x55, 0xcc, 0x72, 0xd2, 0x07, 0x77, 0x71, 0x6e, 0xdf, 0x0a, 0xf7, 0x1e, 0xde, + 0x01, 0xa3, 0x9b, 0xb2, 0x0e, 0xf0, 0x19, 0xfa, 0x2f, 0x81, 0xd8, 0x92, 0x01, 0x9c, 0x68, 0xe7, + 0xd5, 0x73, 0x03, 0x26, 0x5a, 0x71, 0xfc, 0x0d, 0x21, 0x98, 0x8b, 0xc8, 0xea, 0xc4, 0x92, 0x2f, + 0xa0, 0x3e, 0x2a, 0xba, 0x03, 0xc6, 0x65, 0x92, 0xf2, 0x99, 0x11, 0x6e, 0x1e, 0xaa, 0x5c, 0xf0, + 0xb9, 0x6f, 0xa8, 0x98, 0xd9, 0x94, 0xb6, 0x80, 0x4e, 0x74, 0xe2, 0xef, 0xa7, 0x55, 0x66, 0x60, + 0x96, 0x7c, 0x85, 0x04, 0x87, 0x4b, 0x12, 0x0c, 0xad, 0x55, 0x5c, 0x30, 0x27, 0x94, 0xa4, 0x1b, + 0x85, 0x9b, 0x59, 0x7c, 0x85, 0x76, 0x7d, 0xe7, 0x46, 0x42, 0x9b, 0x34, 0x11, 0x26, 0xe5, 0xce, + 0x92, 0xdb, 0xc6, 0xf5, 0x42, 0x5b, 0x8f, 0x43, 0x5a, 0x42, 0xba, 0xe3, 0x17, 0xc6, 0xb5, 0x18, + 0x0f, 0x50, 0x1b, 0x12, 0x38, 0xf5, 0xa8, 0xac, 0xb3, 0xe4, 0xae, 0x71, 0x83, 0x60, 0x7e, 0x50, + 0x3f, 0x94, 0x75, 0x74, 0xd3, 0x07, 0x0f, 0x85, 0xe8, 0x78, 0x82, 0xda, 0xf7, 0xca, 0x89, 0xa9, + 0xe0, 0xb0, 0x1f, 0x3c, 0x42, 0xad, 0xd7, 0xae, 0x27, 0x2b, 0x50, 0xe5, 0x93, 0x7f, 0x4e, 0x48, + 0xd3, 0x49, 0x6b, 0xdf, 0xf5, 0xc7, 0xdf, 0x41, 0x26, 0xdc, 0xe3, 0x2c, 0x0e, 0xb8, 
0x7a, 0xea, + 0xe5, 0x22, 0x63, 0x4e, 0xf9, 0xf9, 0x3c, 0x67, 0x59, 0x2a, 0x5d, 0x8f, 0x69, 0xd1, 0xab, 0x87, + 0xf6, 0xf2, 0x59, 0xeb, 0x78, 0x1d, 0xe6, 0x75, 0xf0, 0x37, 0x00, 0x00, 0xff, 0xff, 0x3d, 0x73, + 0xb3, 0xef, 0x2a, 0x05, 0x00, 0x00, +} diff --git a/api/models/vpp/vpp.proto b/api/models/vpp/vpp.proto new file mode 100644 index 0000000000..22674703fc --- /dev/null +++ b/api/models/vpp/vpp.proto @@ -0,0 +1,46 @@ +syntax = "proto3"; + +package vpp; + +option go_package = "github.com/ligato/vpp-agent/api/models/vpp;vpp"; + +import "models/vpp/acl/acl.proto"; +import "models/vpp/interfaces/interface.proto"; +import "models/vpp/interfaces/state.proto"; +import "models/vpp/ipsec/ipsec.proto"; +import "models/vpp/l2/bridge-domain.proto"; +import "models/vpp/l2/fib.proto"; +import "models/vpp/l2/xconnect.proto"; +import "models/vpp/l3/arp.proto"; +import "models/vpp/l3/l3.proto"; +import "models/vpp/l3/route.proto"; +import "models/vpp/nat/nat.proto"; +import "models/vpp/punt/punt.proto"; + +message ConfigData { + repeated interfaces.Interface interfaces = 10; + + repeated acl.ACL acls = 20; + + repeated l2.BridgeDomain bridge_domains = 30; + repeated l2.FIBEntry fibs = 31; + repeated l2.XConnectPair xconnect_pairs = 32; + + repeated l3.Route routes = 40; + repeated l3.ARPEntry arps = 41; + l3.ProxyARP proxy_arp = 42; + l3.IPScanNeighbor ipscan_neighbor = 43; + + nat.Nat44Global nat44_global = 50; + repeated nat.DNat44 dnat44s = 51; + + repeated ipsec.SecurityPolicyDatabase ipsec_spds = 60; + repeated ipsec.SecurityAssociation ipsec_sas = 61; + + repeated punt.IPRedirect punt_ipredirects = 70; + repeated punt.ToHost punt_tohosts = 71; +} + +message Notification { + interfaces.InterfaceNotification interface = 1; +} diff --git a/api/models/vpp/vpp_types.go b/api/models/vpp/vpp_types.go new file mode 100644 index 0000000000..87915a743b --- /dev/null +++ b/api/models/vpp/vpp_types.go @@ -0,0 +1,60 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vpp + +import ( + "github.com/ligato/vpp-agent/api/models/vpp/acl" + "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + "github.com/ligato/vpp-agent/api/models/vpp/ipsec" + "github.com/ligato/vpp-agent/api/models/vpp/l2" + "github.com/ligato/vpp-agent/api/models/vpp/l3" + "github.com/ligato/vpp-agent/api/models/vpp/nat" + "github.com/ligato/vpp-agent/api/models/vpp/punt" + "github.com/ligato/vpp-agent/api/models/vpp/stn" +) + +type ( + // ACL + ACL = vpp_acl.ACL + + // Interfaces + Interface = vpp_interfaces.Interface + + // L2 + BridgeDomain = vpp_l2.BridgeDomain + L2FIB = vpp_l2.FIBEntry + XConnect = vpp_l2.XConnectPair + + // L3 + Route = vpp_l3.Route + ARPEntry = vpp_l3.ARPEntry + IPScanNeigh = vpp_l3.IPScanNeighbor + ProxyARP = vpp_l3.ProxyARP + + // IPSec + IPSecSPD = vpp_ipsec.SecurityPolicyDatabase + IPSecSA = vpp_ipsec.SecurityAssociation + + // NAT + NAT44Global = vpp_nat.Nat44Global + DNAT44 = vpp_nat.DNat44 + + // STN + STNRule = vpp_stn.Rule + + // Punt + PuntIPRedirect = vpp_punt.IPRedirect + PuntToHost = vpp_punt.ToHost +) diff --git a/app/vpp_agent.go b/app/vpp_agent.go deleted file mode 100644 index 8d9bd16723..0000000000 --- a/app/vpp_agent.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright (c) 2018 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package app - -import ( - "sync" - - "github.com/ligato/cn-infra/db/keyval" - "github.com/ligato/cn-infra/db/keyval/bolt" - - "github.com/ligato/cn-infra/datasync" - "github.com/ligato/cn-infra/datasync/kvdbsync" - "github.com/ligato/cn-infra/datasync/kvdbsync/local" - "github.com/ligato/cn-infra/datasync/msgsync" - "github.com/ligato/cn-infra/datasync/resync" - "github.com/ligato/cn-infra/db/keyval/consul" - "github.com/ligato/cn-infra/db/keyval/etcd" - "github.com/ligato/cn-infra/db/keyval/redis" - "github.com/ligato/cn-infra/health/probe" - "github.com/ligato/cn-infra/health/statuscheck" - "github.com/ligato/cn-infra/logging/logmanager" - "github.com/ligato/cn-infra/messaging/kafka" - "github.com/ligato/vpp-agent/plugins/linux" - "github.com/ligato/vpp-agent/plugins/rest" - "github.com/ligato/vpp-agent/plugins/telemetry" - "github.com/ligato/vpp-agent/plugins/vpp" - "github.com/ligato/vpp-agent/plugins/vpp/rpc" -) - -// VPPAgent defines plugins which will be loaded and their order. -// Note: the plugin itself is loaded after all its dependencies. It means that the VPP plugin is first in the list -// despite it needs to be loaded after the linux plugin. -type VPPAgent struct { - LogManager *logmanager.Plugin - - ETCDDataSync *kvdbsync.Plugin - ConsulDataSync *kvdbsync.Plugin - RedisDataSync *kvdbsync.Plugin - BoltDataSync *kvdbsync.Plugin - - VPP *vpp.Plugin - Linux *linux.Plugin - - GRPCService *rpc.Plugin - RESTAPI *rest.Plugin - Probe *probe.Plugin - Telemetry *telemetry.Plugin -} - -// New creates new VPPAgent instance. 
-func New() *VPPAgent { - etcdDataSync := kvdbsync.NewPlugin(kvdbsync.UseKV(&etcd.DefaultPlugin)) - consulDataSync := kvdbsync.NewPlugin(kvdbsync.UseKV(&consul.DefaultPlugin)) - redisDataSync := kvdbsync.NewPlugin(kvdbsync.UseKV(&redis.DefaultPlugin)) - boltDataSync := kvdbsync.NewPlugin(kvdbsync.UseKV(&bolt.DefaultPlugin)) - - watchers := datasync.KVProtoWatchers{ - local.Get(), - etcdDataSync, - consulDataSync, - } - writers := datasync.KVProtoWriters{ - etcdDataSync, - consulDataSync, - } - statuscheck.DefaultPlugin.Transport = writers - - ifStatePub := msgsync.NewPlugin( - msgsync.UseMessaging(&kafka.DefaultPlugin), - msgsync.UseConf(msgsync.Config{ - Topic: "if_state", - }), - ) - - vppPlugin := vpp.NewPlugin(vpp.UseDeps(func(deps *vpp.Deps) { - deps.Publish = writers - deps.Watcher = watchers - deps.IfStatePub = ifStatePub - deps.DataSyncs = map[string]datasync.KeyProtoValWriter{ - "etcd": etcdDataSync, - "redis": redisDataSync, - } - })) - linuxPlugin := linux.NewPlugin(linux.UseDeps(func(deps *linux.Deps) { - deps.VPP = vppPlugin - deps.Watcher = watchers - })) - vppPlugin.Deps.Linux = linuxPlugin - - var watchEventsMutex sync.Mutex - vppPlugin.Deps.WatchEventsMutex = &watchEventsMutex - linuxPlugin.Deps.WatchEventsMutex = &watchEventsMutex - - restPlugin := rest.NewPlugin(rest.UseDeps(func(deps *rest.Deps) { - deps.VPP = vppPlugin - deps.Linux = linuxPlugin - })) - - grpcPlugin := rpc.NewPlugin( - rpc.UseDeps(func(deps *rpc.Deps) { - deps.Brokers = map[string]keyval.KvProtoPlugin{ - "bolt": boltDataSync.KvPlugin, - } - deps.VPP = vppPlugin - deps.Linux = linuxPlugin - })) - - return &VPPAgent{ - LogManager: &logmanager.DefaultPlugin, - ETCDDataSync: etcdDataSync, - ConsulDataSync: consulDataSync, - RedisDataSync: redisDataSync, - BoltDataSync: boltDataSync, - VPP: vppPlugin, - Linux: linuxPlugin, - GRPCService: grpcPlugin, - RESTAPI: restPlugin, - Probe: &probe.DefaultPlugin, - Telemetry: &telemetry.DefaultPlugin, - } -} - -// Init initializes main plugin. 
-func (VPPAgent) Init() error { - return nil -} - -// AfterInit executes resync. -func (VPPAgent) AfterInit() error { - // manually start resync after all plugins started - resync.DefaultPlugin.DoResync() - return nil -} - -// Close could close used resources. -func (VPPAgent) Close() error { - return nil -} - -// String returns name of the plugin. -func (VPPAgent) String() string { - return "VPPAgent" -} diff --git a/client/client_api.go b/client/client_api.go new file mode 100644 index 0000000000..30ad1a7b0f --- /dev/null +++ b/client/client_api.go @@ -0,0 +1,35 @@ +package client + +import ( + "context" + + "github.com/gogo/protobuf/proto" + api "github.com/ligato/vpp-agent/api/genericmanager" +) + +// ConfigClient defines the client-side interface for config. +type ConfigClient interface { + // KnownModels retrieves list of known modules. + KnownModels() ([]api.ModelInfo, error) + + // ChangeRequest returns transaction for changing config. + ChangeRequest() ChangeRequest + + // ResyncConfig overwrites existing config. + ResyncConfig(items ...proto.Message) error + + // GetConfig retrieves current config into dsts. + GetConfig(dsts ...interface{}) error +} + +// ChangeRequest is interface for config change request. +type ChangeRequest interface { + // Update appends updates for given items to the request. + Update(items ...proto.Message) ChangeRequest + + // Delete appends deletes for given items to the request. + Delete(items ...proto.Message) ChangeRequest + + // Send sends the request. + Send(ctx context.Context) error +} diff --git a/client/local_client.go b/client/local_client.go new file mode 100644 index 0000000000..b2b9f53c48 --- /dev/null +++ b/client/local_client.go @@ -0,0 +1,128 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + + "github.com/gogo/protobuf/proto" + "github.com/ligato/cn-infra/datasync/kvdbsync/local" + "github.com/ligato/cn-infra/datasync/syncbase" + "github.com/ligato/cn-infra/db/keyval" + + api "github.com/ligato/vpp-agent/api/genericmanager" + "github.com/ligato/vpp-agent/pkg/models" +) + +// LocalClient is global client for direct local access. +var LocalClient = NewClient(&txnFactory{local.DefaultRegistry}) + +type client struct { + txnFactory ProtoTxnFactory +} + +// NewClient returns new instance that uses given registry for data propagation. 
+func NewClient(factory ProtoTxnFactory) ConfigClient { + return &client{factory} +} + +func (c *client) KnownModels() ([]api.ModelInfo, error) { + var modules []api.ModelInfo + for _, info := range models.RegisteredModels() { + modules = append(modules, *info) + } + return modules, nil +} + +func (c *client) ResyncConfig(items ...proto.Message) error { + txn := c.txnFactory.NewTxn(true) + + for _, item := range items { + key, err := models.GetKey(item) + if err != nil { + return err + } + txn.Put(key, item) + } + + return txn.Commit() +} + +func (c *client) GetConfig(dsts ...interface{}) error { + + return nil +} + +func (c *client) ChangeRequest() ChangeRequest { + return &changeRequest{txn: c.txnFactory.NewTxn(false)} +} + +type changeRequest struct { + txn keyval.ProtoTxn + err error +} + +func (r *changeRequest) Update(items ...proto.Message) ChangeRequest { + if r.err != nil { + return r + } + for _, item := range items { + key, err := models.GetKey(item) + if err != nil { + r.err = err + return r + } + r.txn.Put(key, item) + } + return r +} + +func (r *changeRequest) Delete(items ...proto.Message) ChangeRequest { + if r.err != nil { + return r + } + for _, item := range items { + key, err := models.GetKey(item) + if err != nil { + r.err = err + return r + } + r.txn.Delete(key) + } + return r +} + +func (r *changeRequest) Send(ctx context.Context) error { + if r.err != nil { + return r.err + } + return r.txn.Commit() +} + +// ProtoTxnFactory defines interface for keyval transaction provider. 
+type ProtoTxnFactory interface { + NewTxn(resync bool) keyval.ProtoTxn +} + +type txnFactory struct { + registry *syncbase.Registry +} + +func (p *txnFactory) NewTxn(resync bool) keyval.ProtoTxn { + if resync { + return local.NewProtoTxn(p.registry.PropagateResync) + } + return local.NewProtoTxn(p.registry.PropagateChanges) +} diff --git a/client/remoteclient/grpc_client.go b/client/remoteclient/grpc_client.go new file mode 100644 index 0000000000..786e4c441e --- /dev/null +++ b/client/remoteclient/grpc_client.go @@ -0,0 +1,151 @@ +package remoteclient + +import ( + "context" + "fmt" + + "github.com/gogo/protobuf/proto" + + api "github.com/ligato/vpp-agent/api/genericmanager" + "github.com/ligato/vpp-agent/client" + "github.com/ligato/vpp-agent/pkg/models" + "github.com/ligato/vpp-agent/pkg/util" +) + +type grpcClient struct { + remote api.GenericManagerClient +} + +// NewClientGRPC returns new instance that uses given service client for requests. +func NewClientGRPC(client api.GenericManagerClient) client.ConfigClient { + return &grpcClient{client} +} + +func (c *grpcClient) KnownModels() ([]api.ModelInfo, error) { + ctx := context.Background() + + resp, err := c.remote.Capabilities(ctx, &api.CapabilitiesRequest{}) + if err != nil { + return nil, err + } + + var modules []api.ModelInfo + for _, info := range resp.KnownModels { + modules = append(modules, *info) + } + + return modules, nil +} + +func (c *grpcClient) ChangeRequest() client.ChangeRequest { + return &setConfigRequest{ + client: c.remote, + req: &api.SetConfigRequest{}, + } +} + +func (c *grpcClient) ResyncConfig(items ...proto.Message) error { + req := &api.SetConfigRequest{ + OverwriteAll: true, + } + + for _, protoModel := range items { + item, err := models.MarshalItem(protoModel) + if err != nil { + return err + } + req.Updates = append(req.Updates, &api.UpdateItem{ + Item: item, + }) + } + + _, err := c.remote.SetConfig(context.Background(), req) + return err +} + +func (c *grpcClient) 
GetConfig(dsts ...interface{}) error { + ctx := context.Background() + + resp, err := c.remote.GetConfig(ctx, &api.GetConfigRequest{}) + if err != nil { + return err + } + + fmt.Printf("GetConfig: %+v\n", resp) + + protos := map[string]proto.Message{} + for _, item := range resp.Items { + val, err := models.UnmarshalItem(item.Item) + if err != nil { + return err + } + var key string + if data := item.Item.GetData(); data != nil { + key, err = models.GetKey(val) + } else { + // protos[item.Item.Key] = val + key, err = models.ItemKey(item.Item) + } + if err != nil { + return err + } + protos[key] = val + } + + util.PlaceProtos(protos, dsts...) + + return nil +} + +type setConfigRequest struct { + client api.GenericManagerClient + req *api.SetConfigRequest + err error +} + +func (r *setConfigRequest) Update(items ...proto.Message) client.ChangeRequest { + if r.err != nil { + return r + } + for _, protoModel := range items { + item, err := models.MarshalItem(protoModel) + if err != nil { + r.err = err + return r + } + r.req.Updates = append(r.req.Updates, &api.UpdateItem{ + Item: item, + }) + } + return r +} + +func (r *setConfigRequest) Delete(items ...proto.Message) client.ChangeRequest { + if r.err != nil { + return r + } + for _, protoModel := range items { + item, err := models.MarshalItem(protoModel) + if err != nil { + if err != nil { + r.err = err + return r + } + } + r.req.Updates = append(r.req.Updates, &api.UpdateItem{ + /*Item: &api.Item{ + Key: item.Key, + },*/ + Item: item, + }) + } + return r +} + +func (r *setConfigRequest) Send(ctx context.Context) (err error) { + if r.err != nil { + return r.err + } + _, err = r.client.SetConfig(ctx, r.req) + return err +} diff --git a/client/txn.go b/client/txn.go new file mode 100644 index 0000000000..908cc060c6 --- /dev/null +++ b/client/txn.go @@ -0,0 +1,68 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + + "github.com/gogo/protobuf/proto" + "github.com/ligato/vpp-agent/pkg/models" +) + +// Txn is the +type Txn struct { + items map[string]proto.Message +} + +// NewTxn +func NewTxn(commitFunc func() error) *Txn { + return &Txn{ + items: make(map[string]proto.Message), + } +} + +// Update updates +func (t *Txn) Update(item proto.Message) { + t.items[models.Key(item)] = item +} + +func (t *Txn) Delete(item proto.Message) { + t.items[models.Key(item)] = nil +} + +func (t *Txn) Commit(ctx context.Context) { + +} + +// FindItem returns item with given ID from the request items. +// If the found is true the model with such ID is found +// and if the model is nil the item represents delete. +func (t *Txn) FindItem(id string) (model proto.Message, found bool) { + item, ok := t.items[id] + return item, ok +} + +// Items returns map of items defined for the request, +// where key represents model ID and nil value represents delete. +// NOTE: Do not alter the returned map directly. +func (t *Txn) ListItems() map[string]proto.Message { + return t.items +} + +// RemoveItem removes an item from the transaction. +// This will revert any Update or Delete done for the item. 
+func (t *Txn) RemoveItem(model proto.Message) { + delete(t.items, models.Key(model)) +} diff --git a/clientv1/README.md b/clientv1/README.md deleted file mode 100644 index 053a8005b7..0000000000 --- a/clientv1/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# Client v1 - -Client v1 (i.e. the first version) defines an API that allows to manage -configuration of default plugins and the Linux plugin. -How the configuration is transported between APIs and the plugins -is fully abstracted from the user. - -The API calls can be split into two groups: - - **resync** applies a given (full) configuration. An existing - configuration, if present, is replaced. The name is an abbreviation - of *resynchronization*. It is used initially and after any system - event that may leave the configuration out-of-sync while the set - of outdated configuration options is impossible to determine locally - (e.g. temporarily lost connection to data store). - - **data change** allows to deliver incremental changes - of a configuration. - -There are two implementations: - - **local client** runs inside the same process as the agent - and delivers configuration through go channels directly - to the plugins. - - **remote client** stores the configuration using the given - `keyval.broker`. diff --git a/clientv1/doc.go b/clientv1/doc.go deleted file mode 100644 index 5282f57791..0000000000 --- a/clientv1/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package clientv1 provides clients for local and remote management of VPP -// and Linux configuration via VPP Agent plugins. -package clientv1 diff --git a/clientv1/linux/data_change_api.go b/clientv1/linux/data_change_api.go deleted file mode 100644 index 0afd876428..0000000000 --- a/clientv1/linux/data_change_api.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package linuxclient - -import ( - vpp_clientv1 "github.com/ligato/vpp-agent/clientv1/vpp" - "github.com/ligato/vpp-agent/plugins/linux/model/interfaces" - "github.com/ligato/vpp-agent/plugins/linux/model/l3" - vpp_acl "github.com/ligato/vpp-agent/plugins/vpp/model/acl" - vpp_bfd "github.com/ligato/vpp-agent/plugins/vpp/model/bfd" - vpp_intf "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/model/ipsec" - vpp_l2 "github.com/ligato/vpp-agent/plugins/vpp/model/l2" - vpp_l3 "github.com/ligato/vpp-agent/plugins/vpp/model/l3" - vpp_l4 "github.com/ligato/vpp-agent/plugins/vpp/model/l4" - "github.com/ligato/vpp-agent/plugins/vpp/model/nat" - "github.com/ligato/vpp-agent/plugins/vpp/model/punt" - vpp_stn "github.com/ligato/vpp-agent/plugins/vpp/model/stn" -) - -// DataChangeDSL defines the Domain Specific Language (DSL) for data change -// of both Linux and VPP configuration. -// Use this interface to make your implementation independent of the local -// and any remote client. -// Every DSL statement (apart from Send) returns the receiver (possibly wrapped -// to change the scope of DSL), allowing the calls to be chained together -// conveniently in a single statement. -type DataChangeDSL interface { - // Put initiates a chained sequence of data change DSL statements, declaring - // new configurable objects or changing existing ones, e.g.: - // Put().LinuxInterface(&veth).VppInterface(&afpacket).BD(&BD) ... Send() - // The set of available objects to be created or changed is defined by PutDSL. 
- Put() PutDSL - - // Delete initiates a chained sequence of data change DSL statements, - // removing existing configurable objects (by name), e.g: - // Delete().LinuxInterface(vethName).VppInterface(afpacketName).BD(BDName) ... Send() - // The set of available objects to be removed is defined by DeleteDSL. - Delete() DeleteDSL - - // Send propagates requested changes to the plugins. - Send() vpp_clientv1.Reply -} - -// PutDSL is a subset of data change DSL statements, used to declare new -// Linux or VPP configuration or change existing one. -type PutDSL interface { - // LinuxInterface adds a request to create or update Linux network interface. - LinuxInterface(val *interfaces.LinuxInterfaces_Interface) PutDSL - // LinuxArpEntry adds a request to crete or update Linux ARP entry - LinuxArpEntry(val *l3.LinuxStaticArpEntries_ArpEntry) PutDSL - // LinuxRoute adds a request to crete or update Linux route - LinuxRoute(val *l3.LinuxStaticRoutes_Route) PutDSL - - // VppInterface adds a request to create or update VPP network interface. - VppInterface(val *vpp_intf.Interfaces_Interface) PutDSL - // VppIPSecSPD adds a request to create or update VPP security policy database - VppIPSecSPD(val *ipsec.SecurityPolicyDatabases_SPD) PutDSL - // VppIPSecSA adds a request to create or update VPP security association - VppIPSecSA(val *ipsec.SecurityAssociations_SA) PutDSL - // VppIPSecTunnel adds a request to create or update VPP IPSec tunnel interface - VppIPSecTunnel(val *ipsec.TunnelInterfaces_Tunnel) PutDSL - // BfdSession adds a request to create or update VPP bidirectional - // forwarding detection session. - BfdSession(val *vpp_bfd.SingleHopBFD_Session) PutDSL - // BfdAuthKeys adds a request to create or update VPP bidirectional - // forwarding detection key. - BfdAuthKeys(val *vpp_bfd.SingleHopBFD_Key) PutDSL - // BfdEchoFunction adds a request to create or update VPP bidirectional - // forwarding detection echo function. 
- BfdEchoFunction(val *vpp_bfd.SingleHopBFD_EchoFunction) PutDSL - // BD adds a request to create or update VPP Bridge Domain. - BD(val *vpp_l2.BridgeDomains_BridgeDomain) PutDSL - // BDFIB adds a request to create or update VPP L2 Forwarding Information Base. - BDFIB(fib *vpp_l2.FibTable_FibEntry) PutDSL - // XConnect adds a request to create or update VPP Cross Connect. - XConnect(val *vpp_l2.XConnectPairs_XConnectPair) PutDSL - // StaticRoute adds a request to create or update VPP L3 Static Route. - StaticRoute(val *vpp_l3.StaticRoutes_Route) PutDSL - // ACL adds a request to create or update VPP Access Control List. - ACL(acl *vpp_acl.AccessLists_Acl) PutDSL - // Arp adds a request to create or update VPP L3 ARP. - Arp(arp *vpp_l3.ArpTable_ArpEntry) PutDSL - // ProxyArpInterfaces adds a request to create or update VPP L3 proxy ARP interfaces - ProxyArpInterfaces(pArpIfs *vpp_l3.ProxyArpInterfaces_InterfaceList) PutDSL - // ProxyArpRanges adds a request to create or update VPP L3 proxy ARP ranges - ProxyArpRanges(pArpRng *vpp_l3.ProxyArpRanges_RangeList) PutDSL - // PuntSocketRegister adds request to register a new punt to host entry - PuntSocketRegister(puntCfg *punt.Punt) PutDSL - // L4Features adds a request to enable or disable L4 features - L4Features(val *vpp_l4.L4Features) PutDSL - // AppNamespace adds a request to create or update VPP Application namespace - AppNamespace(appNs *vpp_l4.AppNamespaces_AppNamespace) PutDSL - // StnRule adds a request to create or update VPP Stn rule. - StnRule(stn *vpp_stn.STN_Rule) PutDSL - // NAT44Global adds a request to set global configuration for NAT44 - NAT44Global(nat *nat.Nat44Global) PutDSL - // NAT44DNat adds a request to create a new DNAT configuration - NAT44DNat(dnat *nat.Nat44DNat_DNatConfig) PutDSL - - // Delete changes the DSL mode to allow removing an existing configuration. - // See documentation for DataChangeDSL.Delete(). - Delete() DeleteDSL - - // Send propagates requested changes to the plugins. 
- Send() vpp_clientv1.Reply -} - -// DeleteDSL is a subset of data change DSL statements, used to remove -// existing Linux or VPP configuration. -type DeleteDSL interface { - // LinuxInterface adds a request to delete an existing Linux network - // interface. - LinuxInterface(ifaceName string) DeleteDSL - // LinuxArpEntry adds a request to crete or update Linux ARP entry - LinuxArpEntry(entryName string) DeleteDSL - // LinuxRoute adds a request to crete or update Linux route - LinuxRoute(routeName string) DeleteDSL - - // VppInterface adds a request to delete an existing VPP network interface. - VppInterface(ifaceName string) DeleteDSL - // VppIPSecSPD adds a request to create or update VPP security policy database - VppIPSecSPD(spdName string) DeleteDSL - // VppIPSecSA adds a request to create or update VPP security association - VppIPSecSA(saName string) DeleteDSL - // VppIPSecTunnel adds a request to create or update VPP IPSec tunnel interface - VppIPSecTunnel(tunName string) DeleteDSL - // BfdSession adds a request to delete an existing VPP bidirectional - // forwarding detection session. - BfdSession(bfdSessionIfaceName string) DeleteDSL - // BfdAuthKeys adds a request to delete an existing VPP bidirectional - // forwarding detection key. - BfdAuthKeys(bfdKey string) DeleteDSL - // BfdEchoFunction adds a request to delete an existing VPP bidirectional - // forwarding detection echo function. - BfdEchoFunction(bfdEchoName string) DeleteDSL - // BD adds a request to delete an existing VPP Bridge Domain. - BD(bdName string) DeleteDSL - // FIB adds a request to delete an existing VPP L2 Forwarding Information - // Base. - BDFIB(bdName string, mac string) DeleteDSL - // XConnect adds a request to delete an existing VPP Cross Connect. - XConnect(rxIfaceName string) DeleteDSL - // StaticRoute adds a request to delete an existing VPP L3 Static Route. 
- StaticRoute(vrf uint32, dstAddr string, nextHopAddr string) DeleteDSL - // ACL adds a request to delete an existing VPP Access Control List. - ACL(aclName string) DeleteDSL - // L4Features adds a request to enable or disable L4 features - L4Features() DeleteDSL - // AppNamespace adds a request to delete VPP Application namespace - // Note: current version does not support application namespace deletion - AppNamespace(id string) DeleteDSL - // Arp adds a request to delete an existing VPP L3 ARP. - Arp(ifaceName string, ipAddr string) DeleteDSL - // ProxyArpInterfaces adds a request to delete an existing VPP L3 proxy ARP interfaces - ProxyArpInterfaces(label string) DeleteDSL - // ProxyArpRanges adds a request to delete an existing VPP L3 proxy ARP ranges - ProxyArpRanges(label string) DeleteDSL - // StnRule adds a request to delete an existing VPP Stn rule. - StnRule(ruleName string) DeleteDSL - // NAT44Global adds a request to remove global configuration for NAT44 - NAT44Global() DeleteDSL - // NAT44DNat adds a request to delete a new DNAT configuration - NAT44DNat(label string) DeleteDSL - // PuntSocketDeregister adds request to de-register an existing punt to host entry - PuntSocketDeregister(puntName string) DeleteDSL - - // Put changes the DSL mode to allow configuration editing. - // See documentation for DataChangeDSL.Put(). - Put() PutDSL - - // Send propagates requested changes to the plugins. - Send() vpp_clientv1.Reply -} diff --git a/clientv1/linux/data_resync_api.go b/clientv1/linux/data_resync_api.go deleted file mode 100644 index 200b6d65b6..0000000000 --- a/clientv1/linux/data_resync_api.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package linuxclient - -import ( - "github.com/ligato/vpp-agent/plugins/linux/model/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/model/ipsec" - "github.com/ligato/vpp-agent/plugins/vpp/model/nat" - "github.com/ligato/vpp-agent/plugins/vpp/model/punt" - - vpp_clientv1 "github.com/ligato/vpp-agent/clientv1/vpp" - "github.com/ligato/vpp-agent/plugins/linux/model/l3" - vpp_acl "github.com/ligato/vpp-agent/plugins/vpp/model/acl" - vpp_bfd "github.com/ligato/vpp-agent/plugins/vpp/model/bfd" - vpp_intf "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - vpp_l2 "github.com/ligato/vpp-agent/plugins/vpp/model/l2" - vpp_l3 "github.com/ligato/vpp-agent/plugins/vpp/model/l3" - vpp_l4 "github.com/ligato/vpp-agent/plugins/vpp/model/l4" - vpp_stn "github.com/ligato/vpp-agent/plugins/vpp/model/stn" -) - -// DataResyncDSL defines the Domain Specific Language (DSL) for data RESYNC -// of both Linux and VPP configuration. -// Use this interface to make your implementation independent of the local -// and any remote client. -// Each method (apart from Send) returns the receiver, allowing the calls -// to be chained together conveniently in a single statement. -type DataResyncDSL interface { - // LinuxInterface adds Linux interface to the RESYNC request. - LinuxInterface(intf *interfaces.LinuxInterfaces_Interface) DataResyncDSL - // LinuxInterface adds Linux ARP entry to the RESYNC request. - LinuxArpEntry(intf *l3.LinuxStaticArpEntries_ArpEntry) DataResyncDSL - // LinuxInterface adds Linux route to the RESYNC request. 
- LinuxRoute(intf *l3.LinuxStaticRoutes_Route) DataResyncDSL - - // VppInterface adds VPP interface to the RESYNC request. - VppInterface(intf *vpp_intf.Interfaces_Interface) DataResyncDSL - // VppIPSecSPD adds VPP security policy database to the RESYNC request. - VppIPSecSPD(spd *ipsec.SecurityPolicyDatabases_SPD) DataResyncDSL - // VppIPSecSA adds VPP security association to the RESYNC request. - VppIPSecSA(sa *ipsec.SecurityAssociations_SA) DataResyncDSL - // VppIPSecTunnel adds VPP IPSec tunnel to the RESYNC request. - VppIPSecTunnel(tunnel *ipsec.TunnelInterfaces_Tunnel) DataResyncDSL - // BfdSession adds VPP bidirectional forwarding detection session - // to the RESYNC request. - BfdSession(val *vpp_bfd.SingleHopBFD_Session) DataResyncDSL - // BfdAuthKeys adds VPP bidirectional forwarding detection key to the RESYNC - // request. - BfdAuthKeys(val *vpp_bfd.SingleHopBFD_Key) DataResyncDSL - // BfdEchoFunction adds VPP bidirectional forwarding detection echo function - // to the RESYNC request. - BfdEchoFunction(val *vpp_bfd.SingleHopBFD_EchoFunction) DataResyncDSL - // BD adds VPP Bridge Domain to the RESYNC request. - BD(bd *vpp_l2.BridgeDomains_BridgeDomain) DataResyncDSL - // BDFIB adds VPP L2 FIB to the RESYNC request. - BDFIB(fib *vpp_l2.FibTable_FibEntry) DataResyncDSL - // XConnect adds VPP Cross Connect to the RESYNC request. - XConnect(xcon *vpp_l2.XConnectPairs_XConnectPair) DataResyncDSL - // StaticRoute adds VPP L3 Static Route to the RESYNC request. - StaticRoute(staticRoute *vpp_l3.StaticRoutes_Route) DataResyncDSL - // ACL adds VPP Access Control List to the RESYNC request. - ACL(acl *vpp_acl.AccessLists_Acl) DataResyncDSL - // Arp adds VPP L3 ARP to the RESYNC request. - Arp(arp *vpp_l3.ArpTable_ArpEntry) DataResyncDSL - // ProxyArpInterfaces adds L3 proxy ARP interfaces to the RESYNC request. - ProxyArpInterfaces(pArpIfs *vpp_l3.ProxyArpInterfaces_InterfaceList) DataResyncDSL - // ProxyArpRanges adds L3 proxy ARP ranges to the RESYNC request. 
- ProxyArpRanges(pArpRng *vpp_l3.ProxyArpRanges_RangeList) DataResyncDSL - // L4Features adds L4 features to the RESYNC request - L4Features(val *vpp_l4.L4Features) DataResyncDSL - // AppNamespace adds VPP Application namespaces to the RESYNC request - AppNamespace(appNs *vpp_l4.AppNamespaces_AppNamespace) DataResyncDSL - // StnRule adds Stn rule to the RESYNC request. - StnRule(stn *vpp_stn.STN_Rule) DataResyncDSL - // NAT44Global adds a request to RESYNC global configuration for NAT44 - NAT44Global(nat *nat.Nat44Global) DataResyncDSL - // NAT44DNat adds a request to RESYNC a new DNAT configuration - NAT44DNat(dnat *nat.Nat44DNat_DNatConfig) DataResyncDSL - // PuntSocketRegister adds request to RESYNC a new punt to host entry - PuntSocketRegister(puntCfg *punt.Punt) DataResyncDSL - - // Send propagates the RESYNC request to the plugins. - Send() vpp_clientv1.Reply -} diff --git a/clientv1/linux/dbadapter/data_change_db.go b/clientv1/linux/dbadapter/data_change_db.go deleted file mode 100644 index 6eaa64388d..0000000000 --- a/clientv1/linux/dbadapter/data_change_db.go +++ /dev/null @@ -1,397 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package dbadapter - -import ( - "github.com/ligato/cn-infra/db/keyval" - "github.com/ligato/vpp-agent/clientv1/linux" - "github.com/ligato/vpp-agent/plugins/vpp/model/ipsec" - "github.com/ligato/vpp-agent/plugins/vpp/model/nat" - "github.com/ligato/vpp-agent/plugins/vpp/model/punt" - - "github.com/ligato/vpp-agent/clientv1/vpp/dbadapter" - "github.com/ligato/vpp-agent/plugins/vpp/model/acl" - "github.com/ligato/vpp-agent/plugins/vpp/model/bfd" - "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/model/l2" - "github.com/ligato/vpp-agent/plugins/vpp/model/l3" - "github.com/ligato/vpp-agent/plugins/vpp/model/l4" - "github.com/ligato/vpp-agent/plugins/vpp/model/stn" - - "github.com/ligato/vpp-agent/clientv1/vpp" - linuxIf "github.com/ligato/vpp-agent/plugins/linux/model/interfaces" - linuxL3 "github.com/ligato/vpp-agent/plugins/linux/model/l3" -) - -// NewDataChangeDSL returns a new instance of DataChangeDSL which implements -// the data change DSL for both Linux and VPP config (inherits dbadapter -// from vppplugin). -// Transaction is used to propagate changes to plugins. -func NewDataChangeDSL(txn keyval.ProtoTxn) *DataChangeDSL { - vppDbAdapter := dbadapter.NewDataChangeDSL(txn) - return &DataChangeDSL{txn: txn, vppDataChange: vppDbAdapter} -} - -// DataChangeDSL is an implementation of Domain Specific Language (DSL) -// for changes of both Linux and VPP configuration. -type DataChangeDSL struct { - txn keyval.ProtoTxn - vppDataChange vppclient.DataChangeDSL -} - -// PutDSL implements put operations of data change DSL. -type PutDSL struct { - parent *DataChangeDSL - vppPut vppclient.PutDSL -} - -// DeleteDSL implements delete operations of data change DSL. -type DeleteDSL struct { - parent *DataChangeDSL - vppDelete vppclient.DeleteDSL -} - -// Put initiates a chained sequence of data change DSL statements and declares -// new configurable objects or changes existing ones. 
-func (dsl *DataChangeDSL) Put() linuxclient.PutDSL { - return &PutDSL{dsl, dsl.vppDataChange.Put()} -} - -// Delete initiates a chained sequence of data change DSL statements -// removing existing configurable objects. -func (dsl *DataChangeDSL) Delete() linuxclient.DeleteDSL { - return &DeleteDSL{dsl, dsl.vppDataChange.Delete()} -} - -// Send propagates requested changes to the plugins. -func (dsl *DataChangeDSL) Send() vppclient.Reply { - return dsl.vppDataChange.Send() -} - -// LinuxInterface adds a request to create or update Linux network interface. -func (dsl *PutDSL) LinuxInterface(val *linuxIf.LinuxInterfaces_Interface) linuxclient.PutDSL { - dsl.parent.txn.Put(linuxIf.InterfaceKey(val.Name), val) - return dsl -} - -// LinuxArpEntry adds a request to create or update Linux ARP entry. -func (dsl *PutDSL) LinuxArpEntry(val *linuxL3.LinuxStaticArpEntries_ArpEntry) linuxclient.PutDSL { - dsl.parent.txn.Put(linuxL3.StaticArpKey(val.Name), val) - return dsl -} - -// LinuxRoute adds a request to create or update Linux route. -func (dsl *PutDSL) LinuxRoute(val *linuxL3.LinuxStaticRoutes_Route) linuxclient.PutDSL { - dsl.parent.txn.Put(linuxL3.StaticRouteKey(val.Name), val) - return dsl -} - -// VppInterface adds a request to create or update VPP network interface. -func (dsl *PutDSL) VppInterface(val *interfaces.Interfaces_Interface) linuxclient.PutDSL { - dsl.vppPut.Interface(val) - return dsl -} - -// VppIPSecSPD adds a request to create or update VPP IPSec security policy database. -func (dsl *PutDSL) VppIPSecSPD(val *ipsec.SecurityPolicyDatabases_SPD) linuxclient.PutDSL { - dsl.vppPut.IPSecSPD(val) - return dsl -} - -// VppIPSecSA adds a request to create or update VPP IPSec security associations. -func (dsl *PutDSL) VppIPSecSA(val *ipsec.SecurityAssociations_SA) linuxclient.PutDSL { - dsl.vppPut.IPSecSA(val) - return dsl -} - -// VppIPSecTunnel adds a request to create or update VPP IPSec tunnel. 
-func (dsl *PutDSL) VppIPSecTunnel(val *ipsec.TunnelInterfaces_Tunnel) linuxclient.PutDSL { - dsl.vppPut.IPSecTunnel(val) - return dsl -} - -// BfdSession adds a request to create or update VPP bidirectional forwarding -// detection session. -func (dsl *PutDSL) BfdSession(val *bfd.SingleHopBFD_Session) linuxclient.PutDSL { - dsl.vppPut.BfdSession(val) - return dsl -} - -// BfdAuthKeys adds a request to create or update VPP bidirectional forwarding -// detection key. -func (dsl *PutDSL) BfdAuthKeys(val *bfd.SingleHopBFD_Key) linuxclient.PutDSL { - dsl.vppPut.BfdAuthKeys(val) - return dsl -} - -// BfdEchoFunction adds a request to create or update VPP bidirectional forwarding -// detection echo function. -func (dsl *PutDSL) BfdEchoFunction(val *bfd.SingleHopBFD_EchoFunction) linuxclient.PutDSL { - dsl.vppPut.BfdEchoFunction(val) - return dsl -} - -// BD adds a request to create or update VPP Bridge Domain. -func (dsl *PutDSL) BD(val *l2.BridgeDomains_BridgeDomain) linuxclient.PutDSL { - dsl.vppPut.BD(val) - return dsl -} - -// BDFIB adds a request to create or update VPP L2 Forwarding Information Base. -func (dsl *PutDSL) BDFIB(fib *l2.FibTable_FibEntry) linuxclient.PutDSL { - dsl.vppPut.BDFIB(fib) - return dsl -} - -// XConnect adds a request to create or update VPP Cross Connect. -func (dsl *PutDSL) XConnect(val *l2.XConnectPairs_XConnectPair) linuxclient.PutDSL { - dsl.vppPut.XConnect(val) - return dsl -} - -// StaticRoute adds a request to create or update VPP L3 Static Route. -func (dsl *PutDSL) StaticRoute(val *l3.StaticRoutes_Route) linuxclient.PutDSL { - dsl.vppPut.StaticRoute(val) - return dsl -} - -// ACL adds a request to create or update VPP Access Control List. -func (dsl *PutDSL) ACL(acl *acl.AccessLists_Acl) linuxclient.PutDSL { - dsl.vppPut.ACL(acl) - return dsl -} - -// Arp adds a request to create or update VPP L3 ARP. 
-func (dsl *PutDSL) Arp(arp *l3.ArpTable_ArpEntry) linuxclient.PutDSL { - dsl.vppPut.Arp(arp) - return dsl -} - -// ProxyArpInterfaces adds a request to create or update VPP L3 proxy ARP interfaces. -func (dsl *PutDSL) ProxyArpInterfaces(arp *l3.ProxyArpInterfaces_InterfaceList) linuxclient.PutDSL { - dsl.vppPut.ProxyArpInterfaces(arp) - return dsl -} - -// ProxyArpRanges adds a request to create or update VPP L3 proxy ARP ranges -func (dsl *PutDSL) ProxyArpRanges(arp *l3.ProxyArpRanges_RangeList) linuxclient.PutDSL { - dsl.vppPut.ProxyArpRanges(arp) - return dsl -} - -// PuntSocketRegister adds a request to create or update VPP punt unix domain socket registration -func (dsl *PutDSL) PuntSocketRegister(puntCfg *punt.Punt) linuxclient.PutDSL { - dsl.vppPut.PuntSocketRegister(puntCfg) - return dsl -} - -// L4Features adds a request to enable or disable L4 features -func (dsl *PutDSL) L4Features(val *l4.L4Features) linuxclient.PutDSL { - dsl.vppPut.L4Features(val) - return dsl -} - -// AppNamespace adds a request to create or update VPP Application namespace -func (dsl *PutDSL) AppNamespace(appNs *l4.AppNamespaces_AppNamespace) linuxclient.PutDSL { - dsl.vppPut.AppNamespace(appNs) - return dsl -} - -// StnRule adds a request to create or update VPP Stn rule. -func (dsl *PutDSL) StnRule(stn *stn.STN_Rule) linuxclient.PutDSL { - dsl.vppPut.StnRule(stn) - return dsl -} - -// NAT44Global adds a request to set global configuration for NAT44 -func (dsl *PutDSL) NAT44Global(nat44 *nat.Nat44Global) linuxclient.PutDSL { - dsl.vppPut.NAT44Global(nat44) - return dsl -} - -// NAT44DNat adds a request to create a new DNAT configuration -func (dsl *PutDSL) NAT44DNat(nat44 *nat.Nat44DNat_DNatConfig) linuxclient.PutDSL { - dsl.vppPut.NAT44DNat(nat44) - return dsl -} - -// Delete changes the DSL mode to allow removal of an existing configuration. 
-func (dsl *PutDSL) Delete() linuxclient.DeleteDSL { - return &DeleteDSL{dsl.parent, dsl.vppPut.Delete()} -} - -// Send propagates requested changes to the plugins. -func (dsl *PutDSL) Send() vppclient.Reply { - return dsl.parent.Send() -} - -// LinuxInterface adds a request to delete an existing Linux network -// interface. -func (dsl *DeleteDSL) LinuxInterface(interfaceName string) linuxclient.DeleteDSL { - dsl.parent.txn.Delete(linuxIf.InterfaceKey(interfaceName)) - return dsl -} - -// LinuxArpEntry adds a request to delete Linux ARP entry. -func (dsl *DeleteDSL) LinuxArpEntry(entryName string) linuxclient.DeleteDSL { - dsl.parent.txn.Delete(linuxL3.StaticArpKey(entryName)) - return dsl -} - -// LinuxRoute adds a request to delete Linux route. -func (dsl *DeleteDSL) LinuxRoute(routeName string) linuxclient.DeleteDSL { - dsl.parent.txn.Delete(linuxL3.StaticRouteKey(routeName)) - return dsl -} - -// VppInterface adds a request to delete an existing VPP network interface. -func (dsl *DeleteDSL) VppInterface(ifaceName string) linuxclient.DeleteDSL { - dsl.vppDelete.Interface(ifaceName) - return dsl -} - -// VppIPSecSPD adds a request to delete an existing VPP IPSec security policy database. -func (dsl *DeleteDSL) VppIPSecSPD(spdName string) linuxclient.DeleteDSL { - dsl.vppDelete.IPSecSPD(spdName) - return dsl -} - -// VppIPSecSA adds a request to delete an existing VPP IPSec security associations. -func (dsl *DeleteDSL) VppIPSecSA(saName string) linuxclient.DeleteDSL { - dsl.vppDelete.IPSecSA(saName) - return dsl -} - -// VppIPSecTunnel adds a request to delete an existing VPP IPSec tunnel. -func (dsl *DeleteDSL) VppIPSecTunnel(tunName string) linuxclient.DeleteDSL { - dsl.vppDelete.IPSecTunnel(tunName) - return dsl -} - -// BfdSession adds a request to delete an existing VPP bidirectional forwarding -// detection session. 
-func (dsl *DeleteDSL) BfdSession(bfdSessionIfaceName string) linuxclient.DeleteDSL { - dsl.vppDelete.BfdSession(bfdSessionIfaceName) - return dsl -} - -// BfdAuthKeys adds a request to delete an existing VPP bidirectional forwarding -// detection key. -func (dsl *DeleteDSL) BfdAuthKeys(bfdKey string) linuxclient.DeleteDSL { - dsl.vppDelete.BfdAuthKeys(bfdKey) - return dsl -} - -// BfdEchoFunction adds a request to delete an existing VPP bidirectional -// forwarding detection echo function. -func (dsl *DeleteDSL) BfdEchoFunction(bfdEchoName string) linuxclient.DeleteDSL { - dsl.vppDelete.BfdEchoFunction(bfdEchoName) - return dsl -} - -// BD adds a request to delete an existing VPP Bridge Domain. -func (dsl *DeleteDSL) BD(bdName string) linuxclient.DeleteDSL { - dsl.vppDelete.BD(bdName) - return dsl -} - -// BDFIB adds a request to delete an existing VPP L2 Forwarding Information Base. -func (dsl *DeleteDSL) BDFIB(bdName string, mac string) linuxclient.DeleteDSL { - dsl.vppDelete.BDFIB(bdName, mac) - return dsl -} - -// XConnect adds a request to delete an existing VPP Cross Connect. -func (dsl *DeleteDSL) XConnect(rxIfaceName string) linuxclient.DeleteDSL { - dsl.vppDelete.XConnect(rxIfaceName) - return dsl -} - -// StaticRoute adds a request to delete an existing VPP L3 Static Route. -func (dsl *DeleteDSL) StaticRoute(vrf uint32, dstAddr string, nextHopAddr string) linuxclient.DeleteDSL { - dsl.vppDelete.StaticRoute(vrf, dstAddr, nextHopAddr) - return dsl -} - -// ACL adds a request to delete an existing VPP Access Control List. 
-func (dsl *DeleteDSL) ACL(aclName string) linuxclient.DeleteDSL { - dsl.vppDelete.ACL(aclName) - return dsl -} - -// L4Features adds a request to enable or disable L4 features -func (dsl *DeleteDSL) L4Features() linuxclient.DeleteDSL { - dsl.vppDelete.L4Features() - return dsl -} - -// PuntSocketDeregister adds request to de-register an existing punt to host entry -func (dsl *DeleteDSL) PuntSocketDeregister(puntName string) linuxclient.DeleteDSL { - dsl.vppDelete.PuntSocketDeregister(puntName) - return dsl -} - -// AppNamespace adds a request to delete VPP Application namespace -// Note: current version does not support application namespace deletion -func (dsl *DeleteDSL) AppNamespace(id string) linuxclient.DeleteDSL { - dsl.vppDelete.AppNamespace(id) - return dsl -} - -// Arp adds a request to delete an existing VPP L3 ARP. -func (dsl *DeleteDSL) Arp(ifaceName string, ipAddr string) linuxclient.DeleteDSL { - dsl.vppDelete.Arp(ifaceName, ipAddr) - return dsl -} - -// ProxyArpInterfaces adds a request to delete an existing VPP L3 proxy ARP interfaces -func (dsl *DeleteDSL) ProxyArpInterfaces(label string) linuxclient.DeleteDSL { - dsl.vppDelete.ProxyArpInterfaces(label) - return dsl -} - -// ProxyArpRanges adds a request to delete an existing VPP L3 proxy ARP ranges -func (dsl *DeleteDSL) ProxyArpRanges(label string) linuxclient.DeleteDSL { - dsl.vppDelete.ProxyArpRanges(label) - return dsl -} - -// StnRule adds a request to delete an existing VPP Stn rule. 
-func (dsl *DeleteDSL) StnRule(ruleName string) linuxclient.DeleteDSL { - dsl.vppDelete.StnRule(ruleName) - return dsl -} - -// NAT44Global adds a request to remove global configuration for NAT44 -func (dsl *DeleteDSL) NAT44Global() linuxclient.DeleteDSL { - dsl.vppDelete.NAT44Global() - return dsl -} - -// NAT44DNat adds a request to delete a new DNAT configuration -func (dsl *DeleteDSL) NAT44DNat(label string) linuxclient.DeleteDSL { - dsl.vppDelete.NAT44DNat(label) - return dsl -} - -// Put changes the DSL mode to allow configuration editing. -func (dsl *DeleteDSL) Put() linuxclient.PutDSL { - return &PutDSL{dsl.parent, dsl.vppDelete.Put()} -} - -// Send propagates requested changes to the plugins. -func (dsl *DeleteDSL) Send() vppclient.Reply { - return dsl.parent.Send() -} diff --git a/clientv1/linux/dbadapter/data_resync_db.go b/clientv1/linux/dbadapter/data_resync_db.go deleted file mode 100644 index 3957925f23..0000000000 --- a/clientv1/linux/dbadapter/data_resync_db.go +++ /dev/null @@ -1,272 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package dbadapter - -import ( - "github.com/ligato/vpp-agent/clientv1/linux" - "github.com/ligato/vpp-agent/plugins/vpp/model/ipsec" - "github.com/ligato/vpp-agent/plugins/vpp/model/punt" - - "github.com/ligato/vpp-agent/plugins/vpp/model/nat" - - "github.com/ligato/vpp-agent/clientv1/vpp" - "github.com/ligato/vpp-agent/plugins/vpp/model/acl" - "github.com/ligato/vpp-agent/plugins/vpp/model/bfd" - "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/model/l2" - "github.com/ligato/vpp-agent/plugins/vpp/model/l3" - "github.com/ligato/vpp-agent/plugins/vpp/model/l4" - "github.com/ligato/vpp-agent/plugins/vpp/model/stn" - - "github.com/ligato/cn-infra/db/keyval" - "github.com/ligato/vpp-agent/clientv1/vpp/dbadapter" - linuxIf "github.com/ligato/vpp-agent/plugins/linux/model/interfaces" - linuxL3 "github.com/ligato/vpp-agent/plugins/linux/model/l3" -) - -// NewDataResyncDSL returns a new instance of DataResyncDSL which implements -// the data RESYNC DSL for both Linux and VPP config (inherits dbadapter -// from vppplugin). -// Transaction is used to propagate changes to plugins. -// Function is used to list keys with already existing configuration. -func NewDataResyncDSL(txn keyval.ProtoTxn, listKeys func(prefix string) (keyval.ProtoKeyIterator, error)) *DataResyncDSL { - vppDataResync := dbadapter.NewDataResyncDSL(txn, listKeys) - return &DataResyncDSL{txn, []string{}, listKeys, vppDataResync} -} - -// DataResyncDSL is an implementation of Domain Specific Language (DSL) for data -// RESYNC of both Linux and VPP configuration. -type DataResyncDSL struct { - txn keyval.ProtoTxn - txnKeys []string - listKeys func(prefix string) (keyval.ProtoKeyIterator, error) - - vppDataResync vppclient.DataResyncDSL -} - -// LinuxInterface adds Linux interface to the RESYNC request. 
-func (dsl *DataResyncDSL) LinuxInterface(val *linuxIf.LinuxInterfaces_Interface) linuxclient.DataResyncDSL { - key := linuxIf.InterfaceKey(val.Name) - dsl.txn.Put(key, val) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// LinuxArpEntry adds Linux ARP entry to the RESYNC request. -func (dsl *DataResyncDSL) LinuxArpEntry(val *linuxL3.LinuxStaticArpEntries_ArpEntry) linuxclient.DataResyncDSL { - key := linuxL3.StaticArpKey(val.Name) - dsl.txn.Put(key, val) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// LinuxRoute adds Linux route to the RESYNC request. -func (dsl *DataResyncDSL) LinuxRoute(val *linuxL3.LinuxStaticRoutes_Route) linuxclient.DataResyncDSL { - key := linuxL3.StaticRouteKey(val.Name) - dsl.txn.Put(key, val) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// VppInterface adds VPP interface to the RESYNC request. -func (dsl *DataResyncDSL) VppInterface(intf *interfaces.Interfaces_Interface) linuxclient.DataResyncDSL { - dsl.vppDataResync.Interface(intf) - return dsl -} - -// VppIPSecSPD adds VPP security policy database to the RESYNC request. -func (dsl *DataResyncDSL) VppIPSecSPD(spd *ipsec.SecurityPolicyDatabases_SPD) linuxclient.DataResyncDSL { - dsl.vppDataResync.IPSecSPD(spd) - return dsl -} - -// VppIPSecSA adds VPP security association to the RESYNC request. -func (dsl *DataResyncDSL) VppIPSecSA(sa *ipsec.SecurityAssociations_SA) linuxclient.DataResyncDSL { - dsl.vppDataResync.IPSecSA(sa) - return dsl -} - -// VppIPSecTunnel adds VPP IPSec tunnel to the RESYNC request. -func (dsl *DataResyncDSL) VppIPSecTunnel(tunnel *ipsec.TunnelInterfaces_Tunnel) linuxclient.DataResyncDSL { - dsl.vppDataResync.IPSecTunnel(tunnel) - return dsl -} - -// BfdSession adds VPP bidirectional forwarding detection session -// to the RESYNC request. 
-func (dsl *DataResyncDSL) BfdSession(val *bfd.SingleHopBFD_Session) linuxclient.DataResyncDSL { - dsl.vppDataResync.BfdSession(val) - return dsl -} - -// BfdAuthKeys adds VPP bidirectional forwarding detection key to the RESYNC -// request. -func (dsl *DataResyncDSL) BfdAuthKeys(val *bfd.SingleHopBFD_Key) linuxclient.DataResyncDSL { - dsl.vppDataResync.BfdAuthKeys(val) - return dsl -} - -// BfdEchoFunction adds VPP bidirectional forwarding detection echo function -// to the RESYNC request. -func (dsl *DataResyncDSL) BfdEchoFunction(val *bfd.SingleHopBFD_EchoFunction) linuxclient.DataResyncDSL { - dsl.vppDataResync.BfdEchoFunction(val) - return dsl -} - -// BD adds VPP Bridge Domain to the RESYNC request. -func (dsl *DataResyncDSL) BD(bd *l2.BridgeDomains_BridgeDomain) linuxclient.DataResyncDSL { - dsl.vppDataResync.BD(bd) - return dsl -} - -// BDFIB adds VPP L2 FIB to the RESYNC request. -func (dsl *DataResyncDSL) BDFIB(fib *l2.FibTable_FibEntry) linuxclient.DataResyncDSL { - dsl.vppDataResync.BDFIB(fib) - return dsl -} - -// XConnect adds VPP Cross Connect to the RESYNC request. -func (dsl *DataResyncDSL) XConnect(xcon *l2.XConnectPairs_XConnectPair) linuxclient.DataResyncDSL { - dsl.vppDataResync.XConnect(xcon) - return dsl -} - -// StaticRoute adds VPP L3 Static Route to the RESYNC request. -func (dsl *DataResyncDSL) StaticRoute(staticRoute *l3.StaticRoutes_Route) linuxclient.DataResyncDSL { - dsl.vppDataResync.StaticRoute(staticRoute) - return dsl -} - -// ACL adds VPP Access Control List to the RESYNC request. -func (dsl *DataResyncDSL) ACL(acl *acl.AccessLists_Acl) linuxclient.DataResyncDSL { - dsl.vppDataResync.ACL(acl) - return dsl -} - -// Arp adds VPP L3 ARP to the RESYNC request. -func (dsl *DataResyncDSL) Arp(arp *l3.ArpTable_ArpEntry) linuxclient.DataResyncDSL { - dsl.vppDataResync.Arp(arp) - return dsl -} - -// ProxyArpInterfaces adds L3 proxy ARP interfaces to the RESYNC request. 
-func (dsl *DataResyncDSL) ProxyArpInterfaces(val *l3.ProxyArpInterfaces_InterfaceList) linuxclient.DataResyncDSL { - dsl.vppDataResync.ProxyArpInterfaces(val) - return dsl -} - -// ProxyArpRanges adds L3 proxy ARP ranges to the RESYNC request. -func (dsl *DataResyncDSL) ProxyArpRanges(val *l3.ProxyArpRanges_RangeList) linuxclient.DataResyncDSL { - dsl.vppDataResync.ProxyArpRanges(val) - return dsl -} - -// L4Features adds L4 features to the RESYNC request -func (dsl *DataResyncDSL) L4Features(val *l4.L4Features) linuxclient.DataResyncDSL { - dsl.vppDataResync.L4Features(val) - return dsl -} - -// AppNamespace adds VPP Application namespaces to the RESYNC request -func (dsl *DataResyncDSL) AppNamespace(appNs *l4.AppNamespaces_AppNamespace) linuxclient.DataResyncDSL { - dsl.vppDataResync.AppNamespace(appNs) - return dsl -} - -// StnRule adds Stn rule to the RESYNC request. -func (dsl *DataResyncDSL) StnRule(stn *stn.STN_Rule) linuxclient.DataResyncDSL { - dsl.vppDataResync.StnRule(stn) - return dsl -} - -// NAT44Global adds a request to RESYNC global configuration for NAT44 -func (dsl *DataResyncDSL) NAT44Global(nat44 *nat.Nat44Global) linuxclient.DataResyncDSL { - dsl.vppDataResync.NAT44Global(nat44) - - return dsl -} - -// NAT44DNat adds a request to RESYNC a new DNAT configuration -func (dsl *DataResyncDSL) NAT44DNat(nat44 *nat.Nat44DNat_DNatConfig) linuxclient.DataResyncDSL { - dsl.vppDataResync.NAT44DNat(nat44) - - return dsl -} - -// PuntSocketRegister adds request to RESYNC a new punt to host entry -func (dsl *DataResyncDSL) PuntSocketRegister(puntCfg *punt.Punt) linuxclient.DataResyncDSL { - dsl.vppDataResync.PuntSocketRegister(puntCfg) - - return dsl -} - -// AppendKeys is a helper function that fills the keySet with values -// pointed to by the iterator . 
-func appendKeys(keys *keySet, it keyval.ProtoKeyIterator) { - for { - k, _, stop := it.GetNext() - if stop { - break - } - - (*keys)[k] = nil - } -} - -// KeySet is a helper type that reuses map keys to store values as a set. -// The values of the map are nil. -type keySet map[string] /*key*/ interface{} /*nil*/ - -// Send propagates the request to the plugins. -// It deletes obsolete keys if listKeys() (from constructor) function is not nil. -func (dsl *DataResyncDSL) Send() vppclient.Reply { - - for dsl.listKeys != nil { - toBeDeleted := keySet{} - - // fill all known keys associated with the Linux network configuration: - keys, err := dsl.listKeys(linuxIf.InterfacePrefix) - if err != nil { - break - } - appendKeys(&toBeDeleted, keys) - keys, err = dsl.listKeys(linuxL3.StaticArpPrefix) - if err != nil { - break - } - appendKeys(&toBeDeleted, keys) - keys, err = dsl.listKeys(linuxL3.StaticRoutePrefix) - if err != nil { - break - } - appendKeys(&toBeDeleted, keys) - - // remove keys that are part of the transaction - for _, txnKey := range dsl.txnKeys { - delete(toBeDeleted, txnKey) - } - - for delKey := range toBeDeleted { - dsl.txn.Delete(delKey) - } - - break - } - - return dsl.vppDataResync.Send() -} diff --git a/clientv1/linux/localclient/localclient_api.go b/clientv1/linux/localclient/localclient_api.go deleted file mode 100644 index 339e1b6fd1..0000000000 --- a/clientv1/linux/localclient/localclient_api.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package localclient - -import ( - "github.com/ligato/cn-infra/datasync/kvdbsync/local" - "github.com/ligato/vpp-agent/clientv1/linux" - "github.com/ligato/vpp-agent/clientv1/linux/dbadapter" -) - -// PluginID defines the name of Linux localclient plugin. -//const PluginID core.PluginName = "LinuxPlugin_LOCAL_CLIENT" - -// DataResyncRequest allows creating a RESYNC request using convenient RESYNC -// DSL and sending it locally through go channels (i.e. without using Data Store). -func DataResyncRequest(caller string) linuxclient.DataResyncDSL { - return dbadapter.NewDataResyncDSL(local.NewProtoTxn(local.Get().PropagateResync), - nil /*no need to list anything*/) -} - -// DataChangeRequest allows creating Data Change request(s) using convenient -// Data Change DSL and sending it locally through go channels (i.e. without using -// Data Store). -func DataChangeRequest(caller string) linuxclient.DataChangeDSL { - return dbadapter.NewDataChangeDSL(local.NewProtoTxn(local.Get().PropagateChanges)) -} diff --git a/clientv1/linux/remoteclient/remoteclient_api.go b/clientv1/linux/remoteclient/remoteclient_api.go deleted file mode 100644 index 16c683c8af..0000000000 --- a/clientv1/linux/remoteclient/remoteclient_api.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package remoteclient - -import ( - "github.com/ligato/cn-infra/db/keyval" - "github.com/ligato/vpp-agent/clientv1/linux" - "github.com/ligato/vpp-agent/clientv1/linux/dbadapter" -) - -// DataResyncRequestDB allows creating a RESYNC request, using convenient RESYNC -// DSL and sending it through the provided . -// User of the API does not need to be aware of keys. -// User of the API does not need to delete the obsolete objects/keys -// prior to RESYNC - it is handled by DataResyncDSL. -func DataResyncRequestDB(broker keyval.ProtoBroker) linuxclient.DataResyncDSL { - return dbadapter.NewDataResyncDSL(broker.NewTxn(), broker.ListKeys) -} - -// DataChangeRequestDB allows creating Data Change requests, using convenient -// Data Change DSL and sending it through the provided . -// User of the API does not need to be aware of keys. -func DataChangeRequestDB(broker keyval.ProtoBroker) linuxclient.DataChangeDSL { - return dbadapter.NewDataChangeDSL(broker.NewTxn()) -} diff --git a/clientv1/vpp/data_change_api.go b/clientv1/vpp/data_change_api.go deleted file mode 100644 index 71bb399176..0000000000 --- a/clientv1/vpp/data_change_api.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package vppclient - -import ( - "github.com/ligato/vpp-agent/plugins/vpp/model/acl" - "github.com/ligato/vpp-agent/plugins/vpp/model/bfd" - "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/model/ipsec" - "github.com/ligato/vpp-agent/plugins/vpp/model/l2" - "github.com/ligato/vpp-agent/plugins/vpp/model/l3" - "github.com/ligato/vpp-agent/plugins/vpp/model/l4" - "github.com/ligato/vpp-agent/plugins/vpp/model/nat" - "github.com/ligato/vpp-agent/plugins/vpp/model/punt" - "github.com/ligato/vpp-agent/plugins/vpp/model/stn" -) - -// DataChangeDSL defines Domain Specific Language (DSL) for data change. -// of the VPP configuration. -// Use this interface to make your implementation independent of the local -// and any remote client. -// Every DSL statement (apart from Send) returns the receiver (possibly wrapped -// to change the scope of DSL), allowing the calls to be chained together -// conveniently in a single statement. -type DataChangeDSL interface { - // Put initiates a chained sequence of data change DSL statements, declaring - // new or changing existing configurable objects, e.g.: - // Put().Interface(&memif).XConnect(&xconnect).BD(&BD) ... Send() - // The set of available objects to be created or changed is defined by PutDSL. - Put() PutDSL - - // Delete initiates a chained sequence of data change DSL statements, - // removing existing configurable objects (by name), e.g.: - // Delete().Interface(memifName).XConnect(xconnectName).BD(BDName) ... Send() - // The set of available objects to be removed is defined by DeleteDSL. - Delete() DeleteDSL - - // Send propagates requested changes to the plugins. - Send() Reply -} - -// PutDSL is a subset of data change DSL statements, used to declare new -// VPP configuration or to change an existing one. -type PutDSL interface { - // Interface adds a request to create or update VPP network interface. 
- Interface(val *interfaces.Interfaces_Interface) PutDSL - // BfdSession adds a request to create or update bidirectional forwarding - // detection session. - BfdSession(val *bfd.SingleHopBFD_Session) PutDSL - // BfdAuthKeys adds a request to create or update bidirectional forwarding - // detection key. - BfdAuthKeys(val *bfd.SingleHopBFD_Key) PutDSL - // BfdEchoFunction adds a request to create or update bidirectional - // forwarding detection echo function. - BfdEchoFunction(val *bfd.SingleHopBFD_EchoFunction) PutDSL - // BD adds a request to create or update VPP Bridge Domain. - BD(val *l2.BridgeDomains_BridgeDomain) PutDSL - // BDFIB adds a request to create or update VPP L2 Forwarding Information Base. - BDFIB(fib *l2.FibTable_FibEntry) PutDSL - // XConnect adds a request to create or update VPP Cross Connect. - XConnect(val *l2.XConnectPairs_XConnectPair) PutDSL - // StaticRoute adds a request to create or update VPP L3 Static Route. - StaticRoute(val *l3.StaticRoutes_Route) PutDSL - // ACL adds a request to create or update VPP Access Control List. - ACL(acl *acl.AccessLists_Acl) PutDSL - // Arp adds a request to create or update VPP L3 ARP. - Arp(arp *l3.ArpTable_ArpEntry) PutDSL - // ProxyArpInterfaces adds a request to create or update VPP L3 proxy ARP interfaces - ProxyArpInterfaces(pArpIfs *l3.ProxyArpInterfaces_InterfaceList) PutDSL - // ProxyArpRanges adds a request to create or update VPP L3 proxy ARP ranges - ProxyArpRanges(pArpRng *l3.ProxyArpRanges_RangeList) PutDSL - // L4Features adds a request to enable or disable L4 features - L4Features(val *l4.L4Features) PutDSL - // AppNamespace adds a request to create or update VPP Application namespace - AppNamespace(appNs *l4.AppNamespaces_AppNamespace) PutDSL - // StnRule adds a request to create or update Stn rule. 
- StnRule(stn *stn.STN_Rule) PutDSL - // NAT44Global adds a request to set global configuration for NAT44 - NAT44Global(nat *nat.Nat44Global) PutDSL - // NAT44DNat adds a request to create a new DNAT configuration - NAT44DNat(dnat *nat.Nat44DNat_DNatConfig) PutDSL - // IPSecSA adds request to create a new Security Association - IPSecSA(sa *ipsec.SecurityAssociations_SA) PutDSL - // IPSecSPD adds request to create a new Security Policy Database - IPSecSPD(spd *ipsec.SecurityPolicyDatabases_SPD) PutDSL - // IPSecTunnel adds request to create a new IPSec tunnel - IPSecTunnel(spd *ipsec.TunnelInterfaces_Tunnel) PutDSL - // PuntSocketRegister adds request to register a new punt to host entry - PuntSocketRegister(puntCfg *punt.Punt) PutDSL - - // Delete changes the DSL mode to allow removal of an existing configuration. - // See documentation for DataChangeDSL.Delete(). - Delete() DeleteDSL - - // Send propagates requested changes to the plugins. - Send() Reply -} - -// DeleteDSL is a subset of data change DSL statements, used to remove -// an existing VPP configuration. -type DeleteDSL interface { - // Interface adds a request to delete an existing VPP network interface. - Interface(ifaceName string) DeleteDSL - // BfdSession adds a request to delete an existing bidirectional forwarding - // detection session. - BfdSession(bfdSessionIfaceName string) DeleteDSL - // BfdAuthKeys adds a request to delete an existing bidirectional forwarding - // detection key. - BfdAuthKeys(bfdKey string) DeleteDSL - // BfdEchoFunction adds a request to delete an existing bidirectional - // forwarding detection echo function. - BfdEchoFunction(bfdEchoName string) DeleteDSL - // BD adds a request to delete an existing VPP Bridge Domain. - BD(bdName string) DeleteDSL - // BDFIB adds a request to delete an existing VPP L2 Forwarding Information - // Base. - BDFIB(bdName string, mac string) DeleteDSL - // XConnect adds a request to delete an existing VPP Cross Connect. 
- XConnect(rxIfaceName string) DeleteDSL - // StaticRoute adds a request to delete an existing VPP L3 Static Route. - StaticRoute(vrf uint32, dstAddr string, nextHopAddr string) DeleteDSL - // ACL adds a request to delete an existing VPP Access Control List. - ACL(aclName string) DeleteDSL - // L4Features adds a request to enable or disable L4 features - L4Features() DeleteDSL - // AppNamespace adds a request to delete VPP Application namespace - // Note: current version does not support application namespace deletion - AppNamespace(id string) DeleteDSL - // Arp adds a request to delete an existing VPP L3 ARP. - Arp(ifaceName string, ipAddr string) DeleteDSL - // ProxyArpInterfaces adds a request to delete an existing VPP L3 proxy ARP interfaces - ProxyArpInterfaces(label string) DeleteDSL - // ProxyArpRanges adds a request to delete an existing VPP L3 proxy ARP ranges - ProxyArpRanges(label string) DeleteDSL - // StnRule adds a request to delete an existing Stn rule. - StnRule(ruleName string) DeleteDSL - // NAT44Global adds a request to remove global configuration for NAT44 - NAT44Global() DeleteDSL - // NAT44DNat adds a request to delete a new DNAT configuration - NAT44DNat(label string) DeleteDSL - // IPSecSA adds request to delete a Security Association - IPSecSA(saName string) DeleteDSL - // IPSecSPD adds request to delete a Security Policy Database - IPSecSPD(spdName string) DeleteDSL - // IPSecTunnel adds request to delete an IPSec tunnel - IPSecTunnel(tunName string) DeleteDSL - // PuntSocketDeregister adds request to de-register an existing punt to host entry - PuntSocketDeregister(puntName string) DeleteDSL - - // Put changes the DSL mode to allow configuration editing. - // See documentation for DataChangeDSL.Put(). - Put() PutDSL - - // Send propagates requested changes to the plugins. - Send() Reply -} - -// Reply interface allows to wait for a reply to previously called Send() and -// extract the result from it (success/error). 
-type Reply interface { - // ReceiveReply waits for a reply to previously called Send() and returns - // the result (error or nil). - ReceiveReply() error -} diff --git a/clientv1/vpp/data_dump_api.go b/clientv1/vpp/data_dump_api.go deleted file mode 100644 index c54b52feef..0000000000 --- a/clientv1/vpp/data_dump_api.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright (c) 2018 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package vppclient - -import ( - linuxIf "github.com/ligato/vpp-agent/plugins/linux/model/interfaces" - linuxL3 "github.com/ligato/vpp-agent/plugins/linux/model/l3" - "github.com/ligato/vpp-agent/plugins/vpp/model/acl" - "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/model/ipsec" - "github.com/ligato/vpp-agent/plugins/vpp/model/l2" - "github.com/ligato/vpp-agent/plugins/vpp/model/l3" - "github.com/ligato/vpp-agent/plugins/vpp/model/rpc" -) - -// DataDumpDSL defines Domain Specific Language (DSL) for data read. -// of the VPP configuration. -// Use this interface to make your implementation independent of the local -// and any remote client. -// Every DSL statement (apart from Send) returns the receiver (possibly wrapped -// to change the scope of DSL), allowing the calls to be chained together -// conveniently in a single statement. 
-type DataDumpDSL interface { - // Dump initiates a chained sequence of data read DSL statements, reading - // existing configurable objects, e.g.: - // Dump().Interfaces().BD() ... Send() - // The set of available objects to be created or changed is defined by GetDSL. - Dump() DumpDSL -} - -// DumpDSL is a subset of data read DSL statements, used to read existing -// VPP configuration. -type DumpDSL interface { - // ACLs adds a request to read VPP access lists. - ACLs() DumpDSL - // Interfaces adds a request to read VPP interfaces. - Interfaces() DumpDSL - // IPSecSPDs adds a request to read IPSec SPDs. - IPSecSPDs() DumpDSL - // IPSecSAs adds a request to read IPSec SAs. - IPSecSAs() DumpDSL - // IPSecTunnels adds a request to read IPSec tunnels. - IPSecTunnels() DumpDSL - // BDs adds a request to read bridge domains. - BDs() DumpDSL - // FIBs adds a request to read FIBs. - FIBs() DumpDSL - // XConnects adds a request to read cross connects. - XConnects() DumpDSL - // Routes adds a request to read routes. - Routes() DumpDSL - // ARPs adds a request to read ARPs. - ARPs() DumpDSL - // PuntRegistrations adds a request to read punt socket registrations. - PuntRegistrations() DumpDSL - // LinuxInterfaces adds a request to read linux interfaces. - LinuxInterfaces() DumpDSL - // LinuxARPs adds a request to read linux ARPs. - LinuxARPs() DumpDSL - // LinuxRoutes adds a request to read linux routes. - LinuxRoutes() DumpDSL - - // Send propagates requested changes to the plugins. - Send() DumpReply -} - -// DumpReply interface allows to wait for a reply to previously called Send() and -// extract the result from it (success/error). -type DumpReply interface { - // ReceiveReply waits for a reply to previously called Send() and returns - // the result (data set or error). 
- ReceiveReply() (ReplyData, error) -} - -// ReplyData is helper interface for more convenient access to typed data -type ReplyData interface { - // GetACLs returns all access lists from the reply - GetACLs() []*acl.AccessLists_Acl - // GetInterfaces returns all the interfaces from the reply - GetInterfaces() []*interfaces.Interfaces_Interface - // GetIPSecSPDs returns all the IPSec SPDs from the reply - GetIPSecSPDs() []*ipsec.SecurityPolicyDatabases_SPD - // GetIPSecSAs returns all the IPSec SAa from the reply - GetIPSecSAs() []*ipsec.SecurityAssociations_SA - // GetBDs returns all the bridge domains from the reply - GetBDs() []*l2.BridgeDomains_BridgeDomain - // GetFIBs returns all the FIB entries from the reply - GetFIBs() []*l2.FibTable_FibEntry - // GetXConnects returns all the XConnects from the reply - GetXConnects() []*l2.XConnectPairs_XConnectPair - // GetARPs returns all the ARPs from the reply - GetARPs() []*l3.ArpTable_ArpEntry - // GetRoutes returns all the routes from the reply - GetRoutes() []*l3.StaticRoutes_Route - // GetPunts returns all the punts from the reply - GetPunts() []*rpc.PuntResponse_PuntEntry - // GetLinuxInterfaces returns all the linux interfaces from the reply - GetLinuxInterfaces() []*linuxIf.LinuxInterfaces_Interface - // GetLinuxARPs returns all the linux ARPs from the reply - GetLinuxARPs() []*linuxL3.LinuxStaticArpEntries_ArpEntry - // GetLinuxRoutes returns all the linux routes from the reply - GetLinuxRoutes() []*linuxL3.LinuxStaticRoutes_Route -} diff --git a/clientv1/vpp/data_resync_api.go b/clientv1/vpp/data_resync_api.go deleted file mode 100644 index 13cd1a9c82..0000000000 --- a/clientv1/vpp/data_resync_api.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package vppclient - -import ( - "github.com/ligato/vpp-agent/plugins/vpp/model/acl" - "github.com/ligato/vpp-agent/plugins/vpp/model/bfd" - "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/model/ipsec" - "github.com/ligato/vpp-agent/plugins/vpp/model/l2" - "github.com/ligato/vpp-agent/plugins/vpp/model/l3" - "github.com/ligato/vpp-agent/plugins/vpp/model/l4" - "github.com/ligato/vpp-agent/plugins/vpp/model/nat" - "github.com/ligato/vpp-agent/plugins/vpp/model/punt" - "github.com/ligato/vpp-agent/plugins/vpp/model/stn" -) - -// DataResyncDSL defines the Domain Specific Language (DSL) for data RESYNC -// of the VPP configuration. -// Use this interface to make your implementation independent of the local -// and any remote client. -// Each method (apart from Send) returns the receiver, allowing the calls -// to be chained together conveniently in a single statement. -type DataResyncDSL interface { - // Interface adds interface to the RESYNC request. - Interface(intf *interfaces.Interfaces_Interface) DataResyncDSL - // BfdSession adds bidirectional forwarding detection session to the RESYNC - // request. - BfdSession(val *bfd.SingleHopBFD_Session) DataResyncDSL - // BfdAuthKeys adds bidirectional forwarding detection key to the RESYNC - // request. - BfdAuthKeys(val *bfd.SingleHopBFD_Key) DataResyncDSL - // BfdEchoFunction adds bidirectional forwarding detection echo function - // to the RESYNC request. 
- BfdEchoFunction(val *bfd.SingleHopBFD_EchoFunction) DataResyncDSL - // BD adds Bridge Domain to the RESYNC request. - BD(bd *l2.BridgeDomains_BridgeDomain) DataResyncDSL - // BDFIB adds L2 Forwarding Information Base. - BDFIB(fib *l2.FibTable_FibEntry) DataResyncDSL - // XConnect adds Cross Connect to the RESYNC request. - XConnect(xcon *l2.XConnectPairs_XConnectPair) DataResyncDSL - // StaticRoute adds L3 Static Route to the RESYNC request. - StaticRoute(staticRoute *l3.StaticRoutes_Route) DataResyncDSL - // ACL adds Access Control List to the RESYNC request. - ACL(acl *acl.AccessLists_Acl) DataResyncDSL - // Arp adds VPP L3 ARP to the RESYNC request. - Arp(arp *l3.ArpTable_ArpEntry) DataResyncDSL - // ProxyArpInterfaces adds L3 proxy ARP interfaces to the RESYNC request. - ProxyArpInterfaces(pArpIfs *l3.ProxyArpInterfaces_InterfaceList) DataResyncDSL - // ProxyArpRanges adds L3 proxy ARP ranges to the RESYNC request. - ProxyArpRanges(pArpRng *l3.ProxyArpRanges_RangeList) DataResyncDSL - // L4Features adds L4 features to the RESYNC request - L4Features(val *l4.L4Features) DataResyncDSL - // AppNamespace adds VPP Application namespaces to the RESYNC request - AppNamespace(appNs *l4.AppNamespaces_AppNamespace) DataResyncDSL - // StnRule adds Stn rule to the RESYNC request. 
- StnRule(stn *stn.STN_Rule) DataResyncDSL - // NAT44Global adds a request to RESYNC global configuration for NAT44 - NAT44Global(nat *nat.Nat44Global) DataResyncDSL - // NAT44DNat adds a request to RESYNC a new DNAT configuration - NAT44DNat(dnat *nat.Nat44DNat_DNatConfig) DataResyncDSL - // IPSecSA adds request to RESYNC a new Security Association - IPSecSA(sa *ipsec.SecurityAssociations_SA) DataResyncDSL - // IPSecSPD adds request to RESYNC a new Security Policy Database - IPSecSPD(spd *ipsec.SecurityPolicyDatabases_SPD) DataResyncDSL - // IPSecTunnel adds request to RESYNC a new IPSec tunnel - IPSecTunnel(tun *ipsec.TunnelInterfaces_Tunnel) DataResyncDSL - // PuntSocketRegister adds request to RESYNC a new punt to host entry - PuntSocketRegister(puntCfg *punt.Punt) DataResyncDSL - - // Send propagates the RESYNC request to the plugins. - Send() Reply -} diff --git a/clientv1/vpp/dbadapter/data_change_db.go b/clientv1/vpp/dbadapter/data_change_db.go deleted file mode 100644 index 85ee5a48a8..0000000000 --- a/clientv1/vpp/dbadapter/data_change_db.go +++ /dev/null @@ -1,366 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package dbadapter - -import ( - "github.com/ligato/cn-infra/db/keyval" - "github.com/ligato/vpp-agent/clientv1/vpp" - "github.com/ligato/vpp-agent/plugins/vpp/model/acl" - "github.com/ligato/vpp-agent/plugins/vpp/model/bfd" - "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - intf "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/model/ipsec" - "github.com/ligato/vpp-agent/plugins/vpp/model/l2" - "github.com/ligato/vpp-agent/plugins/vpp/model/l3" - "github.com/ligato/vpp-agent/plugins/vpp/model/l4" - "github.com/ligato/vpp-agent/plugins/vpp/model/nat" - "github.com/ligato/vpp-agent/plugins/vpp/model/punt" - "github.com/ligato/vpp-agent/plugins/vpp/model/stn" -) - -// NewDataChangeDSL returns a new instance of DataChangeDSL which implements -// the data change DSL for VPP config. -// Transaction is used to propagate changes to plugins. -func NewDataChangeDSL(txn keyval.ProtoTxn) *DataChangeDSL { - return &DataChangeDSL{txn: txn} -} - -// DataChangeDSL is an implementation of Domain Specific Language (DSL) -// for changes of the VPP configuration. -type DataChangeDSL struct { - txn keyval.ProtoTxn -} - -// PutDSL implements put operations of data change DSL. -type PutDSL struct { - parent *DataChangeDSL -} - -// DeleteDSL implements delete operations of data change DSL. -type DeleteDSL struct { - parent *DataChangeDSL -} - -// Put initiates a chained sequence of data change DSL statements declaring -// new configurable objects or changing existing ones. -func (dsl *DataChangeDSL) Put() vppclient.PutDSL { - return &PutDSL{dsl} -} - -// Delete initiates a chained sequence of data change DSL statements -// removing existing configurable objects. -func (dsl *DataChangeDSL) Delete() vppclient.DeleteDSL { - return &DeleteDSL{dsl} -} - -// Send propagates requested changes to the plugins. 
-func (dsl *DataChangeDSL) Send() vppclient.Reply { - err := dsl.txn.Commit() - return &Reply{err} -} - -// Interface adds a request to create or update VPP network interface. -func (dsl *PutDSL) Interface(val *interfaces.Interfaces_Interface) vppclient.PutDSL { - dsl.parent.txn.Put(intf.InterfaceKey(val.Name), val) - return dsl -} - -// BfdSession adds a request to create or update bidirectional forwarding -// detection session. -func (dsl *PutDSL) BfdSession(val *bfd.SingleHopBFD_Session) vppclient.PutDSL { - dsl.parent.txn.Put(bfd.SessionKey(val.Interface), val) - return dsl -} - -// BfdAuthKeys adds a request to create or update bidirectional forwarding -// detection key. -func (dsl *PutDSL) BfdAuthKeys(val *bfd.SingleHopBFD_Key) vppclient.PutDSL { - dsl.parent.txn.Put(bfd.AuthKeysKey(string(val.Id)), val) - return dsl -} - -// BfdEchoFunction adds a request to create or update bidirectional forwarding -// detection echo function. -func (dsl *PutDSL) BfdEchoFunction(val *bfd.SingleHopBFD_EchoFunction) vppclient.PutDSL { - dsl.parent.txn.Put(bfd.EchoFunctionKey(val.EchoSourceInterface), val) - return dsl -} - -// BD adds a request to create or update VPP Bridge Domain. -func (dsl *PutDSL) BD(val *l2.BridgeDomains_BridgeDomain) vppclient.PutDSL { - dsl.parent.txn.Put(l2.BridgeDomainKey(val.Name), val) - return dsl -} - -// BDFIB adds a request to create or update VPP L2 Forwarding Information Base. -func (dsl *PutDSL) BDFIB(val *l2.FibTable_FibEntry) vppclient.PutDSL { - dsl.parent.txn.Put(l2.FibKey(val.BridgeDomain, val.PhysAddress), val) - return dsl -} - -// XConnect adds a request to create or update VPP Cross Connect. -func (dsl *PutDSL) XConnect(val *l2.XConnectPairs_XConnectPair) vppclient.PutDSL { - dsl.parent.txn.Put(l2.XConnectKey(val.ReceiveInterface), val) - return dsl -} - -// StaticRoute adds a request to create or update VPP L3 Static Route. 
-func (dsl *PutDSL) StaticRoute(val *l3.StaticRoutes_Route) vppclient.PutDSL { - dsl.parent.txn.Put(l3.RouteKey(val.VrfId, val.DstIpAddr, val.NextHopAddr), val) - return dsl -} - -// ACL adds a request to create or update VPP Access Control List. -func (dsl *PutDSL) ACL(val *acl.AccessLists_Acl) vppclient.PutDSL { - dsl.parent.txn.Put(acl.Key(val.AclName), val) - return dsl -} - -// L4Features create or update request for the L4Features -func (dsl *PutDSL) L4Features(val *l4.L4Features) vppclient.PutDSL { - dsl.parent.txn.Put(l4.FeatureKey(), val) - - return dsl -} - -// AppNamespace create or update request for the Application Namespaces List -func (dsl *PutDSL) AppNamespace(val *l4.AppNamespaces_AppNamespace) vppclient.PutDSL { - dsl.parent.txn.Put(l4.AppNamespacesKey(val.NamespaceId), val) - - return dsl -} - -// Arp adds a request to create or update VPP L3 ARP entry. -func (dsl *PutDSL) Arp(arp *l3.ArpTable_ArpEntry) vppclient.PutDSL { - dsl.parent.txn.Put(l3.ArpEntryKey(arp.Interface, arp.IpAddress), arp) - return dsl -} - -// ProxyArpInterfaces adds a request to create or update VPP L3 proxy ARP interfaces. -func (dsl *PutDSL) ProxyArpInterfaces(arp *l3.ProxyArpInterfaces_InterfaceList) vppclient.PutDSL { - dsl.parent.txn.Put(l3.ProxyArpInterfaceKey(arp.Label), arp) - return dsl -} - -// ProxyArpRanges adds a request to create or update VPP L3 proxy ARP ranges -func (dsl *PutDSL) ProxyArpRanges(arp *l3.ProxyArpRanges_RangeList) vppclient.PutDSL { - dsl.parent.txn.Put(l3.ProxyArpRangeKey(arp.Label), arp) - return dsl -} - -// StnRule adds a request to create or update STN rule. 
-func (dsl *PutDSL) StnRule(val *stn.STN_Rule) vppclient.PutDSL { - dsl.parent.txn.Put(stn.Key(val.RuleName), val) - return dsl -} - -// NAT44Global adds a request to set global configuration for NAT44 -func (dsl *PutDSL) NAT44Global(nat44 *nat.Nat44Global) vppclient.PutDSL { - dsl.parent.txn.Put(nat.GlobalPrefix, nat44) - return dsl -} - -// NAT44DNat adds a request to create a new DNAT configuration -func (dsl *PutDSL) NAT44DNat(nat44 *nat.Nat44DNat_DNatConfig) vppclient.PutDSL { - dsl.parent.txn.Put(nat.DNatKey(nat44.Label), nat44) - return dsl -} - -// IPSecSA adds request to create a new Security Association -func (dsl *PutDSL) IPSecSA(sa *ipsec.SecurityAssociations_SA) vppclient.PutDSL { - dsl.parent.txn.Put(ipsec.SAKey(sa.Name), sa) - return dsl -} - -// IPSecSPD adds request to create a new Security Policy Database -func (dsl *PutDSL) IPSecSPD(spd *ipsec.SecurityPolicyDatabases_SPD) vppclient.PutDSL { - dsl.parent.txn.Put(ipsec.SPDKey(spd.Name), spd) - return dsl -} - -// IPSecTunnel adds request to create a new IPSec tunnel -func (dsl *PutDSL) IPSecTunnel(tunnel *ipsec.TunnelInterfaces_Tunnel) vppclient.PutDSL { - dsl.parent.txn.Put(ipsec.TunnelKey(tunnel.Name), tunnel) - return dsl -} - -// PuntSocketRegister adds request to register a new punt to host entry -func (dsl *PutDSL) PuntSocketRegister(puntCfg *punt.Punt) vppclient.PutDSL { - dsl.parent.txn.Put(punt.Key(puntCfg.Name), puntCfg) - return dsl -} - -// Delete changes the DSL mode to allow removal of an existing configuration. -func (dsl *PutDSL) Delete() vppclient.DeleteDSL { - return &DeleteDSL{dsl.parent} -} - -// Send propagates requested changes to the plugins. -func (dsl *PutDSL) Send() vppclient.Reply { - return dsl.parent.Send() -} - -// Interface adds a request to delete an existing VPP network interface. 
-func (dsl *DeleteDSL) Interface(interfaceName string) vppclient.DeleteDSL { - dsl.parent.txn.Delete(intf.InterfaceKey(interfaceName)) - return dsl -} - -// BfdSession adds a request to delete an existing bidirectional forwarding -// detection session. -func (dsl *DeleteDSL) BfdSession(bfdSessionIfaceName string) vppclient.DeleteDSL { - dsl.parent.txn.Delete(bfd.SessionKey(bfdSessionIfaceName)) - return dsl -} - -// BfdAuthKeys adds a request to delete an existing bidirectional forwarding -// detection key. -func (dsl *DeleteDSL) BfdAuthKeys(bfdKey string) vppclient.DeleteDSL { - dsl.parent.txn.Delete(bfd.AuthKeysKey(bfdKey)) - return dsl -} - -// BfdEchoFunction adds a request to delete an existing bidirectional forwarding -// detection echo function. -func (dsl *DeleteDSL) BfdEchoFunction(bfdEchoName string) vppclient.DeleteDSL { - dsl.parent.txn.Delete(bfd.EchoFunctionKey(bfdEchoName)) - return dsl -} - -// BD adds a request to delete an existing VPP Bridge Domain. -func (dsl *DeleteDSL) BD(bdName string) vppclient.DeleteDSL { - dsl.parent.txn.Delete(l2.BridgeDomainKey(bdName)) - return dsl -} - -// BDFIB adds a request to delete an existing VPP L2 Forwarding Information -// Base. -func (dsl *DeleteDSL) BDFIB(bdName string, mac string) vppclient.DeleteDSL { - dsl.parent.txn.Delete(l2.FibKey(bdName, mac)) - return dsl -} - -// XConnect adds a request to delete an existing VPP Cross Connect. -func (dsl *DeleteDSL) XConnect(rxIfName string) vppclient.DeleteDSL { - dsl.parent.txn.Delete(l2.XConnectKey(rxIfName)) - return dsl -} - -// StaticRoute adds a request to delete an existing VPP L3 Static Route. -func (dsl *DeleteDSL) StaticRoute(vrf uint32, dstAddr string, nextHopAddr string) vppclient.DeleteDSL { - dsl.parent.txn.Delete(l3.RouteKey(vrf, dstAddr, nextHopAddr)) - return dsl -} - -// ACL adds a request to delete an existing VPP Access Control List. 
-func (dsl *DeleteDSL) ACL(aclName string) vppclient.DeleteDSL { - dsl.parent.txn.Delete(acl.Key(aclName)) - return dsl -} - -// L4Features delete request for the L4Features -func (dsl *DeleteDSL) L4Features() vppclient.DeleteDSL { - dsl.parent.txn.Delete(l4.FeatureKey()) - return dsl -} - -// Arp adds a request to delete an existing VPP L3 ARP entry. -func (dsl *DeleteDSL) Arp(ifaceName string, ipAddr string) vppclient.DeleteDSL { - dsl.parent.txn.Delete(l3.ArpEntryKey(ifaceName, ipAddr)) - return dsl -} - -// ProxyArpInterfaces adds a request to delete an existing VPP L3 proxy ARP interfaces -func (dsl *DeleteDSL) ProxyArpInterfaces(label string) vppclient.DeleteDSL { - dsl.parent.txn.Delete(l3.ProxyArpInterfaceKey(label)) - return dsl -} - -// ProxyArpRanges adds a request to delete an existing VPP L3 proxy ARP ranges -func (dsl *DeleteDSL) ProxyArpRanges(label string) vppclient.DeleteDSL { - dsl.parent.txn.Delete(l3.ProxyArpRangeKey(label)) - return dsl -} - -// AppNamespace adds a request to delete an existing VPP Application Namespace. -func (dsl *DeleteDSL) AppNamespace(id string) vppclient.DeleteDSL { - dsl.parent.txn.Delete(l4.AppNamespacesKey(id)) - return dsl -} - -// StnRule adds request to delete Stn rule. 
-func (dsl *DeleteDSL) StnRule(ruleName string) vppclient.DeleteDSL { - dsl.parent.txn.Delete(stn.Key(ruleName)) - return dsl -} - -// NAT44Global adds a request to remove global configuration for NAT44 -func (dsl *DeleteDSL) NAT44Global() vppclient.DeleteDSL { - dsl.parent.txn.Delete(nat.GlobalPrefix) - return dsl -} - -// NAT44DNat adds a request to delete a DNAT configuration -func (dsl *DeleteDSL) NAT44DNat(label string) vppclient.DeleteDSL { - dsl.parent.txn.Delete(nat.DNatKey(label)) - return dsl -} - -// IPSecSA adds request to delete a Security Association -func (dsl *DeleteDSL) IPSecSA(saName string) vppclient.DeleteDSL { - dsl.parent.txn.Delete(ipsec.SAKey(saName)) - return dsl -} - -// IPSecSPD adds request to delete a Security Policy Database -func (dsl *DeleteDSL) IPSecSPD(spdName string) vppclient.DeleteDSL { - dsl.parent.txn.Delete(ipsec.SPDKey(spdName)) - return dsl -} - -// IPSecTunnel adds request to delete an IPSec tunnel -func (dsl *DeleteDSL) IPSecTunnel(name string) vppclient.DeleteDSL { - dsl.parent.txn.Delete(ipsec.TunnelKey(name)) - return dsl -} - -// PuntSocketDeregister adds request to de-register an existing punt to host entry -func (dsl *DeleteDSL) PuntSocketDeregister(name string) vppclient.DeleteDSL { - dsl.parent.txn.Delete(punt.Key(name)) - return dsl -} - - -// Put changes the DSL mode to allow configuration editing. -func (dsl *DeleteDSL) Put() vppclient.PutDSL { - return &PutDSL{dsl.parent} -} - -// Send propagates requested changes to the plugins. -func (dsl *DeleteDSL) Send() vppclient.Reply { - return dsl.parent.Send() -} - -// Reply interface allows to wait for a reply to previously called Send() and -// extract the result from it (success/error). -type Reply struct { - err error -} - -// ReceiveReply waits for a reply to previously called Send() and returns -// the result (error or nil). 
-func (dsl Reply) ReceiveReply() error { - return dsl.err -} diff --git a/clientv1/vpp/dbadapter/data_resync_db.go b/clientv1/vpp/dbadapter/data_resync_db.go deleted file mode 100644 index 674eea7c6d..0000000000 --- a/clientv1/vpp/dbadapter/data_resync_db.go +++ /dev/null @@ -1,367 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dbadapter - -import ( - "github.com/ligato/cn-infra/db/keyval" - "github.com/ligato/vpp-agent/clientv1/vpp" - "github.com/ligato/vpp-agent/plugins/vpp/model/acl" - "github.com/ligato/vpp-agent/plugins/vpp/model/bfd" - "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - intf "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/model/ipsec" - "github.com/ligato/vpp-agent/plugins/vpp/model/l2" - "github.com/ligato/vpp-agent/plugins/vpp/model/l3" - "github.com/ligato/vpp-agent/plugins/vpp/model/l4" - "github.com/ligato/vpp-agent/plugins/vpp/model/nat" - "github.com/ligato/vpp-agent/plugins/vpp/model/punt" - "github.com/ligato/vpp-agent/plugins/vpp/model/stn" -) - -// NewDataResyncDSL returns a new instance of DataResyncDSL which implements -// the data RESYNC DSL for VPP configuration. -// Transaction is used to propagate changes to plugins. -// Function is used to list keys with already existing configuration. 
-func NewDataResyncDSL(txn keyval.ProtoTxn, listKeys func(prefix string) (keyval.ProtoKeyIterator, error)) *DataResyncDSL { - return &DataResyncDSL{txn, []string{}, listKeys} -} - -// DataResyncDSL is an implementation of Domain Specific Language (DSL) for data -// RESYNC of VPP configuration. -type DataResyncDSL struct { - txn keyval.ProtoTxn - txnKeys []string - listKeys func(prefix string) (keyval.ProtoKeyIterator, error) -} - -// Interface adds VPP interface to the RESYNC request. -func (dsl *DataResyncDSL) Interface(val *interfaces.Interfaces_Interface) vppclient.DataResyncDSL { - key := intf.InterfaceKey(val.Name) - dsl.txn.Put(key, val) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// BfdSession adds bidirectional forwarding detection session to the RESYNC -// request. -func (dsl *DataResyncDSL) BfdSession(val *bfd.SingleHopBFD_Session) vppclient.DataResyncDSL { - key := bfd.SessionKey(val.Interface) - dsl.txn.Put(key, val) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// BfdAuthKeys adds bidirectional forwarding detection key to the RESYNC -// request. -func (dsl *DataResyncDSL) BfdAuthKeys(val *bfd.SingleHopBFD_Key) vppclient.DataResyncDSL { - key := bfd.AuthKeysKey(string(val.Id)) - dsl.txn.Put(key, val) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// BfdEchoFunction adds bidirectional forwarding detection echo function -// to the RESYNC request. -func (dsl *DataResyncDSL) BfdEchoFunction(val *bfd.SingleHopBFD_EchoFunction) vppclient.DataResyncDSL { - key := bfd.EchoFunctionKey(val.EchoSourceInterface) - dsl.txn.Put(key, val) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// BD adds Bridge Domain to the RESYNC request. -func (dsl *DataResyncDSL) BD(val *l2.BridgeDomains_BridgeDomain) vppclient.DataResyncDSL { - key := l2.BridgeDomainKey(val.Name) - dsl.txn.Put(key, val) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// BDFIB adds Bridge Domain to the RESYNC request. 
-func (dsl *DataResyncDSL) BDFIB(val *l2.FibTable_FibEntry) vppclient.DataResyncDSL { - key := l2.FibKey(val.BridgeDomain, val.PhysAddress) - dsl.txn.Put(key, val) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// XConnect adds Cross Connect to the RESYNC request. -func (dsl *DataResyncDSL) XConnect(val *l2.XConnectPairs_XConnectPair) vppclient.DataResyncDSL { - key := l2.XConnectKey(val.ReceiveInterface) - dsl.txn.Put(key, val) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// StaticRoute adds L3 Static Route to the RESYNC request. -func (dsl *DataResyncDSL) StaticRoute(val *l3.StaticRoutes_Route) vppclient.DataResyncDSL { - key := l3.RouteKey(val.VrfId, val.DstIpAddr, val.NextHopAddr) - dsl.txn.Put(key, val) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// ACL adds Access Control List to the RESYNC request. -func (dsl *DataResyncDSL) ACL(val *acl.AccessLists_Acl) vppclient.DataResyncDSL { - key := acl.Key(val.AclName) - dsl.txn.Put(key, val) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// L4Features adds L4Features to the RESYNC request -func (dsl *DataResyncDSL) L4Features(val *l4.L4Features) vppclient.DataResyncDSL { - key := l4.FeatureKey() - dsl.txn.Put(key, val) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// AppNamespace adds Application Namespace to the RESYNC request -func (dsl *DataResyncDSL) AppNamespace(val *l4.AppNamespaces_AppNamespace) vppclient.DataResyncDSL { - key := l4.AppNamespacesKey(val.NamespaceId) - dsl.txn.Put(key, val) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// ProxyArpInterfaces adds L3 proxy ARP interfaces to the RESYNC request. -func (dsl *DataResyncDSL) ProxyArpInterfaces(val *l3.ProxyArpInterfaces_InterfaceList) vppclient.DataResyncDSL { - key := l3.ProxyArpInterfaceKey(val.Label) - dsl.txn.Put(key, val) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// ProxyArpRanges adds L3 proxy ARP ranges to the RESYNC request. 
-func (dsl *DataResyncDSL) ProxyArpRanges(val *l3.ProxyArpRanges_RangeList) vppclient.DataResyncDSL { - key := l3.ProxyArpRangeKey(val.Label) - dsl.txn.Put(key, val) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// Arp adds L3 ARP entry to the RESYNC request. -func (dsl *DataResyncDSL) Arp(val *l3.ArpTable_ArpEntry) vppclient.DataResyncDSL { - key := l3.ArpEntryKey(val.Interface, val.IpAddress) - dsl.txn.Put(key, val) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// StnRule adds Stn rule to the RESYNC request. -func (dsl *DataResyncDSL) StnRule(val *stn.STN_Rule) vppclient.DataResyncDSL { - key := stn.Key(val.RuleName) - dsl.txn.Put(key, val) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// NAT44Global adds a request to RESYNC global configuration for NAT44 -func (dsl *DataResyncDSL) NAT44Global(nat44 *nat.Nat44Global) vppclient.DataResyncDSL { - key := nat.GlobalPrefix - dsl.txn.Put(key, nat44) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// NAT44DNat adds a request to RESYNC a new DNAT configuration -func (dsl *DataResyncDSL) NAT44DNat(nat44 *nat.Nat44DNat_DNatConfig) vppclient.DataResyncDSL { - key := nat.DNatKey(nat44.Label) - dsl.txn.Put(key, nat44) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// IPSecSA adds request to create a new Security Association -func (dsl *DataResyncDSL) IPSecSA(sa *ipsec.SecurityAssociations_SA) vppclient.DataResyncDSL { - key := ipsec.SAKey(sa.Name) - dsl.txn.Put(key, sa) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// IPSecSPD adds request to create a new Security Policy Database -func (dsl *DataResyncDSL) IPSecSPD(spd *ipsec.SecurityPolicyDatabases_SPD) vppclient.DataResyncDSL { - key := ipsec.SPDKey(spd.Name) - dsl.txn.Put(key, spd) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// IPSecTunnel adds request to create a new IPSec tunnel -func (dsl *DataResyncDSL) IPSecTunnel(tun *ipsec.TunnelInterfaces_Tunnel) 
vppclient.DataResyncDSL { - key := ipsec.TunnelKey(tun.Name) - dsl.txn.Put(key, tun) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// PuntSocketRegister adds request to RESYNC a new punt to host entry -func (dsl *DataResyncDSL) PuntSocketRegister(puntCfg *punt.Punt) vppclient.DataResyncDSL { - key := punt.Key(puntCfg.Name) - dsl.txn.Put(key, puntCfg) - dsl.txnKeys = append(dsl.txnKeys, key) - - return dsl -} - -// AppendKeys is a helper function that fills the keySet with values -// pointed to by the iterator . -func appendKeys(keys *keySet, it keyval.ProtoKeyIterator) { - for { - k, _, stop := it.GetNext() - if stop { - break - } - - (*keys)[k] = nil - } -} - -// KeySet is a helper type that reuses map keys to store values as a set. -// The values of the map are nil. -type keySet map[string] /*key*/ interface{} /*nil*/ - -// Send propagates the request to the plugins. -// It deletes obsolete keys if listKeys() (from constructor) function is not nil. -func (dsl *DataResyncDSL) Send() vppclient.Reply { - - for dsl.listKeys != nil { - toBeDeleted := keySet{} - - // fill all known keys of one VPP: - keys, err := dsl.listKeys(acl.Prefix) - if err != nil { - break - } - appendKeys(&toBeDeleted, keys) - keys, err = dsl.listKeys(intf.Prefix) - if err != nil { - break - } - appendKeys(&toBeDeleted, keys) - keys, err = dsl.listKeys(nat.Prefix) - if err != nil { - break - } - appendKeys(&toBeDeleted, keys) - keys, err = dsl.listKeys(ipsec.KeyPrefix) - if err != nil { - break - } - appendKeys(&toBeDeleted, keys) - keys, err = dsl.listKeys(bfd.SessionPrefix) - if err != nil { - break - } - appendKeys(&toBeDeleted, keys) - keys, err = dsl.listKeys(bfd.AuthKeysPrefix) - if err != nil { - break - } - appendKeys(&toBeDeleted, keys) - keys, err = dsl.listKeys(bfd.EchoFunctionPrefix) - if err != nil { - break - } - appendKeys(&toBeDeleted, keys) - keys, err = dsl.listKeys(l2.BdPrefix) - if err != nil { - break - } - appendKeys(&toBeDeleted, keys) - keys, err = 
dsl.listKeys(l2.FibPrefix) - if err != nil { - break - } - appendKeys(&toBeDeleted, keys) - keys, err = dsl.listKeys(l2.XConnectPrefix) - if err != nil { - break - } - appendKeys(&toBeDeleted, keys) - keys, err = dsl.listKeys(l3.RoutesPrefix) - if err != nil { - break - } - appendKeys(&toBeDeleted, keys) - keys, err = dsl.listKeys(l3.ArpPrefix) - if err != nil { - break - } - appendKeys(&toBeDeleted, keys) - keys, err = dsl.listKeys(l3.ProxyARPInterfacePrefix) - if err != nil { - break - } - appendKeys(&toBeDeleted, keys) - keys, err = dsl.listKeys(l3.ProxyARPRangePrefix) - if err != nil { - break - } - appendKeys(&toBeDeleted, keys) - keys, err = dsl.listKeys(l4.Prefix) - if err != nil { - break - } - appendKeys(&toBeDeleted, keys) - keys, err = dsl.listKeys(l4.FeaturesPrefix) - if err != nil { - break - } - appendKeys(&toBeDeleted, keys) - keys, err = dsl.listKeys(stn.Prefix) - if err != nil { - break - } - appendKeys(&toBeDeleted, keys) - - // remove keys that are part of the transaction - for _, txnKey := range dsl.txnKeys { - delete(toBeDeleted, txnKey) - } - - for delKey := range toBeDeleted { - dsl.txn.Delete(delKey) - } - - break - } - - err := dsl.txn.Commit() - - return &Reply{err: err} -} diff --git a/clientv1/vpp/grpcadapter/data_change_grpc.go b/clientv1/vpp/grpcadapter/data_change_grpc.go deleted file mode 100644 index 938c94b06e..0000000000 --- a/clientv1/vpp/grpcadapter/data_change_grpc.go +++ /dev/null @@ -1,475 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcadapter - -import ( - "github.com/gogo/protobuf/proto" - "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/vpp-agent/clientv1/vpp" - linuxIf "github.com/ligato/vpp-agent/plugins/linux/model/interfaces" - linuxL3 "github.com/ligato/vpp-agent/plugins/linux/model/l3" - "github.com/ligato/vpp-agent/plugins/vpp/model/acl" - "github.com/ligato/vpp-agent/plugins/vpp/model/bfd" - "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/model/ipsec" - "github.com/ligato/vpp-agent/plugins/vpp/model/l2" - "github.com/ligato/vpp-agent/plugins/vpp/model/l3" - "github.com/ligato/vpp-agent/plugins/vpp/model/l4" - "github.com/ligato/vpp-agent/plugins/vpp/model/nat" - "github.com/ligato/vpp-agent/plugins/vpp/model/punt" - "github.com/ligato/vpp-agent/plugins/vpp/model/rpc" - "github.com/ligato/vpp-agent/plugins/vpp/model/stn" - "golang.org/x/net/context" -) - -// NewDataChangeDSL is a constructor -func NewDataChangeDSL(client rpc.DataChangeServiceClient) *DataChangeDSL { - return &DataChangeDSL{client, make([]proto.Message, 0), make([]proto.Message, 0)} -} - -// DataChangeDSL is used to conveniently assign all the data that are needed for the DataChange. -// This is an implementation of Domain Specific Language (DSL) for a change of the VPP configuration. -type DataChangeDSL struct { - client rpc.DataChangeServiceClient - put []proto.Message - del []proto.Message -} - -// PutDSL allows to add or edit the configuration of delault plugins based on grpc requests. -type PutDSL struct { - parent *DataChangeDSL -} - -// DeleteDSL allows to remove the configuration of delault plugins based on grpc requests. -type DeleteDSL struct { - parent *DataChangeDSL -} - -// Interface creates or updates the network interface. 
-func (dsl *PutDSL) Interface(val *interfaces.Interfaces_Interface) vppclient.PutDSL { - dsl.parent.put = append(dsl.parent.put, val) - return dsl -} - -// BfdSession creates or updates the bidirectional forwarding detection session. -func (dsl *PutDSL) BfdSession(val *bfd.SingleHopBFD_Session) vppclient.PutDSL { - dsl.parent.put = append(dsl.parent.put, val) - return dsl -} - -// BfdAuthKeys creates or updates the bidirectional forwarding detection key. -func (dsl *PutDSL) BfdAuthKeys(val *bfd.SingleHopBFD_Key) vppclient.PutDSL { - dsl.parent.put = append(dsl.parent.put, val) - return dsl -} - -// BfdEchoFunction creates or updates the bidirectional forwarding detection echo function. -func (dsl *PutDSL) BfdEchoFunction(val *bfd.SingleHopBFD_EchoFunction) vppclient.PutDSL { - dsl.parent.put = append(dsl.parent.put, val) - return dsl -} - -// BD creates or updates the Bridge Domain. -func (dsl *PutDSL) BD(val *l2.BridgeDomains_BridgeDomain) vppclient.PutDSL { - dsl.parent.put = append(dsl.parent.put, val) - return dsl -} - -// BDFIB deletes request for the L2 Forwarding Information Base. -func (dsl *PutDSL) BDFIB(val *l2.FibTable_FibEntry) vppclient.PutDSL { - dsl.parent.put = append(dsl.parent.put, val) - return dsl -} - -// XConnect creates or updates the Cross Connect. -func (dsl *PutDSL) XConnect(val *l2.XConnectPairs_XConnectPair) vppclient.PutDSL { - dsl.parent.put = append(dsl.parent.put, val) - return dsl -} - -// StaticRoute creates or updates the L3 Static Route. -func (dsl *PutDSL) StaticRoute(val *l3.StaticRoutes_Route) vppclient.PutDSL { - dsl.parent.put = append(dsl.parent.put, val) - return dsl -} - -// ACL creates or updates request for the Access Control List. -func (dsl *PutDSL) ACL(val *acl.AccessLists_Acl) vppclient.PutDSL { - dsl.parent.put = append(dsl.parent.put, val) - return dsl -} - -// L4Features creates or updates the request for the L4Features. 
-func (dsl *PutDSL) L4Features(val *l4.L4Features) vppclient.PutDSL { - dsl.parent.put = append(dsl.parent.put, val) - return dsl -} - -// AppNamespace creates or updates the request for the Application Namespaces List. -func (dsl *PutDSL) AppNamespace(val *l4.AppNamespaces_AppNamespace) vppclient.PutDSL { - dsl.parent.put = append(dsl.parent.put, val) - return dsl -} - -// Arp adds a request to create or update VPP L3 ARP entry. -func (dsl *PutDSL) Arp(val *l3.ArpTable_ArpEntry) vppclient.PutDSL { - dsl.parent.put = append(dsl.parent.put, val) - return dsl -} - -// ProxyArpInterfaces adds a request to create or update VPP L3 proxy ARP interfaces. -func (dsl *PutDSL) ProxyArpInterfaces(val *l3.ProxyArpInterfaces_InterfaceList) vppclient.PutDSL { - dsl.parent.put = append(dsl.parent.put, val) - return dsl -} - -// ProxyArpRanges adds a request to create or update VPP L3 proxy ARP ranges -func (dsl *PutDSL) ProxyArpRanges(val *l3.ProxyArpRanges_RangeList) vppclient.PutDSL { - dsl.parent.put = append(dsl.parent.put, val) - return dsl -} - -// StnRule adds a request to create or update STN rule. 
-func (dsl *PutDSL) StnRule(val *stn.STN_Rule) vppclient.PutDSL { - dsl.parent.put = append(dsl.parent.put, val) - return dsl -} - -// NAT44Global adds a request to set global configuration for NAT44 -func (dsl *PutDSL) NAT44Global(val *nat.Nat44Global) vppclient.PutDSL { - dsl.parent.put = append(dsl.parent.put, val) - return dsl -} - -// NAT44DNat adds a request to create a new DNAT configuration -func (dsl *PutDSL) NAT44DNat(val *nat.Nat44DNat_DNatConfig) vppclient.PutDSL { - dsl.parent.put = append(dsl.parent.put, val) - return dsl -} - -// IPSecSA adds request to create a new Security Association -func (dsl *PutDSL) IPSecSA(val *ipsec.SecurityAssociations_SA) vppclient.PutDSL { - dsl.parent.put = append(dsl.parent.put, val) - return dsl -} - -// IPSecSPD adds request to create a new Security Policy Database -func (dsl *PutDSL) IPSecSPD(val *ipsec.SecurityPolicyDatabases_SPD) vppclient.PutDSL { - dsl.parent.put = append(dsl.parent.put, val) - return dsl -} - -// IPSecTunnel adds request to create a new IPSec tunnel -func (dsl *PutDSL) IPSecTunnel(val *ipsec.TunnelInterfaces_Tunnel) vppclient.PutDSL { - dsl.parent.put = append(dsl.parent.put, val) - return dsl -} - -// PuntSocketRegister adds request to register a new punt to host entry -func (dsl *PutDSL) PuntSocketRegister(val *punt.Punt) vppclient.PutDSL { - dsl.parent.put = append(dsl.parent.put, val) - return dsl -} - -// Put enables creating Interface/BD... -func (dsl *DataChangeDSL) Put() vppclient.PutDSL { - return &PutDSL{dsl} -} - -// Delete enables deleting Interface/BD... -func (dsl *DataChangeDSL) Delete() vppclient.DeleteDSL { - return &DeleteDSL{dsl} -} - -// Delete enables deleting Interface/BD... -func (dsl *PutDSL) Delete() vppclient.DeleteDSL { - return &DeleteDSL{dsl.parent} -} - -// Send propagates changes to the channels. -func (dsl *PutDSL) Send() vppclient.Reply { - return dsl.parent.Send() -} - -// Interface deletes request for the network interface. 
-func (dsl *DeleteDSL) Interface(interfaceName string) vppclient.DeleteDSL { - dsl.parent.del = append(dsl.parent.del, &interfaces.Interfaces_Interface{ - Name: interfaceName, - }) - return dsl -} - -// BfdSession adds a request to delete an existing bidirectional forwarding -// detection session. -func (dsl *DeleteDSL) BfdSession(ifName string) vppclient.DeleteDSL { - dsl.parent.del = append(dsl.parent.del, &bfd.SingleHopBFD_Session{ - Interface: ifName, - }) - return dsl -} - -// BfdAuthKeys adds a request to delete an existing bidirectional forwarding -// detection key. -func (dsl *DeleteDSL) BfdAuthKeys(bfdKeyID string) vppclient.DeleteDSL { - dsl.parent.del = append(dsl.parent.del, &bfd.SingleHopBFD_Key{ - Name: bfdKeyID, - }) - return dsl -} - -// BfdEchoFunction adds a request to delete an existing bidirectional forwarding -// detection echo function. -func (dsl *DeleteDSL) BfdEchoFunction(bfdEchoName string) vppclient.DeleteDSL { - dsl.parent.del = append(dsl.parent.del, &bfd.SingleHopBFD_EchoFunction{ - Name: bfdEchoName, - }) - return dsl -} - -// BD deletes request for the Bridge Domain. -func (dsl *DeleteDSL) BD(bdName string) vppclient.DeleteDSL { - dsl.parent.del = append(dsl.parent.del, &l2.BridgeDomains_BridgeDomain{ - Name: bdName, - }) - return dsl -} - -// BDFIB deletes request for the L2 Forwarding Information Base. -func (dsl *DeleteDSL) BDFIB(bdName string, mac string) vppclient.DeleteDSL { - dsl.parent.del = append(dsl.parent.del, &l2.FibTable_FibEntry{ - PhysAddress: mac, - BridgeDomain: bdName, - }) - return dsl -} - -// XConnect deletes the Cross Connect. -func (dsl *DeleteDSL) XConnect(rxIfName string) vppclient.DeleteDSL { - dsl.parent.del = append(dsl.parent.del, &l2.XConnectPairs_XConnectPair{ - ReceiveInterface: rxIfName, - }) - return dsl -} - -// StaticRoute deletes the L3 Static Route. 
-func (dsl *DeleteDSL) StaticRoute(vrf uint32, dstAddr string, nextHopAddr string) vppclient.DeleteDSL { - dsl.parent.del = append(dsl.parent.del, &l3.StaticRoutes_Route{ - VrfId: vrf, - DstIpAddr: dstAddr, - NextHopAddr: nextHopAddr, - }) - return dsl -} - -// ACL deletes request for Access Control List. -func (dsl *DeleteDSL) ACL(aclName string) vppclient.DeleteDSL { - dsl.parent.del = append(dsl.parent.del, &acl.AccessLists_Acl{ - AclName: aclName, - }) - return dsl -} - -// L4Features deletes request for the L4Features. -func (dsl *DeleteDSL) L4Features() vppclient.DeleteDSL { - dsl.parent.del = append(dsl.parent.del, &l4.L4Features{}) - return dsl -} - -// AppNamespace delets request for the Application Namespaces List. -func (dsl *DeleteDSL) AppNamespace(id string) vppclient.DeleteDSL { - dsl.parent.del = append(dsl.parent.del, &l4.AppNamespaces_AppNamespace{ - NamespaceId: id, - }) - return dsl -} - -// Arp adds a request to delete an existing VPP L3 ARP entry. -func (dsl *DeleteDSL) Arp(ifaceName string, ipAddr string) vppclient.DeleteDSL { - dsl.parent.del = append(dsl.parent.del, &l3.ArpTable_ArpEntry{ - Interface: ifaceName, - IpAddress: ipAddr, - }) - return dsl -} - -// ProxyArpInterfaces adds a request to delete an existing VPP L3 proxy ARP interfaces -func (dsl *DeleteDSL) ProxyArpInterfaces(label string) vppclient.DeleteDSL { - dsl.parent.del = append(dsl.parent.del, &l3.ProxyArpInterfaces_InterfaceList{ - Label: label, - }) - return dsl -} - -// ProxyArpRanges adds a request to delete an existing VPP L3 proxy ARP ranges -func (dsl *DeleteDSL) ProxyArpRanges(label string) vppclient.DeleteDSL { - dsl.parent.del = append(dsl.parent.del, &l3.ProxyArpRanges_RangeList{ - Label: label, - }) - return dsl -} - -// StnRule adds request to delete Stn rule. 
-func (dsl *DeleteDSL) StnRule(name string) vppclient.DeleteDSL { - dsl.parent.del = append(dsl.parent.del, &stn.STN_Rule{ - RuleName: name, - }) - return dsl -} - -// NAT44Global adds a request to remove global configuration for NAT44 -func (dsl *DeleteDSL) NAT44Global() vppclient.DeleteDSL { - dsl.parent.del = append(dsl.parent.del, &nat.Nat44Global{}) - return dsl -} - -// NAT44DNat adds a request to delete a DNAT configuration -func (dsl *DeleteDSL) NAT44DNat(label string) vppclient.DeleteDSL { - dsl.parent.del = append(dsl.parent.del, &nat.Nat44DNat_DNatConfig{ - Label: label, - }) - return dsl -} - -// IPSecSA adds request to delete a Security Association -func (dsl *DeleteDSL) IPSecSA(name string) vppclient.DeleteDSL { - dsl.parent.del = append(dsl.parent.del, &ipsec.SecurityAssociations_SA{ - Name: name, - }) - return dsl -} - -// IPSecSPD adds request to delete a Security Policy Database -func (dsl *DeleteDSL) IPSecSPD(name string) vppclient.DeleteDSL { - dsl.parent.del = append(dsl.parent.del, &ipsec.SecurityPolicyDatabases_SPD{ - Name: name, - }) - return dsl -} - -// IPSecTunnel adds request to delete a IPSec tunnel -func (dsl *DeleteDSL) IPSecTunnel(name string) vppclient.DeleteDSL { - dsl.parent.del = append(dsl.parent.del, &ipsec.TunnelInterfaces_Tunnel{ - Name: name, - }) - return dsl -} - -// PuntSocketDeregister adds request to de-register an existing punt to host entry -func (dsl *DeleteDSL) PuntSocketDeregister(name string) vppclient.DeleteDSL { - dsl.parent.del = append(dsl.parent.del, &punt.Punt{ - Name: name, - }) - return dsl -} - -// Put enables creating Interface/BD... -func (dsl *DeleteDSL) Put() vppclient.PutDSL { - return &PutDSL{dsl.parent} -} - -// Send propagates changes to the channels. -func (dsl *DeleteDSL) Send() vppclient.Reply { - return dsl.parent.Send() -} - -// Send propagates changes to the channels. 
-func (dsl *DataChangeDSL) Send() vppclient.Reply { - var wasErr error - - // Prepare requests with data todo can be scalable - delRequest := getRequestFromData(dsl.del) - putRequest := getRequestFromData(dsl.put) - - ctx := context.Background() - - if _, err := dsl.client.Del(ctx, delRequest); err != nil { - wasErr = err - } - if _, err := dsl.client.Put(ctx, putRequest); err != nil { - wasErr = err - } - - return &Reply{wasErr} -} - -func getRequestFromData(data []proto.Message) *rpc.DataRequest { - request := &rpc.DataRequest{} - for _, item := range data { - switch typedItem := item.(type) { - case *acl.AccessLists_Acl: - request.AccessLists = append(request.AccessLists, typedItem) - case *interfaces.Interfaces_Interface: - request.Interfaces = append(request.Interfaces, typedItem) - case *ipsec.SecurityPolicyDatabases_SPD: - request.SPDs = append(request.SPDs, typedItem) - case *ipsec.SecurityAssociations_SA: - request.SAs = append(request.SAs, typedItem) - case *ipsec.TunnelInterfaces_Tunnel: - request.Tunnels = append(request.Tunnels, typedItem) - case *bfd.SingleHopBFD_Session: - request.BfdSessions = append(request.BfdSessions, typedItem) - case *bfd.SingleHopBFD_Key: - request.BfdAuthKeys = append(request.BfdAuthKeys, typedItem) - case *bfd.SingleHopBFD_EchoFunction: - request.BfdEchoFunction = typedItem - case *l2.BridgeDomains_BridgeDomain: - request.BridgeDomains = append(request.BridgeDomains, typedItem) - case *l2.FibTable_FibEntry: - request.FIBs = append(request.FIBs, typedItem) - case *l2.XConnectPairs_XConnectPair: - request.XCons = append(request.XCons, typedItem) - case *l3.StaticRoutes_Route: - request.StaticRoutes = append(request.StaticRoutes, typedItem) - case *l3.ArpTable_ArpEntry: - request.ArpEntries = append(request.ArpEntries, typedItem) - case *l3.ProxyArpInterfaces_InterfaceList: - request.ProxyArpInterfaces = append(request.ProxyArpInterfaces, typedItem) - case *l3.ProxyArpRanges_RangeList: - request.ProxyArpRanges = 
append(request.ProxyArpRanges, typedItem) - case *l4.L4Features: - request.L4Feature = typedItem - case *l4.AppNamespaces_AppNamespace: - request.ApplicationNamespaces = append(request.ApplicationNamespaces, typedItem) - case *stn.STN_Rule: - request.StnRules = append(request.StnRules, typedItem) - case *nat.Nat44Global: - request.NatGlobal = typedItem - case *nat.Nat44DNat_DNatConfig: - request.DNATs = append(request.DNATs, typedItem) - case *punt.Punt: - request.Punts = append(request.Punts, typedItem) - case *linuxIf.LinuxInterfaces_Interface: - request.LinuxInterfaces = append(request.LinuxInterfaces, typedItem) - case *linuxL3.LinuxStaticArpEntries_ArpEntry: - request.LinuxArpEntries = append(request.LinuxArpEntries, typedItem) - case *linuxL3.LinuxStaticRoutes_Route: - request.LinuxRoutes = append(request.LinuxRoutes, typedItem) - default: - logrus.DefaultLogger().Warnf("Unsupported data for GRPC request: %v", typedItem) - } - } - - return request -} - -// Reply enables waiting for the reply and getting result (success/error). -type Reply struct { - err error -} - -// ReceiveReply returns error or nil. -func (dsl Reply) ReceiveReply() error { - return dsl.err -} diff --git a/clientv1/vpp/grpcadapter/data_dump_grpc.go b/clientv1/vpp/grpcadapter/data_dump_grpc.go deleted file mode 100644 index 59af8145b5..0000000000 --- a/clientv1/vpp/grpcadapter/data_dump_grpc.go +++ /dev/null @@ -1,341 +0,0 @@ -// Copyright (c) 2018 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcadapter - -import ( - "context" - - "github.com/ligato/vpp-agent/plugins/vpp/model/punt" - - "github.com/gogo/protobuf/proto" - "github.com/ligato/vpp-agent/clientv1/vpp" - linuxIf "github.com/ligato/vpp-agent/plugins/linux/model/interfaces" - linuxL3 "github.com/ligato/vpp-agent/plugins/linux/model/l3" - "github.com/ligato/vpp-agent/plugins/vpp/model/acl" - "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/model/ipsec" - "github.com/ligato/vpp-agent/plugins/vpp/model/l2" - "github.com/ligato/vpp-agent/plugins/vpp/model/l3" - "github.com/ligato/vpp-agent/plugins/vpp/model/rpc" -) - -// NewDataDumpDSL is a constructor -func NewDataDumpDSL(client rpc.DataDumpServiceClient) *DataDumpDSL { - return &DataDumpDSL{ - client: client, - } -} - -// DataDumpDSL is used to conveniently assign all the data that are needed for the Data read. -// This is an implementation of Domain Specific Language (DSL) for a change of the VPP configuration. -type DataDumpDSL struct { - client rpc.DataDumpServiceClient - dump []proto.Message -} - -// DumpDSL allows to read the configuration of default plugins based on grpc requests. -type DumpDSL struct { - parent *DataDumpDSL -} - -// Dump enables reading Interface/BD... 
-func (dsl *DataDumpDSL) Dump() vppclient.DumpDSL { - return &DumpDSL{dsl} -} - -// ACLs adds a request to read an existing VPP access lists -func (dsl *DumpDSL) ACLs() vppclient.DumpDSL { - dsl.parent.dump = append(dsl.parent.dump, &acl.AccessLists_Acl{}) - return dsl -} - -// Interfaces adds a request to read an existing VPP interfaces -func (dsl *DumpDSL) Interfaces() vppclient.DumpDSL { - dsl.parent.dump = append(dsl.parent.dump, &interfaces.Interfaces_Interface{}) - return dsl -} - -// IPSecSPDs adds a request to read an existing IPSec SPDs -func (dsl *DumpDSL) IPSecSPDs() vppclient.DumpDSL { - dsl.parent.dump = append(dsl.parent.dump, &ipsec.SecurityPolicyDatabases_SPD{}) - return dsl -} - -// IPSecSAs adds a request to read an existing IPSec SAs -func (dsl *DumpDSL) IPSecSAs() vppclient.DumpDSL { - dsl.parent.dump = append(dsl.parent.dump, &ipsec.SecurityAssociations_SA{}) - return dsl -} - -// IPSecTunnels adds a request to read an existing IPSec tunnels -func (dsl *DumpDSL) IPSecTunnels() vppclient.DumpDSL { - dsl.parent.dump = append(dsl.parent.dump, &ipsec.TunnelInterfaces_Tunnel{}) - return dsl -} - -// BDs adds a request to read an existing bridge domains -func (dsl *DumpDSL) BDs() vppclient.DumpDSL { - dsl.parent.dump = append(dsl.parent.dump, &l2.BridgeDomains_BridgeDomain{}) - return dsl -} - -// FIBs adds a request to read an existing FIBs -func (dsl *DumpDSL) FIBs() vppclient.DumpDSL { - dsl.parent.dump = append(dsl.parent.dump, &l2.FibTable_FibEntry{}) - return dsl -} - -// XConnects adds a request to read an existing cross connects -func (dsl *DumpDSL) XConnects() vppclient.DumpDSL { - dsl.parent.dump = append(dsl.parent.dump, &l2.XConnectPairs_XConnectPair{}) - return dsl -} - -// Routes adds a request to read an existing VPP routes -func (dsl *DumpDSL) Routes() vppclient.DumpDSL { - dsl.parent.dump = append(dsl.parent.dump, &l3.StaticRoutes_Route{}) - return dsl -} - -// ARPs adds a request to read an existing VPP ARPs -func (dsl *DumpDSL) 
ARPs() vppclient.DumpDSL { - dsl.parent.dump = append(dsl.parent.dump, &l3.ArpTable_ArpEntry{}) - return dsl -} - -// PuntRegistrations adds a request to read punt socket registrations. -func (dsl *DumpDSL) PuntRegistrations() vppclient.DumpDSL { - dsl.parent.dump = append(dsl.parent.dump, &punt.Punt{}) - return dsl -} - -// LinuxInterfaces adds a request to read an existing linux interfaces -func (dsl *DumpDSL) LinuxInterfaces() vppclient.DumpDSL { - dsl.parent.dump = append(dsl.parent.dump, &linuxIf.LinuxInterfaces_Interface{}) - return dsl -} - -// LinuxARPs adds a request to read an existing linux ARPs -func (dsl *DumpDSL) LinuxARPs() vppclient.DumpDSL { - dsl.parent.dump = append(dsl.parent.dump, &linuxL3.LinuxStaticArpEntries_ArpEntry{}) - return dsl -} - -// LinuxRoutes adds a request to read an existing linux routes -func (dsl *DumpDSL) LinuxRoutes() vppclient.DumpDSL { - dsl.parent.dump = append(dsl.parent.dump, &linuxL3.LinuxStaticRoutes_Route{}) - return dsl -} - -// Send propagates request -func (dsl *DumpDSL) Send() vppclient.DumpReply { - return dsl.parent.Send() -} - -// Send propagates request -func (dsl *DataDumpDSL) Send() vppclient.DumpReply { - ctx := context.Background() - - rd := &replyData{} - - for _, dataType := range dsl.dump { - request := &rpc.DumpRequest{} - - switch dataType.(type) { - case *acl.AccessLists_Acl: - resp, err := dsl.client.DumpAcls(ctx, request) - if err != nil { - return &GetReply{&replyData{err: err}} - } - rd.acl = resp.AccessLists - - case *interfaces.Interfaces_Interface: - resp, err := dsl.client.DumpInterfaces(ctx, request) - if err != nil { - return &GetReply{&replyData{err: err}} - } - rd.ifs = resp.Interfaces - case *ipsec.SecurityPolicyDatabases_SPD: - resp, err := dsl.client.DumpIPSecSPDs(ctx, request) - if err != nil { - return &GetReply{&replyData{err: err}} - } - rd.spds = resp.SPDs - case *ipsec.SecurityAssociations_SA: - resp, err := dsl.client.DumpIPSecSAs(ctx, request) - if err != nil { - return 
&GetReply{&replyData{err: err}} - } - rd.sas = resp.SAa - case *ipsec.TunnelInterfaces_Tunnel: - resp, err := dsl.client.DumpIPSecTunnels(ctx, request) - if err != nil { - return &GetReply{&replyData{err: err}} - } - rd.tuns = resp.Tunnels - case *l2.BridgeDomains_BridgeDomain: - resp, err := dsl.client.DumpBDs(ctx, request) - if err != nil { - return &GetReply{&replyData{err: err}} - } - rd.bds = resp.BridgeDomains - case *l2.FibTable_FibEntry: - resp, err := dsl.client.DumpFIBs(ctx, request) - if err != nil { - return &GetReply{&replyData{err: err}} - } - rd.fibs = resp.FIBs - case *l2.XConnectPairs_XConnectPair: - resp, err := dsl.client.DumpXConnects(ctx, request) - if err != nil { - return &GetReply{&replyData{err: err}} - } - rd.xcs = resp.XCons - case *l3.ArpTable_ArpEntry: - resp, err := dsl.client.DumpARPs(ctx, request) - if err != nil { - return &GetReply{&replyData{err: err}} - } - rd.arps = resp.ArpEntries - case *l3.StaticRoutes_Route: - resp, err := dsl.client.DumpRoutes(ctx, request) - if err != nil { - return &GetReply{&replyData{err: err}} - } - rd.routes = resp.StaticRoutes - case *punt.Punt: - resp, err := dsl.client.DumpPunt(ctx, request) - if err != nil { - return &GetReply{&replyData{err: err}} - } - rd.punts = resp.PuntEntries - case *linuxIf.LinuxInterfaces_Interface: - resp, err := dsl.client.DumpLinuxInterfaces(ctx, request) - if err != nil { - return &GetReply{&replyData{err: err}} - } - rd.linuxIfs = resp.LinuxInterfaces - case *linuxL3.LinuxStaticArpEntries: - resp, err := dsl.client.DumpLinuxARPs(ctx, request) - if err != nil { - return &GetReply{&replyData{err: err}} - } - rd.linuxArps = resp.LinuxArpEntries - case *linuxL3.LinuxStaticRoutes_Route: - resp, err := dsl.client.DumpLinuxRoutes(ctx, request) - if err != nil { - return &GetReply{&replyData{err: err}} - } - rd.linuxRoutes = resp.LinuxRoutes - } - } - - return &GetReply{rd} -} - -// GetReply enables waiting for the reply and getting result (data list/error). 
-type GetReply struct { - rd *replyData -} - -// replyData is helper struct implementing ReplyData interface and allows to read typed data from the reply -type replyData struct { - err error - - acl []*acl.AccessLists_Acl - ifs []*interfaces.Interfaces_Interface - spds []*ipsec.SecurityPolicyDatabases_SPD - sas []*ipsec.SecurityAssociations_SA - tuns []*ipsec.TunnelInterfaces_Tunnel - bds []*l2.BridgeDomains_BridgeDomain - fibs []*l2.FibTable_FibEntry - xcs []*l2.XConnectPairs_XConnectPair - routes []*l3.StaticRoutes_Route - arps []*l3.ArpTable_ArpEntry - punts []*rpc.PuntResponse_PuntEntry - linuxIfs []*linuxIf.LinuxInterfaces_Interface - linuxArps []*linuxL3.LinuxStaticArpEntries_ArpEntry - linuxRoutes []*linuxL3.LinuxStaticRoutes_Route -} - -// ReceiveReply returns all the data and error -func (reply *GetReply) ReceiveReply() (vppclient.ReplyData, error) { - return reply.rd, reply.rd.err -} - -// GetACLs returns all access lists from the reply -func (rd *replyData) GetACLs() []*acl.AccessLists_Acl { - return rd.acl -} - -// GetInterfaces returns all the interfaces from the reply -func (rd *replyData) GetInterfaces() []*interfaces.Interfaces_Interface { - return rd.ifs -} - -// GetIPSecSPDs returns all the IPSec SPDs from the reply -func (rd *replyData) GetIPSecSPDs() []*ipsec.SecurityPolicyDatabases_SPD { - return rd.spds -} - -// GetIPSecSAs returns all the IPSec SAa from the reply -func (rd *replyData) GetIPSecSAs() []*ipsec.SecurityAssociations_SA { - return rd.sas -} - -// GetBDs returns all the bridge domains from the reply -func (rd *replyData) GetBDs() []*l2.BridgeDomains_BridgeDomain { - return rd.bds -} - -// GetFIBs returns all the FIB entries from the reply -func (rd *replyData) GetFIBs() []*l2.FibTable_FibEntry { - return rd.fibs -} - -// GetXConnects returns all the XConnects from the reply -func (rd *replyData) GetXConnects() []*l2.XConnectPairs_XConnectPair { - return rd.xcs -} - -// GetARPs returns all the ARPs from the reply -func (rd 
*replyData) GetARPs() []*l3.ArpTable_ArpEntry { - return rd.arps -} - -// GetRoutes returns all the routes from the reply -func (rd *replyData) GetRoutes() []*l3.StaticRoutes_Route { - return rd.routes -} - -// GetPunts returns all the punt registrations from the reply -func (rd *replyData) GetPunts() []*rpc.PuntResponse_PuntEntry { - return rd.punts -} - -// GetLinuxInterfaces returns all the linux interfaces from the reply -func (rd *replyData) GetLinuxInterfaces() []*linuxIf.LinuxInterfaces_Interface { - return rd.linuxIfs -} - -// GetLinuxARPs returns all the linux ARPs from the reply -func (rd *replyData) GetLinuxARPs() []*linuxL3.LinuxStaticArpEntries_ArpEntry { - return rd.linuxArps -} - -// GetLinuxRoutes returns all the linux routes from the reply -func (rd *replyData) GetLinuxRoutes() []*linuxL3.LinuxStaticRoutes_Route { - return rd.linuxRoutes -} diff --git a/clientv1/vpp/grpcadapter/data_resync_grpc.go b/clientv1/vpp/grpcadapter/data_resync_grpc.go deleted file mode 100644 index 559b4d84e9..0000000000 --- a/clientv1/vpp/grpcadapter/data_resync_grpc.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package grpcadapter - -import ( - "github.com/gogo/protobuf/proto" - "github.com/ligato/vpp-agent/clientv1/vpp" - "github.com/ligato/vpp-agent/plugins/vpp/model/acl" - "github.com/ligato/vpp-agent/plugins/vpp/model/bfd" - "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/model/ipsec" - "github.com/ligato/vpp-agent/plugins/vpp/model/l2" - "github.com/ligato/vpp-agent/plugins/vpp/model/l3" - "github.com/ligato/vpp-agent/plugins/vpp/model/l4" - "github.com/ligato/vpp-agent/plugins/vpp/model/nat" - "github.com/ligato/vpp-agent/plugins/vpp/model/punt" - "github.com/ligato/vpp-agent/plugins/vpp/model/rpc" - "github.com/ligato/vpp-agent/plugins/vpp/model/stn" - "golang.org/x/net/context" -) - -// NewDataResyncDSL is a constructor. -func NewDataResyncDSL(client rpc.DataResyncServiceClient) *DataResyncDSL { - return &DataResyncDSL{client, make([]proto.Message, 0)} -} - -// DataResyncDSL is used to conveniently assign all the data that are needed for the RESYNC. -// This is implementation of Domain Specific Language (DSL) for data RESYNC of the VPP configuration. -type DataResyncDSL struct { - client rpc.DataResyncServiceClient - put []proto.Message -} - -// Interface adds Bridge Domain to the RESYNC request. -func (dsl *DataResyncDSL) Interface(val *interfaces.Interfaces_Interface) vppclient.DataResyncDSL { - dsl.put = append(dsl.put, val) - return dsl -} - -// BfdSession adds BFD session to the RESYNC request. -func (dsl *DataResyncDSL) BfdSession(val *bfd.SingleHopBFD_Session) vppclient.DataResyncDSL { - dsl.put = append(dsl.put, val) - return dsl -} - -// BfdAuthKeys adds BFD key to the RESYNC request. -func (dsl *DataResyncDSL) BfdAuthKeys(val *bfd.SingleHopBFD_Key) vppclient.DataResyncDSL { - dsl.put = append(dsl.put, val) - return dsl -} - -// BfdEchoFunction adds BFD echo function to the RESYNC request. 
-func (dsl *DataResyncDSL) BfdEchoFunction(val *bfd.SingleHopBFD_EchoFunction) vppclient.DataResyncDSL { - dsl.put = append(dsl.put, val) - return dsl -} - -// BD adds Bridge Domain to the RESYNC request. -func (dsl *DataResyncDSL) BD(val *l2.BridgeDomains_BridgeDomain) vppclient.DataResyncDSL { - dsl.put = append(dsl.put, val) - return dsl -} - -// BDFIB adds Bridge Domain to the RESYNC request. -func (dsl *DataResyncDSL) BDFIB(val *l2.FibTable_FibEntry) vppclient.DataResyncDSL { - dsl.put = append(dsl.put, val) - return dsl -} - -// XConnect adds Cross Connect to the RESYNC request. -func (dsl *DataResyncDSL) XConnect(val *l2.XConnectPairs_XConnectPair) vppclient.DataResyncDSL { - dsl.put = append(dsl.put, val) - return dsl -} - -// StaticRoute adds L3 Static Route to the RESYNC request. -func (dsl *DataResyncDSL) StaticRoute(val *l3.StaticRoutes_Route) vppclient.DataResyncDSL { - dsl.put = append(dsl.put, val) - return dsl -} - -// ACL adds Access Control List to the RESYNC request. -func (dsl *DataResyncDSL) ACL(val *acl.AccessLists_Acl) vppclient.DataResyncDSL { - dsl.put = append(dsl.put, val) - return dsl -} - -// L4Features adds L4Features to the RESYNC request. -func (dsl *DataResyncDSL) L4Features(val *l4.L4Features) vppclient.DataResyncDSL { - dsl.put = append(dsl.put, val) - return dsl -} - -// AppNamespace adds Application Namespace to the RESYNC request. -func (dsl *DataResyncDSL) AppNamespace(val *l4.AppNamespaces_AppNamespace) vppclient.DataResyncDSL { - dsl.put = append(dsl.put, val) - return dsl -} - -// Arp adds VPP L3 ARP to the RESYNC request. -func (dsl *DataResyncDSL) Arp(val *l3.ArpTable_ArpEntry) vppclient.DataResyncDSL { - dsl.put = append(dsl.put, val) - return dsl -} - -// ProxyArpInterfaces adds L3 proxy ARP interfaces to the RESYNC request. 
-func (dsl *DataResyncDSL) ProxyArpInterfaces(val *l3.ProxyArpInterfaces_InterfaceList) vppclient.DataResyncDSL { - dsl.put = append(dsl.put, val) - return dsl -} - -// ProxyArpRanges adds L3 proxy ARP ranges to the RESYNC request. -func (dsl *DataResyncDSL) ProxyArpRanges(val *l3.ProxyArpRanges_RangeList) vppclient.DataResyncDSL { - dsl.put = append(dsl.put, val) - return dsl -} - -// StnRule adds Stn rule to the RESYNC request. -func (dsl *DataResyncDSL) StnRule(val *stn.STN_Rule) vppclient.DataResyncDSL { - dsl.put = append(dsl.put, val) - return dsl -} - -// NAT44Global adds a request to RESYNC global configuration for NAT44 -func (dsl *DataResyncDSL) NAT44Global(val *nat.Nat44Global) vppclient.DataResyncDSL { - dsl.put = append(dsl.put, val) - return dsl -} - -// NAT44DNat adds a request to RESYNC a new DNAT configuration -func (dsl *DataResyncDSL) NAT44DNat(val *nat.Nat44DNat_DNatConfig) vppclient.DataResyncDSL { - dsl.put = append(dsl.put, val) - return dsl -} - -// IPSecSA adds request to create a new Security Association -func (dsl *DataResyncDSL) IPSecSA(val *ipsec.SecurityAssociations_SA) vppclient.DataResyncDSL { - dsl.put = append(dsl.put, val) - return dsl -} - -// IPSecSPD adds request to create a new Security Policy Database -func (dsl *DataResyncDSL) IPSecSPD(val *ipsec.SecurityPolicyDatabases_SPD) vppclient.DataResyncDSL { - dsl.put = append(dsl.put, val) - return dsl -} - -// IPSecTunnel adds request to create a new IPSec tunnel -func (dsl *DataResyncDSL) IPSecTunnel(val *ipsec.TunnelInterfaces_Tunnel) vppclient.DataResyncDSL { - dsl.put = append(dsl.put, val) - return dsl -} - -// PuntSocketRegister adds request to RESYNC a new punt to host entry -func (dsl *DataResyncDSL) PuntSocketRegister(val *punt.Punt) vppclient.DataResyncDSL { - dsl.put = append(dsl.put, val) - return dsl -} - -// Send propagates the request to the plugins. It deletes obsolete keys if listKeys() function is not null. 
-// The listkeys() function is used to list all current keys. -func (dsl *DataResyncDSL) Send() vppclient.Reply { - var wasErr error - - // Prepare requests with data todo can be scalable - resyncRequest := getRequestFromData(dsl.put) - - ctx := context.Background() - - if _, err := dsl.client.Resync(ctx, resyncRequest); err != nil { - wasErr = err - } - - return &Reply{err: wasErr} -} diff --git a/clientv1/vpp/grpcadapter/doc.go b/clientv1/vpp/grpcadapter/doc.go deleted file mode 100644 index 7970927ab5..0000000000 --- a/clientv1/vpp/grpcadapter/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package grpcadapter implements Domain Specific Language (DSL) for resync and -// change of VPP configuration using GRPC client. -package grpcadapter diff --git a/clientv1/vpp/localclient/localclient_api.go b/clientv1/vpp/localclient/localclient_api.go deleted file mode 100644 index 28637716a7..0000000000 --- a/clientv1/vpp/localclient/localclient_api.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package localclient - -import ( - "github.com/ligato/cn-infra/datasync/kvdbsync/local" - "github.com/ligato/vpp-agent/clientv1/vpp" - "github.com/ligato/vpp-agent/clientv1/vpp/dbadapter" -) - -// PluginID defines the name of VPP (vppplugin) localclient plugin. 
-//const PluginID core.PluginName = "DefaultVppPlugins_LOCAL_CLIENT" - -// DataResyncRequest allows creating a RESYNC request using convenient RESYNC -// DSL and sending it locally through go channels (i.e. without using Data Store). -func DataResyncRequest(caller string) vppclient.DataResyncDSL { - return dbadapter.NewDataResyncDSL(local.NewProtoTxn(local.Get().PropagateResync), - nil /*no need to list anything*/) -} - -// DataChangeRequest allows creating Data Change request(s) using convenient -// Data Change DSL and sending it locally through go channels (i.e. without using -// Data Store). -func DataChangeRequest(caller string) vppclient.DataChangeDSL { - return dbadapter.NewDataChangeDSL(local.NewProtoTxn(local.Get().PropagateChanges)) -} diff --git a/clientv1/vpp/remoteclient/remoteclient_api.go b/clientv1/vpp/remoteclient/remoteclient_api.go deleted file mode 100644 index 13058ca24c..0000000000 --- a/clientv1/vpp/remoteclient/remoteclient_api.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package remoteclient - -import ( - "github.com/ligato/cn-infra/db/keyval" - "github.com/ligato/vpp-agent/clientv1/vpp" - "github.com/ligato/vpp-agent/clientv1/vpp/dbadapter" - "github.com/ligato/vpp-agent/clientv1/vpp/grpcadapter" - "github.com/ligato/vpp-agent/plugins/vpp/model/rpc" -) - -// DataResyncRequestDB allows creating a RESYNC request using convenient RESYNC -// DSL and sending it through the provided . -// User of the API does not need to be aware of keys. -// User of the API does not need to delete the obsolete objects/keys -// prior to RESYNC - it is handled by DataResyncDSL. -func DataResyncRequestDB(broker keyval.ProtoBroker) vppclient.DataResyncDSL { - return dbadapter.NewDataResyncDSL(broker.NewTxn(), broker.ListKeys) -} - -// DataChangeRequestDB allows createing Data Change requests using convenient -// Data Change DSL and sending it through the provided . -// User of the API does not need to be aware of keys. -func DataChangeRequestDB(broker keyval.ProtoBroker) vppclient.DataChangeDSL { - return dbadapter.NewDataChangeDSL(broker.NewTxn()) -} - -// DataResyncRequestGRPC allows sending RESYNC requests conveniently. -// User of the API does not need to be aware of keys. -// User of the API does not need to delete the obsolete objects/keys during RESYNC. -func DataResyncRequestGRPC(client rpc.DataResyncServiceClient) vppclient.DataResyncDSL { - return grpcadapter.NewDataResyncDSL(client) -} - -// DataChangeRequestGRPC allows sending Data Change requests conveniently (even without directly using Broker). -// User of the API does not need to be aware of keys. -func DataChangeRequestGRPC(client rpc.DataChangeServiceClient) vppclient.DataChangeDSL { - return grpcadapter.NewDataChangeDSL(client) -} - -// DataDumpRequestGRPC allows sending 'Dump' data requests conveniently (even without directly using Broker). -// User of the API does not need to be aware of keys. 
-func DataDumpRequestGRPC(client rpc.DataDumpServiceClient) vppclient.DataDumpDSL { - return grpcadapter.NewDataDumpDSL(client) -} diff --git a/clientv2/README.md b/clientv2/README.md new file mode 100644 index 0000000000..f891a9395e --- /dev/null +++ b/clientv2/README.md @@ -0,0 +1,23 @@ +# Client v2 + +Client v2 (i.e. the second version) defines an API that allows to manage +configuration of VPP and Linux plugins. +How the configuration is transported between APIs and the plugins +is fully abstracted from the user. + +The API calls can be split into two groups: + - **resync** applies a given (full) configuration. An existing + configuration, if present, is replaced. The name is an abbreviation + of *resynchronization*. It is used initially and after any system + event that may leave the configuration out-of-sync while the set + of outdated configuration options is impossible to determine locally + (e.g. temporarily lost connection to data store). + - **data change** allows to deliver incremental changes + of a configuration. + +There are two implementations: + - **local client** runs inside the same process as the agent + and delivers configuration through go channels directly + to the plugins. + - **remote client** stores the configuration using the given + `keyval.broker`. diff --git a/clientv2/doc.go b/clientv2/doc.go new file mode 100644 index 0000000000..4f8d4b6083 --- /dev/null +++ b/clientv2/doc.go @@ -0,0 +1,3 @@ +// Package clientv2 provides clients for local and remote management of VPP +// and Linux configuration via VPP Agent plugins. +package clientv2 diff --git a/clientv2/linux/data_change_api.go b/clientv2/linux/data_change_api.go new file mode 100644 index 0000000000..420383fd46 --- /dev/null +++ b/clientv2/linux/data_change_api.go @@ -0,0 +1,184 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linuxclient + +import ( + "github.com/ligato/vpp-agent/api/models/linux/interfaces" + "github.com/ligato/vpp-agent/api/models/linux/l3" + vpp_acl "github.com/ligato/vpp-agent/api/models/vpp/acl" + vpp_interfaces "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + ipsec "github.com/ligato/vpp-agent/api/models/vpp/ipsec" + vpp_l2 "github.com/ligato/vpp-agent/api/models/vpp/l2" + vpp_l3 "github.com/ligato/vpp-agent/api/models/vpp/l3" + nat "github.com/ligato/vpp-agent/api/models/vpp/nat" + punt "github.com/ligato/vpp-agent/api/models/vpp/punt" + vpp_stn "github.com/ligato/vpp-agent/api/models/vpp/stn" + vpp_clientv2 "github.com/ligato/vpp-agent/clientv2/vpp" +) + +// DataChangeDSL defines the Domain Specific Language (DSL) for data change +// of both Linux and VPP configuration. +// Use this interface to make your implementation independent of the local +// and any remote client. +// Every DSL statement (apart from Send) returns the receiver (possibly wrapped +// to change the scope of DSL), allowing the calls to be chained together +// conveniently in a single statement. +type DataChangeDSL interface { + // Put initiates a chained sequence of data change DSL statements, declaring + // new configurable objects or changing existing ones, e.g.: + // Put().LinuxInterface(&veth).VppInterface(&afpacket).BD(&BD) ... Send() + // The set of available objects to be created or changed is defined by PutDSL. 
+ Put() PutDSL + + // Delete initiates a chained sequence of data change DSL statements, + // removing existing configurable objects (by name), e.g: + // Delete().LinuxInterface(vethName).VppInterface(afpacketName).BD(BDName) ... Send() + // The set of available objects to be removed is defined by DeleteDSL. + Delete() DeleteDSL + + // Send propagates requested changes to the plugins. + Send() vpp_clientv2.Reply +} + +// PutDSL is a subset of data change DSL statements, used to declare new +// Linux or VPP configuration or change existing one. +type PutDSL interface { + // LinuxInterface adds a request to create or update Linux network interface. + LinuxInterface(val *linux_interfaces.Interface) PutDSL + // LinuxArpEntry adds a request to crete or update Linux ARP entry + LinuxArpEntry(val *linux_l3.ARPEntry) PutDSL + // LinuxRoute adds a request to crete or update Linux route + LinuxRoute(val *linux_l3.Route) PutDSL + + // VppInterface adds a request to create or update VPP network interface. + VppInterface(val *vpp_interfaces.Interface) PutDSL + // ACL adds a request to create or update VPP Access Control List. + ACL(acl *vpp_acl.ACL) PutDSL + /*// BfdSession adds a request to create or update VPP bidirectional + // forwarding detection session. + BfdSession(val *vpp_bfd.SingleHopBFD_Session) PutDSL + // BfdAuthKeys adds a request to create or update VPP bidirectional + // forwarding detection key. + BfdAuthKeys(val *vpp_bfd.SingleHopBFD_Key) PutDSL + // BfdEchoFunction adds a request to create or update VPP bidirectional + // forwarding detection echo function. + BfdEchoFunction(val *vpp_bfd.SingleHopBFD_EchoFunction) PutDSL*/ + // BD adds a request to create or update VPP Bridge Domain. + BD(val *vpp_l2.BridgeDomain) PutDSL + // BDFIB adds a request to create or update VPP L2 Forwarding Information Base. + BDFIB(fib *vpp_l2.FIBEntry) PutDSL + // XConnect adds a request to create or update VPP Cross Connect. 
+ XConnect(val *vpp_l2.XConnectPair) PutDSL + // StaticRoute adds a request to create or update VPP L3 Static Route. + StaticRoute(val *vpp_l3.Route) PutDSL + // Arp adds a request to create or update VPP L3 ARP. + Arp(arp *vpp_l3.ARPEntry) PutDSL + // ProxyArp adds a request to create or update VPP L3 proxy ARP. + ProxyArp(proxyArp *vpp_l3.ProxyARP) PutDSL + // IPScanNeighbor adds L3 IP Scan Neighbor to the RESYNC request. + IPScanNeighbor(ipScanNeigh *vpp_l3.IPScanNeighbor) PutDSL + /*// L4Features adds a request to enable or disable L4 features + L4Features(val *vpp_l4.L4Features) PutDSL + // AppNamespace adds a request to create or update VPP Application namespace + AppNamespace(appNs *vpp_l4.AppNamespaces_AppNamespace) PutDSL*/ + // StnRule adds a request to create or update VPP Stn rule. + StnRule(stn *vpp_stn.Rule) PutDSL + // NAT44Global adds a request to set global configuration for NAT44 + NAT44Global(nat *nat.Nat44Global) PutDSL + // DNAT44 adds a request to create or update DNAT44 configuration + DNAT44(dnat *nat.DNat44) PutDSL + // IPSecSA adds request to create a new Security Association + IPSecSA(sa *ipsec.SecurityAssociation) PutDSL + // IPSecSPD adds request to create a new Security Policy Database + IPSecSPD(spd *ipsec.SecurityPolicyDatabase) PutDSL + // PuntIPRedirect adds request to create or update rule to punt L3 traffic via interface. + PuntIPRedirect(val *punt.IPRedirect) PutDSL + // PuntToHost adds request to create or update rule to punt L4 traffic to a host. + PuntToHost(val *punt.ToHost) PutDSL + + // Delete changes the DSL mode to allow removing an existing configuration. + // See documentation for DataChangeDSL.Delete(). + Delete() DeleteDSL + + // Send propagates requested changes to the plugins. + Send() vpp_clientv2.Reply +} + +// DeleteDSL is a subset of data change DSL statements, used to remove +// existing Linux or VPP configuration. 
+type DeleteDSL interface { + // LinuxInterface adds a request to delete an existing Linux network + // interface. + LinuxInterface(ifaceName string) DeleteDSL + // LinuxArpEntry adds a request to delete Linux ARP entry + LinuxArpEntry(ifaceName string, ipAddr string) DeleteDSL + // LinuxRoute adds a request to delete Linux route + LinuxRoute(dstAddr, outIfaceName string) DeleteDSL + + // VppInterface adds a request to delete an existing VPP network interface. + VppInterface(ifaceName string) DeleteDSL + // ACL adds a request to delete an existing VPP Access Control List. + ACL(aclName string) DeleteDSL + /*// BfdSession adds a request to delete an existing VPP bidirectional + // forwarding detection session. + BfdSession(bfdSessionIfaceName string) DeleteDSL + // BfdAuthKeys adds a request to delete an existing VPP bidirectional + // forwarding detection key. + BfdAuthKeys(bfdKey string) DeleteDSL + // BfdEchoFunction adds a request to delete an existing VPP bidirectional + // forwarding detection echo function. + BfdEchoFunction(bfdEchoName string) DeleteDSL*/ + // BD adds a request to delete an existing VPP Bridge Domain. + BD(bdName string) DeleteDSL + // FIB adds a request to delete an existing VPP L2 Forwarding Information + // Base. + BDFIB(bdName string, mac string) DeleteDSL + // XConnect adds a request to delete an existing VPP Cross Connect. + XConnect(rxIfaceName string) DeleteDSL + // StaticRoute adds a request to delete an existing VPP L3 Static Route. + StaticRoute(vrf uint32, dstAddr string, nextHopAddr string) DeleteDSL + /*// L4Features adds a request to enable or disable L4 features + L4Features() DeleteDSL + // AppNamespace adds a request to delete VPP Application namespace + // Note: current version does not support application namespace deletion + AppNamespace(id string) DeleteDSL*/ + // Arp adds a request to delete an existing VPP L3 ARP. 
+ Arp(ifaceName string, ipAddr string) DeleteDSL + // ProxyArp adds a request to delete an existing VPP L3 proxy ARP + ProxyArp() DeleteDSL + // IPScanNeighbor adds a request to delete an existing VPP L3 IP Scan Neighbor. + IPScanNeighbor() DeleteDSL + // StnRule adds a request to delete an existing VPP Stn rule. + StnRule(iface, addr string) DeleteDSL + // NAT44Global adds a request to remove global configuration for NAT44 + NAT44Global() DeleteDSL + // DNAT44 adds a request to delete an existing DNAT-44 configuration + DNAT44(label string) DeleteDSL + // IPSecSA adds request to delete a Security Association + IPSecSA(saIndex string) DeleteDSL + // IPSecSPD adds request to delete a Security Policy Database + IPSecSPD(spdIndex string) DeleteDSL + // PuntIPRedirect adds request to delete a rule used to punt L3 traffic via interface. + PuntIPRedirect(l3Proto punt.L3Protocol, txInterface string) DeleteDSL + // PuntToHost adds request to delete a rule used to punt L4 traffic to a host. + PuntToHost(l3Proto punt.L3Protocol, l4Proto punt.L4Protocol, port uint32) DeleteDSL + + // Put changes the DSL mode to allow configuration editing. + // See documentation for DataChangeDSL.Put(). + Put() PutDSL + + // Send propagates requested changes to the plugins. + Send() vpp_clientv2.Reply +} diff --git a/clientv2/linux/data_resync_api.go b/clientv2/linux/data_resync_api.go new file mode 100644 index 0000000000..1aab217053 --- /dev/null +++ b/clientv2/linux/data_resync_api.go @@ -0,0 +1,93 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linuxclient + +import ( + "github.com/ligato/vpp-agent/api/models/linux/interfaces" + "github.com/ligato/vpp-agent/api/models/linux/l3" + vpp_acl "github.com/ligato/vpp-agent/api/models/vpp/acl" + vpp_interfaces "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + ipsec "github.com/ligato/vpp-agent/api/models/vpp/ipsec" + vpp_l2 "github.com/ligato/vpp-agent/api/models/vpp/l2" + vpp_l3 "github.com/ligato/vpp-agent/api/models/vpp/l3" + nat "github.com/ligato/vpp-agent/api/models/vpp/nat" + punt "github.com/ligato/vpp-agent/api/models/vpp/punt" + vpp_stn "github.com/ligato/vpp-agent/api/models/vpp/stn" + vpp_clientv2 "github.com/ligato/vpp-agent/clientv2/vpp" +) + +// DataResyncDSL defines the Domain Specific Language (DSL) for data RESYNC +// of both Linux and VPP configuration. +// Use this interface to make your implementation independent of the local +// and any remote client. +// Each method (apart from Send) returns the receiver, allowing the calls +// to be chained together conveniently in a single statement. +type DataResyncDSL interface { + // LinuxInterface adds Linux interface to the RESYNC request. + LinuxInterface(intf *linux_interfaces.Interface) DataResyncDSL + // LinuxInterface adds Linux ARP entry to the RESYNC request. + LinuxArpEntry(arp *linux_l3.ARPEntry) DataResyncDSL + // LinuxInterface adds Linux route to the RESYNC request. + LinuxRoute(route *linux_l3.Route) DataResyncDSL + + // VppInterface adds VPP interface to the RESYNC request. 
+ VppInterface(intf *vpp_interfaces.Interface) DataResyncDSL + // ACL adds VPP Access Control List to the RESYNC request. + ACL(acl *vpp_acl.ACL) DataResyncDSL + /*// BfdSession adds VPP bidirectional forwarding detection session + // to the RESYNC request. + BfdSession(val *vpp_bfd.SingleHopBFD_Session) DataResyncDSL + // BfdAuthKeys adds VPP bidirectional forwarding detection key to the RESYNC + // request. + BfdAuthKeys(val *vpp_bfd.SingleHopBFD_Key) DataResyncDSL + // BfdEchoFunction adds VPP bidirectional forwarding detection echo function + // to the RESYNC request. + BfdEchoFunction(val *vpp_bfd.SingleHopBFD_EchoFunction) DataResyncDSL*/ + // BD adds VPP Bridge Domain to the RESYNC request. + BD(bd *vpp_l2.BridgeDomain) DataResyncDSL + // BDFIB adds VPP L2 FIB to the RESYNC request. + BDFIB(fib *vpp_l2.FIBEntry) DataResyncDSL + // XConnect adds VPP Cross Connect to the RESYNC request. + XConnect(xcon *vpp_l2.XConnectPair) DataResyncDSL + // StaticRoute adds VPP L3 Static Route to the RESYNC request. + StaticRoute(staticRoute *vpp_l3.Route) DataResyncDSL + // Arp adds VPP L3 ARP to the RESYNC request. + Arp(arp *vpp_l3.ARPEntry) DataResyncDSL + // ProxyArp adds L3 proxy ARP interfaces to the RESYNC request. + ProxyArp(proxyArp *vpp_l3.ProxyARP) DataResyncDSL + // IPScanNeighbor adds L3 IP Scan Neighbor to the RESYNC request. + IPScanNeighbor(ipScanNeigh *vpp_l3.IPScanNeighbor) DataResyncDSL + /*// L4Features adds L4 features to the RESYNC request + L4Features(val *vpp_l4.L4Features) DataResyncDSL + // AppNamespace adds VPP Application namespaces to the RESYNC request + AppNamespace(appNs *vpp_l4.AppNamespaces_AppNamespace) DataResyncDSL*/ + // StnRule adds Stn rule to the RESYNC request. + StnRule(stn *vpp_stn.Rule) DataResyncDSL + // NAT44Global adds global NAT44 configuration to the RESYNC request. 
+ NAT44Global(nat *nat.Nat44Global) DataResyncDSL + // DNAT44 adds DNAT44 configuration to the RESYNC request + DNAT44(dnat *nat.DNat44) DataResyncDSL + // IPSecSA adds request to RESYNC a new Security Association + IPSecSA(sa *ipsec.SecurityAssociation) DataResyncDSL + // IPSecSPD adds request to RESYNC a new Security Policy Database + IPSecSPD(spd *ipsec.SecurityPolicyDatabase) DataResyncDSL + // PuntIPRedirect adds request to RESYNC a rule used to punt L3 traffic via interface. + PuntIPRedirect(val *punt.IPRedirect) DataResyncDSL + // PuntToHost adds request to RESYNC a rule used to punt L4 traffic to a host. + PuntToHost(val *punt.ToHost) DataResyncDSL + + // Send propagates the RESYNC request to the plugins. + Send() vpp_clientv2.Reply +} diff --git a/clientv2/linux/dbadapter/data_change_db.go b/clientv2/linux/dbadapter/data_change_db.go new file mode 100644 index 0000000000..d226dcd391 --- /dev/null +++ b/clientv2/linux/dbadapter/data_change_db.go @@ -0,0 +1,394 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dbadapter + +import ( + "github.com/ligato/cn-infra/db/keyval" + + "github.com/ligato/vpp-agent/api/models/linux/interfaces" + "github.com/ligato/vpp-agent/api/models/linux/l3" + acl "github.com/ligato/vpp-agent/api/models/vpp/acl" + interfaces "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + ipsec "github.com/ligato/vpp-agent/api/models/vpp/ipsec" + l2 "github.com/ligato/vpp-agent/api/models/vpp/l2" + l3 "github.com/ligato/vpp-agent/api/models/vpp/l3" + nat "github.com/ligato/vpp-agent/api/models/vpp/nat" + punt "github.com/ligato/vpp-agent/api/models/vpp/punt" + stn "github.com/ligato/vpp-agent/api/models/vpp/stn" + "github.com/ligato/vpp-agent/clientv2/linux" + "github.com/ligato/vpp-agent/clientv2/vpp" + "github.com/ligato/vpp-agent/clientv2/vpp/dbadapter" +) + +// NewDataChangeDSL returns a new instance of DataChangeDSL which implements +// the data change DSL for both Linux and VPP config (inherits dbadapter +// from vppplugin). +// Transaction is used to propagate changes to plugins. +func NewDataChangeDSL(txn keyval.ProtoTxn) *DataChangeDSL { + vppDbAdapter := dbadapter.NewDataChangeDSL(txn) + return &DataChangeDSL{txn: txn, vppDataChange: vppDbAdapter} +} + +// DataChangeDSL is an implementation of Domain Specific Language (DSL) +// for changes of both Linux and VPP configuration. +type DataChangeDSL struct { + txn keyval.ProtoTxn + vppDataChange vppclient.DataChangeDSL +} + +// PutDSL implements put operations of data change DSL. +type PutDSL struct { + parent *DataChangeDSL + vppPut vppclient.PutDSL +} + +// DeleteDSL implements delete operations of data change DSL. +type DeleteDSL struct { + parent *DataChangeDSL + vppDelete vppclient.DeleteDSL +} + +// Put initiates a chained sequence of data change DSL statements and declares +// new configurable objects or changes existing ones. 
+func (dsl *DataChangeDSL) Put() linuxclient.PutDSL { + return &PutDSL{dsl, dsl.vppDataChange.Put()} +} + +// Delete initiates a chained sequence of data change DSL statements +// removing existing configurable objects. +func (dsl *DataChangeDSL) Delete() linuxclient.DeleteDSL { + return &DeleteDSL{dsl, dsl.vppDataChange.Delete()} +} + +// Send propagates requested changes to the plugins. +func (dsl *DataChangeDSL) Send() vppclient.Reply { + return dsl.vppDataChange.Send() +} + +// LinuxInterface adds a request to create or update Linux network interface. +func (dsl *PutDSL) LinuxInterface(val *linux_interfaces.Interface) linuxclient.PutDSL { + dsl.parent.txn.Put(linux_interfaces.InterfaceKey(val.Name), val) + return dsl +} + +// LinuxArpEntry adds a request to create or update Linux ARP entry. +func (dsl *PutDSL) LinuxArpEntry(val *linux_l3.ARPEntry) linuxclient.PutDSL { + dsl.parent.txn.Put(linux_l3.ArpKey(val.Interface, val.IpAddress), val) + return dsl +} + +// LinuxRoute adds a request to create or update Linux route. +func (dsl *PutDSL) LinuxRoute(val *linux_l3.Route) linuxclient.PutDSL { + dsl.parent.txn.Put(linux_l3.RouteKey(val.DstNetwork, val.OutgoingInterface), val) + return dsl +} + +// VppInterface adds a request to create or update VPP network interface. +func (dsl *PutDSL) VppInterface(val *interfaces.Interface) linuxclient.PutDSL { + dsl.vppPut.Interface(val) + return dsl +} + +// ACL adds a request to create or update VPP Access Control List. +func (dsl *PutDSL) ACL(acl *acl.ACL) linuxclient.PutDSL { + dsl.vppPut.ACL(acl) + return dsl +} + +/*// BfdSession adds a request to create or update VPP bidirectional forwarding +// detection session. +func (dsl *PutDSL) BfdSession(val *bfd.SingleHopBFD_Session) linuxclient.PutDSL { + dsl.vppPut.BfdSession(val) + return dsl +} + +// BfdAuthKeys adds a request to create or update VPP bidirectional forwarding +// detection key. 
+func (dsl *PutDSL) BfdAuthKeys(val *bfd.SingleHopBFD_Key) linuxclient.PutDSL {
+ dsl.vppPut.BfdAuthKeys(val)
+ return dsl
+}
+
+// BfdEchoFunction adds a request to create or update VPP bidirectional forwarding
+// detection echo function.
+func (dsl *PutDSL) BfdEchoFunction(val *bfd.SingleHopBFD_EchoFunction) linuxclient.PutDSL {
+ dsl.vppPut.BfdEchoFunction(val)
+ return dsl
+}*/
+
+// BD adds a request to create or update VPP Bridge Domain.
+func (dsl *PutDSL) BD(val *l2.BridgeDomain) linuxclient.PutDSL {
+ dsl.vppPut.BD(val)
+ return dsl
+}
+
+// BDFIB adds a request to create or update VPP L2 Forwarding Information Base.
+func (dsl *PutDSL) BDFIB(fib *l2.FIBEntry) linuxclient.PutDSL {
+ dsl.vppPut.BDFIB(fib)
+ return dsl
+}
+
+// XConnect adds a request to create or update VPP Cross Connect.
+func (dsl *PutDSL) XConnect(val *l2.XConnectPair) linuxclient.PutDSL {
+ dsl.vppPut.XConnect(val)
+ return dsl
+}
+
+// StaticRoute adds a request to create or update VPP L3 Static Route.
+func (dsl *PutDSL) StaticRoute(val *l3.Route) linuxclient.PutDSL {
+ dsl.vppPut.StaticRoute(val)
+ return dsl
+}
+
+// Arp adds a request to create or update VPP L3 ARP.
+func (dsl *PutDSL) Arp(arp *l3.ARPEntry) linuxclient.PutDSL {
+ dsl.vppPut.Arp(arp)
+ return dsl
+}
+
+// ProxyArp adds a request to create or update VPP L3 proxy ARP.
+func (dsl *PutDSL) ProxyArp(proxyArp *l3.ProxyARP) linuxclient.PutDSL {
+ dsl.vppPut.ProxyArp(proxyArp)
+ return dsl
+}
+
+// IPScanNeighbor adds a request to create or update VPP L3 IP Scan Neighbor.
+func (dsl *PutDSL) IPScanNeighbor(ipScanNeigh *l3.IPScanNeighbor) linuxclient.PutDSL { + dsl.vppPut.IPScanNeighbor(ipScanNeigh) + return dsl +} + +/*// L4Features adds a request to enable or disable L4 features +func (dsl *PutDSL) L4Features(val *l4.L4Features) linuxclient.PutDSL { + dsl.vppPut.L4Features(val) + return dsl +} + +// AppNamespace adds a request to create or update VPP Application namespace +func (dsl *PutDSL) AppNamespace(appNs *l4.AppNamespaces_AppNamespace) linuxclient.PutDSL { + dsl.vppPut.AppNamespace(appNs) + return dsl +}*/ + +// StnRule adds a request to create or update VPP Stn rule. +func (dsl *PutDSL) StnRule(stn *stn.Rule) linuxclient.PutDSL { + dsl.vppPut.StnRule(stn) + return dsl +} + +// NAT44Global adds a request to set global configuration for NAT44 +func (dsl *PutDSL) NAT44Global(nat44 *nat.Nat44Global) linuxclient.PutDSL { + dsl.vppPut.NAT44Global(nat44) + return dsl +} + +// DNAT44 adds a request to create or update DNAT44 configuration +func (dsl *PutDSL) DNAT44(nat44 *nat.DNat44) linuxclient.PutDSL { + dsl.vppPut.DNAT44(nat44) + return dsl +} + +// IPSecSA adds request to create a new Security Association +func (dsl *PutDSL) IPSecSA(sa *ipsec.SecurityAssociation) linuxclient.PutDSL { + dsl.vppPut.IPSecSA(sa) + return dsl +} + +// IPSecSPD adds request to create a new Security Policy Database +func (dsl *PutDSL) IPSecSPD(spd *ipsec.SecurityPolicyDatabase) linuxclient.PutDSL { + dsl.vppPut.IPSecSPD(spd) + return dsl +} + +// PuntIPRedirect adds request to create or update rule to punt L3 traffic via interface. +func (dsl *PutDSL) PuntIPRedirect(val *punt.IPRedirect) linuxclient.PutDSL { + dsl.vppPut.PuntIPRedirect(val) + return dsl +} + +// PuntToHost adds request to create or update rule to punt L4 traffic to a host. +func (dsl *PutDSL) PuntToHost(val *punt.ToHost) linuxclient.PutDSL { + dsl.vppPut.PuntToHost(val) + return dsl +} + +// Delete changes the DSL mode to allow removal of an existing configuration. 
+func (dsl *PutDSL) Delete() linuxclient.DeleteDSL { + return &DeleteDSL{dsl.parent, dsl.vppPut.Delete()} +} + +// Send propagates requested changes to the plugins. +func (dsl *PutDSL) Send() vppclient.Reply { + return dsl.parent.Send() +} + +// LinuxInterface adds a request to delete an existing Linux network +// interface. +func (dsl *DeleteDSL) LinuxInterface(interfaceName string) linuxclient.DeleteDSL { + dsl.parent.txn.Delete(linux_interfaces.InterfaceKey(interfaceName)) + return dsl +} + +// LinuxArpEntry adds a request to delete Linux ARP entry. +func (dsl *DeleteDSL) LinuxArpEntry(ifaceName string, ipAddr string) linuxclient.DeleteDSL { + dsl.parent.txn.Delete(linux_l3.ArpKey(ifaceName, ipAddr)) + return dsl +} + +// LinuxRoute adds a request to delete Linux route. +func (dsl *DeleteDSL) LinuxRoute(dstAddr, outIfaceName string) linuxclient.DeleteDSL { + dsl.parent.txn.Delete(linux_l3.RouteKey(dstAddr, outIfaceName)) + return dsl +} + +// VppInterface adds a request to delete an existing VPP network interface. +func (dsl *DeleteDSL) VppInterface(ifaceName string) linuxclient.DeleteDSL { + dsl.vppDelete.Interface(ifaceName) + return dsl +} + +// ACL adds a request to delete an existing VPP Access Control List. +func (dsl *DeleteDSL) ACL(aclName string) linuxclient.DeleteDSL { + dsl.vppDelete.ACL(aclName) + return dsl +} + +/*// BfdSession adds a request to delete an existing VPP bidirectional forwarding +// detection session. +func (dsl *DeleteDSL) BfdSession(bfdSessionIfaceName string) linuxclient.DeleteDSL { + dsl.vppDelete.BfdSession(bfdSessionIfaceName) + return dsl +} + +// BfdAuthKeys adds a request to delete an existing VPP bidirectional forwarding +// detection key. +func (dsl *DeleteDSL) BfdAuthKeys(bfdKey string) linuxclient.DeleteDSL { + dsl.vppDelete.BfdAuthKeys(bfdKey) + return dsl +} + +// BfdEchoFunction adds a request to delete an existing VPP bidirectional +// forwarding detection echo function. 
+func (dsl *DeleteDSL) BfdEchoFunction(bfdEchoName string) linuxclient.DeleteDSL { + dsl.vppDelete.BfdEchoFunction(bfdEchoName) + return dsl +}*/ + +// BD adds a request to delete an existing VPP Bridge Domain. +func (dsl *DeleteDSL) BD(bdName string) linuxclient.DeleteDSL { + dsl.vppDelete.BD(bdName) + return dsl +} + +// BDFIB adds a request to delete an existing VPP L2 Forwarding Information Base. +func (dsl *DeleteDSL) BDFIB(bdName string, mac string) linuxclient.DeleteDSL { + dsl.vppDelete.BDFIB(bdName, mac) + return dsl +} + +// XConnect adds a request to delete an existing VPP Cross Connect. +func (dsl *DeleteDSL) XConnect(rxIfaceName string) linuxclient.DeleteDSL { + dsl.vppDelete.XConnect(rxIfaceName) + return dsl +} + +// StaticRoute adds a request to delete an existing VPP L3 Static Route. +func (dsl *DeleteDSL) StaticRoute(vrf uint32, dstAddr string, nextHopAddr string) linuxclient.DeleteDSL { + dsl.vppDelete.StaticRoute(vrf, dstAddr, nextHopAddr) + return dsl +} + +// IPScanNeighbor adds a request to delete an existing VPP L3 IP Scan Neighbor. +func (dsl *DeleteDSL) IPScanNeighbor() linuxclient.DeleteDSL { + dsl.vppDelete.IPScanNeighbor() + return dsl +} + +// Arp adds a request to delete an existing VPP L3 ARP. +func (dsl *DeleteDSL) Arp(ifaceName string, ipAddr string) linuxclient.DeleteDSL { + dsl.vppDelete.Arp(ifaceName, ipAddr) + return dsl +} + +// ProxyArp adds a request to delete an existing VPP L3 proxy ARP. 
+func (dsl *DeleteDSL) ProxyArp() linuxclient.DeleteDSL { + dsl.vppDelete.ProxyArp() + return dsl +} + +/*// L4Features adds a request to enable or disable L4 features +func (dsl *DeleteDSL) L4Features() linuxclient.DeleteDSL { + dsl.vppDelete.L4Features() + return dsl +} + +// AppNamespace adds a request to delete VPP Application namespace +// Note: current version does not support application namespace deletion +func (dsl *DeleteDSL) AppNamespace(id string) linuxclient.DeleteDSL { + dsl.vppDelete.AppNamespace(id) + return dsl +}*/ + +// StnRule adds a request to delete an existing VPP Stn rule. +func (dsl *DeleteDSL) StnRule(iface, addr string) linuxclient.DeleteDSL { + dsl.vppDelete.StnRule(iface, addr) + return dsl +} + +// NAT44Global adds a request to remove global configuration for NAT44 +func (dsl *DeleteDSL) NAT44Global() linuxclient.DeleteDSL { + dsl.vppDelete.NAT44Global() + return dsl +} + +// DNAT44 adds a request to delete an existing DNAT44 configuration +func (dsl *DeleteDSL) DNAT44(label string) linuxclient.DeleteDSL { + dsl.vppDelete.DNAT44(label) + return dsl +} + +// IPSecSA adds request to delete a Security Association +func (dsl *DeleteDSL) IPSecSA(saIndex string) linuxclient.DeleteDSL { + dsl.vppDelete.IPSecSA(saIndex) + return dsl +} + +// IPSecSPD adds request to delete a Security Policy Database +func (dsl *DeleteDSL) IPSecSPD(spdIndex string) linuxclient.DeleteDSL { + dsl.vppDelete.IPSecSPD(spdIndex) + return dsl +} + +// PuntIPRedirect adds request to delete a rule used to punt L3 traffic via interface. +func (dsl *DeleteDSL) PuntIPRedirect(l3Proto punt.L3Protocol, txInterface string) linuxclient.DeleteDSL { + dsl.vppDelete.PuntIPRedirect(l3Proto, txInterface) + return dsl +} + +// PuntToHost adds request to delete a rule used to punt L4 traffic to a host. 
+func (dsl *DeleteDSL) PuntToHost(l3Proto punt.L3Protocol, l4Proto punt.L4Protocol, port uint32) linuxclient.DeleteDSL { + dsl.vppDelete.PuntToHost(l3Proto, l4Proto, port) + return dsl +} + +// Put changes the DSL mode to allow configuration editing. +func (dsl *DeleteDSL) Put() linuxclient.PutDSL { + return &PutDSL{dsl.parent, dsl.vppDelete.Put()} +} + +// Send propagates requested changes to the plugins. +func (dsl *DeleteDSL) Send() vppclient.Reply { + return dsl.parent.Send() +} diff --git a/clientv2/linux/dbadapter/data_resync_db.go b/clientv2/linux/dbadapter/data_resync_db.go new file mode 100644 index 0000000000..3d496ebc69 --- /dev/null +++ b/clientv2/linux/dbadapter/data_resync_db.go @@ -0,0 +1,256 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dbadapter + +import ( + "github.com/ligato/cn-infra/db/keyval" + + "github.com/ligato/vpp-agent/api/models/linux/interfaces" + "github.com/ligato/vpp-agent/api/models/linux/l3" + acl "github.com/ligato/vpp-agent/api/models/vpp/acl" + interfaces "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + ipsec "github.com/ligato/vpp-agent/api/models/vpp/ipsec" + l2 "github.com/ligato/vpp-agent/api/models/vpp/l2" + l3 "github.com/ligato/vpp-agent/api/models/vpp/l3" + nat "github.com/ligato/vpp-agent/api/models/vpp/nat" + punt "github.com/ligato/vpp-agent/api/models/vpp/punt" + stn "github.com/ligato/vpp-agent/api/models/vpp/stn" + "github.com/ligato/vpp-agent/clientv2/linux" + "github.com/ligato/vpp-agent/clientv2/vpp" + "github.com/ligato/vpp-agent/clientv2/vpp/dbadapter" +) + +// NewDataResyncDSL returns a new instance of DataResyncDSL which implements +// the data RESYNC DSL for both Linux and VPP config (inherits dbadapter +// from vppplugin). +// Transaction is used to propagate changes to plugins. +// Function is used to list keys with already existing configuration. +func NewDataResyncDSL(txn keyval.ProtoTxn, listKeys func(prefix string) (keyval.ProtoKeyIterator, error)) *DataResyncDSL { + vppDataResync := dbadapter.NewDataResyncDSL(txn, listKeys) + return &DataResyncDSL{txn, []string{}, listKeys, vppDataResync} +} + +// DataResyncDSL is an implementation of Domain Specific Language (DSL) for data +// RESYNC of both Linux and VPP configuration. +type DataResyncDSL struct { + txn keyval.ProtoTxn + txnKeys []string + listKeys func(prefix string) (keyval.ProtoKeyIterator, error) + + vppDataResync vppclient.DataResyncDSL +} + +// LinuxInterface adds Linux interface to the RESYNC request. 
+func (dsl *DataResyncDSL) LinuxInterface(val *linux_interfaces.Interface) linuxclient.DataResyncDSL { + key := linux_interfaces.InterfaceKey(val.Name) + dsl.txn.Put(key, val) + dsl.txnKeys = append(dsl.txnKeys, key) + + return dsl +} + +// LinuxArpEntry adds Linux ARP entry to the RESYNC request. +func (dsl *DataResyncDSL) LinuxArpEntry(val *linux_l3.ARPEntry) linuxclient.DataResyncDSL { + key := linux_l3.ArpKey(val.Interface, val.IpAddress) + dsl.txn.Put(key, val) + dsl.txnKeys = append(dsl.txnKeys, key) + + return dsl +} + +// LinuxRoute adds Linux route to the RESYNC request. +func (dsl *DataResyncDSL) LinuxRoute(val *linux_l3.Route) linuxclient.DataResyncDSL { + key := linux_l3.RouteKey(val.DstNetwork, val.OutgoingInterface) + dsl.txn.Put(key, val) + dsl.txnKeys = append(dsl.txnKeys, key) + + return dsl +} + +// VppInterface adds VPP interface to the RESYNC request. +func (dsl *DataResyncDSL) VppInterface(intf *interfaces.Interface) linuxclient.DataResyncDSL { + dsl.vppDataResync.Interface(intf) + return dsl +} + +// ACL adds VPP Access Control List to the RESYNC request. +func (dsl *DataResyncDSL) ACL(acl *acl.ACL) linuxclient.DataResyncDSL { + dsl.vppDataResync.ACL(acl) + return dsl +} + +/*// BfdSession adds VPP bidirectional forwarding detection session +// to the RESYNC request. +func (dsl *DataResyncDSL) BfdSession(val *bfd.SingleHopBFD_Session) linuxclient.DataResyncDSL { + dsl.vppDataResync.BfdSession(val) + return dsl +} + +// BfdAuthKeys adds VPP bidirectional forwarding detection key to the RESYNC +// request. +func (dsl *DataResyncDSL) BfdAuthKeys(val *bfd.SingleHopBFD_Key) linuxclient.DataResyncDSL { + dsl.vppDataResync.BfdAuthKeys(val) + return dsl +} + +// BfdEchoFunction adds VPP bidirectional forwarding detection echo function +// to the RESYNC request. 
+func (dsl *DataResyncDSL) BfdEchoFunction(val *bfd.SingleHopBFD_EchoFunction) linuxclient.DataResyncDSL { + dsl.vppDataResync.BfdEchoFunction(val) + return dsl +}*/ + +// BD adds VPP Bridge Domain to the RESYNC request. +func (dsl *DataResyncDSL) BD(bd *l2.BridgeDomain) linuxclient.DataResyncDSL { + dsl.vppDataResync.BD(bd) + return dsl +} + +// BDFIB adds VPP L2 FIB to the RESYNC request. +func (dsl *DataResyncDSL) BDFIB(fib *l2.FIBEntry) linuxclient.DataResyncDSL { + dsl.vppDataResync.BDFIB(fib) + return dsl +} + +// XConnect adds VPP Cross Connect to the RESYNC request. +func (dsl *DataResyncDSL) XConnect(xcon *l2.XConnectPair) linuxclient.DataResyncDSL { + dsl.vppDataResync.XConnect(xcon) + return dsl +} + +// StaticRoute adds VPP L3 Static Route to the RESYNC request. +func (dsl *DataResyncDSL) StaticRoute(staticRoute *l3.Route) linuxclient.DataResyncDSL { + dsl.vppDataResync.StaticRoute(staticRoute) + return dsl +} + +// Arp adds VPP L3 ARP to the RESYNC request. +func (dsl *DataResyncDSL) Arp(arp *l3.ARPEntry) linuxclient.DataResyncDSL { + dsl.vppDataResync.Arp(arp) + return dsl +} + +// ProxyArp adds L3 proxy ARP to the RESYNC request. +func (dsl *DataResyncDSL) ProxyArp(proxyArp *l3.ProxyARP) linuxclient.DataResyncDSL { + dsl.vppDataResync.ProxyArp(proxyArp) + return dsl +} + +// IPScanNeighbor adds L3 IP Scan Neighbor to the RESYNC request. 
+func (dsl *DataResyncDSL) IPScanNeighbor(ipScanNeigh *l3.IPScanNeighbor) linuxclient.DataResyncDSL { + dsl.vppDataResync.IPScanNeighbor(ipScanNeigh) + + return dsl +} + +/*// L4Features adds L4 features to the RESYNC request +func (dsl *DataResyncDSL) L4Features(val *l4.L4Features) linuxclient.DataResyncDSL { + dsl.vppDataResync.L4Features(val) + return dsl +} + +// AppNamespace adds VPP Application namespaces to the RESYNC request +func (dsl *DataResyncDSL) AppNamespace(appNs *l4.AppNamespaces_AppNamespace) linuxclient.DataResyncDSL { + dsl.vppDataResync.AppNamespace(appNs) + return dsl +}*/ + +// StnRule adds Stn rule to the RESYNC request. +func (dsl *DataResyncDSL) StnRule(stn *stn.Rule) linuxclient.DataResyncDSL { + dsl.vppDataResync.StnRule(stn) + return dsl +} + +// NAT44Global adds global NAT44 configuration to the RESYNC request. +func (dsl *DataResyncDSL) NAT44Global(nat44 *nat.Nat44Global) linuxclient.DataResyncDSL { + dsl.vppDataResync.NAT44Global(nat44) + return dsl +} + +// DNAT44 adds DNAT44 configuration to the RESYNC request +func (dsl *DataResyncDSL) DNAT44(nat44 *nat.DNat44) linuxclient.DataResyncDSL { + dsl.vppDataResync.DNAT44(nat44) + return dsl +} + +// IPSecSA adds request to RESYNC a new Security Association +func (dsl *DataResyncDSL) IPSecSA(sa *ipsec.SecurityAssociation) linuxclient.DataResyncDSL { + dsl.vppDataResync.IPSecSA(sa) + return dsl +} + +// IPSecSPD adds request to RESYNC a new Security Policy Database +func (dsl *DataResyncDSL) IPSecSPD(spd *ipsec.SecurityPolicyDatabase) linuxclient.DataResyncDSL { + dsl.vppDataResync.IPSecSPD(spd) + return dsl +} + +// PuntIPRedirect adds request to RESYNC a rule used to punt L3 traffic via interface. +func (dsl *DataResyncDSL) PuntIPRedirect(val *punt.IPRedirect) linuxclient.DataResyncDSL { + dsl.vppDataResync.PuntIPRedirect(val) + return dsl +} + +// PuntToHost adds request to RESYNC a rule used to punt L4 traffic to a host. 
+func (dsl *DataResyncDSL) PuntToHost(val *punt.ToHost) linuxclient.DataResyncDSL { + dsl.vppDataResync.PuntToHost(val) + return dsl +} + +// AppendKeys is a helper function that fills the keySet with values +// pointed to by the iterator . +func appendKeys(keys *keySet, it keyval.ProtoKeyIterator) { + for { + k, _, stop := it.GetNext() + if stop { + break + } + + (*keys)[k] = nil + } +} + +// KeySet is a helper type that reuses map keys to store values as a set. +// The values of the map are nil. +type keySet map[string] /*key*/ interface{} /*nil*/ + +// Send propagates the request to the plugins. +// It deletes obsolete keys if listKeys() (from constructor) function is not nil. +func (dsl *DataResyncDSL) Send() vppclient.Reply { + + for dsl.listKeys != nil { + toBeDeleted := keySet{} + + // fill all known keys associated with the Linux network configuration: + keys, err := dsl.listKeys(interfaces.ModelInterface.KeyPrefix()) + if err != nil { + break + } + appendKeys(&toBeDeleted, keys) + + // remove keys that are part of the transaction + for _, txnKey := range dsl.txnKeys { + delete(toBeDeleted, txnKey) + } + + for delKey := range toBeDeleted { + dsl.txn.Delete(delKey) + } + + break + } + + return dsl.vppDataResync.Send() +} diff --git a/clientv1/linux/dbadapter/doc.go b/clientv2/linux/dbadapter/doc.go similarity index 100% rename from clientv1/linux/dbadapter/doc.go rename to clientv2/linux/dbadapter/doc.go diff --git a/clientv1/linux/doc.go b/clientv2/linux/doc.go similarity index 100% rename from clientv1/linux/doc.go rename to clientv2/linux/doc.go diff --git a/clientv1/linux/localclient/doc.go b/clientv2/linux/localclient/doc.go similarity index 100% rename from clientv1/linux/localclient/doc.go rename to clientv2/linux/localclient/doc.go diff --git a/clientv2/linux/localclient/localclient_api.go b/clientv2/linux/localclient/localclient_api.go new file mode 100644 index 0000000000..c404034249 --- /dev/null +++ b/clientv2/linux/localclient/localclient_api.go 
@@ -0,0 +1,38 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package localclient + +import ( + "github.com/ligato/cn-infra/datasync/kvdbsync/local" + "github.com/ligato/vpp-agent/clientv2/linux" + "github.com/ligato/vpp-agent/clientv2/linux/dbadapter" +) + +// PluginID defines the name of Linux localclient plugin. +//const PluginID core.PluginName = "LinuxPlugin_LOCAL_CLIENT" + +// DataResyncRequest allows creating a RESYNC request using convenient RESYNC +// DSL and sending it locally through go channels (i.e. without using Data Store). +func DataResyncRequest(caller string) linuxclient.DataResyncDSL { + return dbadapter.NewDataResyncDSL(local.NewProtoTxn(local.Get().PropagateResync), + nil /*no need to list anything*/) +} + +// DataChangeRequest allows creating Data Change request(s) using convenient +// Data Change DSL and sending it locally through go channels (i.e. without using +// Data Store). 
+func DataChangeRequest(caller string) linuxclient.DataChangeDSL { + return dbadapter.NewDataChangeDSL(local.NewProtoTxn(local.Get().PropagateChanges)) +} diff --git a/clientv1/linux/localclient/localclient_init.go b/clientv2/linux/localclient/localclient_init.go similarity index 94% rename from clientv1/linux/localclient/localclient_init.go rename to clientv2/linux/localclient/localclient_init.go index f694af6aff..afc9921bf0 100644 --- a/clientv1/linux/localclient/localclient_init.go +++ b/clientv2/linux/localclient/localclient_init.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. +// Copyright (c) 2018 Cisco and/or its affiliates. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/clientv1/linux/remoteclient/doc.go b/clientv2/linux/remoteclient/doc.go similarity index 100% rename from clientv1/linux/remoteclient/doc.go rename to clientv2/linux/remoteclient/doc.go diff --git a/clientv2/linux/remoteclient/remoteclient_api.go b/clientv2/linux/remoteclient/remoteclient_api.go new file mode 100644 index 0000000000..9c51246e7a --- /dev/null +++ b/clientv2/linux/remoteclient/remoteclient_api.go @@ -0,0 +1,37 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package remoteclient
+
+import (
+ "github.com/ligato/cn-infra/db/keyval"
+ "github.com/ligato/vpp-agent/clientv2/linux"
+ "github.com/ligato/vpp-agent/clientv2/linux/dbadapter"
+)
+
+// DataResyncRequestDB allows creating a RESYNC request, using convenient RESYNC
+// DSL and sending it through the provided broker.
+// User of the API does not need to be aware of keys.
+// User of the API does not need to delete the obsolete objects/keys
+// prior to RESYNC - it is handled by DataResyncDSL.
+func DataResyncRequestDB(broker keyval.ProtoBroker) linuxclient.DataResyncDSL {
+ return dbadapter.NewDataResyncDSL(broker.NewTxn(), broker.ListKeys)
+}
+
+// DataChangeRequestDB allows creating Data Change requests, using convenient
+// Data Change DSL and sending it through the provided broker.
+// User of the API does not need to be aware of keys.
+func DataChangeRequestDB(broker keyval.ProtoBroker) linuxclient.DataChangeDSL {
+ return dbadapter.NewDataChangeDSL(broker.NewTxn())
+}
diff --git a/clientv2/vpp/data_change_api.go b/clientv2/vpp/data_change_api.go
new file mode 100644
index 0000000000..fc8185ed4e
--- /dev/null
+++ b/clientv2/vpp/data_change_api.go
@@ -0,0 +1,147 @@
+// Copyright (c) 2017 Cisco and/or its affiliates.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at:
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License. 
+ +package vppclient + +import ( + acl "github.com/ligato/vpp-agent/api/models/vpp/acl" + interfaces "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + ipsec "github.com/ligato/vpp-agent/api/models/vpp/ipsec" + l2 "github.com/ligato/vpp-agent/api/models/vpp/l2" + l3 "github.com/ligato/vpp-agent/api/models/vpp/l3" + nat "github.com/ligato/vpp-agent/api/models/vpp/nat" + punt "github.com/ligato/vpp-agent/api/models/vpp/punt" + stn "github.com/ligato/vpp-agent/api/models/vpp/stn" +) + +// DataChangeDSL defines Domain Specific Language (DSL) for data change. +// of the VPP configuration. +// Use this interface to make your implementation independent of the local +// and any remote client. +// Every DSL statement (apart from Send) returns the receiver (possibly wrapped +// to change the scope of DSL), allowing the calls to be chained together +// conveniently in a single statement. +type DataChangeDSL interface { + // Put initiates a chained sequence of data change DSL statements, declaring + // new or changing existing configurable objects, e.g.: + // Put().Interface(&memif).XConnect(&xconnect).BD(&BD) ... Send() + // The set of available objects to be created or changed is defined by PutDSL. + Put() PutDSL + + // Delete initiates a chained sequence of data change DSL statements, + // removing existing configurable objects (by name), e.g.: + // Delete().Interface(memifName).XConnect(xconnectName).BD(BDName) ... Send() + // The set of available objects to be removed is defined by DeleteDSL. + Delete() DeleteDSL + + // Send propagates requested changes to the plugins. + Send() Reply +} + +// PutDSL is a subset of data change DSL statements, used to declare new +// VPP configuration or to change an existing one. +type PutDSL interface { + // Interface adds a request to create or update VPP network interface. + Interface(val *interfaces.Interface) PutDSL + // ACL adds a request to create or update VPP Access Control List. 
+ ACL(acl *acl.ACL) PutDSL
+ // BD adds a request to create or update VPP Bridge Domain.
+ BD(val *l2.BridgeDomain) PutDSL
+ // BDFIB adds a request to create or update VPP L2 Forwarding Information Base.
+ BDFIB(fib *l2.FIBEntry) PutDSL
+ // XConnect adds a request to create or update VPP Cross Connect.
+ XConnect(val *l2.XConnectPair) PutDSL
+ // StaticRoute adds a request to create or update VPP L3 Static Route.
+ StaticRoute(val *l3.Route) PutDSL
+ // Arp adds a request to create or update VPP L3 ARP.
+ Arp(arp *l3.ARPEntry) PutDSL
+ // ProxyArp adds a request to create or update VPP L3 proxy ARP.
+ ProxyArp(proxyArp *l3.ProxyARP) PutDSL
+ // IPScanNeighbor adds a request to create or update VPP L3 IP Scan Neighbor.
+ IPScanNeighbor(ipScanNeigh *l3.IPScanNeighbor) PutDSL
+ // StnRule adds a request to create or update Stn rule.
+ StnRule(stn *stn.Rule) PutDSL
+ // NAT44Global adds a request to set global configuration for NAT44
+ NAT44Global(nat *nat.Nat44Global) PutDSL
+ // DNAT44 adds a request to create or update DNAT44 configuration
+ DNAT44(dnat *nat.DNat44) PutDSL
+ // IPSecSA adds request to create a new Security Association
+ IPSecSA(sa *ipsec.SecurityAssociation) PutDSL
+ // IPSecSPD adds request to create a new Security Policy Database
+ IPSecSPD(spd *ipsec.SecurityPolicyDatabase) PutDSL
+ // PuntIPRedirect adds request to create or update rule to punt L3 traffic via interface.
+ PuntIPRedirect(val *punt.IPRedirect) PutDSL
+ // PuntToHost adds request to create or update rule to punt L4 traffic to a host.
+ PuntToHost(val *punt.ToHost) PutDSL
+
+ // Delete changes the DSL mode to allow removal of an existing configuration.
+ // See documentation for DataChangeDSL.Delete().
+ Delete() DeleteDSL
+
+ // Send propagates requested changes to the plugins.
+ Send() Reply
+}
+
+// DeleteDSL is a subset of data change DSL statements, used to remove
+// an existing VPP configuration. 
+type DeleteDSL interface {
+ // Interface adds a request to delete an existing VPP network interface.
+ Interface(ifaceName string) DeleteDSL
+ // ACL adds a request to delete an existing VPP Access Control List.
+ ACL(aclName string) DeleteDSL
+ // BD adds a request to delete an existing VPP Bridge Domain.
+ BD(bdName string) DeleteDSL
+ // BDFIB adds a request to delete an existing VPP L2 Forwarding Information
+ // Base.
+ BDFIB(bdName string, mac string) DeleteDSL
+ // XConnect adds a request to delete an existing VPP Cross Connect.
+ XConnect(rxIfaceName string) DeleteDSL
+ // StaticRoute adds a request to delete an existing VPP L3 Static Route.
+ StaticRoute(vrf uint32, dstAddr string, nextHopAddr string) DeleteDSL
+ // Arp adds a request to delete an existing VPP L3 ARP.
+ Arp(ifaceName string, ipAddr string) DeleteDSL
+ // ProxyArp adds a request to delete an existing VPP L3 proxy ARP.
+ ProxyArp() DeleteDSL
+ // IPScanNeighbor adds a request to delete an existing VPP L3 IP Scan Neighbor.
+ IPScanNeighbor() DeleteDSL
+ // StnRule adds a request to delete an existing Stn rule.
+ StnRule(iface, addr string) DeleteDSL
+ // NAT44Global adds a request to remove global configuration for NAT44
+ NAT44Global() DeleteDSL
+ // DNAT44 adds a request to delete an existing DNAT44 configuration
+ DNAT44(label string) DeleteDSL
+ // IPSecSA adds request to delete a Security Association
+ IPSecSA(saIndex string) DeleteDSL
+ // IPSecSPD adds request to delete a Security Policy Database
+ IPSecSPD(spdIndex string) DeleteDSL
+ // PuntIPRedirect adds request to delete a rule used to punt L3 traffic via interface.
+ PuntIPRedirect(l3Proto punt.L3Protocol, txInterface string) DeleteDSL
+ // PuntToHost adds request to delete a rule used to punt L4 traffic to a host.
+ PuntToHost(l3Proto punt.L3Protocol, l4Proto punt.L4Protocol, port uint32) DeleteDSL
+
+ // Put changes the DSL mode to allow configuration editing. 
+ // See documentation for DataChangeDSL.Put(). + Put() PutDSL + + // Send propagates requested changes to the plugins. + Send() Reply +} + +// Reply interface allows to wait for a reply to previously called Send() and +// extract the result from it (success/error). +type Reply interface { + // ReceiveReply waits for a reply to previously called Send() and returns + // the result (error or nil). + ReceiveReply() error +} diff --git a/clientv2/vpp/data_resync_api.go b/clientv2/vpp/data_resync_api.go new file mode 100644 index 0000000000..469bdc6452 --- /dev/null +++ b/clientv2/vpp/data_resync_api.go @@ -0,0 +1,70 @@ +// Copyright (c) 2017 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vppclient + +import ( + acl "github.com/ligato/vpp-agent/api/models/vpp/acl" + interfaces "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + ipsec "github.com/ligato/vpp-agent/api/models/vpp/ipsec" + l2 "github.com/ligato/vpp-agent/api/models/vpp/l2" + l3 "github.com/ligato/vpp-agent/api/models/vpp/l3" + nat "github.com/ligato/vpp-agent/api/models/vpp/nat" + punt "github.com/ligato/vpp-agent/api/models/vpp/punt" + stn "github.com/ligato/vpp-agent/api/models/vpp/stn" +) + +// DataResyncDSL defines the Domain Specific Language (DSL) for data RESYNC +// of the VPP configuration. +// Use this interface to make your implementation independent of the local +// and any remote client. 
+// Each method (apart from Send) returns the receiver, allowing the calls +// to be chained together conveniently in a single statement. +type DataResyncDSL interface { + // Interface adds interface to the RESYNC request. + Interface(intf *interfaces.Interface) DataResyncDSL + // ACL adds Access Control List to the RESYNC request. + ACL(acl *acl.ACL) DataResyncDSL + // BD adds Bridge Domain to the RESYNC request. + BD(bd *l2.BridgeDomain) DataResyncDSL + // BDFIB adds L2 Forwarding Information Base. + BDFIB(fib *l2.FIBEntry) DataResyncDSL + // XConnect adds Cross Connect to the RESYNC request. + XConnect(xcon *l2.XConnectPair) DataResyncDSL + // StaticRoute adds L3 Static Route to the RESYNC request. + StaticRoute(staticRoute *l3.Route) DataResyncDSL + // Arp adds VPP L3 ARP to the RESYNC request. + Arp(arp *l3.ARPEntry) DataResyncDSL + // ProxyArp adds L3 proxy ARP interfaces to the RESYNC request. + ProxyArp(proxyArp *l3.ProxyARP) DataResyncDSL + // IPScanNeighbor adds L3 IP Scan Neighbor to the RESYNC request. + IPScanNeighbor(ipScanNeigh *l3.IPScanNeighbor) DataResyncDSL + // StnRule adds Stn rule to the RESYNC request. + StnRule(stn *stn.Rule) DataResyncDSL + // NAT44Global adds global NAT44 configuration to the RESYNC request. + NAT44Global(nat *nat.Nat44Global) DataResyncDSL + // DNAT44 adds DNAT44 configuration to the RESYNC request + DNAT44(dnat *nat.DNat44) DataResyncDSL + // IPSecSA adds request to RESYNC a new Security Association + IPSecSA(sa *ipsec.SecurityAssociation) DataResyncDSL + // IPSecSPD adds request to RESYNC a new Security Policy Database + IPSecSPD(spd *ipsec.SecurityPolicyDatabase) DataResyncDSL + // PuntIPRedirect adds request to RESYNC a rule used to punt L3 traffic via interface. + PuntIPRedirect(val *punt.IPRedirect) DataResyncDSL + // PuntToHost adds request to RESYNC a rule used to punt L4 traffic to a host. + PuntToHost(val *punt.ToHost) DataResyncDSL + + // Send propagates the RESYNC request to the plugins. 
+ Send() Reply +} diff --git a/clientv2/vpp/dbadapter/data_change_db.go b/clientv2/vpp/dbadapter/data_change_db.go new file mode 100644 index 0000000000..3168d07a70 --- /dev/null +++ b/clientv2/vpp/dbadapter/data_change_db.go @@ -0,0 +1,296 @@ +// Copyright (c) 2017 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbadapter + +import ( + "github.com/ligato/cn-infra/db/keyval" + "github.com/ligato/vpp-agent/pkg/models" + + acl "github.com/ligato/vpp-agent/api/models/vpp/acl" + intf "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + ipsec "github.com/ligato/vpp-agent/api/models/vpp/ipsec" + l2 "github.com/ligato/vpp-agent/api/models/vpp/l2" + l3 "github.com/ligato/vpp-agent/api/models/vpp/l3" + nat "github.com/ligato/vpp-agent/api/models/vpp/nat" + punt "github.com/ligato/vpp-agent/api/models/vpp/punt" + stn "github.com/ligato/vpp-agent/api/models/vpp/stn" + "github.com/ligato/vpp-agent/clientv2/vpp" +) + +// NewDataChangeDSL returns a new instance of DataChangeDSL which implements +// the data change DSL for VPP config. +// Transaction is used to propagate changes to plugins. +func NewDataChangeDSL(txn keyval.ProtoTxn) *DataChangeDSL { + return &DataChangeDSL{txn: txn} +} + +// DataChangeDSL is an implementation of Domain Specific Language (DSL) +// for changes of the VPP configuration. +type DataChangeDSL struct { + txn keyval.ProtoTxn +} + +// PutDSL implements put operations of data change DSL. 
+type PutDSL struct { + parent *DataChangeDSL +} + +// DeleteDSL implements delete operations of data change DSL. +type DeleteDSL struct { + parent *DataChangeDSL +} + +// Put initiates a chained sequence of data change DSL statements declaring +// new configurable objects or changing existing ones. +func (dsl *DataChangeDSL) Put() vppclient.PutDSL { + return &PutDSL{dsl} +} + +// Delete initiates a chained sequence of data change DSL statements +// removing existing configurable objects. +func (dsl *DataChangeDSL) Delete() vppclient.DeleteDSL { + return &DeleteDSL{dsl} +} + +// Send propagates requested changes to the plugins. +func (dsl *DataChangeDSL) Send() vppclient.Reply { + err := dsl.txn.Commit() + return &Reply{err} +} + +// Interface adds a request to create or update VPP network interface. +func (dsl *PutDSL) Interface(val *intf.Interface) vppclient.PutDSL { + dsl.parent.txn.Put(intf.InterfaceKey(val.Name), val) + return dsl +} + +// ACL adds a request to create or update VPP Access Control List. +func (dsl *PutDSL) ACL(val *acl.ACL) vppclient.PutDSL { + dsl.parent.txn.Put(acl.Key(val.Name), val) + return dsl +} + +// BD adds a request to create or update VPP Bridge Domain. +func (dsl *PutDSL) BD(val *l2.BridgeDomain) vppclient.PutDSL { + dsl.parent.txn.Put(l2.BridgeDomainKey(val.Name), val) + return dsl +} + +// BDFIB adds a request to create or update VPP L2 Forwarding Information Base. +func (dsl *PutDSL) BDFIB(val *l2.FIBEntry) vppclient.PutDSL { + dsl.parent.txn.Put(l2.FIBKey(val.BridgeDomain, val.PhysAddress), val) + return dsl +} + +// XConnect adds a request to create or update VPP Cross Connect. +func (dsl *PutDSL) XConnect(val *l2.XConnectPair) vppclient.PutDSL { + dsl.parent.txn.Put(l2.XConnectKey(val.ReceiveInterface), val) + return dsl +} + +// StaticRoute adds a request to create or update VPP L3 Static Route. 
+func (dsl *PutDSL) StaticRoute(val *l3.Route) vppclient.PutDSL { + dsl.parent.txn.Put(l3.RouteKey(val.VrfId, val.DstNetwork, val.NextHopAddr), val) + return dsl +} + +// Arp adds a request to create or update VPP L3 ARP entry. +func (dsl *PutDSL) Arp(arp *l3.ARPEntry) vppclient.PutDSL { + dsl.parent.txn.Put(l3.ArpEntryKey(arp.Interface, arp.IpAddress), arp) + return dsl +} + +// ProxyArp adds a request to create or update VPP L3 proxy ARP. +func (dsl *PutDSL) ProxyArp(proxyArp *l3.ProxyARP) vppclient.PutDSL { + dsl.parent.txn.Put(models.Key(&l3.ProxyARP{}), proxyArp) + return dsl +} + +// IPScanNeighbor adds L3 IP Scan Neighbor to the RESYNC request. +func (dsl *PutDSL) IPScanNeighbor(ipScanNeigh *l3.IPScanNeighbor) vppclient.PutDSL { + dsl.parent.txn.Put(models.Key(&l3.IPScanNeighbor{}), ipScanNeigh) + return dsl +} + +// StnRule adds a request to create or update STN rule. +func (dsl *PutDSL) StnRule(val *stn.Rule) vppclient.PutDSL { + dsl.parent.txn.Put(stn.Key(val.Interface, val.IpAddress), val) + return dsl +} + +// NAT44Global adds a request to set global configuration for NAT44 +func (dsl *PutDSL) NAT44Global(nat44 *nat.Nat44Global) vppclient.PutDSL { + dsl.parent.txn.Put(models.Key(&nat.Nat44Global{}), nat44) + return dsl +} + +// DNAT44 adds a request to create or update DNAT44 configuration +func (dsl *PutDSL) DNAT44(nat44 *nat.DNat44) vppclient.PutDSL { + dsl.parent.txn.Put(nat.DNAT44Key(nat44.Label), nat44) + return dsl +} + +// IPSecSA adds request to create a new Security Association +func (dsl *PutDSL) IPSecSA(sa *ipsec.SecurityAssociation) vppclient.PutDSL { + dsl.parent.txn.Put(ipsec.SAKey(sa.Index), sa) + return dsl +} + +// IPSecSPD adds request to create a new Security Policy Database +func (dsl *PutDSL) IPSecSPD(spd *ipsec.SecurityPolicyDatabase) vppclient.PutDSL { + dsl.parent.txn.Put(ipsec.SPDKey(spd.Index), spd) + return dsl +} + +// PuntIPRedirect adds request to create or update rule to punt L3 traffic via interface. 
+func (dsl *PutDSL) PuntIPRedirect(val *punt.IPRedirect) vppclient.PutDSL { + dsl.parent.txn.Put(punt.IPRedirectKey(val.L3Protocol, val.TxInterface), val) + return dsl +} + +// PuntToHost adds request to create or update rule to punt L4 traffic to a host. +func (dsl *PutDSL) PuntToHost(val *punt.ToHost) vppclient.PutDSL { + dsl.parent.txn.Put(punt.ToHostKey(val.L3Protocol, val.L4Protocol, val.Port), val) + return dsl +} + +// Delete changes the DSL mode to allow removal of an existing configuration. +func (dsl *PutDSL) Delete() vppclient.DeleteDSL { + return &DeleteDSL{dsl.parent} +} + +// Send propagates requested changes to the plugins. +func (dsl *PutDSL) Send() vppclient.Reply { + return dsl.parent.Send() +} + +// Interface adds a request to delete an existing VPP network interface. +func (dsl *DeleteDSL) Interface(interfaceName string) vppclient.DeleteDSL { + dsl.parent.txn.Delete(intf.InterfaceKey(interfaceName)) + return dsl +} + +// ACL adds a request to delete an existing VPP Access Control List. +func (dsl *DeleteDSL) ACL(aclName string) vppclient.DeleteDSL { + dsl.parent.txn.Delete(acl.Key(aclName)) + return dsl +} + +// BD adds a request to delete an existing VPP Bridge Domain. +func (dsl *DeleteDSL) BD(bdName string) vppclient.DeleteDSL { + dsl.parent.txn.Delete(l2.BridgeDomainKey(bdName)) + return dsl +} + +// BDFIB adds a request to delete an existing VPP L2 Forwarding Information +// Base. +func (dsl *DeleteDSL) BDFIB(bdName string, mac string) vppclient.DeleteDSL { + dsl.parent.txn.Delete(l2.FIBKey(bdName, mac)) + return dsl +} + +// XConnect adds a request to delete an existing VPP Cross Connect. +func (dsl *DeleteDSL) XConnect(rxIfName string) vppclient.DeleteDSL { + dsl.parent.txn.Delete(l2.XConnectKey(rxIfName)) + return dsl +} + +// StaticRoute adds a request to delete an existing VPP L3 Static Route. 
+func (dsl *DeleteDSL) StaticRoute(vrf uint32, dstAddr string, nextHopAddr string) vppclient.DeleteDSL { + dsl.parent.txn.Delete(l3.RouteKey(vrf, dstAddr, nextHopAddr)) + return dsl +} + +// Arp adds a request to delete an existing VPP L3 ARP entry. +func (dsl *DeleteDSL) Arp(ifaceName string, ipAddr string) vppclient.DeleteDSL { + dsl.parent.txn.Delete(l3.ArpEntryKey(ifaceName, ipAddr)) + return dsl +} + +// ProxyArp adds a request to delete an existing VPP L3 proxy ARP. +func (dsl *DeleteDSL) ProxyArp() vppclient.DeleteDSL { + dsl.parent.txn.Delete(models.Key(&l3.ProxyARP{})) + return dsl +} + +// IPScanNeighbor adds a request to delete an existing VPP L3 IP Scan Neighbor. +func (dsl *DeleteDSL) IPScanNeighbor() vppclient.DeleteDSL { + dsl.parent.txn.Delete(models.Key(&l3.IPScanNeighbor{})) + return dsl +} + +// StnRule adds request to delete Stn rule. +func (dsl *DeleteDSL) StnRule(iface, addr string) vppclient.DeleteDSL { + dsl.parent.txn.Delete(stn.Key(iface, addr)) + return dsl +} + +// NAT44Global adds a request to remove global configuration for NAT44 +func (dsl *DeleteDSL) NAT44Global() vppclient.DeleteDSL { + dsl.parent.txn.Delete(models.Key(&nat.Nat44Global{})) + return dsl +} + +// DNAT44 adds a request to delete an existing DNAT44 configuration +func (dsl *DeleteDSL) DNAT44(label string) vppclient.DeleteDSL { + dsl.parent.txn.Delete(nat.DNAT44Key(label)) + return dsl +} + +// IPSecSA adds request to delete an existing Security Association +func (dsl *DeleteDSL) IPSecSA(saIndex string) vppclient.DeleteDSL { + dsl.parent.txn.Delete(ipsec.SAKey(saIndex)) + return dsl +} + +// IPSecSPD adds request to delete an existing Security Policy Database +func (dsl *DeleteDSL) IPSecSPD(spdIndex string) vppclient.DeleteDSL { + dsl.parent.txn.Delete(ipsec.SPDKey(spdIndex)) + return dsl +} + +// PuntIPRedirect adds request to delete a rule used to punt L3 traffic via interface. 
+func (dsl *DeleteDSL) PuntIPRedirect(l3Proto punt.L3Protocol, txInterface string) vppclient.DeleteDSL { + dsl.parent.txn.Delete(punt.IPRedirectKey(l3Proto, txInterface)) + return dsl +} + +// PuntToHost adds request to delete a rule used to punt L4 traffic to a host. +func (dsl *DeleteDSL) PuntToHost(l3Proto punt.L3Protocol, l4Proto punt.L4Protocol, port uint32) vppclient.DeleteDSL { + dsl.parent.txn.Delete(punt.ToHostKey(l3Proto, l4Proto, port)) + return dsl +} + +// Put changes the DSL mode to allow configuration editing. +func (dsl *DeleteDSL) Put() vppclient.PutDSL { + return &PutDSL{dsl.parent} +} + +// Send propagates requested changes to the plugins. +func (dsl *DeleteDSL) Send() vppclient.Reply { + return dsl.parent.Send() +} + +// Reply interface allows to wait for a reply to previously called Send() and +// extract the result from it (success/error). +type Reply struct { + err error +} + +// ReceiveReply waits for a reply to previously called Send() and returns +// the result (error or nil). +func (dsl Reply) ReceiveReply() error { + return dsl.err +} diff --git a/clientv2/vpp/dbadapter/data_resync_db.go b/clientv2/vpp/dbadapter/data_resync_db.go new file mode 100644 index 0000000000..f2575ea013 --- /dev/null +++ b/clientv2/vpp/dbadapter/data_resync_db.go @@ -0,0 +1,259 @@ +// Copyright (c) 2017 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dbadapter + +import ( + "github.com/ligato/cn-infra/db/keyval" + "github.com/ligato/vpp-agent/pkg/models" + + acl "github.com/ligato/vpp-agent/api/models/vpp/acl" + intf "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + ipsec "github.com/ligato/vpp-agent/api/models/vpp/ipsec" + l2 "github.com/ligato/vpp-agent/api/models/vpp/l2" + l3 "github.com/ligato/vpp-agent/api/models/vpp/l3" + nat "github.com/ligato/vpp-agent/api/models/vpp/nat" + punt "github.com/ligato/vpp-agent/api/models/vpp/punt" + stn "github.com/ligato/vpp-agent/api/models/vpp/stn" + "github.com/ligato/vpp-agent/clientv2/vpp" +) + +// NewDataResyncDSL returns a new instance of DataResyncDSL which implements +// the data RESYNC DSL for VPP configuration. +// Transaction is used to propagate changes to plugins. +// Function is used to list keys with already existing configuration. +func NewDataResyncDSL(txn keyval.ProtoTxn, listKeys func(prefix string) (keyval.ProtoKeyIterator, error)) *DataResyncDSL { + return &DataResyncDSL{txn, []string{}, listKeys} +} + +// DataResyncDSL is an implementation of Domain Specific Language (DSL) for data +// RESYNC of VPP configuration. +type DataResyncDSL struct { + txn keyval.ProtoTxn + txnKeys []string + listKeys func(prefix string) (keyval.ProtoKeyIterator, error) +} + +// Interface adds VPP interface to the RESYNC request. +func (dsl *DataResyncDSL) Interface(val *intf.Interface) vppclient.DataResyncDSL { + key := intf.InterfaceKey(val.Name) + dsl.txn.Put(key, val) + dsl.txnKeys = append(dsl.txnKeys, key) + + return dsl +} + +// ACL adds Access Control List to the RESYNC request. +func (dsl *DataResyncDSL) ACL(val *acl.ACL) vppclient.DataResyncDSL { + key := acl.Key(val.Name) + dsl.txn.Put(key, val) + dsl.txnKeys = append(dsl.txnKeys, key) + + return dsl +} + +// BD adds Bridge Domain to the RESYNC request. 
+func (dsl *DataResyncDSL) BD(val *l2.BridgeDomain) vppclient.DataResyncDSL { + key := l2.BridgeDomainKey(val.Name) + dsl.txn.Put(key, val) + dsl.txnKeys = append(dsl.txnKeys, key) + + return dsl +} + +// BDFIB adds Bridge Domain to the RESYNC request. +func (dsl *DataResyncDSL) BDFIB(val *l2.FIBEntry) vppclient.DataResyncDSL { + key := l2.FIBKey(val.BridgeDomain, val.PhysAddress) + dsl.txn.Put(key, val) + dsl.txnKeys = append(dsl.txnKeys, key) + + return dsl +} + +// XConnect adds Cross Connect to the RESYNC request. +func (dsl *DataResyncDSL) XConnect(val *l2.XConnectPair) vppclient.DataResyncDSL { + key := l2.XConnectKey(val.ReceiveInterface) + dsl.txn.Put(key, val) + dsl.txnKeys = append(dsl.txnKeys, key) + + return dsl +} + +// StaticRoute adds L3 Static Route to the RESYNC request. +func (dsl *DataResyncDSL) StaticRoute(val *l3.Route) vppclient.DataResyncDSL { + key := l3.RouteKey(val.VrfId, val.DstNetwork, val.NextHopAddr) + dsl.txn.Put(key, val) + dsl.txnKeys = append(dsl.txnKeys, key) + + return dsl +} + +// ProxyArp adds L3 proxy ARP to the RESYNC request. +func (dsl *DataResyncDSL) ProxyArp(proxyArp *l3.ProxyARP) vppclient.DataResyncDSL { + key := models.Key(&l3.ProxyARP{}) + dsl.txn.Put(key, proxyArp) + dsl.txnKeys = append(dsl.txnKeys, key) + + return dsl +} + +// Arp adds L3 ARP entry to the RESYNC request. +func (dsl *DataResyncDSL) Arp(val *l3.ARPEntry) vppclient.DataResyncDSL { + key := l3.ArpEntryKey(val.Interface, val.IpAddress) + dsl.txn.Put(key, val) + dsl.txnKeys = append(dsl.txnKeys, key) + + return dsl +} + +// IPScanNeighbor adds L3 IP Scan Neighbor to the RESYNC request. +func (dsl *DataResyncDSL) IPScanNeighbor(ipScanNeigh *l3.IPScanNeighbor) vppclient.DataResyncDSL { + key := models.Key(&l3.IPScanNeighbor{}) + dsl.txn.Put(key, ipScanNeigh) + dsl.txnKeys = append(dsl.txnKeys, key) + + return dsl +} + +// StnRule adds Stn rule to the RESYNC request. 
+func (dsl *DataResyncDSL) StnRule(val *stn.Rule) vppclient.DataResyncDSL { + key := stn.Key(val.Interface, val.IpAddress) + dsl.txn.Put(key, val) + dsl.txnKeys = append(dsl.txnKeys, key) + + return dsl +} + +// NAT44Global adds global NAT44 configuration to the RESYNC request. +func (dsl *DataResyncDSL) NAT44Global(nat44 *nat.Nat44Global) vppclient.DataResyncDSL { + key := models.Key(&nat.Nat44Global{}) + dsl.txn.Put(key, nat44) + dsl.txnKeys = append(dsl.txnKeys, key) + + return dsl +} + +// DNAT44 adds DNAT44 configuration to the RESYNC request +func (dsl *DataResyncDSL) DNAT44(nat44 *nat.DNat44) vppclient.DataResyncDSL { + key := nat.DNAT44Key(nat44.Label) + dsl.txn.Put(key, nat44) + dsl.txnKeys = append(dsl.txnKeys, key) + + return dsl +} + +// IPSecSA adds request to create a new Security Association +func (dsl *DataResyncDSL) IPSecSA(sa *ipsec.SecurityAssociation) vppclient.DataResyncDSL { + key := ipsec.SAKey(sa.Index) + dsl.txn.Put(key, sa) + dsl.txnKeys = append(dsl.txnKeys, key) + + return dsl +} + +// IPSecSPD adds request to create a new Security Policy Database +func (dsl *DataResyncDSL) IPSecSPD(spd *ipsec.SecurityPolicyDatabase) vppclient.DataResyncDSL { + key := ipsec.SPDKey(spd.Index) + dsl.txn.Put(key, spd) + dsl.txnKeys = append(dsl.txnKeys, key) + + return dsl +} + +// PuntIPRedirect adds request to RESYNC a rule used to punt L3 traffic via interface. +func (dsl *DataResyncDSL) PuntIPRedirect(val *punt.IPRedirect) vppclient.DataResyncDSL { + key := punt.IPRedirectKey(val.L3Protocol, val.TxInterface) + dsl.txn.Put(key, val) + dsl.txnKeys = append(dsl.txnKeys, key) + + return dsl +} + +// PuntToHost adds request to RESYNC a rule used to punt L4 traffic to a host. 
+func (dsl *DataResyncDSL) PuntToHost(val *punt.ToHost) vppclient.DataResyncDSL { + key := punt.ToHostKey(val.L3Protocol, val.L4Protocol, val.Port) + dsl.txn.Put(key, val) + dsl.txnKeys = append(dsl.txnKeys, key) + + return dsl +} + +// AppendKeys is a helper function that fills the keySet with values +// pointed to by the iterator . +func appendKeys(keys *keySet, it keyval.ProtoKeyIterator) { + for { + k, _, stop := it.GetNext() + if stop { + break + } + + (*keys)[k] = nil + } +} + +// KeySet is a helper type that reuses map keys to store values as a set. +// The values of the map are nil. +type keySet map[string] /*key*/ interface{} /*nil*/ + +// Send propagates the request to the plugins. +// It deletes obsolete keys if listKeys() (from constructor) function is not nil. +func (dsl *DataResyncDSL) Send() vppclient.Reply { + + for dsl.listKeys != nil { + toBeDeleted := keySet{} + + // fill all known keys of one VPP: + + keys, err := dsl.listKeys(intf.ModelInterface.KeyPrefix()) + if err != nil { + break + } + appendKeys(&toBeDeleted, keys) + keys, err = dsl.listKeys(l2.ModelBridgeDomain.KeyPrefix()) + if err != nil { + break + } + appendKeys(&toBeDeleted, keys) + keys, err = dsl.listKeys(l2.ModelXConnectPair.KeyPrefix()) + if err != nil { + break + } + appendKeys(&toBeDeleted, keys) + keys, err = dsl.listKeys(l3.ModelRoute.KeyPrefix()) + if err != nil { + break + } + appendKeys(&toBeDeleted, keys) + keys, err = dsl.listKeys(l3.ModelARPEntry.KeyPrefix()) + if err != nil { + break + } + appendKeys(&toBeDeleted, keys) + + // remove keys that are part of the transaction + for _, txnKey := range dsl.txnKeys { + delete(toBeDeleted, txnKey) + } + + for delKey := range toBeDeleted { + dsl.txn.Delete(delKey) + } + + break + } + + err := dsl.txn.Commit() + + return &Reply{err: err} +} diff --git a/clientv1/vpp/dbadapter/doc.go b/clientv2/vpp/dbadapter/doc.go similarity index 100% rename from clientv1/vpp/dbadapter/doc.go rename to clientv2/vpp/dbadapter/doc.go diff --git 
a/clientv1/vpp/doc.go b/clientv2/vpp/doc.go similarity index 100% rename from clientv1/vpp/doc.go rename to clientv2/vpp/doc.go diff --git a/clientv1/vpp/localclient/doc.go b/clientv2/vpp/localclient/doc.go similarity index 100% rename from clientv1/vpp/localclient/doc.go rename to clientv2/vpp/localclient/doc.go diff --git a/clientv2/vpp/localclient/localclient_api.go b/clientv2/vpp/localclient/localclient_api.go new file mode 100644 index 0000000000..e8a57a06e0 --- /dev/null +++ b/clientv2/vpp/localclient/localclient_api.go @@ -0,0 +1,38 @@ +// Copyright (c) 2017 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package localclient + +import ( + "github.com/ligato/cn-infra/datasync/kvdbsync/local" + "github.com/ligato/vpp-agent/clientv2/vpp" + "github.com/ligato/vpp-agent/clientv2/vpp/dbadapter" +) + +// PluginID defines the name of VPP (vppplugin) localclient plugin. +//const PluginID core.PluginName = "DefaultVppPlugins_LOCAL_CLIENT" + +// DataResyncRequest allows creating a RESYNC request using convenient RESYNC +// DSL and sending it locally through go channels (i.e. without using Data Store). 
+func DataResyncRequest(caller string) vppclient.DataResyncDSL { + return dbadapter.NewDataResyncDSL(local.NewProtoTxn(local.Get().PropagateResync), + nil /*no need to list anything*/) +} + +// DataChangeRequest allows creating Data Change request(s) using convenient +// Data Change DSL and sending it locally through go channels (i.e. without using +// Data Store). +func DataChangeRequest(caller string) vppclient.DataChangeDSL { + return dbadapter.NewDataChangeDSL(local.NewProtoTxn(local.Get().PropagateChanges)) +} diff --git a/clientv1/vpp/localclient/localclient_init.go b/clientv2/vpp/localclient/localclient_init.go similarity index 100% rename from clientv1/vpp/localclient/localclient_init.go rename to clientv2/vpp/localclient/localclient_init.go diff --git a/clientv1/vpp/remoteclient/doc.go b/clientv2/vpp/remoteclient/doc.go similarity index 100% rename from clientv1/vpp/remoteclient/doc.go rename to clientv2/vpp/remoteclient/doc.go diff --git a/clientv2/vpp/remoteclient/remoteclient_api.go b/clientv2/vpp/remoteclient/remoteclient_api.go new file mode 100644 index 0000000000..43647f3e31 --- /dev/null +++ b/clientv2/vpp/remoteclient/remoteclient_api.go @@ -0,0 +1,55 @@ +// Copyright (c) 2017 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package remoteclient + +import ( + "github.com/ligato/cn-infra/db/keyval" + "github.com/ligato/vpp-agent/clientv2/vpp" + "github.com/ligato/vpp-agent/clientv2/vpp/dbadapter" + //"github.com/ligato/vpp-agent/clientv2/vpp/grpcadapter" + //"github.com/ligato/vpp-agent/plugins/vpp/model/rpc" +) + +// DataResyncRequestDB allows creating a RESYNC request using convenient RESYNC +// DSL and sending it through the provided broker. +// User of the API does not need to be aware of keys. +// User of the API does not need to delete the obsolete objects/keys +// prior to RESYNC - it is handled by DataResyncDSL. +func DataResyncRequestDB(broker keyval.ProtoBroker) vppclient.DataResyncDSL { + return dbadapter.NewDataResyncDSL(broker.NewTxn(), broker.ListKeys) +} + +// DataChangeRequestDB allows creating Data Change requests using convenient +// Data Change DSL and sending it through the provided broker. +// User of the API does not need to be aware of keys. +func DataChangeRequestDB(broker keyval.ProtoBroker) vppclient.DataChangeDSL { + return dbadapter.NewDataChangeDSL(broker.NewTxn()) +} + +// TODO: GRPC TBD +/* +// DataResyncRequestGRPC allows sending RESYNC requests conveniently. +// User of the API does not need to be aware of keys. +// User of the API does not need to delete the obsolete objects/keys during RESYNC. +func DataResyncRequestGRPC(client rpc.DataResyncServiceClient) vppclient.DataResyncDSL { + return grpcadapter.NewDataResyncDSL(client) +} + +// DataChangeRequestGRPC allows sending Data Change requests conveniently (even without directly using Broker). +// User of the API does not need to be aware of keys. 
+func DataChangeRequestGRPC(client rpc.DataChangeServiceClient) vppclient.DataChangeDSL { + return grpcadapter.NewDataChangeDSL(client) +} +*/ diff --git a/cmd/agentctl/cmd/clean_cmd.go b/cmd/agentctl/cmd/clean_cmd.go index cde71d860b..3cfd2aa72e 100644 --- a/cmd/agentctl/cmd/clean_cmd.go +++ b/cmd/agentctl/cmd/clean_cmd.go @@ -14,6 +14,7 @@ package cmd +/* import ( "errors" "fmt" @@ -29,8 +30,7 @@ import ( "github.com/ligato/vpp-agent/cmd/agentctl/utils" "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" "github.com/ligato/vpp-agent/plugins/vpp/model/l2" - "github.com/ligato/vpp-agent/plugins/vpp/model/l3" -) + "github.com/ligato/vpp-agent/plugins/vpp/model/l3") const dataTypeFlagName = "dataType" @@ -66,7 +66,7 @@ for all agents will be deleted.`, $ agentctl clean vpp1 -dataType %s,%s Delete all data for all agents (no filter): $ agentctl clean`, - status.StatusPrefix, status.StatusPrefix, interfaces.Prefix), + status.StatusPrefix, status.StatusPrefix , interfaces.Prefix), Run: cleanFunc, } @@ -119,3 +119,4 @@ func cleanFunc(cmd *cobra.Command, args []string) { } } +*/ diff --git a/cmd/agentctl/cmd/interface_cmd.go b/cmd/agentctl/cmd/interface_cmd.go index 2c87421c19..565e70333f 100644 --- a/cmd/agentctl/cmd/interface_cmd.go +++ b/cmd/agentctl/cmd/interface_cmd.go @@ -14,6 +14,7 @@ package cmd +/* import ( "github.com/ligato/vpp-agent/cmd/agentctl/impl" "github.com/ligato/vpp-agent/cmd/agentctl/utils" @@ -250,3 +251,4 @@ func initIfJSONCmd() { putInterfaceCommand.AddCommand(putIfJSONCmd) } +*/ diff --git a/cmd/agentctl/cmd/l2_cmd.go b/cmd/agentctl/cmd/l2_cmd.go index 3ecf14de7a..75183e4937 100644 --- a/cmd/agentctl/cmd/l2_cmd.go +++ b/cmd/agentctl/cmd/l2_cmd.go @@ -14,12 +14,7 @@ package cmd -import ( - "github.com/ligato/vpp-agent/cmd/agentctl/impl" - "github.com/ligato/vpp-agent/cmd/agentctl/utils" - "github.com/spf13/cobra" -) - +/* // 'putBridgeDomain' command can be used to put bridge domain configuration to etcd. 
This command can be used // with attribute flags (forward, learn, etc.) in order to change BD attributes // and it can also manipulate inner BD configuration (attach or remove interfaces, add or remove @@ -240,3 +235,4 @@ func init() { l2FibEntryCommand.Flags().BoolVarP(&l2FibEntryFlags.IsDelete, utils.IsDelete, "D", false, "Delete FIB entry") } +*/ diff --git a/cmd/agentctl/impl/bridge_domains.go b/cmd/agentctl/impl/bridge_domains.go index fd6bc7c501..c898b2e119 100644 --- a/cmd/agentctl/impl/bridge_domains.go +++ b/cmd/agentctl/impl/bridge_domains.go @@ -14,6 +14,7 @@ package impl +/* import ( "errors" @@ -253,3 +254,4 @@ func AddBridgeDomainFlags(cmd *cobra.Command) { cmd.Flags().Uint32VarP(&bdCommonFields.MacAge, "mac", "", 0, "MAC aging time in min, 0 for disabled aging ") } +*/ diff --git a/cmd/agentctl/impl/interfaces.go b/cmd/agentctl/impl/interfaces.go index fe4ba30df6..68355f3348 100644 --- a/cmd/agentctl/impl/interfaces.go +++ b/cmd/agentctl/impl/interfaces.go @@ -21,8 +21,8 @@ import ( "errors" "os" + interfaces "github.com/ligato/vpp-agent/api/models/vpp/interfaces" "github.com/ligato/vpp-agent/cmd/agentctl/utils" - "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" "github.com/spf13/cobra" ) @@ -41,38 +41,39 @@ type IfaceCommonFields struct { var ifCommonFields IfaceCommonFields // PutAfPkt creates an Af-packet type interface. -func PutAfPkt(endpoints []string, label string, flags *interfaces.Interfaces_Interface_Afpacket) { +func PutAfPkt(endpoints []string, label string, flags *interfaces.AfpacketLink) { found, key, ifc, db := utils.GetInterfaceKeyAndValue(endpoints, label, ifCommonFields.Name) - processCommonIfFlags(found, interfaces.InterfaceType_AF_PACKET_INTERFACE, ifc) + processCommonIfFlags(found, interfaces.Interface_AF_PACKET, ifc) // Process Af-Packet specific flags. 
- if flags.HostIfName != "" { + /*if flags.HostIfName != "" { if ifc.Afpacket == nil { - ifc.Afpacket = &interfaces.Interfaces_Interface_Afpacket{} + ifc.Afpacket = &interfaces.AfpacketLink{} } ifc.Afpacket.HostIfName = flags.HostIfName - } + }*/ utils.WriteInterfaceToDb(db, key, ifc) } // PutEthernet creates an ethernet type interface. func PutEthernet(endpoints []string, label string) { found, key, ifc, db := utils.GetInterfaceKeyAndValue(endpoints, label, ifCommonFields.Name) - processCommonIfFlags(found, interfaces.InterfaceType_ETHERNET_CSMACD, ifc) + processCommonIfFlags(found, interfaces.Interface_DPDK, ifc) utils.WriteInterfaceToDb(db, key, ifc) } // PutLoopback creates a loopback type interface. func PutLoopback(endpoints []string, label string) { found, key, ifc, db := utils.GetInterfaceKeyAndValue(endpoints, label, ifCommonFields.Name) - processCommonIfFlags(found, interfaces.InterfaceType_SOFTWARE_LOOPBACK, ifc) + processCommonIfFlags(found, interfaces.Interface_SOFTWARE_LOOPBACK, ifc) utils.WriteInterfaceToDb(db, key, ifc) } +/* // PutMemif creates a memif type interface. -func PutMemif(endpoints []string, label string, flags *interfaces.Interfaces_Interface_Memif) { +func PutMemif(endpoints []string, label string, flags *interfaces.MemifLink) { found, key, ifc, db := utils.GetInterfaceKeyAndValue(endpoints, label, ifCommonFields.Name) - processCommonIfFlags(found, interfaces.InterfaceType_MEMORY_INTERFACE, ifc) + processCommonIfFlags(found, interfaces.Interface_MEMIF, ifc) // Process MEMIF-specific flags. if utils.IsFlagPresent(utils.MemifMaster) { @@ -173,7 +174,7 @@ func PutVxLan(endpoints []string, label string, flags *interfaces.Interfaces_Int } utils.WriteInterfaceToDb(db, key, ifc) } - +*/ // IfJSONPut creates an interface according to json configuration. 
func IfJSONPut(endpoints []string, label string) { bio := bufio.NewReader(os.Stdin) @@ -181,7 +182,7 @@ func IfJSONPut(endpoints []string, label string) { buf.ReadFrom(bio) input := buf.Bytes() - ifc := &interfaces.Interfaces_Interface{} + ifc := &interfaces.Interface{} err := json.Unmarshal(input, ifc) if err != nil { utils.ExitWithError(utils.ExitInvalidInput, errors.New("Invalid json, error "+err.Error())) @@ -219,25 +220,25 @@ func AddCommonIfPutFlags(cmd *cobra.Command) { cmd.Flags().StringSliceVar(&ifCommonFields.Ipv6Addrs, "ipv6-addr", nil, "Comma-separated list of IPv6 addresses in CIDR format, e.g. 2001:cdba::3257:9652/48") } -func processCommonIfFlags(found bool, ifType interfaces.InterfaceType, ifc *interfaces.Interfaces_Interface) *interfaces.Interfaces_Interface { +func processCommonIfFlags(found bool, ifType interfaces.Interface_Type, ifc *interfaces.Interface) *interfaces.Interface { if found && ifc.Type != ifType { utils.ExitWithError(utils.ExitInvalidInput, errors.New("Bad type for interface '"+ifCommonFields.Name+ "'. Interface with this name but a different type already exists.")) } - if ifType == interfaces.InterfaceType_TAP_INTERFACE { + /*if ifType == interfaces.InterfaceType_TAP_INTERFACE { ifc.Tap = &interfaces.Interfaces_Interface_Tap{HostIfName: ifCommonFields.Name} - } + }*/ // Set in case interface is empty. 
ifc.Name = ifCommonFields.Name ifc.Type = ifType ifc.Enabled = ifCommonFields.Enabled - if ifCommonFields.Desc != "" { + /*if ifCommonFields.Desc != "" { ifc.Description = ifCommonFields.Desc - } + }*/ if ifCommonFields.PhysAddr != "" { utils.ValidatePhyAddr(ifCommonFields.PhysAddr) ifc.PhysAddress = ifCommonFields.PhysAddr diff --git a/cmd/agentctl/testing/data.go b/cmd/agentctl/testing/data.go index 0f16bd17c6..caa3d8cccb 100644 --- a/cmd/agentctl/testing/data.go +++ b/cmd/agentctl/testing/data.go @@ -14,6 +14,7 @@ package testing +/* import ( "strconv" @@ -176,3 +177,4 @@ func JSONData() utils.EtcdDump { return etcdDump } +*/ diff --git a/cmd/agentctl/utils/common_utils.go b/cmd/agentctl/utils/common_utils.go index adf0ade3d0..0d5023a9f1 100644 --- a/cmd/agentctl/utils/common_utils.go +++ b/cmd/agentctl/utils/common_utils.go @@ -26,7 +26,7 @@ import ( "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" "github.com/ligato/cn-infra/servicelabel" - "github.com/ligato/vpp-agent/plugins/vpp/model/l3" + //"github.com/ligato/vpp-agent/plugins/vpp/model/l3" ) // Common exit flags @@ -129,14 +129,14 @@ func ParseKey(key string) (label string, dataType string, name string, plugStatC return label, dataType, rebuildName(params), plugStatCfgRev } // Recognize static route. - if len(ps) > 6 && ps[4] == "vrf" && ps[6] == "fib" { + /*if len(ps) > 6 && ps[4] == "vrf" && ps[6] == "fib" { dataType += "/" + strings.TrimPrefix(l3.RoutesPrefix, l3.VrfPrefix) if len(ps) > 7 { params = append(params, ps[7:]...) 
} return label, dataType, rebuildName(params), plugStatCfgRev - } + }*/ dataType += "/" params = ps[5:] } else { diff --git a/cmd/agentctl/utils/common_utils_test.go b/cmd/agentctl/utils/common_utils_test.go index 0032dd49d1..a93520e6cc 100644 --- a/cmd/agentctl/utils/common_utils_test.go +++ b/cmd/agentctl/utils/common_utils_test.go @@ -14,6 +14,7 @@ package utils_test +/* import ( "testing" @@ -152,3 +153,4 @@ func Test10ParseKeyVrf(t *testing.T) { gomega.Expect(dataType).To(gomega.BeEquivalentTo(l3.VrfPrefix)) gomega.Expect(params).To(gomega.BeEquivalentTo("vrf1")) } +*/ diff --git a/cmd/agentctl/utils/db_utils.go b/cmd/agentctl/utils/db_utils.go index 3ee9ef44f1..8e091e1e2e 100644 --- a/cmd/agentctl/utils/db_utils.go +++ b/cmd/agentctl/utils/db_utils.go @@ -24,9 +24,9 @@ import ( "github.com/ligato/cn-infra/db/keyval" "github.com/ligato/cn-infra/health/statuscheck/model/status" - "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/model/l2" - "github.com/ligato/vpp-agent/plugins/vpp/model/l3" + interfaces "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + l2 "github.com/ligato/vpp-agent/api/models/vpp/l2" + l3 "github.com/ligato/vpp-agent/api/models/vpp/l3" ) // VppMetaData defines the etcd metadata. @@ -46,28 +46,28 @@ type InterfaceWithMD struct { // and its etcd metadata. type IfConfigWithMD struct { Metadata VppMetaData - Interface *interfaces.Interfaces_Interface + Interface *interfaces.Interface } // IfStateWithMD contains a data record for interface state and its // etcd metadata. type IfStateWithMD struct { Metadata VppMetaData - InterfaceState *interfaces.InterfacesState_Interface + InterfaceState *interfaces.InterfaceState } // InterfaceErrorWithMD contains a data record for interface errors and its // etcd metadata. 
type InterfaceErrorWithMD struct { VppMetaData - InterfaceErrorList []*interfaces.InterfaceErrors_Interface + //InterfaceErrorList []*interfaces.InterfaceErrors_Interface } // BridgeDomainErrorWithMD contains a data record for bridge domain errors and its // etcd metadata. type BridgeDomainErrorWithMD struct { VppMetaData - BdErrorList []*l2.BridgeDomainErrors_BridgeDomain + BdErrorList []*l2.BridgeDomain } // BdWithMD contains a Bridge Domain data record and its etcd @@ -81,35 +81,35 @@ type BdWithMD struct { // metadata. type BdConfigWithMD struct { Metadata VppMetaData - BridgeDomain *l2.BridgeDomains_BridgeDomain + BridgeDomain *l2.BridgeDomain } // BdStateWithMD contains a Bridge Domain state data record and its etcd // metadata. type BdStateWithMD struct { - Metadata VppMetaData - BridgeDomainState *l2.BridgeDomainState_BridgeDomain + Metadata VppMetaData + //BridgeDomainState *l2.BridgeDomainState_BridgeDomain } // FibTableWithMD contains an FIB table data record and its etcd // metadata. type FibTableWithMD struct { VppMetaData - FibTable []*l2.FibTable_FibEntry + FibTable []*l2.FIBEntry } // XconnectWithMD contains an l2 cross-Connect data record and its // etcd metadata. type XconnectWithMD struct { VppMetaData - *l2.XConnectPairs_XConnectPair + *l2.XConnectPair } // StaticRoutesWithMD contains a static route data record and its // etcd metadata. 
type StaticRoutesWithMD struct { VppMetaData - Routes []*l3.StaticRoutes_Route + Routes []*l3.Route } // VppStatusWithMD contains a VPP Status data record and its etcd @@ -185,7 +185,7 @@ func (ed EtcdDump) ReadDataFromDb(db keyval.ProtoBroker, key string, return false, nil } - vd, ok := ed[label] + /*vd, ok := ed[label] if !ok { vd = newVppDataRecord() } @@ -208,7 +208,7 @@ func (ed EtcdDump) ReadDataFromDb(db keyval.ProtoBroker, key string, ed[label], err = readXconnectFromDb(db, vd, key, params) case l3.RoutesPrefix: ed[label], err = readRoutesFromDb(db, vd, key) - } + }*/ return true, err } @@ -226,7 +226,7 @@ func isItemAllowed(item string, filter []string) bool { } func readIfConfigFromDb(db keyval.ProtoBroker, vd *VppData, key string, name string) (*VppData, error) { - ifc := &interfaces.Interfaces_Interface{} + ifc := &interfaces.Interface{} if name == "" { fmt.Printf("WARNING: Invalid interface Key '%s'\n", key) return vd, nil @@ -243,7 +243,7 @@ func readIfConfigFromDb(db keyval.ProtoBroker, vd *VppData, key string, name str } func readIfStateFromDb(db keyval.ProtoBroker, vd *VppData, key string, name string) (*VppData, error) { - ifs := &interfaces.InterfacesState_Interface{} + ifs := &interfaces.InterfaceState{} if name == "" { fmt.Printf("WARNING: Invalid ifstate Key '%s'\n", key) return vd, nil @@ -258,6 +258,7 @@ func readIfStateFromDb(db keyval.ProtoBroker, vd *VppData, key string, name stri return vd, err } +/* func readInterfaceErrorFromDb(db keyval.ProtoBroker, vd *VppData, key string, name string) (*VppData, error) { ife := &interfaces.InterfaceErrors_Interface{} if name == "" { @@ -275,13 +276,13 @@ func readInterfaceErrorFromDb(db keyval.ProtoBroker, vd *VppData, key string, na return vd, err } - +*/ func readBdConfigFromDb(db keyval.ProtoBroker, vd *VppData, key string, name string) (*VppData, error) { if name == "" { fmt.Printf("WARNING: Invalid bridge domain config Key '%s'\n", key) return vd, nil } - bd := &l2.BridgeDomains_BridgeDomain{} 
+ bd := &l2.BridgeDomain{} found, rev, err := readDataFromDb(db, key, bd) if found && err == nil { vd.BridgeDomains[name] = BdWithMD{ @@ -292,6 +293,7 @@ func readBdConfigFromDb(db keyval.ProtoBroker, vd *VppData, key string, name str return vd, err } +/* func readBdStateFromDb(db keyval.ProtoBroker, vd *VppData, key string, name string) (*VppData, error) { if name == "" { fmt.Printf("WARNING: Invalid bridge domain state Key '%s'\n", key) @@ -325,9 +327,9 @@ func readBdErrorFromDb(db keyval.ProtoBroker, vd *VppData, key string, name stri return vd, err } - +*/ func readFibFromDb(db keyval.ProtoBroker, vd *VppData, key string) (*VppData, error) { - fibEntry := &l2.FibTable_FibEntry{} + fibEntry := &l2.FIBEntry{} found, rev, err := readDataFromDb(db, key, fibEntry) if found && err == nil { fibTable := vd.FibTableEntries.FibTable @@ -343,7 +345,7 @@ func readXconnectFromDb(db keyval.ProtoBroker, vd *VppData, key string, name str fmt.Printf("WARNING: Invalid cross-connect Key '%s'\n", key) return vd, nil } - xc := &l2.XConnectPairs_XConnectPair{} + xc := &l2.XConnectPair{} found, rev, err := readDataFromDb(db, key, xc) if found && err == nil { vd.XConnectPairs[name] = @@ -353,7 +355,7 @@ func readXconnectFromDb(db keyval.ProtoBroker, vd *VppData, key string, name str } func readRoutesFromDb(db keyval.ProtoBroker, vd *VppData, key string) (*VppData, error) { - route := &l3.StaticRoutes_Route{} + route := &l3.Route{} found, rev, err := readDataFromDb(db, key, route) if found && err == nil { diff --git a/cmd/agentctl/utils/db_utils_test.go b/cmd/agentctl/utils/db_utils_test.go index 69cf673dfa..0ce0432497 100644 --- a/cmd/agentctl/utils/db_utils_test.go +++ b/cmd/agentctl/utils/db_utils_test.go @@ -14,6 +14,7 @@ package utils_test +/* import ( "testing" @@ -120,3 +121,4 @@ func getInterfaceStatus() *interfaces.InterfacesState_Interface { }, } } +*/ diff --git a/cmd/agentctl/utils/interface_utils.go b/cmd/agentctl/utils/interface_utils.go index 99f14c9fc7..798ace51fe 
100644 --- a/cmd/agentctl/utils/interface_utils.go +++ b/cmd/agentctl/utils/interface_utils.go @@ -22,7 +22,7 @@ import ( "strings" "github.com/ligato/cn-infra/db/keyval" - "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" + interfaces "github.com/ligato/vpp-agent/api/models/vpp/interfaces" ) // Interface flag names @@ -43,14 +43,14 @@ const ( ) // WriteInterfaceToDb validates and writes interface to the etcd. -func WriteInterfaceToDb(db keyval.ProtoBroker, key string, ifc *interfaces.Interfaces_Interface) { +func WriteInterfaceToDb(db keyval.ProtoBroker, key string, ifc *interfaces.Interface) { validateInterface(ifc) db.Put(key, ifc) } // GetInterfaceKeyAndValue returns true if an interface with the specified name // was found together with the interface key, and data, and data broker. -func GetInterfaceKeyAndValue(endpoints []string, label string, ifName string) (bool, string, *interfaces.Interfaces_Interface, keyval.ProtoBroker) { +func GetInterfaceKeyAndValue(endpoints []string, label string, ifName string) (bool, string, *interfaces.Interface, keyval.ProtoBroker) { validateIfIdentifiers(label, ifName) db, err := GetDbForOneAgent(endpoints, label) if err != nil { @@ -58,7 +58,7 @@ func GetInterfaceKeyAndValue(endpoints []string, label string, ifName string) (b } key := interfaces.InterfaceKey(ifName) - ifc := &interfaces.Interfaces_Interface{} + ifc := &interfaces.Interface{} found, _, err := db.GetValue(key, ifc) if err != nil { @@ -143,7 +143,7 @@ func ValidateIpv6Addr(ipv6Addr string) bool { return match } -func validateInterface(ifc *interfaces.Interfaces_Interface) { +func validateInterface(ifc *interfaces.Interface) { fmt.Printf("Validating interface\n ifc: %+v\n", ifc) } diff --git a/cmd/agentctl/utils/l2_utils.go b/cmd/agentctl/utils/l2_utils.go index 0f2441ca8c..5d9f9bfe2e 100644 --- a/cmd/agentctl/utils/l2_utils.go +++ b/cmd/agentctl/utils/l2_utils.go @@ -14,6 +14,7 @@ package utils +/* import ( "errors" "fmt" @@ -103,3 +104,4 @@ func 
validateBdIdentifiers(label string, name string) { ExitWithError(ExitInvalidInput, errors.New("Missing bridge domain name")) } } +*/ diff --git a/cmd/agentctl/utils/print_json.go b/cmd/agentctl/utils/print_json.go index 84bc8cd6a3..64e4026631 100644 --- a/cmd/agentctl/utils/print_json.go +++ b/cmd/agentctl/utils/print_json.go @@ -16,16 +16,11 @@ package utils import ( "bytes" - "encoding/json" "fmt" - "sort" "strings" - - "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" + /*"github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" "github.com/ligato/vpp-agent/plugins/vpp/model/l2" - "github.com/ligato/vpp-agent/plugins/vpp/model/l3" - "github.com/logrusorgru/aurora.git" -) + "github.com/ligato/vpp-agent/plugins/vpp/model/l3"*/) const ( // IfConfig labels used by json formatter @@ -62,7 +57,7 @@ func (ed EtcdDump) PrintDataAsJSON(filter []string) (*bytes.Buffer, error) { continue } - vd, _ := ed[key] + /*vd, _ := ed[key] // Obtain raw data. ifaceConfDataRoot, ifaceConfKeys := getInterfaceConfigData(vd.Interfaces) ifaceStateDataRoot, ifaceStateKeys := getInterfaceStateData(vd.Interfaces) @@ -126,7 +121,7 @@ func (ed EtcdDump) PrintDataAsJSON(filter []string) (*bytes.Buffer, error) { if string(jsL3FIBData) != emptyJSON { printLabel(buffer, key+": - "+L3FibConfig+"\n", indent, l3FibKeys) fmt.Fprintf(buffer, "%s\n", jsL3FIBData) - } + }*/ } @@ -168,6 +163,7 @@ func isNotInFilter(key string, filter []string) bool { return true } +/* // Get interface config data and create full interface config proto structure. 
func getInterfaceConfigData(interfaceData map[string]InterfaceWithMD) (*interfaces.Interfaces, []string) { // Config data @@ -290,3 +286,4 @@ func printLabel(buffer *bytes.Buffer, label string, prefix string, keyset []stri } fmt.Fprintf(buffer, ub) } +*/ diff --git a/cmd/agentctl/utils/print_json_test.go b/cmd/agentctl/utils/print_json_test.go index 5ea3fe4b17..6520e7be6e 100644 --- a/cmd/agentctl/utils/print_json_test.go +++ b/cmd/agentctl/utils/print_json_test.go @@ -14,6 +14,7 @@ package utils_test +/* import ( "strings" "testing" @@ -131,3 +132,4 @@ func Test05PrintJsonEmptyBuffer(t *testing.T) { gomega.Expect(err).To(gomega.BeNil()) gomega.ContainSubstring("No data to display", result.String()) } +*/ diff --git a/cmd/agentctl/utils/print_table_test.go b/cmd/agentctl/utils/print_table_test.go index 0fc770fdc7..d8aa60fec0 100644 --- a/cmd/agentctl/utils/print_table_test.go +++ b/cmd/agentctl/utils/print_table_test.go @@ -14,6 +14,7 @@ package utils_test +/* import ( "testing" @@ -214,3 +215,4 @@ func Test13TableActiveShortFlagWithFilter(t *testing.T) { utils.OutErrPkt, utils.Drop, utils.Ipv4Pkt, utils.Ipv6Pkt, utils.Punt) then.DoesNotContainItems(table.String(), "vpp-2", "vpp-3", "vpp-1-interface-2", "vpp-1-interface-3") } +*/ diff --git a/cmd/agentctl/utils/print_text.go b/cmd/agentctl/utils/print_text.go index 6319e6b74b..dce5548d31 100644 --- a/cmd/agentctl/utils/print_text.go +++ b/cmd/agentctl/utils/print_text.go @@ -24,7 +24,7 @@ import ( "bytes" "github.com/ligato/cn-infra/health/statuscheck/model/status" - "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" + interfaces "github.com/ligato/vpp-agent/api/models/vpp/interfaces" "github.com/logrusorgru/aurora.git" ) @@ -403,12 +403,12 @@ func setOsColor(arg status.OperationalState) string { } } -func setStsColor(kind string, arg interfaces.InterfacesState_Interface_Status) string { +func setStsColor(kind string, arg interfaces.InterfaceState_Status) string { sts := fmt.Sprintf("%s-%s", kind, arg) 
switch arg { - case interfaces.InterfacesState_Interface_UP: + case interfaces.InterfaceState_UP: return setGreen(sts) - case interfaces.InterfacesState_Interface_DOWN: + case interfaces.InterfaceState_DOWN: return setRed(sts) default: return sts diff --git a/cmd/agentctl/utils/print_text_test.go b/cmd/agentctl/utils/print_text_test.go index da35c3ef95..be1de5e526 100644 --- a/cmd/agentctl/utils/print_text_test.go +++ b/cmd/agentctl/utils/print_text_test.go @@ -14,6 +14,7 @@ package utils_test +/* import ( "strconv" "strings" @@ -149,3 +150,4 @@ func Test03InterfaceStatsPrintText(t *testing.T) { gomega.Expect(strings.Count(treeOutput, flag)).To(gomega.BeEquivalentTo(6)) } } +*/ diff --git a/cmd/agentctl/utils/tree_writer_test.go b/cmd/agentctl/utils/tree_writer_test.go index a5f6226b35..a48f91120a 100644 --- a/cmd/agentctl/utils/tree_writer_test.go +++ b/cmd/agentctl/utils/tree_writer_test.go @@ -14,6 +14,7 @@ package utils_test +/* import ( "fmt" "testing" @@ -69,3 +70,4 @@ func getPrefix(level int) string { } return fmt.Sprintf("%d^@%s", level, prefix) } +*/ diff --git a/cmd/vpp-agent-ctl/README.md b/cmd/vpp-agent-ctl/README.md new file mode 100644 index 0000000000..e3928788b3 --- /dev/null +++ b/cmd/vpp-agent-ctl/README.md @@ -0,0 +1,67 @@ +#Vpp-agent-ctl + +The vpp-agent-ctl is testing/example utility which purpose is to store given key-value configuration to the ETCD database or read its content. The vpp-agent-ctl consists from two parts, basic crud commands and example data for every configuration type currently supported by the vpp-agent. + +The vpp-agent-ctl does not maintain ETCD connectivity, the link is established before every command execution and released after completion. + +## CRUD commands + +All those commands can be shown either calling binary without parameter, or with invalid parameter. + +**PUT** allows to store data in the ETCD. Put requires two parameters, key and value. The value is represented by .json file. 
Example json files are stored inside vpp-agent-ctl ([link to directory](json)) + +``` +vpp-agent-ctl -put +``` + +**GET** can be used to read configuration for given key. If the key does not exist, is not valid or is not set, command returns an empty value. + +``` +vpp-agent-ctl -get +``` + +**DEL** removes data from the ETCD, identified with provided key. + +``` +vpp-agent-ctl -del +``` + +**LIST** prints all keys currently present in the database. The command takes no parameter. + + ``` + vpp-agent-ctl -list + ``` + +**DUMP** returns all key-value pairs currently present in the database. The command takes no parameter. + +``` +vpp-agent-ctl -list +``` + +## Example pre-defined configurations + +For the quick testing or as a configuration example, the vpp-agent-ctl provides special commands for every available configuration type. Commands can be shown running `vpp-agent-ctl` without parameters. They are sorted per vpp-agent plugin always in pairs; one command to crate a configuration, the second one to remove it. + +Data put using command can be edited - all of them are available in [data package](data) separated in files according to plugins, with interface at the top so the desired configuration item can be easily found. Then, just edit the field(s) needed and `go build` the main file. Then, calling respective command will put the changed data. + +Example commands: + +1. To add access list with IP rules: + +``` +vpp-agent-clt -aclip +``` + +2. To add VxLAN interface + +``` +vpp-agent-ctl -vxlan +``` + +3. To delete TAP interface + +``` +vpp-agent-ctl -tapd +``` + +All the 'delete' cases are by default set to match with creating data (so every delete removes the data created by associated create command). 
\ No newline at end of file diff --git a/cmd/vpp-agent-ctl/data/aclplugin.go b/cmd/vpp-agent-ctl/data/aclplugin.go new file mode 100644 index 0000000000..8f0924aff0 --- /dev/null +++ b/cmd/vpp-agent-ctl/data/aclplugin.go @@ -0,0 +1,151 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package data + +import acl "github.com/ligato/vpp-agent/api/models/vpp/acl" + +// ACLCtl provides access list related methods for vpp-agent-ctl +type ACLCtl interface { + // PutIPAcl puts IPO access list config to the ETCD + PutIPAcl() error + // DeleteIPAcl removes IP access list config from the ETCD + DeleteIPAcl() error + // PutMACIPAcl puts MAC IP access list config to the ETCD + PutMACIPAcl() error + // DeleteMACIPAcl removes MAC IP access list config from the ETCD + DeleteMACIPAcl() error +} + +// PutIPAcl puts IPO access list config to the ETCD +func (ctl *VppAgentCtlImpl) PutIPAcl() error { + accessList := &acl.ACL{ + Name: "aclip1", + Rules: []*acl.ACL_Rule{ + // ACL IP rule + { + Action: acl.ACL_Rule_PERMIT, + IpRule: &acl.ACL_Rule_IpRule{ + Ip: &acl.ACL_Rule_IpRule_Ip{ + SourceNetwork: "192.168.1.1/32", + DestinationNetwork: "10.20.0.1/24", + }, + }, + }, + // ACL ICMP rule + { + Action: acl.ACL_Rule_PERMIT, + IpRule: &acl.ACL_Rule_IpRule{ + Icmp: &acl.ACL_Rule_IpRule_Icmp{ + Icmpv6: false, + IcmpCodeRange: &acl.ACL_Rule_IpRule_Icmp_Range{ + First: 150, + Last: 250, + }, + IcmpTypeRange: 
&acl.ACL_Rule_IpRule_Icmp_Range{ + First: 1150, + Last: 1250, + }, + }, + }, + }, + // ACL TCP rule + { + Action: acl.ACL_Rule_PERMIT, + IpRule: &acl.ACL_Rule_IpRule{ + Tcp: &acl.ACL_Rule_IpRule_Tcp{ + TcpFlagsMask: 20, + TcpFlagsValue: 10, + SourcePortRange: &acl.ACL_Rule_IpRule_PortRange{ + LowerPort: 150, + UpperPort: 250, + }, + DestinationPortRange: &acl.ACL_Rule_IpRule_PortRange{ + LowerPort: 1150, + UpperPort: 1250, + }, + }, + }, + }, + // ACL UDP rule + { + Action: acl.ACL_Rule_PERMIT, + IpRule: &acl.ACL_Rule_IpRule{ + Udp: &acl.ACL_Rule_IpRule_Udp{ + SourcePortRange: &acl.ACL_Rule_IpRule_PortRange{ + LowerPort: 150, + UpperPort: 250, + }, + DestinationPortRange: &acl.ACL_Rule_IpRule_PortRange{ + LowerPort: 1150, + UpperPort: 1250, + }, + }, + }, + }, + }, + Interfaces: &acl.ACL_Interfaces{ + Ingress: []string{"tap1", "tap2"}, + Egress: []string{"tap1", "tap2"}, + }, + } + + ctl.Log.Infof("Access list put: %v", accessList) + return ctl.broker.Put(acl.Key(accessList.Name), accessList) +} + +// DeleteIPAcl removes IP access list config from the ETCD +func (ctl *VppAgentCtlImpl) DeleteIPAcl() error { + aclKey := acl.Key("aclip1") + + ctl.Log.Infof("Deleted acl: %v", aclKey) + _, err := ctl.broker.Delete(aclKey) + return err +} + +// PutMACIPAcl puts MAC IP access list config to the ETCD +func (ctl *VppAgentCtlImpl) PutMACIPAcl() error { + accessList := &acl.ACL{ + Name: "aclmac1", + // ACL rules + Rules: []*acl.ACL_Rule{ + // ACL MAC IP rule. 
Note: do not combine ACL ip and mac ip rules in single acl + { + Action: acl.ACL_Rule_PERMIT, + MacipRule: &acl.ACL_Rule_MacIpRule{ + SourceAddress: "192.168.0.1", + SourceAddressPrefix: uint32(16), + SourceMacAddress: "11:44:0A:B8:4A:35", + SourceMacAddressMask: "ff:ff:ff:ff:00:00", + }, + }, + }, + Interfaces: &acl.ACL_Interfaces{ + Ingress: []string{"tap1", "tap2"}, + Egress: []string{"tap1", "tap2"}, + }, + } + + ctl.Log.Infof("Access list put: %v", accessList) + return ctl.broker.Put(acl.Key(accessList.Name), accessList) +} + +// DeleteMACIPAcl removes MAC IP access list config from the ETCD +func (ctl *VppAgentCtlImpl) DeleteMACIPAcl() error { + aclKey := acl.Key("aclmac1") + + ctl.Log.Infof("Deleted acl: %v", aclKey) + _, err := ctl.broker.Delete(aclKey) + return err +} diff --git a/cmd/vpp-agent-ctl/data/etcd.go b/cmd/vpp-agent-ctl/data/etcd.go new file mode 100644 index 0000000000..f05f445e65 --- /dev/null +++ b/cmd/vpp-agent-ctl/data/etcd.go @@ -0,0 +1,192 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package data + +import ( + "bufio" + "bytes" + "encoding/json" + "io/ioutil" + "os" + + "github.com/ligato/cn-infra/config" + "github.com/ligato/cn-infra/datasync" + "github.com/ligato/cn-infra/db/keyval" + "github.com/ligato/cn-infra/db/keyval/etcd" + "github.com/ligato/cn-infra/db/keyval/kvproto" +) + +// EtcdCtl provides ETCD crud methods for vpp-agent-ctl +type EtcdCtl interface { + // CreateEtcdClient creates a new connection to etcd + CreateEtcdClient(configFile string) (*etcd.BytesConnectionEtcd, keyval.ProtoBroker, error) + // ListAllAgentKeys prints all agent keys + ListAllAgentKeys() + // Put adds new data to etcd + Put(key string, file string) + // Del removes data from etcd + Del(key string) + // Get key value from the ETCD + Get(key string) + // Dump all values for given key prefix + Dump(key string) +} + +// ListAllAgentKeys prints all keys stored in the broker +func (ctl *VppAgentCtlImpl) ListAllAgentKeys() { + ctl.Log.Debug("listAllAgentKeys") + + it, err := ctl.bytesConnection.ListKeys(ctl.serviceLabel.GetAllAgentsPrefix()) + if err != nil { + ctl.Log.Error(err) + } + for { + key, _, stop := it.GetNext() + if stop { + break + } + ctl.Log.Infof("key: %s", key) + } +} + +// CreateEtcdClient uses environment variable or ETCD config file to establish connection +func (ctl *VppAgentCtlImpl) CreateEtcdClient(configFile string) (*etcd.BytesConnectionEtcd, keyval.ProtoBroker, error) { + var err error + + if configFile == "" { + configFile = os.Getenv("ETCD_CONFIG") + } + + cfg := &etcd.Config{} + if configFile != "" { + err = config.ParseConfigFromYamlFile(configFile, cfg) + if err != nil { + return nil, nil, err + } + } + etcdConfig, err := etcd.ConfigToClient(cfg) + if err != nil { + ctl.Log.Fatal(err) + } + + bDB, err := etcd.NewEtcdConnectionWithBytes(*etcdConfig, ctl.Log) + if err != nil { + return nil, nil, err + } + + return bDB, kvproto.NewProtoWrapperWithSerializer(bDB, &keyval.SerializerJSON{}). 
+ NewBroker(ctl.serviceLabel.GetAgentPrefix()), nil +} + +// Get uses ETCD connection to get value for specific key +func (ctl *VppAgentCtlImpl) Get(key string) { + ctl.Log.Debug("GET ", key) + + data, found, _, err := ctl.bytesConnection.GetValue(key) + if err != nil { + ctl.Log.Error(err) + return + } + if !found { + ctl.Log.Debug("No value found for the key", key) + } + ctl.Log.Println(string(data)) +} + +// Put stores key/data value +func (ctl *VppAgentCtlImpl) Put(key string, file string) { + input, err := ctl.readData(file) + if err != nil { + ctl.Log.Fatal(err) + } + + ctl.Log.Println("DB putting ", key, " ", string(input)) + + err = ctl.bytesConnection.Put(key, input) + if err != nil { + ctl.Log.Panic("error putting the data ", key, " that to DB from ", file, ", err: ", err) + } + ctl.Log.Println("DB put successful ", key, " ", file) +} + +// Del removes data under provided key +func (ctl *VppAgentCtlImpl) Del(key string) { + ctl.Log.Debug("DEL ", key) + + found, err := ctl.bytesConnection.Delete(key, datasync.WithPrefix()) + if err != nil { + ctl.Log.Error(err) + return + } + if found { + ctl.Log.Debug("Data deleted:", key) + } else { + ctl.Log.Debug("No value found for the key", key) + } +} + +// Dump lists values under key. If no key is provided, all data is read. 
+func (ctl *VppAgentCtlImpl) Dump(key string) { + ctl.Log.Debug("DUMP ", key) + + data, err := ctl.bytesConnection.ListValues(key) + if err != nil { + ctl.Log.Error(err) + return + } + + var found bool + for { + kv, stop := data.GetNext() + if stop { + break + } + ctl.Log.Println(kv.GetKey()) + ctl.Log.Println(string(kv.GetValue())) + ctl.Log.Println() + found = true + } + if !found { + ctl.Log.Debug("No value found for the key", key) + } +} + +func (ctl *VppAgentCtlImpl) readData(file string) ([]byte, error) { + var input []byte + var err error + + if file == "-" { + // read JSON from STDIN + bio := bufio.NewReader(os.Stdin) + buf := new(bytes.Buffer) + if _, err = buf.ReadFrom(bio); err != nil { + ctl.Log.Errorf("error reading json: %v", err) + } + input = buf.Bytes() + } else { + // read JSON from file + input, err = ioutil.ReadFile(file) + if err != nil { + ctl.Log.Panic("error reading the data that needs to be written to DB from ", file, ", err: ", err) + } + } + + // validate the JSON + var js map[string]interface{} + if json.Unmarshal(input, &js) != nil { + ctl.Log.Panic("Not a valid JSON: ", string(input)) + } + return input, err +} diff --git a/cmd/vpp-agent-ctl/data/ifplugin.go b/cmd/vpp-agent-ctl/data/ifplugin.go new file mode 100644 index 0000000000..2e0f1600a2 --- /dev/null +++ b/cmd/vpp-agent-ctl/data/ifplugin.go @@ -0,0 +1,381 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package data + +import ( + linuxIf "github.com/ligato/vpp-agent/api/models/linux/interfaces" + "github.com/ligato/vpp-agent/api/models/linux/namespace" + interfaces "github.com/ligato/vpp-agent/api/models/vpp/interfaces" +) + +// InterfacesCtl interface plugin related methods for vpp-agent-ctl (interfaces including linux ones) +type InterfacesCtl interface { + // PutPhysicalInterface puts ethernet type interface config to the ETCD + PutDPDKInterface() error + // DeleteDPDKInterface removes ethernet type interface config from the ETCD + DeleteDPDKInterface() error + // PutTap puts TAP type interface config to the ETCD + PutTap() error + // DeleteTap removes TAP type interface config from the ETCD + DeleteTap() error + // PutLoopback puts loopback type interface config to the ETCD + PutLoopback() error + // DeleteLoopback removes loopback type interface config from the ETCD + DeleteLoopback() error + // PutMemoryInterface puts memory type interface config to the ETCD + PutMemoryInterface() error + // DeleteMemoryInterface removes memory type interface config from the ETCD + DeleteMemoryInterface() error + // PutVxLan puts VxLAN type interface config to the ETCD + PutVxLan() error + // DeleteVxLan removes VxLAN type interface config from the ETCD + DeleteVxLan() error + // PutAfPacket puts Af-packet type interface config to the ETCD + PutAfPacket() error + // DeleteAfPacket removes AF-Packet type interface config from the ETCD + DeleteAfPacket() error + // PutIPSecTunnelInterface configures IPSec tunnel interface + PutIPSecTunnelInterface() error + // DeleteIPSecTunnelInterface removes IPSec tunnel interface + DeleteIPSecTunnelInterface() error + // PutVEthPair puts two VETH type interfaces to the ETCD + PutVEthPair() error + // DeleteVEthPair removes VETH pair interfaces from the ETCD + DeleteVEthPair() error + // PutLinuxTap puts linux TAP type interface configuration to the ETCD + PutLinuxTap() error + // DeleteLinuxTap removes linux TAP type interface 
configuration from the ETCD + DeleteLinuxTap() error +} + +// PutDPDKInterface puts ethernet type interface config to the ETCD +func (ctl *VppAgentCtlImpl) PutDPDKInterface() error { + ethernet := &interfaces.Interface{ + Name: "GigabitEthernet0/8/0", + Type: interfaces.Interface_DPDK, + Enabled: true, + IpAddresses: []string{ + "192.168.1.1", + "2001:db8:0:0:0:ff00:5168:2bc8/48", + }, + } + + ctl.Log.Infof("Interface put: %v", ethernet) + return ctl.broker.Put(interfaces.InterfaceKey(ethernet.Name), ethernet) +} + +// DeleteDPDKInterface removes ethernet type interface config from the ETCD +func (ctl *VppAgentCtlImpl) DeleteDPDKInterface() error { + ethernetKey := interfaces.InterfaceKey("GigabitEthernet0/8/0") + + ctl.Log.Infof("Interface delete: %v", ethernetKey) + _, err := ctl.broker.Delete(ethernetKey) + return err +} + +// PutTap puts TAP type interface config to the ETCD +func (ctl *VppAgentCtlImpl) PutTap() error { + tap := &interfaces.Interface{ + Name: "tap1", + Type: interfaces.Interface_TAP, + Enabled: true, + PhysAddress: "12:E4:0E:D5:BC:DC", + IpAddresses: []string{ + "192.168.20.3/24", + }, + Link: &interfaces.Interface_Tap{ + Tap: &interfaces.TapLink{ + HostIfName: "tap-host", + }, + }, + } + + ctl.Log.Infof("Interface put: %v", tap) + return ctl.broker.Put(interfaces.InterfaceKey(tap.Name), tap) +} + +// DeleteTap removes TAP type interface config from the ETCD +func (ctl *VppAgentCtlImpl) DeleteTap() error { + tapKey := interfaces.InterfaceKey("tap1") + + ctl.Log.Infof("Interface delete: %v", tapKey) + _, err := ctl.broker.Delete(tapKey) + return err +} + +// PutLoopback puts loopback type interface config to the ETCD +func (ctl *VppAgentCtlImpl) PutLoopback() error { + loopback := &interfaces.Interface{ + Name: "loop1", + Type: interfaces.Interface_SOFTWARE_LOOPBACK, + Enabled: true, + PhysAddress: "7C:4E:E7:8A:63:68", + Mtu: 1478, + IpAddresses: []string{ + "192.168.25.3/24", + "172.125.45.1/24", + }, + } + + ctl.Log.Infof("Interface put: %v", 
loopback) + return ctl.broker.Put(interfaces.InterfaceKey(loopback.Name), loopback) +} + +// DeleteLoopback removes loopback type interface config from the ETCD +func (ctl *VppAgentCtlImpl) DeleteLoopback() error { + loopbackKey := interfaces.InterfaceKey("loop1") + + ctl.Log.Infof("Interface delete: %v", loopbackKey) + _, err := ctl.broker.Delete(loopbackKey) + return err +} + +// PutMemoryInterface puts memif type interface config to the ETCD +func (ctl *VppAgentCtlImpl) PutMemoryInterface() error { + memif := &interfaces.Interface{ + Name: "memif1", + Type: interfaces.Interface_MEMIF, + Enabled: true, + PhysAddress: "4E:93:2A:38:A7:77", + Mtu: 1478, + IpAddresses: []string{ + "172.125.40.1/24", + }, + Link: &interfaces.Interface_Memif{ + Memif: &interfaces.MemifLink{ + Id: 1, + Secret: "secret", + Master: true, + SocketFilename: "/tmp/memif1.sock", + }, + }, + } + + ctl.Log.Infof("Interface put: %v", memif) + return ctl.broker.Put(interfaces.InterfaceKey(memif.Name), memif) +} + +// DeleteMemoryInterface removes memif type interface config from the ETCD +func (ctl *VppAgentCtlImpl) DeleteMemoryInterface() error { + memifKey := interfaces.InterfaceKey("memif1") + + ctl.Log.Infof("Interface delete: %v", memifKey) + _, err := ctl.broker.Delete(memifKey) + return err +} + +// PutVxLan puts VxLAN type interface config to the ETCD +func (ctl *VppAgentCtlImpl) PutVxLan() error { + vxlan := &interfaces.Interface{ + + Name: "vxlan1", + Type: interfaces.Interface_VXLAN_TUNNEL, + Enabled: true, + IpAddresses: []string{ + "172.125.40.1/24", + }, + Link: &interfaces.Interface_Vxlan{ + Vxlan: &interfaces.VxlanLink{ + SrcAddress: "192.168.42.1", + DstAddress: "192.168.42.2", + Vni: 13, + }, + }, + } + + ctl.Log.Infof("Interface put: %v", vxlan) + return ctl.broker.Put(interfaces.InterfaceKey(vxlan.Name), vxlan) +} + +// DeleteVxLan removes VxLAN type interface config from the ETCD +func (ctl *VppAgentCtlImpl) DeleteVxLan() error { + vxlanKey := 
interfaces.InterfaceKey("vxlan1") + + ctl.Log.Infof("Interface delete: %v", vxlanKey) + _, err := ctl.broker.Delete(vxlanKey) + return err +} + +// PutAfPacket puts Af-packet type interface config to the ETCD +func (ctl *VppAgentCtlImpl) PutAfPacket() error { + afPacket := &interfaces.Interface{ + Name: "afpacket1", + Type: interfaces.Interface_AF_PACKET, + Enabled: true, + Mtu: 1500, + IpAddresses: []string{ + "172.125.40.1/24", + "192.168.12.1/24", + "fd21:7408:186f::/48", + }, + Link: &interfaces.Interface_Afpacket{ + Afpacket: &interfaces.AfpacketLink{ + HostIfName: "lo", + }, + }, + } + + ctl.Log.Infof("Interface put: %v", afPacket) + return ctl.broker.Put(interfaces.InterfaceKey(afPacket.Name), afPacket) +} + +// DeleteAfPacket removes AF-Packet type interface config from the ETCD +func (ctl *VppAgentCtlImpl) DeleteAfPacket() error { + afPacketKey := interfaces.InterfaceKey("afpacket1") + + ctl.Log.Infof("Interface delete: %v", afPacketKey) + _, err := ctl.broker.Delete(afPacketKey) + return err +} + +// PutIPSecTunnelInterface configures IPSec tunnel interface +func (ctl *VppAgentCtlImpl) PutIPSecTunnelInterface() error { + tunnelIf := &interfaces.Interface{ + Name: "ipsec0", + Enabled: true, + IpAddresses: []string{"20.0.0.0/24"}, + Vrf: 0, + Type: interfaces.Interface_IPSEC_TUNNEL, + Link: &interfaces.Interface_Ipsec{ + Ipsec: &interfaces.IPSecLink{ + Esn: false, + AntiReplay: false, + LocalSpi: 1000, + RemoteSpi: 1001, + LocalIp: "10.0.0.2", + RemoteIp: "10.0.0.1", + CryptoAlg: 1, + LocalCryptoKey: "4a506a794f574265564551694d653768", + RemoteCryptoKey: "4a506a794f574265564551694d653768", + IntegAlg: 2, + LocalIntegKey: "4339314b55523947594d6d3547666b45764e6a58", + RemoteIntegKey: "4339314b55523947594d6d3547666b45764e6a58", + EnableUdpEncap: true, + }, + }, + } + ctl.Log.Infof("Interface put: %v", tunnelIf) + return ctl.broker.Put(interfaces.InterfaceKey(tunnelIf.Name), tunnelIf) +} + +// DeleteIPSecTunnelInterface removes IPSec tunnel interface +func (ctl 
*VppAgentCtlImpl) DeleteIPSecTunnelInterface() error { + tunnelKey := interfaces.InterfaceKey("ipsec0") + + ctl.Log.Infof("Interface delete: %v", tunnelKey) + _, err := ctl.broker.Delete(tunnelKey) + return err +} + +// PutVEthPair puts two VETH type interfaces to the ETCD +func (ctl *VppAgentCtlImpl) PutVEthPair() error { + // Note: VETH interfaces are created in pairs + veth1 := &linuxIf.Interface{ + Name: "veth1", + Type: linuxIf.Interface_VETH, + Enabled: true, + PhysAddress: "D2:74:8C:12:67:D2", + Namespace: &linux_namespace.NetNamespace{ + Reference: "ns1", + Type: linux_namespace.NetNamespace_NSID, + }, + Mtu: 1500, + IpAddresses: []string{ + "192.168.22.1/24", + "10.0.2.2/24", + }, + Link: &linuxIf.Interface_Veth{ + Veth: &linuxIf.VethLink{ + PeerIfName: "veth2", + }, + }, + } + + veth2 := &linuxIf.Interface{ + Name: "veth2", + Type: linuxIf.Interface_VETH, + Enabled: true, + PhysAddress: "92:C7:42:67:AB:CD", + Namespace: &linux_namespace.NetNamespace{ + Reference: "ns2", + Type: linux_namespace.NetNamespace_NSID, + }, + Mtu: 1500, + IpAddresses: []string{ + "192.168.22.5/24", + }, + Link: &linuxIf.Interface_Veth{ + Veth: &linuxIf.VethLink{ + PeerIfName: "veth1", + }, + }, + } + + ctl.Log.Infof("Interfaces put: %v, v%", veth1, veth2) + if err := ctl.broker.Put(linuxIf.InterfaceKey(veth1.Name), veth1); err != nil { + return err + } + return ctl.broker.Put(linuxIf.InterfaceKey(veth2.Name), veth2) +} + +// DeleteVEthPair removes VETH pair interfaces from the ETCD +func (ctl *VppAgentCtlImpl) DeleteVEthPair() error { + veth1Key := linuxIf.InterfaceKey("veth1") + veth2Key := linuxIf.InterfaceKey("veth2") + + ctl.Log.Infof("Interface delete: %v", veth1Key) + if _, err := ctl.broker.Delete(veth1Key); err != nil { + return err + } + ctl.Log.Infof("Interface delete: %v", veth2Key) + _, err := ctl.broker.Delete(veth2Key) + return err +} + +// PutLinuxTap puts linux TAP type interface configuration to the ETCD +func (ctl *VppAgentCtlImpl) PutLinuxTap() error { + 
linuxTap := &linuxIf.Interface{ + Name: "tap1", + HostIfName: "tap-host", + Type: linuxIf.Interface_TAP_TO_VPP, + Enabled: true, + PhysAddress: "BC:FE:E9:5E:07:04", + Namespace: &linux_namespace.NetNamespace{ + Reference: "ns2", + Type: linux_namespace.NetNamespace_NSID, + }, + Mtu: 1500, + IpAddresses: []string{ + "172.52.45.127/24", + }, + Link: &linuxIf.Interface_Tap{ + Tap: &linuxIf.TapLink{ + VppTapIfName: "tap-host", + }, + }, + } + + ctl.Log.Println(linuxTap) + return ctl.broker.Put(linuxIf.InterfaceKey(linuxTap.Name), linuxTap) +} + +// DeleteLinuxTap removes linux TAP type interface configuration from the ETCD +func (ctl *VppAgentCtlImpl) DeleteLinuxTap() error { + linuxTapKey := linuxIf.InterfaceKey("tap1") + + ctl.Log.Println("Deleting", linuxTapKey) + _, err := ctl.broker.Delete(linuxTapKey) + return err +} diff --git a/cmd/vpp-agent-ctl/data/ipsecplugin.go b/cmd/vpp-agent-ctl/data/ipsecplugin.go new file mode 100644 index 0000000000..bfd6c6e2b1 --- /dev/null +++ b/cmd/vpp-agent-ctl/data/ipsecplugin.go @@ -0,0 +1,123 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package data + +import ipsec "github.com/ligato/vpp-agent/api/models/vpp/ipsec" + +// IPSecCtl IPSec plugin related methods for vpp-agent-ctl (SPD, SA) +type IPSecCtl interface { + // PutIPSecSPD puts STD configuration to the ETCD + PutIPSecSPD() error + // DeleteIPSecSPD removes STD configuration from the ETCD + DeleteIPSecSPD() error + // PutIPSecSA puts two security association configurations to the ETCD + PutIPSecSA() error + // DeleteIPSecSA removes SA configuration from the ETCD + DeleteIPSecSA() error +} + +// PutIPSecSPD puts STD configuration to the ETCD +func (ctl *VppAgentCtlImpl) PutIPSecSPD() error { + spd := ipsec.SecurityPolicyDatabase{ + Index: "1", + Interfaces: []*ipsec.SecurityPolicyDatabase_Interface{ + { + Name: "tap1", + }, + { + Name: "loop1", + }, + }, + PolicyEntries: []*ipsec.SecurityPolicyDatabase_PolicyEntry{ + { + Priority: 10, + IsOutbound: false, + RemoteAddrStart: "10.0.0.1", + RemoteAddrStop: "10.0.0.1", + LocalAddrStart: "10.0.0.2", + LocalAddrStop: "10.0.0.2", + Action: 3, + SaIndex: "1", + }, + { + Priority: 10, + IsOutbound: true, + RemoteAddrStart: "10.0.0.1", + RemoteAddrStop: "10.0.0.1", + LocalAddrStart: "10.0.0.2", + LocalAddrStop: "10.0.0.2", + Action: 3, + SaIndex: "2", + }, + }, + } + + ctl.Log.Infof("IPSec SPD put: %v", spd.Index) + return ctl.broker.Put(ipsec.SPDKey(spd.Index), &spd) +} + +// DeleteIPSecSPD removes STD configuration from the ETCD +func (ctl *VppAgentCtlImpl) DeleteIPSecSPD() error { + spdKey := ipsec.SPDKey("1") + + ctl.Log.Infof("IPSec SPD delete: %v", spdKey) + _, err := ctl.broker.Delete(spdKey) + return err +} + +// PutIPSecSA puts two security association configurations to the ETCD +func (ctl *VppAgentCtlImpl) PutIPSecSA() error { + sa1 := ipsec.SecurityAssociation{ + Index: "1", + Spi: 1001, + Protocol: 1, + CryptoAlg: 1, + CryptoKey: "4a506a794f574265564551694d653768", + IntegAlg: 2, + IntegKey: "4339314b55523947594d6d3547666b45764e6a58", + EnableUdpEncap: true, + } + sa2 := 
ipsec.SecurityAssociation{ + Index: "2", + Spi: 1000, + Protocol: 1, + CryptoAlg: 1, + CryptoKey: "4a506a794f574265564551694d653768", + IntegAlg: 2, + IntegKey: "4339314b55523947594d6d3547666b45764e6a58", + EnableUdpEncap: false, + } + + ctl.Log.Infof("IPSec SA put: %v", sa1.Index) + if err := ctl.broker.Put(ipsec.SAKey(sa1.Index), &sa1); err != nil { + return err + } + ctl.Log.Infof("IPSec SA put: %v", sa2.Index) + return ctl.broker.Put(ipsec.SAKey(sa2.Index), &sa2) +} + +// DeleteIPSecSA removes SA configuration from the ETCD +func (ctl *VppAgentCtlImpl) DeleteIPSecSA() error { + saKey1 := ipsec.SAKey("1") + saKey2 := ipsec.SAKey("2") + + ctl.Log.Infof("IPSec SA delete: %v", saKey1) + if _, err := ctl.broker.Delete(saKey1); err != nil { + return err + } + ctl.Log.Infof("IPSec SA delete: %v", saKey2) + _, err := ctl.broker.Delete(saKey2) + return err +} diff --git a/cmd/vpp-agent-ctl/data/l2plugin.go b/cmd/vpp-agent-ctl/data/l2plugin.go new file mode 100644 index 0000000000..f9d364d104 --- /dev/null +++ b/cmd/vpp-agent-ctl/data/l2plugin.go @@ -0,0 +1,126 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package data + +import l2 "github.com/ligato/vpp-agent/api/models/vpp/l2" + +// L2Ctl L2 plugin related methods for vpp-agent-ctl (bridge domains, FIBs, L2 cross connects) +type L2Ctl interface { + // PutBridgeDomain puts L2 bridge domain configuration to the ETCD + PutBridgeDomain() error + // DeleteBridgeDomain removes bridge domain configuration from the ETCD + DeleteBridgeDomain() error + // PutFib puts L2 FIB entry configuration to the ETCD + PutFib() error + // DeleteFib removes FIB entry configuration from the ETCD + DeleteFib() error + // PutXConn puts L2 cross connect configuration to the ETCD + PutXConn() error + // DeleteXConn removes cross connect configuration from the ETCD + DeleteXConn() error +} + +// PutBridgeDomain puts L2 bridge domain configuration to the ETCD +func (ctl *VppAgentCtlImpl) PutBridgeDomain() error { + bd := &l2.BridgeDomain{ + Name: "bd1", + Learn: true, + ArpTermination: true, + Flood: true, + UnknownUnicastFlood: true, + Forward: true, + MacAge: 0, + Interfaces: []*l2.BridgeDomain_Interface{ + { + Name: "loop1", + BridgedVirtualInterface: true, + SplitHorizonGroup: 0, + }, + { + Name: "tap1", + BridgedVirtualInterface: false, + SplitHorizonGroup: 1, + }, + { + Name: "memif1", + BridgedVirtualInterface: false, + SplitHorizonGroup: 2, + }, + }, + ArpTerminationTable: []*l2.BridgeDomain_ArpTerminationEntry{ + { + IpAddress: "192.168.50.20", + PhysAddress: "A7:5D:44:D8:E6:51", + }, + }, + } + + ctl.Log.Infof("Bridge domain put: %v", bd) + return ctl.broker.Put(l2.BridgeDomainKey(bd.Name), bd) +} + +// DeleteBridgeDomain removes bridge domain configuration from the ETCD +func (ctl *VppAgentCtlImpl) DeleteBridgeDomain() error { + bdKey := l2.BridgeDomainKey("bd1") + + ctl.Log.Infof("Bridge domain delete: %v", bdKey) + _, err := ctl.broker.Delete(bdKey) + return err +} + +// PutFib puts L2 FIB entry configuration to the ETCD +func (ctl *VppAgentCtlImpl) PutFib() error { + fib := &l2.FIBEntry{ + PhysAddress: "EA:FE:3C:64:A7:44", + 
BridgeDomain: "bd1", + OutgoingInterface: "loop1", + StaticConfig: true, + BridgedVirtualInterface: true, + Action: l2.FIBEntry_FORWARD, // or DROP + + } + + ctl.Log.Infof("FIB put: %v", fib) + return ctl.broker.Put(l2.FIBKey(fib.BridgeDomain, fib.PhysAddress), fib) +} + +// DeleteFib removes FIB entry configuration from the ETCD +func (ctl *VppAgentCtlImpl) DeleteFib() error { + fibKey := l2.FIBKey("bd1", "EA:FE:3C:64:A7:44") + + ctl.Log.Infof("FIB delete: %v", fibKey) + _, err := ctl.broker.Delete(fibKey) + return err +} + +// PutXConn puts L2 cross connect configuration to the ETCD +func (ctl *VppAgentCtlImpl) PutXConn() error { + xc := &l2.XConnectPair{ + ReceiveInterface: "tap1", + TransmitInterface: "loop1", + } + + ctl.Log.Infof("FIB put: %v", xc) + return ctl.broker.Put(l2.XConnectKey(xc.ReceiveInterface), xc) +} + +// DeleteXConn removes cross connect configuration from the ETCD +func (ctl *VppAgentCtlImpl) DeleteXConn() error { + xcKey := l2.XConnectKey("loop1") + + ctl.Log.Infof("FIB delete: %v", xcKey) + _, err := ctl.broker.Delete(xcKey) + return err +} diff --git a/cmd/vpp-agent-ctl/data/l3plugins.go b/cmd/vpp-agent-ctl/data/l3plugins.go new file mode 100644 index 0000000000..0d9cf1e572 --- /dev/null +++ b/cmd/vpp-agent-ctl/data/l3plugins.go @@ -0,0 +1,265 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package data + +import ( + linuxL3 "github.com/ligato/vpp-agent/api/models/linux/l3" + l3 "github.com/ligato/vpp-agent/api/models/vpp/l3" +) + +// L3Ctl L3 plugin related methods for vpp-agent-ctl (including linux) +type L3Ctl interface { + // PutRoute puts VPP route configuration to the ETCD + PutRoute() error + // DeleteRoute removes VPP route configuration from the ETCD + DeleteRoute() error + // PutInterVrfRoute puts inter-VRF VPP route configuration to the ETCD + PutInterVrfRoute() error + // DeleteRoute removes VPP route configuration from the ETCD + DeleteInterVrfRoute() error + // PutInterVrfRoute puts inter-VRF VPP route configuration with next hop to the ETCD + PutNextHopRoute() error + // DeleteNextHopRoute removes VPP route configuration from the ETCD + DeleteNextHopRoute() error + // PutLinuxRoute puts linux route configuration to the ETCD + PutLinuxRoute() error + // DeleteLinuxRoute removes linux route configuration from the ETCD + DeleteLinuxRoute() error + // PutLinuxDefaultRoute puts linux default route configuration to the ETCD + PutLinuxDefaultRoute() error + // DeleteLinuxDefaultRoute removes linux default route configuration from the ETCD + DeleteLinuxDefaultRoute() error + // PutArp puts VPP ARP entry configuration to the ETCD + PutArp() error + // PutArp puts VPP ARP entry configuration to the ETCD + DeleteArp() error + // PutProxyArp puts VPP proxy ARP configuration to the ETCD + PutProxyArp() error + // DeleteProxyArp removes VPP proxy ARP configuration from the ETCD + DeleteProxyArp() error + // SetIPScanNeigh puts VPP IP scan neighbor configuration to the ETCD + SetIPScanNeigh() error + // UnsetIPScanNeigh removes VPP IP scan neighbor configuration from the ETCD + UnsetIPScanNeigh() error + // CreateLinuxArp puts linux ARP entry configuration to the ETCD + PutLinuxArp() error + // DeleteLinuxArp removes Linux ARP entry configuration from the ETCD + DeleteLinuxArp() error +} + +// PutRoute puts VPP route configuration to the ETCD +func 
(ctl *VppAgentCtlImpl) PutRoute() error { + route := &l3.Route{ + VrfId: 0, + DstNetwork: "10.1.1.3/32", + NextHopAddr: "192.168.1.13", + Weight: 6, + OutgoingInterface: "tap1", + } + + ctl.Log.Infof("Route put: %v", route) + return ctl.broker.Put(l3.RouteKey(route.VrfId, route.DstNetwork, route.NextHopAddr), route) +} + +// DeleteRoute removes VPP route configuration from the ETCD +func (ctl *VppAgentCtlImpl) DeleteRoute() error { + routeKey := l3.RouteKey(0, "10.1.1.3/32", "192.168.1.13") + + ctl.Log.Infof("Route delete: %v", routeKey) + _, err := ctl.broker.Delete(routeKey) + return err +} + +// PutInterVrfRoute puts inter-VRF VPP route configuration to the ETCD +func (ctl *VppAgentCtlImpl) PutInterVrfRoute() error { + route := &l3.Route{ + Type: l3.Route_INTER_VRF, + VrfId: 0, + DstNetwork: "1.2.3.4/32", + ViaVrfId: 1, + } + + ctl.Log.Infof("Route put: %v", route) + return ctl.broker.Put(l3.RouteKey(route.VrfId, route.DstNetwork, route.NextHopAddr), route) +} + +// DeleteInterVrfRoute removes VPP route configuration from the ETCD +func (ctl *VppAgentCtlImpl) DeleteInterVrfRoute() error { + routeKey := l3.RouteKey(0, "1.2.3.4/32", "") + + ctl.Log.Infof("Route delete: %v", routeKey) + _, err := ctl.broker.Delete(routeKey) + return err +} + +// PutNextHopRoute puts inter-VRF VPP route configuration with next hop to the ETCD +func (ctl *VppAgentCtlImpl) PutNextHopRoute() error { + route := &l3.Route{ + Type: l3.Route_INTER_VRF, + VrfId: 1, + DstNetwork: "10.1.1.3/32", + NextHopAddr: "192.168.1.13", + ViaVrfId: 0, + } + + ctl.Log.Infof("Route put: %v", route) + return ctl.broker.Put(l3.RouteKey(route.VrfId, route.DstNetwork, route.NextHopAddr), route) +} + +// DeleteNextHopRoute removes VPP route configuration from the ETCD +func (ctl *VppAgentCtlImpl) DeleteNextHopRoute() error { + routeKey := l3.RouteKey(1, "10.1.1.3/32", "192.168.1.13") + + ctl.Log.Infof("Route delete: %v", routeKey) + _, err := ctl.broker.Delete(routeKey) + return err +} + +// PutLinuxRoute puts 
linux route configuration to the ETCD +func (ctl *VppAgentCtlImpl) PutLinuxRoute() error { + linuxRoute := &linuxL3.Route{ + DstNetwork: "10.0.2.0/24", + OutgoingInterface: "veth1", + Metric: 100, + } + + ctl.Log.Infof("Route put: %v", linuxRoute) + return ctl.broker.Put(linuxL3.RouteKey(linuxRoute.DstNetwork, linuxRoute.OutgoingInterface), linuxRoute) +} + +// DeleteLinuxRoute removes linux route configuration from the ETCD +func (ctl *VppAgentCtlImpl) DeleteLinuxRoute() error { + linuxRouteKey := linuxL3.RouteKey("10.0.2.0/24", "veth1") + + ctl.Log.Println("Linux route delete: %v", linuxRouteKey) + _, err := ctl.broker.Delete(linuxRouteKey) + return err +} + +// PutLinuxDefaultRoute puts linux default route configuration to the ETCD +func (ctl *VppAgentCtlImpl) PutLinuxDefaultRoute() error { + linuxRoute := &linuxL3.Route{ + GwAddr: "10.0.2.2", + OutgoingInterface: "veth1", + Metric: 100, + } + + ctl.Log.Infof("Linux default route put: %v", linuxRoute) + return ctl.broker.Put(linuxL3.RouteKey(linuxRoute.DstNetwork, linuxRoute.OutgoingInterface), linuxRoute) +} + +// DeleteLinuxDefaultRoute removes linux default route configuration from the ETCD +func (ctl *VppAgentCtlImpl) DeleteLinuxDefaultRoute() error { + linuxRouteKey := linuxL3.RouteKey("0.0.0.0/32", "veth1") + + ctl.Log.Info("Linux route delete: %v", linuxRouteKey) + _, err := ctl.broker.Delete(linuxRouteKey) + return err +} + +// PutArp puts VPP ARP entry configuration to the ETCD +func (ctl *VppAgentCtlImpl) PutArp() error { + arp := &l3.ARPEntry{ + Interface: "tap1", + IpAddress: "192.168.10.21", + PhysAddress: "59:6C:45:59:8E:BD", + Static: true, + } + + ctl.Log.Infof("ARP put: %v", arp) + return ctl.broker.Put(l3.ArpEntryKey(arp.Interface, arp.IpAddress), arp) +} + +// DeleteArp removes VPP ARP entry configuration from the ETCD +func (ctl *VppAgentCtlImpl) DeleteArp() error { + arpKey := l3.ArpEntryKey("tap1", "192.168.10.21") + + ctl.Log.Info("Linux route delete: %v", arpKey) + _, err := 
ctl.broker.Delete(arpKey) + return err +} + +// PutProxyArp puts VPP proxy ARP configuration to the ETCD +func (ctl *VppAgentCtlImpl) PutProxyArp() error { + proxyArp := &l3.ProxyARP{ + Interfaces: []*l3.ProxyARP_Interface{ + { + Name: "tap1", + }, + { + Name: "tap2", + }, + }, + Ranges: []*l3.ProxyARP_Range{ + { + FirstIpAddr: "10.0.0.1", + LastIpAddr: "10.0.0.3", + }, + }, + } + + ctl.Log.Infof("Proxy ARP put: %v", proxyArp) + return ctl.broker.Put(l3.ProxyARPKey(), proxyArp) +} + +// DeleteProxyArp removes VPP proxy ARPconfiguration from the ETCD +func (ctl *VppAgentCtlImpl) DeleteProxyArp() error { + ctl.Log.Info("Proxy ARP deleted") + _, err := ctl.broker.Delete(l3.ProxyARPKey()) + return err +} + +// SetIPScanNeigh puts VPP IP scan neighbor configuration to the ETCD +func (ctl *VppAgentCtlImpl) SetIPScanNeigh() error { + ipScanNeigh := &l3.IPScanNeighbor{ + Mode: l3.IPScanNeighbor_BOTH, + ScanInterval: 11, + MaxProcTime: 36, + MaxUpdate: 5, + ScanIntDelay: 16, + StaleThreshold: 26, + } + + ctl.Log.Info("IP scan neighbor set") + return ctl.broker.Put(l3.IPScanNeighborKey(), ipScanNeigh) +} + +// UnsetIPScanNeigh removes VPP IP scan neighbor configuration from the ETCD +func (ctl *VppAgentCtlImpl) UnsetIPScanNeigh() error { + ctl.Log.Info("IP scan neighbor unset") + _, err := ctl.broker.Delete(l3.IPScanNeighborKey()) + return err +} + +// PutLinuxArp puts linux ARP entry configuration to the ETCD +func (ctl *VppAgentCtlImpl) PutLinuxArp() error { + linuxArp := &linuxL3.ARPEntry{ + Interface: "veth1", + IpAddress: "130.0.0.1", + HwAddress: "46:06:18:DB:05:3A", + } + + ctl.Log.Info("Linux ARP put: %v", linuxArp) + return ctl.broker.Put(linuxL3.ArpKey(linuxArp.Interface, linuxArp.IpAddress), linuxArp) +} + +// DeleteLinuxArp removes Linux ARP entry configuration from the ETCD +func (ctl *VppAgentCtlImpl) DeleteLinuxArp() error { + linuxArpKey := linuxL3.ArpKey("veth1", "130.0.0.1") + + ctl.Log.Info("Linux ARP delete: %v", linuxArpKey) + _, err := 
ctl.broker.Delete(linuxArpKey) + return err +} diff --git a/cmd/vpp-agent-ctl/data/natplugin.go b/cmd/vpp-agent-ctl/data/natplugin.go new file mode 100644 index 0000000000..aeeebe7add --- /dev/null +++ b/cmd/vpp-agent-ctl/data/natplugin.go @@ -0,0 +1,136 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package data + +import nat "github.com/ligato/vpp-agent/api/models/vpp/nat" + +// NatCtl NAT plugin related methods for vpp-agent-ctl +type NatCtl interface { + // PutGlobalNat puts global NAT44 configuration to the ETCD + PutGlobalNat() error + // DeleteGlobalNat removes global NAT configuration from the ETCD + DeleteGlobalNat() error + // PutDNat puts DNAT configuration to the ETCD + PutDNat() error + // DeleteDNat removes DNAT configuration from the ETCD + DeleteDNat() error +} + +// PutGlobalNat puts global NAT44 configuration to the ETCD +func (ctl *VppAgentCtlImpl) PutGlobalNat() error { + natGlobal := &nat.Nat44Global{ + Forwarding: false, + NatInterfaces: []*nat.Nat44Global_Interface{ + { + Name: "tap1", + IsInside: false, + OutputFeature: false, + }, + { + Name: "tap2", + IsInside: false, + OutputFeature: true, + }, + { + Name: "tap3", + IsInside: true, + OutputFeature: false, + }, + }, + AddressPool: []*nat.Nat44Global_Address{ + { + VrfId: 0, + Address: "192.168.0.1", + TwiceNat: false, + }, + { + VrfId: 0, + Address: "175.124.0.1", + TwiceNat: false, + }, + { + VrfId: 0, + Address: 
"10.10.0.1", + TwiceNat: false, + }, + }, + VirtualReassembly: &nat.VirtualReassembly{ + Timeout: 10, + MaxReassemblies: 20, + MaxFragments: 10, + DropFragments: true, + }, + } + + ctl.Log.Info("Global NAT put") + return ctl.broker.Put(nat.GlobalNAT44Key(), natGlobal) +} + +// DeleteGlobalNat removes global NAT configuration from the ETCD +func (ctl *VppAgentCtlImpl) DeleteGlobalNat() error { + ctl.Log.Info("Global NAT delete") + _, err := ctl.broker.Delete(nat.GlobalNAT44Key()) + return err +} + +// PutDNat puts DNAT configuration to the ETCD +func (ctl *VppAgentCtlImpl) PutDNat() error { + dNat := &nat.DNat44{ + Label: "dnat1", + StMappings: []*nat.DNat44_StaticMapping{ + { + ExternalInterface: "tap1", + ExternalIp: "192.168.0.1", + ExternalPort: 8989, + LocalIps: []*nat.DNat44_StaticMapping_LocalIP{ + { + VrfId: 0, + LocalIp: "172.124.0.2", + LocalPort: 6500, + Probability: 40, + }, + { + VrfId: 0, + LocalIp: "172.125.10.5", + LocalPort: 2300, + Probability: 40, + }, + }, + Protocol: 1, + TwiceNat: nat.DNat44_StaticMapping_ENABLED, + }, + }, + IdMappings: []*nat.DNat44_IdentityMapping{ + { + VrfId: 0, + IpAddress: "10.10.0.1", + Port: 2525, + Protocol: 0, + }, + }, + } + + ctl.Log.Info("DNAT put: %v", dNat.Label) + return ctl.broker.Put(nat.DNAT44Key(dNat.Label), dNat) +} + +// DeleteDNat removes DNAT configuration from the ETCD +func (ctl *VppAgentCtlImpl) DeleteDNat() error { + dNat := nat.DNAT44Key("dnat1") + + ctl.Log.Infof("DNAt delete: %v", dNat) + _, err := ctl.broker.Delete(dNat) + return err +} diff --git a/cmd/vpp-agent-ctl/data/puntplugin.go b/cmd/vpp-agent-ctl/data/puntplugin.go new file mode 100644 index 0000000000..55b518829b --- /dev/null +++ b/cmd/vpp-agent-ctl/data/puntplugin.go @@ -0,0 +1,99 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package data + +import ( + punt "github.com/ligato/vpp-agent/api/models/vpp/punt" +) + +// PuntCtl punt plugin related methods for vpp-agent-ctl (bridge domains, FIBs, L2 cross connects) +type PuntCtl interface { + // PutPunt puts punt configuration to the ETCD + PutPunt() error + // DeletePunt removes punt configuration from the ETCD + DeletePunt() error + // RegisterPuntViaSocket registers punt via socket to the ETCD + RegisterPuntViaSocket() error + // DeregisterPuntViaSocket removes punt socket registration from the ETCD + DeregisterPuntViaSocket() error + // PutIPRedirect puts IP redirect configuration to the ETCD + PutIPRedirect() error + // DeleteIPRedirect removes IP redirect from the ETCD + DeleteIPRedirect() error +} + +// PutPunt puts punt configuration to the ETCD +func (ctl *VppAgentCtlImpl) PutPunt() error { + puntCfg := &punt.ToHost{ + L3Protocol: punt.L3Protocol_IPv4, + L4Protocol: punt.L4Protocol_UDP, + Port: 9000, + } + + ctl.Log.Info("Punt put: %v", puntCfg) + return ctl.broker.Put(punt.ToHostKey(puntCfg.L3Protocol, puntCfg.L4Protocol, puntCfg.Port), puntCfg) +} + +// DeletePunt removes punt configuration from the ETCD +func (ctl *VppAgentCtlImpl) DeletePunt() error { + puntKey := punt.ToHostKey(punt.L3Protocol_IPv4, punt.L4Protocol_UDP, 9000) + + ctl.Log.Info("Punt delete: %v", puntKey) + _, err := ctl.broker.Delete(puntKey) + return err +} + +// RegisterPuntViaSocket registers punt via socket to the ETCD +func (ctl *VppAgentCtlImpl) RegisterPuntViaSocket() error { + puntCfg := &punt.ToHost{ + L3Protocol: 
punt.L3Protocol_IPv4, + L4Protocol: punt.L4Protocol_UDP, + Port: 9000, + SocketPath: "/tmp/socket/path", + } + + ctl.Log.Info("Punt via socket put: %v", puntCfg) + return ctl.broker.Put(punt.ToHostKey(puntCfg.L3Protocol, puntCfg.L4Protocol, puntCfg.Port), puntCfg) +} + +// DeregisterPuntViaSocket removes punt socket registration from the ETCD +func (ctl *VppAgentCtlImpl) DeregisterPuntViaSocket() error { + puntKey := punt.ToHostKey(punt.L3Protocol_IPv4, punt.L4Protocol_UDP, 9000) + + ctl.Log.Info("Punt via socket delete: %v", puntKey) + _, err := ctl.broker.Delete(puntKey) + return err +} + +// PutIPRedirect puts IP redirect configuration to the ETCD +func (ctl *VppAgentCtlImpl) PutIPRedirect() error { + puntCfg := &punt.IPRedirect{ + L3Protocol: punt.L3Protocol_IPv4, + TxInterface: "tap1", + NextHop: "192.168.0.1", + } + + ctl.Log.Info("IP redirect put: %v", puntCfg) + return ctl.broker.Put(punt.IPRedirectKey(puntCfg.L3Protocol, puntCfg.TxInterface), puntCfg) +} + +// DeleteIPRedirect removes IP redirect from the ETCD +func (ctl *VppAgentCtlImpl) DeleteIPRedirect() error { + puntKey := punt.IPRedirectKey(punt.L3Protocol_IPv4, "tap1") + + ctl.Log.Info("IP redirect delete: %v", puntKey) + _, err := ctl.broker.Delete(puntKey) + return err +} diff --git a/cmd/vpp-agent-ctl/data/stnplugin.go b/cmd/vpp-agent-ctl/data/stnplugin.go new file mode 100644 index 0000000000..04aa14727c --- /dev/null +++ b/cmd/vpp-agent-ctl/data/stnplugin.go @@ -0,0 +1,45 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package data + +import stn "github.com/ligato/vpp-agent/api/models/vpp/stn" + +// StnCtl STN plugin related methods for vpp-agent-ctl +type StnCtl interface { + // PutStn puts STN configuration to the ETCD + PutStn() error + // DeleteStn removes STN configuration from the ETCD + DeleteStn() error +} + +// PutStn puts STN configuration to the ETCD +func (ctl *VppAgentCtlImpl) PutStn() error { + stnRule := &stn.Rule{ + IpAddress: "192.168.50.12", + Interface: "memif1", + } + + ctl.Log.Infof("STN put: %v", stnRule) + return ctl.broker.Put(stn.Key(stnRule.Interface, stnRule.IpAddress), stnRule) +} + +// DeleteStn removes STN configuration from the ETCD +func (ctl *VppAgentCtlImpl) DeleteStn() error { + stnRuleKey := stn.Key("memif1", "192.168.50.12") + + ctl.Log.Infof("STN delete: %v", stnRuleKey) + _, err := ctl.broker.Delete(stnRuleKey) + return err +} diff --git a/cmd/vpp-agent-ctl/data/vpp-agent-ctl.go b/cmd/vpp-agent-ctl/data/vpp-agent-ctl.go new file mode 100644 index 0000000000..2f076288e7 --- /dev/null +++ b/cmd/vpp-agent-ctl/data/vpp-agent-ctl.go @@ -0,0 +1,74 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package data + +import ( + "fmt" + "github.com/ligato/cn-infra/db/keyval" + "github.com/ligato/cn-infra/db/keyval/etcd" + "github.com/ligato/cn-infra/logging" + "github.com/ligato/cn-infra/logging/logrus" + "github.com/ligato/cn-infra/servicelabel" +) + +// VppAgentCtl is test tool for testing VPP Agent plugins. In addition to testing, the vpp-agent-ctl tool can + // be used to demonstrate the usage of VPP Agent plugins and their APIs. +type VppAgentCtl interface { + // GetCommands returns provided command set + GetCommands() []string + + // Etcd access + EtcdCtl + // Other interfaces with configuration related methods + ACLCtl + InterfacesCtl + IPSecCtl + L2Ctl + L3Ctl + NatCtl + PuntCtl + StnCtl +} + +// VppAgentCtlImpl is a ctl context +type VppAgentCtlImpl struct { + Log logging.Logger + commands []string + serviceLabel servicelabel.Plugin + bytesConnection *etcd.BytesConnectionEtcd + broker keyval.ProtoBroker +} + +// NewVppAgentCtl creates new VppAgentCtl object with initialized fields +func NewVppAgentCtl(etcdCfg string, cmdSet []string) (*VppAgentCtlImpl, error) { + var err error + ctl := &VppAgentCtlImpl{ + Log: logrus.DefaultLogger(), + commands: cmdSet, + } + + if err = ctl.serviceLabel.Init(); err != nil { + return nil, fmt.Errorf("failed to init service label plugin") + } + // Establish ETCD connection + ctl.bytesConnection, ctl.broker, err = ctl.CreateEtcdClient(etcdCfg) + + return ctl, err +} + +// GetCommands returns original vpp-agent-ctl commands +func (ctl *VppAgentCtlImpl) GetCommands() []string { + return ctl.commands +} diff --git a/cmd/vpp-agent-ctl/data_cmd.go b/cmd/vpp-agent-ctl/data_cmd.go deleted file mode 100644 index d7f2346b8a..0000000000 --- a/cmd/vpp-agent-ctl/data_cmd.go +++ /dev/null @@ -1,1447 +0,0 @@ -// Copyright (c) 2018 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// package vpp-agent-ctl implements the vpp-agent-ctl test tool for testing -// VPP Agent plugins. In addition to testing, the vpp-agent-ctl tool can -// be used to demonstrate the usage of VPP Agent plugins and their APIs. - -package main - -import ( - "log" - "os" - - "github.com/ligato/vpp-agent/plugins/vpp/model/punt" - - "github.com/ligato/cn-infra/db/keyval" - "github.com/ligato/cn-infra/db/keyval/etcd" - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/cn-infra/servicelabel" - linuxIf "github.com/ligato/vpp-agent/plugins/linux/model/interfaces" - linuxL3 "github.com/ligato/vpp-agent/plugins/linux/model/l3" - "github.com/ligato/vpp-agent/plugins/vpp/model/acl" - "github.com/ligato/vpp-agent/plugins/vpp/model/bfd" - "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/model/ipsec" - "github.com/ligato/vpp-agent/plugins/vpp/model/l2" - "github.com/ligato/vpp-agent/plugins/vpp/model/l3" - "github.com/ligato/vpp-agent/plugins/vpp/model/l4" - "github.com/ligato/vpp-agent/plugins/vpp/model/nat" - "github.com/ligato/vpp-agent/plugins/vpp/model/stn" - "github.com/namsral/flag" -) - -// VppAgentCtl is ctl context -type VppAgentCtl struct { - Log logging.Logger - Commands []string - serviceLabel servicelabel.Plugin - bytesConnection *etcd.BytesConnectionEtcd - broker keyval.ProtoBroker -} - -// Init creates new VppAgentCtl object with initialized fields -func initCtl(etcdCfg string, cmdSet []string) (*VppAgentCtl, error) { - var err error - ctl 
:= &VppAgentCtl{ - Log: logrus.DefaultLogger(), - Commands: cmdSet, - } - // Parse service label - flag.CommandLine.ParseEnv(os.Environ()) - ctl.serviceLabel.Init() - // Establish ETCD connection - ctl.bytesConnection, ctl.broker, err = ctl.createEtcdClient(etcdCfg) - - return ctl, err -} - -// Access lists - -// CreateACL puts access list config to the ETCD -func (ctl *VppAgentCtl) createACL() { - accessList := acl.AccessLists{ - Acls: []*acl.AccessLists_Acl{ - // Single ACL entry - { - AclName: "acl1", - // ACL rules - Rules: []*acl.AccessLists_Acl_Rule{ - // ACL IP rule - { - AclAction: acl.AclAction_PERMIT, - Match: &acl.AccessLists_Acl_Rule_Match{ - IpRule: &acl.AccessLists_Acl_Rule_Match_IpRule{ - Ip: &acl.AccessLists_Acl_Rule_Match_IpRule_Ip{ - SourceNetwork: "192.168.1.1/32", - DestinationNetwork: "10.20.0.1/24", - }, - }, - }, - }, - // ACL ICMP rule - { - AclAction: acl.AclAction_PERMIT, - Match: &acl.AccessLists_Acl_Rule_Match{ - IpRule: &acl.AccessLists_Acl_Rule_Match_IpRule{ - Icmp: &acl.AccessLists_Acl_Rule_Match_IpRule_Icmp{ - Icmpv6: false, - IcmpCodeRange: &acl.AccessLists_Acl_Rule_Match_IpRule_Icmp_Range{ - First: 150, - Last: 250, - }, - IcmpTypeRange: &acl.AccessLists_Acl_Rule_Match_IpRule_Icmp_Range{ - First: 1150, - Last: 1250, - }, - }, - }, - }, - }, - // ACL TCP rule - { - AclAction: acl.AclAction_PERMIT, - Match: &acl.AccessLists_Acl_Rule_Match{ - IpRule: &acl.AccessLists_Acl_Rule_Match_IpRule{ - Tcp: &acl.AccessLists_Acl_Rule_Match_IpRule_Tcp{ - TcpFlagsMask: 20, - TcpFlagsValue: 10, - SourcePortRange: &acl.AccessLists_Acl_Rule_Match_IpRule_PortRange{ - LowerPort: 150, - UpperPort: 250, - }, - DestinationPortRange: &acl.AccessLists_Acl_Rule_Match_IpRule_PortRange{ - LowerPort: 1150, - UpperPort: 1250, - }, - }, - }, - }, - }, - // ACL UDP rule - { - AclAction: acl.AclAction_PERMIT, - Match: &acl.AccessLists_Acl_Rule_Match{ - IpRule: &acl.AccessLists_Acl_Rule_Match_IpRule{ - Udp: &acl.AccessLists_Acl_Rule_Match_IpRule_Udp{ - 
SourcePortRange: &acl.AccessLists_Acl_Rule_Match_IpRule_PortRange{ - LowerPort: 150, - UpperPort: 250, - }, - DestinationPortRange: &acl.AccessLists_Acl_Rule_Match_IpRule_PortRange{ - LowerPort: 1150, - UpperPort: 1250, - }, - }, - }, - }, - }, - // ACL MAC IP rule. Note: do not combine ACL ip and mac ip rules in single acl - //{ - // Actions: &acl.AccessLists_Acl_Rule_Actions{ - // AclAction: acl.AclAction_PERMIT, - // }, - // Match: &acl.AccessLists_Acl_Rule_Match{ - // MacipRule: &acl.AccessLists_Acl_Rule_Match_MacIpRule{ - // SourceAddress: "192.168.0.1", - // SourceAddressPrefix: uint32(16), - // SourceMacAddress: "11:44:0A:B8:4A:35", - // SourceMacAddressMask: "ff:ff:ff:ff:00:00", - // }, - // }, - //}, - }, - Interfaces: &acl.AccessLists_Acl_Interfaces{ - Ingress: []string{"tap1", "tap2"}, - Egress: []string{"tap1", "tap2"}, - }, - }, - }, - } - - ctl.Log.Print(accessList.Acls[0]) - ctl.broker.Put(acl.Key(accessList.Acls[0].AclName), accessList.Acls[0]) -} - -// DeleteACL removes access list config from the ETCD -func (ctl *VppAgentCtl) deleteACL() { - aclKey := acl.Key("acl1") - - ctl.Log.Println("Deleting", aclKey) - ctl.broker.Delete(aclKey) -} - -// Bidirectional forwarding detection - -// CreateBfdSession puts bidirectional forwarding detection session config to the ETCD -func (ctl *VppAgentCtl) createBfdSession() { - session := bfd.SingleHopBFD{ - Sessions: []*bfd.SingleHopBFD_Session{ - { - Interface: "memif1", - Enabled: true, - SourceAddress: "172.125.40.1", - DestinationAddress: "20.10.0.5", - RequiredMinRxInterval: 8, - DesiredMinTxInterval: 3, - DetectMultiplier: 9, - Authentication: &bfd.SingleHopBFD_Session_Authentication{ - KeyId: 1, - AdvertisedKeyId: 1, - }, - }, - }, - } - - ctl.Log.Println(session) - ctl.broker.Put(bfd.SessionKey(session.Sessions[0].Interface), session.Sessions[0]) -} - -// DeleteBfdSession removes bidirectional forwarding detection session config from the ETCD -func (ctl *VppAgentCtl) deleteBfdSession() { - sessionKey := 
bfd.SessionKey("memif1") - - ctl.Log.Println("Deleting", sessionKey) - ctl.broker.Delete(sessionKey) -} - -// CreateBfdKey puts bidirectional forwarding detection authentication key config to the ETCD -func (ctl *VppAgentCtl) createBfdKey() { - authKey := bfd.SingleHopBFD{ - Keys: []*bfd.SingleHopBFD_Key{ - { - Name: "bfdKey1", - Id: 1, - AuthenticationType: bfd.SingleHopBFD_Key_METICULOUS_KEYED_SHA1, // or Keyed sha1 - Secret: "1981491891941891", - }, - }, - } - - ctl.Log.Println(authKey) - ctl.broker.Put(bfd.AuthKeysKey(string(authKey.Keys[0].Id)), authKey.Keys[0]) -} - -// DeleteBfdKey removes bidirectional forwarding detection authentication key config from the ETCD -func (ctl *VppAgentCtl) deleteBfdKey() { - bfdAuthKeyKey := bfd.AuthKeysKey(string(1)) - - ctl.Log.Println("Deleting", bfdAuthKeyKey) - ctl.broker.Delete(bfdAuthKeyKey) -} - -// CreateBfdEcho puts bidirectional forwarding detection echo detection config to the ETCD -func (ctl *VppAgentCtl) createBfdEcho() { - echoFunction := bfd.SingleHopBFD{ - EchoFunction: &bfd.SingleHopBFD_EchoFunction{ - EchoSourceInterface: "memif1", - }, - } - - ctl.Log.Println(echoFunction) - ctl.broker.Put(bfd.EchoFunctionKey("memif1"), echoFunction.EchoFunction) -} - -// DeleteBfdEcho removes bidirectional forwarding detection echo detection config from the ETCD -func (ctl *VppAgentCtl) deleteBfdEcho() { - echoFunctionKey := bfd.EchoFunctionKey("memif1") - - ctl.Log.Println("Deleting", echoFunctionKey) - ctl.broker.Delete(echoFunctionKey) -} - -// VPP interfaces - -// CreateEthernet puts ethernet type interface config to the ETCD -func (ctl *VppAgentCtl) createEthernet() { - ethernet := &interfaces.Interfaces{ - Interfaces: []*interfaces.Interfaces_Interface{ - { - Name: "GigabitEthernet0/8/0", - Type: interfaces.InterfaceType_ETHERNET_CSMACD, - Enabled: true, - IpAddresses: []string{ - "192.168.1.1", - "2001:db8:0:0:0:ff00:5168:2bc8/48", - }, - //RxPlacementSettings: &interfaces.Interfaces_Interface_RxPlacementSettings{ - 
// Queue: 0, - // Worker: 1, - //}, - //Unnumbered: &interfaces.Interfaces_Interface_Unnumbered{ - // IsUnnumbered: true, - // InterfaceWithIP: "memif1", - //}, - }, - }, - } - - ctl.Log.Println(ethernet) - ctl.broker.Put(interfaces.InterfaceKey(ethernet.Interfaces[0].Name), ethernet.Interfaces[0]) -} - -// DeleteEthernet removes ethernet type interface config from the ETCD -func (ctl *VppAgentCtl) deleteEthernet() { - ethernetKey := interfaces.InterfaceKey("GigabitEthernet0/8/0") - - ctl.Log.Println("Deleting", ethernetKey) - ctl.broker.Delete(ethernetKey) -} - -// CreateTap puts TAP type interface config to the ETCD -func (ctl *VppAgentCtl) createTap() { - tap := &interfaces.Interfaces{ - Interfaces: []*interfaces.Interfaces_Interface{ - { - Name: "tap1", - Type: interfaces.InterfaceType_TAP_INTERFACE, - Enabled: true, - PhysAddress: "12:E4:0E:D5:BC:DC", - IpAddresses: []string{ - "192.168.20.3/24", - }, - //Unnumbered: &interfaces.Interfaces_Interface_Unnumbered{ - // IsUnnumbered: true, - // InterfaceWithIP: "memif1", - //}, - Tap: &interfaces.Interfaces_Interface_Tap{ - HostIfName: "tap-host", - }, - }, - }, - } - - ctl.Log.Println(tap) - ctl.broker.Put(interfaces.InterfaceKey(tap.Interfaces[0].Name), tap.Interfaces[0]) -} - -// DeleteTap removes TAP type interface config from the ETCD -func (ctl *VppAgentCtl) deleteTap() { - tapKey := interfaces.InterfaceKey("tap1") - - ctl.Log.Println("Deleting", tapKey) - ctl.broker.Delete(tapKey) -} - -// CreateLoopback puts loopback type interface config to the ETCD -func (ctl *VppAgentCtl) createLoopback() { - loopback := &interfaces.Interfaces{ - Interfaces: []*interfaces.Interfaces_Interface{ - { - Name: "loop1", - Type: interfaces.InterfaceType_SOFTWARE_LOOPBACK, - Enabled: true, - PhysAddress: "7C:4E:E7:8A:63:68", - Mtu: 1478, - IpAddresses: []string{ - "192.168.25.3/24", - "172.125.45.1/24", - }, - //Unnumbered: &interfaces.Interfaces_Interface_Unnumbered{ - // IsUnnumbered: true, - // InterfaceWithIP: "memif1", - 
//}, - }, - }, - } - - ctl.Log.Println(loopback) - ctl.broker.Put(interfaces.InterfaceKey(loopback.Interfaces[0].Name), loopback.Interfaces[0]) -} - -// DeleteLoopback removes loopback type interface config from the ETCD -func (ctl *VppAgentCtl) deleteLoopback() { - loopbackKey := interfaces.InterfaceKey("loop1") - - ctl.Log.Println("Deleting", loopbackKey) - ctl.broker.Delete(loopbackKey) -} - -// CreateMemif puts memif type interface config to the ETCD -func (ctl *VppAgentCtl) createMemif() { - memif := &interfaces.Interfaces{ - Interfaces: []*interfaces.Interfaces_Interface{ - { - Name: "memif1", - Type: interfaces.InterfaceType_MEMORY_INTERFACE, - Enabled: true, - PhysAddress: "4E:93:2A:38:A7:77", - Mtu: 1478, - IpAddresses: []string{ - "172.125.40.1/24", - }, - //Unnumbered: &interfaces.Interfaces_Interface_Unnumbered{ - // IsUnnumbered: true, - // InterfaceWithIP: "memif1", - //}, - Memif: &interfaces.Interfaces_Interface_Memif{ - Id: 1, - Secret: "secret", - Master: true, - SocketFilename: "/tmp/memif1.sock", - }, - }, - }, - } - - ctl.Log.Println(memif) - ctl.broker.Put(interfaces.InterfaceKey(memif.Interfaces[0].Name), memif.Interfaces[0]) -} - -// DeleteMemif removes memif type interface config from the ETCD -func (ctl *VppAgentCtl) deleteMemif() { - memifKey := interfaces.InterfaceKey("memif1") - - ctl.Log.Println("Deleting", memifKey) - ctl.broker.Delete(memifKey) -} - -// CreateVxLan puts VxLAN type interface config to the ETCD -func (ctl *VppAgentCtl) createVxlan() { - vxlan := &interfaces.Interfaces{ - Interfaces: []*interfaces.Interfaces_Interface{ - { - Name: "vxlan1", - Type: interfaces.InterfaceType_VXLAN_TUNNEL, - Enabled: true, - Mtu: 1478, - IpAddresses: []string{ - "172.125.40.1/24", - }, - //Unnumbered: &interfaces.Interfaces_Interface_Unnumbered{ - // IsUnnumbered: true, - // InterfaceWithIP: "memif1", - //}, - Vxlan: &interfaces.Interfaces_Interface_Vxlan{ - //Multicast: "if1", - SrcAddress: "192.168.42.1", - DstAddress: "192.168.42.2", - 
Vni: 13, - }, - }, - }, - } - - ctl.Log.Println(vxlan) - ctl.broker.Put(interfaces.InterfaceKey(vxlan.Interfaces[0].Name), vxlan.Interfaces[0]) -} - -// DeleteVxlan removes VxLAN type interface config from the ETCD -func (ctl *VppAgentCtl) deleteVxlan() { - vxlanKey := interfaces.InterfaceKey("vxlan1") - - ctl.Log.Println("Deleting", vxlanKey) - ctl.broker.Delete(vxlanKey) -} - -// CreateVmxNet3 puts vmxNet3 type interface config to the ETCD -func (ctl *VppAgentCtl) createVmxNet3() { - vxlan := &interfaces.Interfaces{ - Interfaces: []*interfaces.Interfaces_Interface{ - { - Name: "vmxnet3-a/14/19/1e", - Type: interfaces.InterfaceType_VMXNET3_INTERFACE, - Enabled: true, - Mtu: 1478, - IpAddresses: []string{ - "172.125.40.1/24", - }, - VmxNet3: &interfaces.Interfaces_Interface_VmxNet3{ - EnableElog: true, - RxqSize: 2048, - TxqSize: 512, - }, - }, - }, - } - - ctl.Log.Println(vxlan) - ctl.broker.Put(interfaces.InterfaceKey(vxlan.Interfaces[0].Name), vxlan.Interfaces[0]) -} - -// DeleteVxlan removes VxLAN type interface config from the ETCD -func (ctl *VppAgentCtl) deleteVmxNet3() { - vmxnet3Key := interfaces.InterfaceKey("vmxnet3-a/14/19") - - ctl.Log.Println("Deleting", vmxnet3Key) - ctl.broker.Delete(vmxnet3Key) -} - -// CreateAfPacket puts Af-packet type interface config to the ETCD -func (ctl *VppAgentCtl) createAfPacket() { - ifs := interfaces.Interfaces{ - Interfaces: []*interfaces.Interfaces_Interface{ - { - Name: "afpacket1", - Type: interfaces.InterfaceType_AF_PACKET_INTERFACE, - Enabled: true, - Mtu: 1500, - IpAddresses: []string{ - "172.125.40.1/24", - "192.168.12.1/24", - "fd21:7408:186f::/48", - }, - //Unnumbered: &interfaces.Interfaces_Interface_Unnumbered{ - // IsUnnumbered: true, - // InterfaceWithIP: "memif1", - //}, - Afpacket: &interfaces.Interfaces_Interface_Afpacket{ - HostIfName: "lo", - }, - }, - }, - } - - ctl.Log.Println(ifs) - ctl.broker.Put(interfaces.InterfaceKey(ifs.Interfaces[0].Name), ifs.Interfaces[0]) -} - -// DeleteAfPacket removes 
AF-Packet type interface config from the ETCD -func (ctl *VppAgentCtl) deleteAfPacket() { - afPacketKey := interfaces.InterfaceKey("afpacket1") - - ctl.Log.Println("Deleting", afPacketKey) - ctl.broker.Delete(afPacketKey) -} - -// Linux interfaces - -// CreateVethPair puts two VETH type interfaces to the ETCD -func (ctl *VppAgentCtl) createVethPair() { - // Note: VETH interfaces are created in pairs - veths := linuxIf.LinuxInterfaces{ - Interface: []*linuxIf.LinuxInterfaces_Interface{ - { - Name: "veth1", - Type: linuxIf.LinuxInterfaces_VETH, - Enabled: true, - PhysAddress: "D2:74:8C:12:67:D2", - Namespace: &linuxIf.LinuxInterfaces_Interface_Namespace{ - Name: "ns1", - Type: linuxIf.LinuxInterfaces_Interface_Namespace_NAMED_NS, - }, - Mtu: 1500, - IpAddresses: []string{ - "192.168.22.1/24", - "10.0.2.2/24", - }, - Veth: &linuxIf.LinuxInterfaces_Interface_Veth{ - PeerIfName: "veth2", - }, - }, - { - Name: "veth2", - Type: linuxIf.LinuxInterfaces_VETH, - Enabled: true, - PhysAddress: "92:C7:42:67:AB:CD", - Namespace: &linuxIf.LinuxInterfaces_Interface_Namespace{ - Name: "ns2", - Type: linuxIf.LinuxInterfaces_Interface_Namespace_NAMED_NS, - }, - Mtu: 1500, - IpAddresses: []string{ - "192.168.22.5/24", - }, - Veth: &linuxIf.LinuxInterfaces_Interface_Veth{ - PeerIfName: "veth1", - }, - }, - }, - } - - ctl.Log.Println(veths) - ctl.broker.Put(linuxIf.InterfaceKey(veths.Interface[0].Name), veths.Interface[0]) - ctl.broker.Put(linuxIf.InterfaceKey(veths.Interface[1].Name), veths.Interface[1]) -} - -// DeleteVethPair removes VETH pair interfaces from the ETCD -func (ctl *VppAgentCtl) deleteVethPair() { - veth1Key := linuxIf.InterfaceKey("veth1") - veth2Key := linuxIf.InterfaceKey("veth2") - - ctl.Log.Println("Deleting", veth1Key) - ctl.broker.Delete(veth1Key) - ctl.Log.Println("Deleting", veth2Key) - ctl.broker.Delete(veth2Key) -} - -// CreateLinuxTap puts linux TAP type interface configuration to the ETCD -func (ctl *VppAgentCtl) createLinuxTap() { - linuxTap := 
linuxIf.LinuxInterfaces{ - Interface: []*linuxIf.LinuxInterfaces_Interface{ - { - Name: "tap1", - HostIfName: "tap-host", - Type: linuxIf.LinuxInterfaces_AUTO_TAP, - Enabled: true, - PhysAddress: "BC:FE:E9:5E:07:04", - Namespace: &linuxIf.LinuxInterfaces_Interface_Namespace{ - Name: "ns1", - Type: linuxIf.LinuxInterfaces_Interface_Namespace_NAMED_NS, - }, - Mtu: 1500, - IpAddresses: []string{ - "172.52.45.127/24", - }, - }, - }, - } - - ctl.Log.Println(linuxTap) - ctl.broker.Put(linuxIf.InterfaceKey(linuxTap.Interface[0].Name), linuxTap.Interface[0]) -} - -// DeleteLinuxTap removes linux TAP type interface configuration from the ETCD -func (ctl *VppAgentCtl) deleteLinuxTap() { - linuxTapKey := linuxIf.InterfaceKey("tap1") - - ctl.Log.Println("Deleting", linuxTapKey) - ctl.broker.Delete(linuxTapKey) -} - -// IPsec - -// createIPsecSPD puts STD configuration to the ETCD -func (ctl *VppAgentCtl) createIPsecSPD() { - spd := ipsec.SecurityPolicyDatabases_SPD{ - Name: "spd1", - Interfaces: []*ipsec.SecurityPolicyDatabases_SPD_Interface{ - { - Name: "tap1", - }, - { - Name: "loop1", - }, - }, - PolicyEntries: []*ipsec.SecurityPolicyDatabases_SPD_PolicyEntry{ - { - Priority: 100, - IsOutbound: false, - Action: 0, - Protocol: 50, - }, - { - Priority: 100, - IsOutbound: true, - Action: 0, - Protocol: 50, - }, - { - Priority: 10, - IsOutbound: false, - RemoteAddrStart: "10.0.0.1", - RemoteAddrStop: "10.0.0.1", - LocalAddrStart: "10.0.0.2", - LocalAddrStop: "10.0.0.2", - Action: 3, - Sa: "sa1", - }, - { - Priority: 10, - IsOutbound: true, - RemoteAddrStart: "10.0.0.1", - RemoteAddrStop: "10.0.0.1", - LocalAddrStart: "10.0.0.2", - LocalAddrStop: "10.0.0.2", - Action: 3, - Sa: "sa2", - }, - }, - } - - ctl.Log.Println(spd) - ctl.broker.Put(ipsec.SPDKey(spd.Name), &spd) -} - -// deleteIPsecSPD removes STD configuration from the ETCD -func (ctl *VppAgentCtl) deleteIPsecSPD() { - stdKey := ipsec.SPDKey("spd1") - - ctl.Log.Println("Deleting", stdKey) - ctl.broker.Delete(stdKey) -} - 
-// creteIPsecSA puts two security association configurations to the ETCD -func (ctl *VppAgentCtl) createIPsecSA() { - sa1 := ipsec.SecurityAssociations_SA{ - Name: "sa1", - Spi: 1001, - Protocol: 1, - CryptoAlg: 1, - CryptoKey: "4a506a794f574265564551694d653768", - IntegAlg: 2, - IntegKey: "4339314b55523947594d6d3547666b45764e6a58", - EnableUdpEncap: true, - } - sa2 := ipsec.SecurityAssociations_SA{ - Name: "sa2", - Spi: 1000, - Protocol: 1, - CryptoAlg: 1, - CryptoKey: "4a506a794f574265564551694d653768", - IntegAlg: 2, - IntegKey: "4339314b55523947594d6d3547666b45764e6a58", - EnableUdpEncap: false, - } - - ctl.Log.Println(sa1) - ctl.broker.Put(ipsec.SAKey(sa1.Name), &sa1) - ctl.Log.Println(sa2) - ctl.broker.Put(ipsec.SAKey(sa2.Name), &sa2) -} - -// deleteIPsecSA removes SA configuration from the ETCD -func (ctl *VppAgentCtl) deleteIPsecSA() { - saKey1 := ipsec.SPDKey("sa1") - saKey2 := ipsec.SPDKey("sa2") - - ctl.Log.Println("Deleting", saKey1) - ctl.broker.Delete(saKey1) - ctl.Log.Println("Deleting", saKey2) - ctl.broker.Delete(saKey2) -} - -// createIPSecTunnelInterface configures IPSec tunnel interface -func (ctl *VppAgentCtl) createIPSecTunnelInterface() { - tunnelIf := ipsec.TunnelInterfaces_Tunnel{ - Name: "ipsec0", - Esn: false, - AntiReplay: false, - LocalSpi: 1000, - RemoteSpi: 1001, - LocalIp: "10.0.0.2", - RemoteIp: "10.0.0.1", - CryptoAlg: 1, - LocalCryptoKey: "4a506a794f574265564551694d653768", - RemoteCryptoKey: "4a506a794f574265564551694d653768", - IntegAlg: 2, - LocalIntegKey: "4339314b55523947594d6d3547666b45764e6a58", - RemoteIntegKey: "4339314b55523947594d6d3547666b45764e6a58", - Enabled: true, - IpAddresses: []string{"20.0.0.0/24"}, - //UnnumberedName: "tap1", - Vrf: 0, - } - - ctl.Log.Println(tunnelIf) - ctl.broker.Put(ipsec.TunnelKey(tunnelIf.Name), &tunnelIf) -} - -// deleteIPSecTunnelInterface removes IPSec tunnel interface -func (ctl *VppAgentCtl) deleteIPSecTunnelInterface() { - tunnelKey := ipsec.TunnelKey("ipsec0") - - 
ctl.Log.Println("Deleting", tunnelKey) - ctl.broker.Delete(tunnelKey) -} - -// STN - -// CreateStn puts STN configuration to the ETCD -func (ctl *VppAgentCtl) createStn() { - stnRule := stn.STN_Rule{ - RuleName: "rule1", - IpAddress: "192.168.50.12", - Interface: "memif1", - } - - ctl.Log.Println(stnRule) - ctl.broker.Put(stn.Key(stnRule.RuleName), &stnRule) -} - -// DeleteStn removes STN configuration from the ETCD -func (ctl *VppAgentCtl) deleteStn() { - stnRuleKey := stn.Key("rule1") - - ctl.Log.Println("Deleting", stnRuleKey) - ctl.broker.Delete(stnRuleKey) -} - -// Network address translation - -// CreateGlobalNat puts global NAT44 configuration to the ETCD -func (ctl *VppAgentCtl) createGlobalNat() { - natGlobal := &nat.Nat44Global{ - Forwarding: false, - NatInterfaces: []*nat.Nat44Global_NatInterface{ - { - Name: "tap1", - IsInside: false, - OutputFeature: false, - }, - { - Name: "tap2", - IsInside: false, - OutputFeature: true, - }, - { - Name: "tap3", - IsInside: true, - OutputFeature: false, - }, - }, - AddressPools: []*nat.Nat44Global_AddressPool{ - { - VrfId: 0, - FirstSrcAddress: "192.168.0.1", - TwiceNat: false, - }, - { - VrfId: 0, - FirstSrcAddress: "175.124.0.1", - LastSrcAddress: "175.124.0.3", - TwiceNat: false, - }, - { - VrfId: 0, - FirstSrcAddress: "10.10.0.1", - LastSrcAddress: "10.10.0.2", - TwiceNat: false, - }, - }, - VirtualReassemblyIpv4: &nat.Nat44Global_VirtualReassembly{ - Timeout: 10, - MaxReass: 20, - MaxFrag: 10, - DropFrag: true, - }, - VirtualReassemblyIpv6: &nat.Nat44Global_VirtualReassembly{ - Timeout: 15, - MaxReass: 25, - MaxFrag: 15, - DropFrag: false, - }, - } - - ctl.Log.Println(natGlobal) - ctl.broker.Put(nat.GlobalPrefix, natGlobal) -} - -// DeleteGlobalNat removes global NAT configuration from the ETCD -func (ctl *VppAgentCtl) deleteGlobalNat() { - globalNat := nat.GlobalPrefix - - ctl.Log.Println("Deleting", globalNat) - ctl.broker.Delete(globalNat) -} - -// CreateSNat puts SNAT configuration to the ETCD -func (ctl 
*VppAgentCtl) createSNat() { - // Note: SNAT not implemented - sNat := &nat.Nat44SNat_SNatConfig{ - Label: "snat1", - } - - ctl.Log.Println(sNat) - ctl.broker.Put(nat.SNatKey(sNat.Label), sNat) -} - -// DeleteSNat removes SNAT configuration from the ETCD -func (ctl *VppAgentCtl) deleteSNat() { - sNat := nat.SNatKey("snat1") - - ctl.Log.Println("Deleting", sNat) - ctl.broker.Delete(sNat) -} - -// CreateDNat puts DNAT configuration to the ETCD -func (ctl *VppAgentCtl) createDNat() { - // DNat config - dNat := &nat.Nat44DNat_DNatConfig{ - Label: "dnat1", - StMappings: []*nat.Nat44DNat_DNatConfig_StaticMapping{ - { - ExternalInterface: "tap1", - ExternalIp: "192.168.0.1", - ExternalPort: 8989, - LocalIps: []*nat.Nat44DNat_DNatConfig_StaticMapping_LocalIP{ - { - VrfId: 0, - LocalIp: "172.124.0.2", - LocalPort: 6500, - Probability: 40, - }, - { - VrfId: 0, - LocalIp: "172.125.10.5", - LocalPort: 2300, - Probability: 40, - }, - }, - Protocol: 1, - TwiceNat: nat.TwiceNatMode_ENABLED, - }, - }, - IdMappings: []*nat.Nat44DNat_DNatConfig_IdentityMapping{ - { - VrfId: 0, - IpAddress: "10.10.0.1", - Port: 2525, - Protocol: 0, - }, - }, - } - - ctl.Log.Println(dNat) - ctl.broker.Put(nat.DNatKey(dNat.Label), dNat) -} - -// DeleteDNat removes DNAT configuration from the ETCD -func (ctl *VppAgentCtl) deleteDNat() { - dNat := nat.DNatKey("dnat1") - - ctl.Log.Println("Deleting", dNat) - ctl.broker.Delete(dNat) -} - -// Bridge domains - -// CreateBridgeDomain puts L2 bridge domain configuration to the ETCD -func (ctl *VppAgentCtl) createBridgeDomain() { - bd := l2.BridgeDomains{ - BridgeDomains: []*l2.BridgeDomains_BridgeDomain{ - { - Name: "bd1", - Learn: true, - ArpTermination: true, - Flood: true, - UnknownUnicastFlood: true, - Forward: true, - MacAge: 0, - Interfaces: []*l2.BridgeDomains_BridgeDomain_Interfaces{ - { - Name: "loop1", - BridgedVirtualInterface: true, - SplitHorizonGroup: 0, - }, - { - Name: "tap1", - BridgedVirtualInterface: false, - SplitHorizonGroup: 1, - }, - { - 
Name: "memif1", - BridgedVirtualInterface: false, - SplitHorizonGroup: 2, - }, - }, - ArpTerminationTable: []*l2.BridgeDomains_BridgeDomain_ArpTerminationEntry{ - { - IpAddress: "192.168.50.20", - PhysAddress: "A7:5D:44:D8:E6:51", - }, - }, - }, - }, - } - - ctl.Log.Println(bd) - ctl.broker.Put(l2.BridgeDomainKey(bd.BridgeDomains[0].Name), bd.BridgeDomains[0]) -} - -// DeleteBridgeDomain removes bridge domain configuration from the ETCD -func (ctl *VppAgentCtl) deleteBridgeDomain() { - bdKey := l2.BridgeDomainKey("bd1") - - ctl.Log.Println("Deleting", bdKey) - ctl.broker.Delete(bdKey) -} - -// FIB - -// CreateFib puts L2 FIB entry configuration to the ETCD -func (ctl *VppAgentCtl) createFib() { - fib := l2.FibTable{ - FibTableEntries: []*l2.FibTable_FibEntry{ - { - PhysAddress: "EA:FE:3C:64:A7:44", - BridgeDomain: "bd1", - OutgoingInterface: "loop1", - StaticConfig: true, - BridgedVirtualInterface: true, - Action: l2.FibTable_FibEntry_FORWARD, // or DROP - }, - }, - } - - ctl.Log.Println(fib) - ctl.broker.Put(l2.FibKey(fib.FibTableEntries[0].BridgeDomain, fib.FibTableEntries[0].PhysAddress), fib.FibTableEntries[0]) -} - -// DeleteFib removes FIB entry configuration from the ETCD -func (ctl *VppAgentCtl) deleteFib() { - fibKey := l2.FibKey("bd1", "EA:FE:3C:64:A7:44") - - ctl.Log.Println("Deleting", fibKey) - ctl.broker.Delete(fibKey) -} - -// L2 xConnect - -// CreateXConn puts L2 cross connect configuration to the ETCD -func (ctl *VppAgentCtl) createXConn() { - xc := l2.XConnectPairs{ - XConnectPairs: []*l2.XConnectPairs_XConnectPair{ - { - ReceiveInterface: "tap1", - TransmitInterface: "loop1", - }, - }, - } - - ctl.Log.Println(xc) - ctl.broker.Put(l2.XConnectKey(xc.XConnectPairs[0].ReceiveInterface), xc.XConnectPairs[0]) -} - -// DeleteXConn removes cross connect configuration from the ETCD -func (ctl *VppAgentCtl) deleteXConn() { - xcKey := l2.XConnectKey("loop1") - - ctl.Log.Println("Deleting", xcKey) - ctl.broker.Delete(xcKey) -} - -// VPP routes - -// 
CreateRoute puts VPP route configuration to the ETCD -func (ctl *VppAgentCtl) createRoute() { - routes := l3.StaticRoutes{ - Routes: []*l3.StaticRoutes_Route{ - { - VrfId: 0, - DstIpAddr: "10.1.1.3/32", - NextHopAddr: "192.168.1.13", - Weight: 6, - OutgoingInterface: "tap1", - }, - // inter-vrf route without next hop addr (recursive lookup) - //{ - // Type: l3.StaticRoutes_Route_INTER_VRF, - // VrfId: 0, - // DstIpAddr: "1.2.3.4/32", - // ViaVrfId: 1, - //}, - // inter-vrf route with next hop addr - //{ - // Type: l3.StaticRoutes_Route_INTER_VRF, - // VrfId: 1, - // DstIpAddr: "10.1.1.3/32", - // NextHopAddr: "192.168.1.13", - // ViaVrfId: 0, - //}, - }, - } - - for _, r := range routes.Routes { - ctl.Log.Print(r) - ctl.broker.Put(l3.RouteKey(r.VrfId, r.DstIpAddr, r.NextHopAddr), r) - } -} - -// DeleteRoute removes VPP route configuration from the ETCD -func (ctl *VppAgentCtl) deleteRoute() { - routeKey := l3.RouteKey(0, "10.1.1.3/32", "192.168.1.13") - - ctl.Log.Println("Deleting", routeKey) - ctl.broker.Delete(routeKey) -} - -// Linux routes - -// CreateLinuxRoute puts linux route configuration to the ETCD -func (ctl *VppAgentCtl) createLinuxRoute() { - linuxRoutes := linuxL3.LinuxStaticRoutes{ - Route: []*linuxL3.LinuxStaticRoutes_Route{ - // Static route - { - Name: "route1", - DstIpAddr: "10.0.2.0/24", - Interface: "veth1", - Metric: 100, - Namespace: &linuxL3.LinuxStaticRoutes_Route_Namespace{ - Name: "ns1", - Type: linuxL3.LinuxStaticRoutes_Route_Namespace_NAMED_NS, - }, - }, - // Default route - { - Name: "defRoute", - Default: true, - GwAddr: "10.0.2.2", - Interface: "veth1", - Metric: 100, - Namespace: &linuxL3.LinuxStaticRoutes_Route_Namespace{ - Name: "ns1", - Type: linuxL3.LinuxStaticRoutes_Route_Namespace_NAMED_NS, - }, - }, - }, - } - - ctl.Log.Println(linuxRoutes) - ctl.broker.Put(linuxL3.StaticRouteKey(linuxRoutes.Route[0].Name), linuxRoutes.Route[0]) - ctl.broker.Put(linuxL3.StaticRouteKey(linuxRoutes.Route[1].Name), linuxRoutes.Route[1]) -} - -// 
DeleteLinuxRoute removes linux route configuration from the ETCD -func (ctl *VppAgentCtl) deleteLinuxRoute() { - linuxStaticRouteKey := linuxL3.StaticRouteKey("route1") - linuxDefaultRouteKey := linuxL3.StaticRouteKey("defRoute") - - ctl.Log.Println("Deleting", linuxStaticRouteKey) - ctl.broker.Delete(linuxStaticRouteKey) - ctl.Log.Println("Deleting", linuxDefaultRouteKey) - ctl.broker.Delete(linuxDefaultRouteKey) -} - -// VPP ARP - -// CreateArp puts VPP ARP entry configuration to the ETCD -func (ctl *VppAgentCtl) createArp() { - arp := l3.ArpTable{ - ArpEntries: []*l3.ArpTable_ArpEntry{ - { - Interface: "tap1", - IpAddress: "192.168.10.21", - PhysAddress: "59:6C:45:59:8E:BD", - Static: true, - }, - }, - } - - ctl.Log.Println(arp) - ctl.broker.Put(l3.ArpEntryKey(arp.ArpEntries[0].Interface, arp.ArpEntries[0].IpAddress), arp.ArpEntries[0]) -} - -// DeleteArp removes VPP ARP entry configuration from the ETCD -func (ctl *VppAgentCtl) deleteArp() { - arpKey := l3.ArpEntryKey("tap1", "192.168.10.21") - - ctl.Log.Println("Deleting", arpKey) - ctl.broker.Delete(arpKey) -} - -// AddProxyArpInterfaces puts VPP proxy ARP interface configuration to the ETCD -func (ctl *VppAgentCtl) addProxyArpInterfaces() { - proxyArpIf := l3.ProxyArpInterfaces{ - InterfaceLists: []*l3.ProxyArpInterfaces_InterfaceList{ - { - Label: "proxyArpIf1", - Interfaces: []*l3.ProxyArpInterfaces_InterfaceList_Interface{ - { - Name: "tap1", - }, - { - Name: "tap2", - }, - }, - }, - }, - } - - log.Println(proxyArpIf) - ctl.broker.Put(l3.ProxyArpInterfaceKey(proxyArpIf.InterfaceLists[0].Label), proxyArpIf.InterfaceLists[0]) -} - -// DeleteProxyArpInterfaces removes VPP proxy ARP interface configuration from the ETCD -func (ctl *VppAgentCtl) deleteProxyArpInterfaces() { - arpKey := l3.ProxyArpInterfaceKey("proxyArpIf1") - - ctl.Log.Println("Deleting", arpKey) - ctl.broker.Delete(arpKey) -} - -// AddProxyArpRanges puts VPP proxy ARP range configuration to the ETCD -func (ctl *VppAgentCtl) 
addProxyArpRanges() { - proxyArpRng := l3.ProxyArpRanges{ - RangeLists: []*l3.ProxyArpRanges_RangeList{ - { - Label: "proxyArpRng1", - Ranges: []*l3.ProxyArpRanges_RangeList_Range{ - { - FirstIp: "124.168.10.5", - LastIp: "124.168.10.10", - }, - { - FirstIp: "172.154.10.5", - LastIp: "172.154.10.10", - }, - }, - }, - }, - } - - log.Println(proxyArpRng) - ctl.broker.Put(l3.ProxyArpRangeKey(proxyArpRng.RangeLists[0].Label), proxyArpRng.RangeLists[0]) -} - -// DeleteProxyArpranges removes VPP proxy ARP range configuration from the ETCD -func (ctl *VppAgentCtl) deleteProxyArpRanges() { - arpKey := l3.ProxyArpRangeKey("proxyArpRng1") - - ctl.Log.Println("Deleting", arpKey) - ctl.broker.Delete(arpKey) -} - -// SetIPScanNeigh puts VPP IP scan neighbor configuration to the ETCD -func (ctl *VppAgentCtl) setIPScanNeigh() { - ipScanNeigh := &l3.IPScanNeighbor{ - Mode: l3.IPScanNeighbor_BOTH, - ScanInterval: 11, - MaxProcTime: 36, - MaxUpdate: 5, - ScanIntDelay: 16, - StaleThreshold: 26, - } - - log.Println(ipScanNeigh) - ctl.broker.Put(l3.IPScanNeighPrefix, ipScanNeigh) -} - -// UnsetIPScanNeigh removes VPP IP scan neighbor configuration from the ETCD -func (ctl *VppAgentCtl) unsetIPScanNeigh() { - ctl.Log.Println("Deleting", l3.IPScanNeighPrefix) - ctl.broker.Delete(l3.IPScanNeighPrefix) -} - -// Linux ARP - -// CreateLinuxArp puts linux ARP entry configuration to the ETCD -func (ctl *VppAgentCtl) createLinuxArp() { - linuxArp := linuxL3.LinuxStaticArpEntries{ - ArpEntry: []*linuxL3.LinuxStaticArpEntries_ArpEntry{ - { - Name: "arp1", - Interface: "veth1", - Namespace: &linuxL3.LinuxStaticArpEntries_ArpEntry_Namespace{ - Name: "ns1", - Type: linuxL3.LinuxStaticArpEntries_ArpEntry_Namespace_NAMED_NS, - }, - IpAddr: "130.0.0.1", - HwAddress: "46:06:18:DB:05:3A", - State: &linuxL3.LinuxStaticArpEntries_ArpEntry_NudState{ - Type: linuxL3.LinuxStaticArpEntries_ArpEntry_NudState_PERMANENT, // or NOARP, REACHABLE, STALE - }, - IpFamily: 
&linuxL3.LinuxStaticArpEntries_ArpEntry_IpFamily{ - Family: linuxL3.LinuxStaticArpEntries_ArpEntry_IpFamily_IPV4, // or IPv6, ALL, MPLS - }, - }, - }, - } - - ctl.Log.Println(linuxArp) - ctl.broker.Put(linuxL3.StaticArpKey(linuxArp.ArpEntry[0].Name), linuxArp.ArpEntry[0]) -} - -// DeleteLinuxArp removes Linux ARP entry configuration from the ETCD -func (ctl *VppAgentCtl) deleteLinuxArp() { - linuxArpKey := linuxL3.StaticArpKey("arp1") - - ctl.Log.Println("Deleting", linuxArpKey) - ctl.broker.Delete(linuxArpKey) -} - -// L4 plugin - -// EnableL4Features enables L4 configuration on the VPP -func (ctl *VppAgentCtl) enableL4Features() { - l4Features := &l4.L4Features{ - Enabled: true, - } - - ctl.Log.Println(l4Features) - ctl.broker.Put(l4.FeatureKey(), l4Features) -} - -// DisableL4Features disables L4 configuration on the VPP -func (ctl *VppAgentCtl) disableL4Features() { - l4Features := &l4.L4Features{ - Enabled: false, - } - - ctl.Log.Println(l4Features) - ctl.broker.Put(l4.FeatureKey(), l4Features) -} - -// CreateAppNamespace puts application namespace configuration to the ETCD -func (ctl *VppAgentCtl) createAppNamespace() { - appNs := l4.AppNamespaces{ - AppNamespaces: []*l4.AppNamespaces_AppNamespace{ - { - NamespaceId: "appns1", - Secret: 1, - Interface: "tap1", - }, - }, - } - - ctl.Log.Println(appNs) - ctl.broker.Put(l4.AppNamespacesKey(appNs.AppNamespaces[0].NamespaceId), appNs.AppNamespaces[0]) -} - -// DeleteAppNamespace removes application namespace configuration from the ETCD -func (ctl *VppAgentCtl) deleteAppNamespace() { - // Note: application namespace cannot be removed, missing API in VPP - ctl.Log.Println("App namespace delete not supported") -} - -// RegisterPunt puts punt configuration to the ETCD -func (ctl *VppAgentCtl) registerPunt() { - puntVal := &punt.Punt{ - Name: "punt1", - L3Protocol: punt.L3Protocol_IPv4, - L4Protocol: punt.L4Protocol_UDP, - Port: 8990, - SocketPath: "/tmp/socket/punt", - } - - ctl.Log.Println(puntVal) - 
ctl.broker.Put(punt.Key(puntVal.Name), puntVal) -} - -// DeleteAppNamespace removes application namespace configuration from the ETCD -func (ctl *VppAgentCtl) deregisterPunt() { - puntVal := &punt.Punt{ - Name: "punt1", - } - - ctl.Log.Println(puntVal) - ctl.broker.Delete(punt.Key(puntVal.Name)) -} - -// TXN transactions - -// CreateTxn demonstrates transaction - two interfaces and bridge domain put to the ETCD using txn -func (ctl *VppAgentCtl) createTxn() { - ifs := interfaces.Interfaces{ - Interfaces: []*interfaces.Interfaces_Interface{ - { - Name: "tap1", - Type: interfaces.InterfaceType_TAP_INTERFACE, - Enabled: true, - Mtu: 1500, - IpAddresses: []string{ - "10.4.4.1/24", - }, - Tap: &interfaces.Interfaces_Interface_Tap{ - HostIfName: "tap1", - }, - }, - { - Name: "tap2", - Type: interfaces.InterfaceType_TAP_INTERFACE, - Enabled: true, - Mtu: 1500, - IpAddresses: []string{ - "10.4.4.2/24", - }, - Tap: &interfaces.Interfaces_Interface_Tap{ - HostIfName: "tap2", - }, - }, - }, - } - - bd := l2.BridgeDomains{ - BridgeDomains: []*l2.BridgeDomains_BridgeDomain{ - { - Name: "bd1", - Flood: false, - UnknownUnicastFlood: false, - Forward: true, - Learn: true, - ArpTermination: false, - MacAge: 0, - Interfaces: []*l2.BridgeDomains_BridgeDomain_Interfaces{ - { - Name: "tap1", - BridgedVirtualInterface: true, - SplitHorizonGroup: 0, - }, - { - Name: "tap2", - BridgedVirtualInterface: false, - SplitHorizonGroup: 0, - }, - }, - }, - }, - } - - t := ctl.broker.NewTxn() - t.Put(interfaces.InterfaceKey(ifs.Interfaces[0].Name), ifs.Interfaces[0]) - t.Put(interfaces.InterfaceKey(ifs.Interfaces[1].Name), ifs.Interfaces[1]) - t.Put(l2.BridgeDomainKey(bd.BridgeDomains[0].Name), bd.BridgeDomains[0]) - - t.Commit() -} - -// DeleteTxn demonstrates transaction - two interfaces and bridge domain removed from the ETCD using txn -func (ctl *VppAgentCtl) deleteTxn() { - ctl.Log.Println("Deleting txn items") - t := ctl.broker.NewTxn() - t.Delete(interfaces.InterfaceKey("tap1")) - 
t.Delete(interfaces.InterfaceKey("tap2")) - t.Delete(l2.BridgeDomainKey("bd1")) - - t.Commit() -} - -// Error reporting - -// ReportIfaceErrorState reports interface status data to the ETCD -func (ctl *VppAgentCtl) reportIfaceErrorState() { - ifErr, err := ctl.broker.ListValues(interfaces.ErrorPrefix) - if err != nil { - ctl.Log.Fatal(err) - return - } - for { - kv, allReceived := ifErr.GetNext() - if allReceived { - break - } - entry := &interfaces.InterfaceErrors_Interface{} - err := kv.GetValue(entry) - if err != nil { - ctl.Log.Fatal(err) - return - } - ctl.Log.Println(entry) - } -} - -// ReportBdErrorState reports bridge domain status data to the ETCD -func (ctl *VppAgentCtl) reportBdErrorState() { - bdErr, err := ctl.broker.ListValues(l2.BdErrPrefix) - if err != nil { - ctl.Log.Fatal(err) - return - } - for { - kv, allReceived := bdErr.GetNext() - if allReceived { - break - } - entry := &l2.BridgeDomainErrors_BridgeDomain{} - err := kv.GetValue(entry) - if err != nil { - ctl.Log.Fatal(err) - return - } - - ctl.Log.Println(entry) - } -} diff --git a/cmd/vpp-agent-ctl/etcd.go b/cmd/vpp-agent-ctl/etcd.go deleted file mode 100644 index 6339c1dbec..0000000000 --- a/cmd/vpp-agent-ctl/etcd.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright (c) 2018 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// package vpp-agent-ctl implements the vpp-agent-ctl test tool for testing -// VPP Agent plugins. 
In addition to testing, the vpp-agent-ctl tool can -// be used to demonstrate the usage of VPP Agent plugins and their APIs. - -package main - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "os" - - "github.com/ligato/cn-infra/config" - "github.com/ligato/cn-infra/datasync" - "github.com/ligato/cn-infra/db/keyval" - "github.com/ligato/cn-infra/db/keyval/etcd" - "github.com/ligato/cn-infra/db/keyval/kvproto" -) - -// CreateEtcdClient uses environment variable or ETCD config file to establish connection -func (ctl *VppAgentCtl) createEtcdClient(configFile string) (*etcd.BytesConnectionEtcd, keyval.ProtoBroker, error) { - var err error - - if configFile == "" { - configFile = os.Getenv("ETCD_CONFIG") - } - - cfg := &etcd.Config{} - if configFile != "" { - err := config.ParseConfigFromYamlFile(configFile, cfg) - if err != nil { - return nil, nil, err - } - } - etcdConfig, err := etcd.ConfigToClient(cfg) - if err != nil { - ctl.Log.Fatal(err) - } - - bDB, err := etcd.NewEtcdConnectionWithBytes(*etcdConfig, ctl.Log) - if err != nil { - return nil, nil, err - } - - return bDB, kvproto.NewProtoWrapperWithSerializer(bDB, &keyval.SerializerJSON{}). 
- NewBroker(ctl.serviceLabel.GetAgentPrefix()), nil -} - -// ListAllAgentKeys prints all keys stored in the broker -func (ctl *VppAgentCtl) listAllAgentKeys() { - ctl.Log.Debug("listAllAgentKeys") - - it, err := ctl.broker.ListKeys(ctl.serviceLabel.GetAllAgentsPrefix()) - if err != nil { - ctl.Log.Error(err) - } - for { - key, _, stop := it.GetNext() - if stop { - break - } - //ctl.Log.Println("key: ", key) - fmt.Println("key: ", key) - } -} - -// EtcdGet uses ETCD connection to get value for specific key -func (ctl *VppAgentCtl) etcdGet(key string) { - ctl.Log.Debug("GET ", key) - - data, found, _, err := ctl.bytesConnection.GetValue(key) - if err != nil { - ctl.Log.Error(err) - return - } - if !found { - ctl.Log.Debug("No value found for the key", key) - } - //ctl.Log.Println(string(data)) - fmt.Println(string(data)) -} - -// EtcdPut stores key/data value -func (ctl *VppAgentCtl) etcdPut(key string, file string) { - input, err := ctl.readData(file) - if err != nil { - ctl.Log.Fatal(err) - } - - ctl.Log.Println("DB putting ", key, " ", string(input)) - - err = ctl.bytesConnection.Put(key, input) - if err != nil { - ctl.Log.Panic("error putting the data ", key, " that to DB from ", file, ", err: ", err) - } - ctl.Log.Println("DB put successful ", key, " ", file) -} - -// EtcdDel removes data under provided key -func (ctl *VppAgentCtl) etcdDel(key string) { - ctl.Log.Debug("DEL ", key) - - found, err := ctl.bytesConnection.Delete(key, datasync.WithPrefix()) - if err != nil { - ctl.Log.Error(err) - return - } - if found { - ctl.Log.Debug("Data deleted:", key) - } else { - ctl.Log.Debug("No value found for the key", key) - } -} - -// EtcdDump lists values under key. If no key is provided, all data is read. 
-func (ctl *VppAgentCtl) etcdDump(key string) { - ctl.Log.Debug("DUMP ", key) - - data, err := ctl.bytesConnection.ListValues(key) - if err != nil { - ctl.Log.Error(err) - return - } - - var found bool - for { - kv, stop := data.GetNext() - if stop { - break - } - //ctl.Log.Println(kv.GetKey()) - //ctl.Log.Println(string(kv.GetValue())) - //ctl.Log.Println() - fmt.Println(kv.GetKey()) - fmt.Println(string(kv.GetValue())) - fmt.Println() - found = true - } - if !found { - ctl.Log.Debug("No value found for the key", key) - } -} - -func (ctl *VppAgentCtl) readData(file string) ([]byte, error) { - var input []byte - var err error - - if file == "-" { - // read JSON from STDIN - bio := bufio.NewReader(os.Stdin) - buf := new(bytes.Buffer) - buf.ReadFrom(bio) - input = buf.Bytes() - } else { - // read JSON from file - input, err = ioutil.ReadFile(file) - if err != nil { - ctl.Log.Panic("error reading the data that needs to be written to DB from ", file, ", err: ", err) - } - } - - // validate the JSON - var js map[string]interface{} - if json.Unmarshal(input, &js) != nil { - ctl.Log.Panic("Not a valid JSON: ", string(input)) - } - return input, err -} diff --git a/cmd/vpp-agent-ctl/json/acl-macip.json b/cmd/vpp-agent-ctl/json/acl-macip.json index 5dba0ffeef..33005c124d 100644 --- a/cmd/vpp-agent-ctl/json/acl-macip.json +++ b/cmd/vpp-agent-ctl/json/acl-macip.json @@ -1,5 +1,5 @@ { - "acl_name": "acl1", + "name": "acl1", "interfaces": { "egress": [ "tap1", @@ -12,14 +12,12 @@ }, "rules": [ { - "acl_action": 1, - "match": { - "macip_rule": { - "source_address": "192.168.0.1", - "source_address_prefix": 16, - "source_mac_address": "b2:74:8c:12:67:d2", - "source_mac_address_mask": "ff:ff:ff:ff:00:00" - } + "action": 1, + "macip_rule": { + "source_address": "192.168.0.1", + "source_address_prefix": 16, + "source_mac_address": "b2:74:8c:12:67:d2", + "source_mac_address_mask": "ff:ff:ff:ff:00:00" } } ] diff --git a/cmd/vpp-agent-ctl/json/acl-tcp.json 
b/cmd/vpp-agent-ctl/json/acl-tcp.json index c17579fe02..d336c92e18 100644 --- a/cmd/vpp-agent-ctl/json/acl-tcp.json +++ b/cmd/vpp-agent-ctl/json/acl-tcp.json @@ -1,5 +1,5 @@ { - "acl_name": "acl1", + "name": "acl1", "interfaces": { "egress": [ "tap1", @@ -12,25 +12,23 @@ }, "rules": [ { - "acl_action": 1, - "match": { - "ip_rule": { - "ip": { - "destination_network": "10.20.1.0/24", - "source_network": "192.168.1.2/32" + "action": 1, + "ip_rule": { + "ip": { + "destination_network": "10.20.1.0/24", + "source_network": "192.168.1.2/32" + }, + "tcp": { + "destination_port_range": { + "lower_port": 1150, + "upper_port": 1250 }, - "tcp": { - "destination_port_range": { - "lower_port": 1150, - "upper_port": 1250 - }, - "source_port_range": { - "lower_port": 150, - "upper_port": 250 - }, - "tcp_flags_mask": 20, - "tcp_flags_value": 10 - } + "source_port_range": { + "lower_port": 150, + "upper_port": 250 + }, + "tcp_flags_mask": 20, + "tcp_flags_value": 10 } } } diff --git a/cmd/vpp-agent-ctl/json/acl-udp.json b/cmd/vpp-agent-ctl/json/acl-udp.json index efda861a28..56ae7da2b2 100644 --- a/cmd/vpp-agent-ctl/json/acl-udp.json +++ b/cmd/vpp-agent-ctl/json/acl-udp.json @@ -1,5 +1,5 @@ { - "acl_name": "acl1", + "name": "acl1", "interfaces": { "egress": [ "tap1", @@ -12,22 +12,20 @@ }, "rules": [ { - "acl_action": 1, - "match": { - "ip_rule": { - "ip": { - "destination_network": "", - "source_network": "192.168.1.2/32" + "action": 1, + "ip_rule": { + "ip": { + "destination_network": "", + "source_network": "192.168.1.2/32" + }, + "udp": { + "destination_port_range": { + "lower_port": 1150, + "upper_port": 1250 }, - "udp": { - "destination_port_range": { - "lower_port": 1150, - "upper_port": 1250 - }, - "source_port_range": { - "lower_port": 150, - "upper_port": 250 - } + "source_port_range": { + "lower_port": 150, + "upper_port": 250 } } } diff --git a/cmd/vpp-agent-ctl/json/afpacket.json b/cmd/vpp-agent-ctl/json/afpacket.json index 8a8bc747c9..a124197fc3 100644 --- 
a/cmd/vpp-agent-ctl/json/afpacket.json +++ b/cmd/vpp-agent-ctl/json/afpacket.json @@ -9,5 +9,5 @@ ], "name": "afpacket1", "phys_address": "b4:e6:1c:a1:0d:31", - "type": 4 + "type": "AF_PACKET" } diff --git a/cmd/vpp-agent-ctl/json/app-ns.json b/cmd/vpp-agent-ctl/json/app-ns.json deleted file mode 100644 index 7a6e9462ce..0000000000 --- a/cmd/vpp-agent-ctl/json/app-ns.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "namespace_id": "ns1", - "secret": 123456, - "interface": "tap1" -} \ No newline at end of file diff --git a/cmd/vpp-agent-ctl/json/arp.json b/cmd/vpp-agent-ctl/json/arp.json index d4bbe8833a..c249284ddd 100644 --- a/cmd/vpp-agent-ctl/json/arp.json +++ b/cmd/vpp-agent-ctl/json/arp.json @@ -2,5 +2,4 @@ "interface": "tap1", "ip_address": "192.168.10.21", "phys_address": "59:6C:45:59:8E:BD", - "static": true } \ No newline at end of file diff --git a/cmd/vpp-agent-ctl/json/bfd-key.json b/cmd/vpp-agent-ctl/json/bfd-key.json deleted file mode 100644 index 4a97f0b58d..0000000000 --- a/cmd/vpp-agent-ctl/json/bfd-key.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "authentication_type": 0, - "id": 1, - "secret": "123456789" -} \ No newline at end of file diff --git a/cmd/vpp-agent-ctl/json/bfd-session.json b/cmd/vpp-agent-ctl/json/bfd-session.json deleted file mode 100644 index 4d5ffe7e39..0000000000 --- a/cmd/vpp-agent-ctl/json/bfd-session.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "desired_min_tx_interval": 5, - "destination_address": "10.10.0.5", - "detect_multiplier": 10, - "interface": "tap1", - "required_min_rx_interval": 6, - "source_address": "192.168.1.1" -} \ No newline at end of file diff --git a/cmd/vpp-agent-ctl/json/enable-l4.json b/cmd/vpp-agent-ctl/json/enable-l4.json deleted file mode 100644 index 37c8ceff4b..0000000000 --- a/cmd/vpp-agent-ctl/json/enable-l4.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "enabled": true -} \ No newline at end of file diff --git a/cmd/vpp-agent-ctl/json/ip-redirect.json b/cmd/vpp-agent-ctl/json/ip-redirect.json new file mode 100644 index 
0000000000..9e4ab2c948 --- /dev/null +++ b/cmd/vpp-agent-ctl/json/ip-redirect.json @@ -0,0 +1,5 @@ +{ + "l3_protocol": "IPv4", + "tx_interface": "tap1", + "next_hop": "192.168.0.1" +} \ No newline at end of file diff --git a/cmd/vpp-agent-ctl/json/ipsec-sa10.json b/cmd/vpp-agent-ctl/json/ipsec-sa10.json index 23a1e32e54..a4ac0ab089 100644 --- a/cmd/vpp-agent-ctl/json/ipsec-sa10.json +++ b/cmd/vpp-agent-ctl/json/ipsec-sa10.json @@ -1,5 +1,5 @@ { - "name":"sa10", + "index":"10", "spi": 1001, "protocol": 1, "crypto_alg": 1, diff --git a/cmd/vpp-agent-ctl/json/ipsec-sa20.json b/cmd/vpp-agent-ctl/json/ipsec-sa20.json index a1c912b5ef..411746d600 100644 --- a/cmd/vpp-agent-ctl/json/ipsec-sa20.json +++ b/cmd/vpp-agent-ctl/json/ipsec-sa20.json @@ -1,5 +1,5 @@ { - "name":"sa20", + "index":"20", "spi": 1000, "protocol": 1, "crypto_alg": 1, diff --git a/cmd/vpp-agent-ctl/json/ipsec-spd.json b/cmd/vpp-agent-ctl/json/ipsec-spd.json index 4aafba0541..c0819d3e90 100644 --- a/cmd/vpp-agent-ctl/json/ipsec-spd.json +++ b/cmd/vpp-agent-ctl/json/ipsec-spd.json @@ -1,21 +1,9 @@ { - "name":"spd1", + "index":"1", "interfaces": [ - { "name": "afp1" } + { "name": "tap1" } ], "policy_entries": [ - { - "priority": 100, - "is_outbound": false, - "action": 0, - "protocol": 50 - }, - { - "priority": 100, - "is_outbound": true, - "action": 0, - "protocol": 50 - }, { "priority": 10, "is_outbound": false, @@ -24,7 +12,7 @@ "local_addr_start": "10.0.0.2", "local_addr_stop": "10.0.0.2", "action": 3, - "sa": "sa20" + "sa": "20" }, { "priority": 10, @@ -34,7 +22,7 @@ "local_addr_start": "10.0.0.2", "local_addr_stop": "10.0.0.2", "action": 3, - "sa": "sa10" + "sa": "10" } ] } \ No newline at end of file diff --git a/cmd/vpp-agent-ctl/json/ipsec-tunnel.json b/cmd/vpp-agent-ctl/json/ipsec-tunnel.json new file mode 100644 index 0000000000..09bae0915e --- /dev/null +++ b/cmd/vpp-agent-ctl/json/ipsec-tunnel.json @@ -0,0 +1,25 @@ +{ + "link": { + "ip_sec": { + "Esn": false, + "AntiReplay": false, + 
"LocalSpi": 1000, + "RemoteSpi": 1001, + "LocalIp": "10.0.0.2", + "RemoteIp": "10.0.0.1", + "CryptoAlg": 1, + "LocalCryptoKey": "4a506a794f574265564551694d653768", + "RemoteCryptoKey": "4a506a794f574265564551694d653768", + "IntegAlg": 2, + "LocalIntegKey": "4339314b55523947594d6d3547666b45764e6a58", + "RemoteIntegKey": "4339314b55523947594d6d3547666b45764e6a58" + } + }, + "enabled": true, + "mtu": 1500, + "ip_addresses": [ + "20.0.0.0/24" + ], + "name": "ipsec1", + "type": 8 +} \ No newline at end of file diff --git a/cmd/vpp-agent-ctl/json/linux-tap.json b/cmd/vpp-agent-ctl/json/linux-tap.json index 943c531b6d..d7503b58d9 100644 --- a/cmd/vpp-agent-ctl/json/linux-tap.json +++ b/cmd/vpp-agent-ctl/json/linux-tap.json @@ -1,18 +1,23 @@ { - "name": "tap1", - "host_if_name": "linux-tap", - "enabled": true, - "type": 1, - "mtu": 1500, - "phys_address": "92:c7:42:67:ab:cc", - "namespace": { - "type": 2, - "name": "ns1" - }, - "ip_addresses": [ - "172.52.45.127/24" - ], - "tap": { - "temp_if_name": "tap1" - } + "name": "tap1", + "host_if_name": "linux-tap", + "enabled": true, + "type": 1, + "mtu": 1500, + "phys_address": "BC:FE:E9:5E:07:04", + "namespace": { + "type": 1, + "reference": "ns1" + }, + "ip_addresses": [ + "172.52.45.127/24" + ], + "tap": { + "temp_if_name": "tap1" + }, + "link": { + "tap": { + "vpp_tap_if_name": "linux_tap" + } + } } \ No newline at end of file diff --git a/cmd/vpp-agent-ctl/json/loop.json b/cmd/vpp-agent-ctl/json/loop.json new file mode 100644 index 0000000000..a9cfb4aaa5 --- /dev/null +++ b/cmd/vpp-agent-ctl/json/loop.json @@ -0,0 +1,10 @@ +{ + "enabled": true, + "mtu": 1500, + "ip_addresses": [ + "192.168.1.1/24" + ], + "name": "loop1", + "phys_address": "06:9e:df:66:54:47", + "type": "SOFTWARE_LOOPBACK" +} diff --git a/cmd/vpp-agent-ctl/json/memif.json b/cmd/vpp-agent-ctl/json/memif.json index bebfe2a895..cebb485ba1 100644 --- a/cmd/vpp-agent-ctl/json/memif.json +++ b/cmd/vpp-agent-ctl/json/memif.json @@ -10,5 +10,5 @@ "socket_filename": 
"/tmp/memif1.sock" }, "name": "memif1", - "type": 2 + "type": "MEMIF" } diff --git a/cmd/vpp-agent-ctl/json/punt-socket-register.json b/cmd/vpp-agent-ctl/json/punt-socket-register.json deleted file mode 100644 index 3fa9e75290..0000000000 --- a/cmd/vpp-agent-ctl/json/punt-socket-register.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "name": "punt1", - "l3_protocol": 4, - "l4_protocol": 17, - "port": 9000, - "socket_path": "/tmp/socket/punt" -} \ No newline at end of file diff --git a/cmd/vpp-agent-ctl/json/punt-to-host.json b/cmd/vpp-agent-ctl/json/punt-to-host.json new file mode 100644 index 0000000000..ce02ea20e1 --- /dev/null +++ b/cmd/vpp-agent-ctl/json/punt-to-host.json @@ -0,0 +1,6 @@ +{ + "l3_protocol": "IPv4", + "l4_protocol": "UDP", + "port": 9000, + "socket_path": "/tmp/socket/punt" +} \ No newline at end of file diff --git a/cmd/vpp-agent-ctl/json/tap.json b/cmd/vpp-agent-ctl/json/tap.json index 24d6408219..8ceadf0b73 100644 --- a/cmd/vpp-agent-ctl/json/tap.json +++ b/cmd/vpp-agent-ctl/json/tap.json @@ -5,9 +5,9 @@ "enabled": true, "mtu": 1500, "ip_addresses": [ - "192.168.1.1/24" + "192.168.25.3/24" ], "name": "tap1", - "phys_address": "06:9e:df:66:54:47", - "type": 3 + "phys_address": "7C:4E:E7:8A:63:68", + "type": "TAP" } diff --git a/cmd/vpp-agent-ctl/json/veth1.json b/cmd/vpp-agent-ctl/json/veth1.json index 685638a811..a503fcb0d7 100644 --- a/cmd/vpp-agent-ctl/json/veth1.json +++ b/cmd/vpp-agent-ctl/json/veth1.json @@ -6,14 +6,16 @@ "2001:db8:0:0:0:ff00:89e3:bb42/48" ], "namespace": { - "name": "ns1", - "type": 2 + "reference": "ns1", + "type": 1 }, "name": "veth1", "host_if_name": "veth1Name", "phys_address": "d2:74:8c:12:67:d2", - "type": 0, - "veth": { - "peer_if_name": "veth2" + "type": 1, + "link": { + "veth": { + "peer_if_name": "veth2" + } } } diff --git a/cmd/vpp-agent-ctl/json/veth2.json b/cmd/vpp-agent-ctl/json/veth2.json index 31c7f65ba4..ffc55019bd 100644 --- a/cmd/vpp-agent-ctl/json/veth2.json +++ b/cmd/vpp-agent-ctl/json/veth2.json @@ -6,14 
+6,16 @@ "2001:842:0:0:0:ff00:13c7:1245/48" ], "namespace": { - "name": "ns2", - "type": 2 + "reference": "ns2", + "type": 1 }, "name": "veth2", "host_if_name": "veth2Name", "phys_address": "92:c7:42:67:ab:cd", "type": 0, - "veth": { - "peer_if_name": "veth1" + "link": { + "veth": { + "peer_if_name": "veth1" + } } } diff --git a/cmd/vpp-agent-ctl/json/vxlan.json b/cmd/vpp-agent-ctl/json/vxlan.json new file mode 100644 index 0000000000..266c4c001f --- /dev/null +++ b/cmd/vpp-agent-ctl/json/vxlan.json @@ -0,0 +1,13 @@ +{ + "vxlan": { + "src_address": "192.168.42.1", + "dst_address": "192.168.42.2", + "vni": 10 + }, + "enabled": true, + "ip_addresses": [ + "192.168.25.3/24" + ], + "name": "vxlan1", + "type": "VXLAN_TUNNEL" +} diff --git a/cmd/vpp-agent-ctl/main.go b/cmd/vpp-agent-ctl/main.go new file mode 100644 index 0000000000..29eb2e948a --- /dev/null +++ b/cmd/vpp-agent-ctl/main.go @@ -0,0 +1,312 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// package vpp-agent-ctl implements the vpp-agent-ctl test tool for testing +// VPP Agent plugins. In addition to testing, the vpp-agent-ctl tool can +// be used to demonstrate the usage of VPP Agent plugins and their APIs. 
+ +package main + +import ( + "bytes" + "fmt" + "os" + "strings" + + "github.com/ligato/vpp-agent/cmd/vpp-agent-ctl/data" + "github.com/namsral/flag" + + "github.com/ligato/cn-infra/logging/logrus" +) + +func main() { + // Read args + args := os.Args + argsLen := len(args) + + // First argument is not a command + if argsLen == 1 { + usage() + return + } + // Check if second argument is a command or path to the ETCD config file + var etcdCfg string + var cmdSet []string + if argsLen >= 2 && !strings.HasPrefix(args[1], "-") { + etcdCfg = args[1] + // Remove first two arguments + cmdSet = args[2:] + } else { + // Remove first argument + cmdSet = args[1:] + } + // Parse service label + if err := flag.CommandLine.ParseEnv(os.Environ()); err != nil { + fmt.Printf("failed to parse environment variables") + return + } + ctl, err := data.NewVppAgentCtl(etcdCfg, cmdSet) + if err != nil { + // Error is already printed in 'bytes_broker_impl.go' + usage() + return + } + + do(ctl) +} + +func do(ctl data.VppAgentCtl) { + commands := ctl.GetCommands() + switch commands[0] { + case "-list": + // List all keys + ctl.ListAllAgentKeys() + case "-dump": + if len(commands) >= 2 { + // Dump specific key + ctl.Dump(commands[1]) + } else { + // Dump all keys + ctl.Dump("") + } + case "-get": + if len(commands) >= 2 { + // Get key + ctl.Get(commands[1]) + } + case "-del": + if len(commands) >= 2 { + // Del key + ctl.Del(commands[1]) + } + case "-put": + if len(commands) >= 3 { + ctl.Put(commands[1], commands[2]) + } + default: + var err error + switch commands[0] { + // ACL plugin + case "-aclip": + err = ctl.PutIPAcl() + case "-aclipd": + err = ctl.DeleteIPAcl() + case "-aclmac": + err = ctl.PutMACIPAcl() + case "-aclmacd": + err = ctl.DeleteMACIPAcl() + // VPP interface plugin + case "-eth": + err = ctl.PutDPDKInterface() + case "-ethd": + err = ctl.DeleteDPDKInterface() + case "-tap": + err = ctl.PutTap() + case "-tapd": + err = ctl.DeleteTap() + case "-loop": + err = ctl.PutLoopback() + 
case "-loopd": + err = ctl.DeleteLoopback() + case "-memif": + err = ctl.PutMemoryInterface() + case "-memifd": + err = ctl.DeleteMemoryInterface() + case "-vxlan": + err = ctl.PutVxLan() + case "-vxland": + err = ctl.DeleteVxLan() + case "-afpkt": + err = ctl.PutAfPacket() + case "-afpktd": + err = ctl.DeleteAfPacket() + case "-ipsectun": + err = ctl.PutIPSecTunnelInterface() + case "-ipsectund": + err = ctl.DeleteIPSecTunnelInterface() + // Linux interface plugin + case "-veth": + err = ctl.PutVEthPair() + case "-vethd": + err = ctl.DeleteVEthPair() + case "-ltap": + err = ctl.PutLinuxTap() + case "-ltapd": + err = ctl.DeleteLinuxTap() + // IPSec plugin + case "-spd": + err = ctl.PutIPSecSPD() + case "-spdd": + err = ctl.DeleteIPSecSPD() + case "-sa": + err = ctl.PutIPSecSA() + case "-sad": + err = ctl.DeleteIPSecSA() + // L2 plugin + case "-bd": + err = ctl.PutBridgeDomain() + case "-bdd": + err = ctl.DeleteBridgeDomain() + case "-fib": + err = ctl.PutFib() + case "-fibd": + err = ctl.DeleteFib() + case "-xconn": + err = ctl.PutXConn() + case "-xconnd": + err = ctl.DeleteXConn() + // VPP L3 plugin + case "-route": + err = ctl.PutRoute() + case "-routed": + err = ctl.DeleteRoute() + case "-routeint": + err = ctl.PutInterVrfRoute() + case "-routeintd": + err = ctl.DeleteInterVrfRoute() + case "-routenh": + err = ctl.PutNextHopRoute() + case "-routenhd": + err = ctl.DeleteNextHopRoute() + case "-arp": + err = ctl.PutArp() + case "-arpd": + err = ctl.DeleteArp() + case "-proxyarp": + err = ctl.PutProxyArp() + case "-proxyarpd": + err = ctl.DeleteProxyArp() + case "-ipscan": + err = ctl.SetIPScanNeigh() + case "-ipscand": + err = ctl.UnsetIPScanNeigh() + // Linux L3 plugin + case "-lroute": + err = ctl.PutLinuxRoute() + case "-lrouted": + err = ctl.DeleteLinuxRoute() + case "-larp": + err = ctl.PutLinuxArp() + case "-larpd": + err = ctl.DeleteLinuxArp() + // NAT plugin + case "-gnat": + err = ctl.PutGlobalNat() + case "-gnatd": + err = ctl.DeleteGlobalNat() + case 
"-dnat": + err = ctl.PutDNat() + case "-dnatd": + err = ctl.DeleteDNat() + // Punt plugin + case "-punt": + err = ctl.PutPunt() + case "-puntd": + err = ctl.DeletePunt() + case "-rsocket": + err = ctl.RegisterPuntViaSocket() + case "-dsocket": + err = ctl.DeregisterPuntViaSocket() + case "-ipredir": + err = ctl.PutIPRedirect() + case "-ipredird": + err = ctl.DeleteIPRedirect() + // STN plugin + case "-stn": + err = ctl.PutStn() + case "-stnd": + err = ctl.DeleteStn() + default: + usage() + } + if err != nil { + fmt.Printf("error calling '%s': %v", commands[0], err) + } + } +} + +// Show command info TODO punt +func usage() { + var buffer bytes.Buffer + // Crud operation + _, err := buffer.WriteString(` + + Example tool for VPP configuration. + + Crud operations with .json (files can be found in /json): + -put + -get + -del + -dump + -list + + Prearranged flags (create, delete) sorted by plugin: + + Access list plugin: + -aclip, -aclipd - Access List with IP rules + -aclmac -aclmacd - Access List with MAC IP rule + + Interface plugin: + -eth, -ethd - Physical interface + -tap, -tapd - TAP type interface + -loop, -loopd - Loop type interface + -memif, -memifd - Memif type interface + -vxlan, -vxland - VxLAN type interface + -afpkt, -afpktd - af_packet type interface + -ipsectun, -ipsectund - IPSec tunnel interface + + Linux interface plugin: + -veth, -vethd - Linux VETH interface pair + -ltap, -ltapd - Linux TAP interface + + IPSec plugin: + -spd, -spdd - IPSec security policy database + -sa, -sad - IPSec security associations + + L2 plugin: + -bd, -bdd - Bridge doamin + -fib, -fibd - L2 FIB + -xconn, -xconnd - L2 X-Connect + + L3 plugin: + -route, -routed - L3 route + -routeint, -routeintd - L3 inter-vrf route + -routenh, -routenhd - L3 next-hop route + -arp, -arpd - ARP entry + -proxyarp, -proxyarpd - Proxy ARP configuration + -ipscan -ipscand - VPP IP scan neighbor + + Linux L3 plugin: + -lroute, -lrouted - Linux route + -larp, -larpd - Linux ARP entry + + NAT 
plugin: + -gnat, -gnatd - Global NAT configuration + -dnat, -dnatd - DNAT configuration + + Punt plugin: + -punt, -puntd - Punt to host + -rsocket, -dsocket - Punt to host via socket registration + -ipredir, -ipredird - IP redirect + + STN plugin: + -stn, -stnd - STN rule + `) + + if err != nil { + logrus.DefaultLogger().Error(err) + } else { + logrus.DefaultLogger().Print(buffer.String()) + } +} diff --git a/cmd/vpp-agent-ctl/topology/static_routing.sh b/cmd/vpp-agent-ctl/topology/static_routing.sh deleted file mode 100755 index 6a5800089e..0000000000 --- a/cmd/vpp-agent-ctl/topology/static_routing.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env bash - -VSWITCH_NAME="vpp1" -RPD_NAME="rpd" - -# -# memif: 8.42.0.2 memif: 8.42.0.1 -# +---------------+ +---------------+ -# | | | | -# | | | | -# | VSWITCH +-------------------+ RPD | -# | | | | -# | | | | -# +---------------+ +---------------+ -# route: 112.1.1.3/32 via 8.42.0.1 loop: 112.1.1.3 -# - -# VSWITCH - add static route to 112.1.1.3/32 via 8.42.0.1 -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/vrf/0/fib/112.1.1.3/32/8.42.0.1 - << EOF -{ - "description": "Static route", - "dst_ip_addr": "112.1.1.3/32", - "next_hop_addr": "8.42.0.1" -} -EOF - -# VSWITCH - create memif master to RPD -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/interface/memif-to-rpd - << EOF -{ - "name": "memif-to-rpd", - "type": 2, - "enabled": true, - "mtu": 1500, - "memif": { - "master": true, - "key": 1, - "socket_filename": "/tmp/memif.sock" - }, - "ip_addresses": [ - "8.42.0.2/24" - ] -} -EOF - -# RPD - create memif slave to VSWITCH -vpp-agent-ctl -put /vnf-agent/${RPD_NAME}/vpp/config/v1/interface/memif-to-vswitch - << EOF -{ - "name": "memif-to-vswitch", - "type": 2, - "enabled": true, - "mtu": 1500, - "memif": { - "master": false, - "key": 1, - "socket_filename": "/tmp/memif.sock" - }, - "ip_addresses": [ - "8.42.0.1/24" - ] -} -EOF - -# RPD - create a loopback interface -vpp-agent-ctl -put 
/vnf-agent/${RPD_NAME}/vpp/config/v1/interface/loop1 - << EOF -{ - "name": "loop1", - "enabled": true, - "mtu": 1500, - "phys_address": "8a:f1:be:90:00:dd", - "ip_addresses": [ - "112.1.1.3/24" - ] -} -EOF diff --git a/cmd/vpp-agent-ctl/topology/topology.sh b/cmd/vpp-agent-ctl/topology/topology.sh deleted file mode 100755 index fb63b40633..0000000000 --- a/cmd/vpp-agent-ctl/topology/topology.sh +++ /dev/null @@ -1,252 +0,0 @@ -#!/usr/bin/env bash - -VSWITCH_NAME="vpp1" -RNG_NAME="rng-vpp" -USSCHED_NAME="ussched-vpp" -VNF_NAME="vnf-vpp" - -# sudo docker run -it --name vpp1 -e MICROSERVICE_LABEL=vpp1 -v/tmp/:/tmp/ --privileged --rm dev_vpp_agent bash -# sudo docker run -it --name rng -e MICROSERVICE_LABEL=rng-vpp -v/tmp/:/tmp/ --privileged --rm dev_vpp_agent -# sudo docker run -it --name ussched -e MICROSERVICE_LABEL=ussched-vpp -v/tmp/:/tmp/ --privileged --rm dev_vpp_agent -# sudo docker run -it --name vnf -e MICROSERVICE_LABEL=vnf-vpp -v/tmp/:/tmp/ --privileged --rm dev_vpp_agent - -# VSWITCH - configure physical interface GigabitEthernet0/8/0 -# !!! needs to exist and be whitelisted in VPP, e.g. dpdk { dev 0000:00:08.0 } !!! 
-# This works for my VirtualBox ethernet interface: -# modprobe igb_uio -# vpp unix { interactive } dpdk { dev 0000:00:08.0 uio-driver igb_uio } -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/interface/GigabitEthernet0/8/0 - << EOF -{ - "name": "GigabitEthernet0/8/0", - "type": 1, - "enabled": true, - "mtu": 1500, - "ip_addresses": [ - "8.42.0.2/24" - ] -} -EOF - -# VSWITCH - create a loopback interface -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/interface/loop1 - << EOF -{ - "name": "loop1", - "enabled": true, - "phys_address": "8a:f1:be:90:00:dd", - "mtu": 1500, - "ip_addresses": [ - "6.0.0.100/24" - ] -} -EOF - -# VSWITCH - create a vxlan interface -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/interface/vxlan1 - << EOF -{ - "name": "vxlan1", - "type": 5, - "enabled": true, - "vxlan": { - "src_address": "8.42.0.2", - "dst_address": "8.42.0.1", - "vni": 13 - } -} -EOF - -# VSWITCH - create a BVI loopback interface for B2 (extension to the cCMTS topology) -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/interface/loop-bvi2 - << EOF -{ - "name": "loop-bvi2", - "enabled": true, - "mtu": 1500, - "ip_addresses": [ - "10.10.1.1/24" - ] -} -EOF - -# VSWITCH - add static route to 6.0.0.0/24 via GigabitEthernet0/8/0 -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/vrf/0/fib/6.0.0.0/24/8.42.0.1 - << EOF -{ - "description": "Static route", - "dst_ip_addr": "6.0.0.0/24", - "next_hop_addr": "8.42.0.1", - "outgoing_interface": "GigabitEthernet0/8/0" -} -EOF - -# VSWITCH - create memif master to RNG (bridge domain B2) -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/interface/memif-to-rng - << EOF -{ - "name": "memif-to-rng", - "type": 2, - "enabled": true, - "memif": { - "master": true, - "id": 1, - "socket_filename": "/tmp/memif.sock" - } -} -EOF - -# RNG - create memif slave to VSWITCH -vpp-agent-ctl -put /vnf-agent/${RNG_NAME}/vpp/config/v1/interface/memif-to-vswitch - << EOF -{ - "name": 
"memif-to-vswitch", - "type": 2, - "enabled": true, - "mtu": 1500, - "memif": { - "master": false, - "id": 1, - "socket_filename": "/tmp/memif.sock" - }, - "ip_addresses": [ - "10.10.1.4/24" - ] -} -EOF - - -# VSWITCH - create memif master to USSCHED (bridge domain B2) -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/interface/memif-to-ussched - << EOF -{ - "name": "memif-to-ussched", - "type": 2, - "enabled": true, - "memif": { - "master": true, - "id": 2, - "socket_filename": "/tmp/memif.sock" - } -} -EOF - -# USSCHED - create memif slave to VSWITCH -vpp-agent-ctl -put /vnf-agent/${USSCHED_NAME}/vpp/config/v1/interface/memif-to-vswitch - << EOF -{ - "name": "memif-to-vswitch", - "type": 2, - "enabled": true, - "mtu": 1500, - "memif": { - "master": false, - "id": 2, - "socket_filename": "/tmp/memif.sock" - }, - "ip_addresses": [ - "10.10.1.3/24" - ] -} -EOF - -# VSWITCH - create memif to VNF 1 (bridge domain B1) -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/interface/memif-to-vnf-1 - << EOF -{ - "name": "memif-to-vnf-1", - "type": 2, - "enabled": true, - "memif": { - "master": true, - "id": 3, - "socket_filename": "/tmp/memif.sock" - } -} -EOF - -# VNF - create memif slave 1 to VSWITCH -vpp-agent-ctl -put /vnf-agent/${VNF_NAME}/vpp/config/v1/interface/memif-to-vswitch-1 - << EOF -{ - "name": "memif-to-vswitch-1", - "type": 2, - "enabled": true, - "mtu": 1500, - "memif": { - "master": false, - "id": 3, - "socket_filename": "/tmp/memif.sock" - }, - "ip_addresses": [ - "10.10.1.2/24" - ] -} -EOF - -# VSWITCH - create memif to vnf 2 (bridge domain B2) -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/interface/memif-to-vnf-2 - << EOF -{ - "name": "memif-to-vnf-2", - "type": 2, - "enabled": true, - "memif": { - "master": true, - "id": 4, - "socket_filename": "/tmp/memif.sock" - } -} -EOF - -# VNF - create memif slave 2 to VSWITCH -vpp-agent-ctl -put /vnf-agent/${VNF_NAME}/vpp/config/v1/interface/memif-to-vswitch-2 - << EOF -{ - 
"name": "memif-to-vswitch-2", - "type": 2, - "enabled": true, - "mtu": 1500, - "memif": { - "master": false, - "id": 4, - "socket_filename": "/tmp/memif.sock" - }, - "ip_addresses": [ - "166.111.8.2" - ] -} -EOF - -# VSWITCH - create bridge domain B2 (needs to be called after the interfaces have been created) -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/bd/B2 - << EOF -{ - "name": "B2", - "flood": true, - "unknown_unicast_flood": true, - "forward": true, - "learn": true, - "arp_termination": true, - "interfaces": [ - { - "name": "memif-to-rng" - }, - { - "name": "memif-to-ussched" - }, - { - "name": "memif-to-vnf-1" - }, - { - "name": "loop-bvi2", - "bridged_virtual_interface": true - } - ] -} -EOF - -# VSWITCH - create bridge domain B1 (needs to be called after the interfaces have been created) -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/bd/B1 - << EOF -{ - "name": "B1", - "flood": true, - "unknown_unicast_flood": true, - "forward": true, - "learn": true, - "interfaces": [ - { - "name": "memif-to-vnf-2" - }, - { - "name": "vxlan1" - } - ] -} -EOF diff --git a/cmd/vpp-agent-ctl/topology/topology_modify.sh b/cmd/vpp-agent-ctl/topology/topology_modify.sh deleted file mode 100755 index 6dbed7e3ba..0000000000 --- a/cmd/vpp-agent-ctl/topology/topology_modify.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env bash - -VSWITCH_NAME="vpp1" -RNG_NAME="rng-vpp" -USSCHED_NAME="ussched-vpp" -VNF_NAME="vnf-vpp" - -# VSWITCH - change IP & MAC of the loopback interface -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/interface/loop1 - << EOF -{ - "name": "loop1", - "enabled": true, - "phys_address": "8a:f1:be:90:00:bb", - "mtu": 1500, - "ip_addresses": [ - "6.0.0.101/24" - ] -} -EOF - -# VSWITCH - delete memif master to RNG (bridge domain B2) -vpp-agent-ctl -del /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/interface/memif-to-rng - -# RNG - delete memif slave to VSWITCH -vpp-agent-ctl -del 
/vnf-agent/${RNG_NAME}/vpp/config/v1/interface/memif-to-vswitch - -# VSWITCH - add one more static route -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/vrf/0/fib/20.5.0.0/24/8.42.0.1 - << EOF -{ - "description": "Static route 2", - "dst_ip_addr": "20.5.0.0/24", - "next_hop_addr": "8.42.0.1", - "outgoing_interface": "GigabitEthernet0/8/0" -} -EOF - -# VSWITCH - remove deleted interface + BVI interface from bridge domain -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/bd/B2 - << EOF -{ - "name": "B2", - "flood": true, - "unknown_unicast_flood": true, - "forward": true, - "learn": true, - "arp_termination": true, - "interfaces": [ - { - "name": "memif-to-ussched" - }, - { - "name": "memif-to-vnf-1" - } - ] -} -EOF diff --git a/cmd/vpp-agent-ctl/topology/topology_xcon.sh b/cmd/vpp-agent-ctl/topology/topology_xcon.sh deleted file mode 100755 index c22377c753..0000000000 --- a/cmd/vpp-agent-ctl/topology/topology_xcon.sh +++ /dev/null @@ -1,238 +0,0 @@ -#!/usr/bin/env bash - -VSWITCH_NAME="vpp1" -RNG_NAME="rng-vpp" -USSCHED_NAME="ussched-vpp" -VNF_NAME="vnf-vpp" - -# sudo docker run -it --name vpp1 -e MICROSERVICE_LABEL=vpp1 -v/tmp/:/tmp/ --privileged --rm dev_vpp_agent bash -# sudo docker run -it --name rng -e MICROSERVICE_LABEL=rng-vpp -v/tmp/:/tmp/ --privileged --rm dev_vpp_agent -# sudo docker run -it --name ussched -e MICROSERVICE_LABEL=ussched-vpp -v/tmp/:/tmp/ --privileged --rm dev_vpp_agent -# sudo docker run -it --name vnf -e MICROSERVICE_LABEL=vnf-vpp -v/tmp/:/tmp/ --privileged --rm dev_vpp_agent - -# VSWITCH - configure physical interface GigabitEthernet0/8/0 -# !!! needs to exist and be whitelisted in VPP, e.g. dpdk { dev 0000:00:08.0 } !!! 
-# This works for my VirtualBox ethernet interface: -# modprobe igb_uio -# vpp unix { interactive } dpdk { dev 0000:00:08.0 uio-driver igb_uio } -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/interface/GigabitEthernet0/8/0 - << EOF -{ - "name": "GigabitEthernet0/8/0", - "type": 1, - "enabled": true, - "mtu": 1500, - "ip_addresses": [ - "8.42.0.2/24" - ] -} -EOF - -# VSWITCH - create a loopback interface -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/interface/loop1 - << EOF -{ - "name": "loop1", - "enabled": true, - "phys_address": "8a:f1:be:90:00:dd", - "mtu": 1500, - "ip_addresses": [ - "6.0.0.100/24" - ] -} -EOF - -# VSWITCH - create a vxlan interface -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/interface/vxlan1 - << EOF -{ - "name": "vxlan1", - "type": 5, - "enabled": true, - "vxlan": { - "src_address": "8.42.0.2", - "dst_address": "8.42.0.1", - "vni": 13 - } -} -EOF - -# VSWITCH - create a BVI loopback interface for B2 (extension to the cCMTS topology) -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/interface/loop-bvi2 - << EOF -{ - "name": "loop-bvi2", - "enabled": true, - "mtu": 1500, - "ip_addresses": [ - "10.10.1.1/24" - ] -} -EOF - -# VSWITCH - add static route to 6.0.0.0/24 via GigabitEthernet0/8/0 -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/vrf/0/fib/6.0.0.0/24/8.42.0.1 - << EOF -{ - "description": "Static route", - "dst_ip_addr": "6.0.0.0/24", - "next_hop_addr": "8.42.0.1", - "outgoing_interface": "GigabitEthernet0/8/0" -} -EOF - -# VSWITCH - create memif master to RNG -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/interface/memif-to-rng - << EOF -{ - "name": "memif-to-rng", - "type": 2, - "enabled": true, - "memif": { - "master": true, - "id": 1, - "socket_filename": "/tmp/memif.sock" - } -} -EOF - -# RNG - create memif slave to VSWITCH -vpp-agent-ctl -put /vnf-agent/${RNG_NAME}/vpp/config/v1/interface/memif-to-vswitch - << EOF -{ - "name": "memif-to-vswitch", - 
"type": 2, - "enabled": true, - "mtu": 1500, - "memif": { - "master": false, - "id": 1, - "socket_filename": "/tmp/memif.sock" - }, - "ip_addresses": [ - "10.10.1.4/24" - ] -} -EOF - - -# VSWITCH - create memif master to USSCHED -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/interface/memif-to-ussched - << EOF -{ - "name": "memif-to-ussched", - "type": 2, - "enabled": true, - "memif": { - "master": true, - "id": 2, - "socket_filename": "/tmp/memif.sock" - } -} -EOF - -# USSCHED - create memif slave to VSWITCH -vpp-agent-ctl -put /vnf-agent/${USSCHED_NAME}/vpp/config/v1/interface/memif-to-vswitch - << EOF -{ - "name": "memif-to-vswitch", - "type": 2, - "enabled": true, - "mtu": 1500, - "memif": { - "master": false, - "id": 2, - "socket_filename": "/tmp/memif.sock" - }, - "ip_addresses": [ - "10.10.1.3/24" - ] -} -EOF - -# VSWITCH - create memif to VNF 1 -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/interface/memif-to-vnf-1 - << EOF -{ - "name": "memif-to-vnf-1", - "type": 2, - "enabled": true, - "memif": { - "master": true, - "id": 3, - "socket_filename": "/tmp/memif.sock" - } -} -EOF - -# VNF - create memif slave 1 to VSWITCH -vpp-agent-ctl -put /vnf-agent/${VNF_NAME}/vpp/config/v1/interface/memif-to-vswitch-1 - << EOF -{ - "name": "memif-to-vswitch-1", - "type": 2, - "enabled": true, - "mtu": 1500, - "memif": { - "master": false, - "id": 3, - "socket_filename": "/tmp/memif.sock" - }, - "ip_addresses": [ - "10.10.1.2/24" - ] -} -EOF - -# VSWITCH - create memif to vnf 2 -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/interface/memif-to-vnf-2 - << EOF -{ - "name": "memif-to-vnf-2", - "type": 2, - "enabled": true, - "memif": { - "master": true, - "id": 4, - "socket_filename": "/tmp/memif.sock" - } -} -EOF - -# VNF - create memif slave 2 to VSWITCH -vpp-agent-ctl -put /vnf-agent/${VNF_NAME}/vpp/config/v1/interface/memif-to-vswitch-2 - << EOF -{ - "name": "memif-to-vswitch-2", - "type": 2, - "enabled": true, - "mtu": 1500, - 
"memif": { - "master": false, - "id": 4, - "socket_filename": "/tmp/memif.sock" - }, - "ip_addresses": [ - "166.111.8.2" - ] -} -EOF - -# VSWITCH - create cross-connection between interfaces memif-to-rng and memif-to-ussched -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/xconnect/memif-to-rng - << EOF -{ - "receive_interface": "memif-to-rng", - "transmit_interface": "memif-to-ussched" -} -EOF - -# VSWITCH - create cross-connection between interfaces memif-to-vnf-1 and memif-to-rng -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/xconnect/memif-to-vnf-1 - << EOF -{ - "receive_interface": "memif-to-vnf-1", - "transmit_interface": "memif-to-rng" -} -EOF - -# VSWITCH - create cross-connection between interfaces memif-to-ussched and memif-to-vnf-1 -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/xconnect/memif-to-ussched - << EOF -{ - "receive_interface": "memif-to-ussched", - "transmit_interface": "memif-to-vnf-1" -} -EOF - -# VSWITCH - create cross-connection between interfaces memif-to-vnf-2 and vxlan1 -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/xconnect/memif-to-vnf-2 - << EOF -{ - "receive_interface": "memif-to-vnf-2", - "transmit_interface": "vxlan1" -} -EOF diff --git a/cmd/vpp-agent-ctl/topology/topology_xcon_modify.sh b/cmd/vpp-agent-ctl/topology/topology_xcon_modify.sh deleted file mode 100755 index b99d4c39db..0000000000 --- a/cmd/vpp-agent-ctl/topology/topology_xcon_modify.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash - -VSWITCH_NAME="vpp1" -RNG_NAME="rng-vpp" -USSCHED_NAME="ussched-vpp" -VNF_NAME="vnf-vpp" - -# VSWITCH - change IP & MAC of the loopback interface -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/interface/loop1 - << EOF -{ - "name": "loop1", - "enabled": true, - "phys_address": "8a:f1:be:90:00:bb", - "mtu": 1500, - "ip_addresses": [ - "6.0.0.101/24" - ] -} -EOF - -# VSWITCH - delete memif master to RNG -vpp-agent-ctl -del 
/vnf-agent/${VSWITCH_NAME}/vpp/config/v1/interface/memif-to-rng - -# RNG - delete memif slave to VSWITCH -vpp-agent-ctl -del /vnf-agent/${RNG_NAME}/vpp/config/v1/interface/memif-to-vswitch - -# VSWITCH - add static route to 6.0.0.0/24 via GigabitEthernet0/8/0 -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/vrf/0/fib/6.0.0.0/24/8.42.0.1 - << EOF -{ - "description": "Static route", - "dst_ip_addr": "6.0.0.0/24", - "next_hop_addr": "8.42.0.1", - "outgoing_interface": "GigabitEthernet0/8/0" -} -EOF - -# VSWITCH - add static route to 20.5.0.0/24 via GigabitEthernet0/8/0 -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/vrf/0/fib/20.5.0.0/24/8.42.0.1 - << EOF -{ - "description": "Static route 2", - "dst_ip_addr": "20.5.0.0/24", - "next_hop_addr": "8.42.0.1", - "outgoing_interface": "GigabitEthernet0/8/0" -} -EOF - -# VSWITCH - create cross-connection between interfaces memif-to-ussched and memif-to-vnf-1 -vpp-agent-ctl -put /vnf-agent/${VSWITCH_NAME}/vpp/config/v1/xconnect/memif-to-ussched - << EOF -{ - "receive_interface": "memif-to-ussched", - "transmit_interface": "memif-to-vnf-1" -} -EOF diff --git a/cmd/vpp-agent-ctl/vpp-agent-ctl.go b/cmd/vpp-agent-ctl/vpp-agent-ctl.go deleted file mode 100644 index ec4aa37976..0000000000 --- a/cmd/vpp-agent-ctl/vpp-agent-ctl.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// package vpp-agent-ctl implements the vpp-agent-ctl test tool for testing -// VPP Agent plugins. In addition to testing, the vpp-agent-ctl tool can -// be used to demonstrate the usage of VPP Agent plugins and their APIs. -package main - -import ( - "bytes" - "os" - "strings" - - "github.com/ligato/cn-infra/logging/logrus" -) - -func main() { - // Read args - args := os.Args - argsLen := len(args) - - // First argument is not a command - if argsLen == 1 { - usage() - return - } - // Check if second argument is a command or path to the ETCD config file - var etcdCfg string - var cmdSet []string - if argsLen >= 2 && !strings.HasPrefix(args[1], "-") { - etcdCfg = args[1] - // Remove first two arguments - cmdSet = args[2:] - } else { - // Remove first argument - cmdSet = args[1:] - } - ctl, err := initCtl(etcdCfg, cmdSet) - if err != nil { - // Error is already printed in 'bytes_broker_impl.go' - usage() - return - } - - do(ctl) -} - -func do(ctl *VppAgentCtl) { - switch ctl.Commands[0] { - case "-list": - // List all keys - ctl.listAllAgentKeys() - case "-dump": - if len(ctl.Commands) >= 2 { - // Dump specific key - ctl.etcdDump(ctl.Commands[1]) - } else { - // Dump all keys - ctl.etcdDump("") - } - case "-get": - if len(ctl.Commands) >= 2 { - // Get key - ctl.etcdGet(ctl.Commands[1]) - } - case "-del": - if len(ctl.Commands) >= 2 { - // Del key - ctl.etcdDel(ctl.Commands[1]) - } - case "-put": - if len(ctl.Commands) >= 3 { - ctl.etcdPut(ctl.Commands[1], ctl.Commands[2]) - } - default: - switch ctl.Commands[0] { - // ACL - case "-acl": - ctl.createACL() - case "-acld": - ctl.deleteACL() - // BFD - case "-bfds": - ctl.createBfdSession() - case "-bfdsd": - ctl.deleteBfdSession() - case "-bfdk": - ctl.createBfdKey() - case "-bfdkd": - ctl.deleteBfdKey() - case "-bfde": - ctl.createBfdEcho() - case "-bfded": - ctl.deleteBfdEcho() - // VPP interfaces - case "-eth": - ctl.createEthernet() - case "-ethd": - ctl.deleteEthernet() - case "-tap": - ctl.createTap() - case 
"-tapd": - ctl.deleteTap() - case "-loop": - ctl.createLoopback() - case "-loopd": - ctl.deleteLoopback() - case "-memif": - ctl.createMemif() - case "-memifd": - ctl.deleteMemif() - case "-vxlan": - ctl.createVxlan() - case "-vxland": - ctl.deleteVxlan() - case "-vmxnt": - ctl.createVmxNet3() - case "-vmxntd": - ctl.deleteVmxNet3() - case "-afpkt": - ctl.createAfPacket() - case "-afpktd": - ctl.deleteAfPacket() - // Linux interfaces - case "-veth": - ctl.createVethPair() - case "-vethd": - ctl.deleteVethPair() - case "-ltap": - ctl.createLinuxTap() - case "-ltapd": - ctl.deleteLinuxTap() - // IPsec - case "-spd": - ctl.createIPsecSPD() - case "-spdd": - ctl.deleteIPsecSPD() - case "-sa": - ctl.createIPsecSA() - case "-sad": - ctl.deleteIPsecSA() - case "-tun": - ctl.createIPSecTunnelInterface() - case "-tund": - ctl.deleteIPSecTunnelInterface() - // STN - case "-stn": - ctl.createStn() - case "-stnd": - ctl.deleteStn() - // NAT - case "-gnat": - ctl.createGlobalNat() - case "-gnatd": - ctl.deleteGlobalNat() - case "-snat": - ctl.createSNat() - case "-snatd": - ctl.deleteSNat() - case "-dnat": - ctl.createDNat() - case "-dnatd": - ctl.deleteDNat() - // Bridge domains - case "-bd": - ctl.createBridgeDomain() - case "-bdd": - ctl.deleteBridgeDomain() - // FIB - case "-fib": - ctl.createFib() - case "-fibd": - ctl.deleteFib() - // L2 xConnect - case "-xconn": - ctl.createXConn() - case "-xconnd": - ctl.deleteXConn() - // VPP routes - case "-route": - ctl.createRoute() - case "-routed": - ctl.deleteRoute() - // Linux routes - case "-lrte": - ctl.createLinuxRoute() - case "-lrted": - ctl.deleteLinuxRoute() - // VPP ARP - case "-arp": - ctl.createArp() - case "-arpd": - ctl.deleteArp() - case "-prxi": - ctl.addProxyArpInterfaces() - case "-prxid": - ctl.deleteProxyArpInterfaces() - case "-prxr": - ctl.addProxyArpRanges() - case "-prxrd": - ctl.deleteProxyArpRanges() - case "-ipscn": - ctl.setIPScanNeigh() - case "-ipscnd": - ctl.unsetIPScanNeigh() - // Linux ARP - case 
"-larp": - ctl.createLinuxArp() - case "-larpd": - ctl.deleteLinuxArp() - // L4 plugin - case "-el4": - ctl.enableL4Features() - case "-dl4": - ctl.disableL4Features() - case "-appns": - ctl.createAppNamespace() - case "-appnsd": - ctl.deleteAppNamespace() - // Punt - case "-puntr": - ctl.registerPunt() - case "-puntd": - ctl.deregisterPunt() - // TXN (transaction) - case "-txn": - ctl.createTxn() - case "-txnd": - ctl.deleteTxn() - // Error reporting - case "-errIf": - ctl.reportIfaceErrorState() - case "-errBd": - ctl.reportBdErrorState() - default: - usage() - } - } -} - -// Show command info -func usage() { - var buffer bytes.Buffer - // Crud operation - buffer.WriteString(` - - Crud operations with .json: - -put - -get - -del - -dump - -list - - Prearranged flags (create, delete): - -acl, -acld - Access List - -bfds, -bfdsd - BFD session - -bfdk, -bfdkd - BFD authentication key - -bfde, -bfded - BFD echo function - -eth, -ethd - Physical interface - -tap, -tapd - TAP type interface - -loop, -loopd - Loop type interface - -memif, -memifd - Memif type interface - -vxlan, -vxland - VxLAN type interface - -vmxnt, -vmxntd - VmxNet3 type interface - -afpkt, -afpktd - af_packet type interface - -veth, -vethd - Linux VETH interface pair - -ltap, -ltapd - Linux TAP interface - -spd, -spdd - IPSec security policy database - -sa, -sad - IPSec security associations - -tun -tund - IPSec tunnel interface - -stn, -stnd - STN rule - -gnat, -gnatd - Global NAT configuration - -snat, -snatd - SNAT configuration - -dnat, -dnatd - DNAT configuration - -bd, -bdd - Bridge doamin - -fib, -fibd - L2 FIB - -xconn, -xconnd - L2 X-Connect - -route, -routed - L3 route - -arp, -arpd - ARP entry - -prxi, -prxid - Proxy ARP interfaces - -prxr, -prxrd - Proxy ARP ranges - -lrte, -lrted - Linux route - -larp, -larpd - Linux ARP entry - -ipscn -ipscnd - VPP IP scan neighbor - -el4, -dl4 - L4 features - -appns, -appnsd - Application namespace - -puntr, -puntd - Register/Deregister punt - - 
Other: - -txn, -txnd - Transaction - -errIf - Interface error state report - -errBd - Bridge domain error state report - `) - - logrus.DefaultLogger().Print(buffer.String()) -} diff --git a/cmd/vpp-agent/app/vpp_agent.go b/cmd/vpp-agent/app/vpp_agent.go new file mode 100644 index 0000000000..8264f4dc36 --- /dev/null +++ b/cmd/vpp-agent/app/vpp_agent.go @@ -0,0 +1,189 @@ +// Copyright (c) 2019 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package app + +import ( + "github.com/ligato/cn-infra/datasync" + "github.com/ligato/cn-infra/datasync/kvdbsync" + "github.com/ligato/cn-infra/datasync/kvdbsync/local" + "github.com/ligato/cn-infra/datasync/msgsync" + "github.com/ligato/cn-infra/datasync/resync" + "github.com/ligato/cn-infra/db/keyval/consul" + "github.com/ligato/cn-infra/db/keyval/etcd" + "github.com/ligato/cn-infra/db/keyval/redis" + "github.com/ligato/cn-infra/health/probe" + "github.com/ligato/cn-infra/health/statuscheck" + "github.com/ligato/cn-infra/logging/logmanager" + "github.com/ligato/cn-infra/messaging/kafka" + + "github.com/ligato/vpp-agent/plugins/configurator" + linux_ifplugin "github.com/ligato/vpp-agent/plugins/linux/ifplugin" + linux_l3plugin "github.com/ligato/vpp-agent/plugins/linux/l3plugin" + linux_nsplugin "github.com/ligato/vpp-agent/plugins/linux/nsplugin" + "github.com/ligato/vpp-agent/plugins/orchestrator" + "github.com/ligato/vpp-agent/plugins/restapi" + 
"github.com/ligato/vpp-agent/plugins/telemetry" + "github.com/ligato/vpp-agent/plugins/vpp/aclplugin" + "github.com/ligato/vpp-agent/plugins/vpp/ifplugin" + "github.com/ligato/vpp-agent/plugins/vpp/ipsecplugin" + "github.com/ligato/vpp-agent/plugins/vpp/l2plugin" + "github.com/ligato/vpp-agent/plugins/vpp/l3plugin" + "github.com/ligato/vpp-agent/plugins/vpp/natplugin" + "github.com/ligato/vpp-agent/plugins/vpp/puntplugin" + "github.com/ligato/vpp-agent/plugins/vpp/stnplugin" +) + +// VPPAgent defines plugins which will be loaded and their order. +// Note: the plugin itself is loaded after all its dependencies. It means that the VPP plugin is first in the list +// despite it needs to be loaded after the linux plugin. +type VPPAgent struct { + LogManager *logmanager.Plugin + + // VPP & Linux are first to ensure that + // all their descriptors are regitered to KVScheduler + // before orchestrator that starts watch for their NB key prefixes. + VPP + Linux + + Orchestrator *orchestrator.Plugin + + ETCDDataSync *kvdbsync.Plugin + ConsulDataSync *kvdbsync.Plugin + RedisDataSync *kvdbsync.Plugin + + Configurator *configurator.Plugin + RESTAPI *restapi.Plugin + Probe *probe.Plugin + Telemetry *telemetry.Plugin +} + +// New creates new VPPAgent instance. +func New() *VPPAgent { + etcdDataSync := kvdbsync.NewPlugin(kvdbsync.UseKV(&etcd.DefaultPlugin)) + consulDataSync := kvdbsync.NewPlugin(kvdbsync.UseKV(&consul.DefaultPlugin)) + redisDataSync := kvdbsync.NewPlugin(kvdbsync.UseKV(&redis.DefaultPlugin)) + + writers := datasync.KVProtoWriters{ + etcdDataSync, + consulDataSync, + redisDataSync, + } + statuscheck.DefaultPlugin.Transport = writers + + ifStatePub := msgsync.NewPlugin( + msgsync.UseMessaging(&kafka.DefaultPlugin), + msgsync.UseConf(msgsync.Config{ + Topic: "if_state", + }), + ) + + // Set watcher for KVScheduler. 
+ watchers := datasync.KVProtoWatchers{ + local.DefaultRegistry, + etcdDataSync, + consulDataSync, + redisDataSync, + } + orchestrator.DefaultPlugin.Watcher = watchers + + ifplugin.DefaultPlugin.NotifyStates = ifStatePub + ifplugin.DefaultPlugin.PublishStatistics = writers + puntplugin.DefaultPlugin.PublishState = writers + + // connect IfPlugins for Linux & VPP + linux_ifplugin.DefaultPlugin.VppIfPlugin = &ifplugin.DefaultPlugin + ifplugin.DefaultPlugin.LinuxIfPlugin = &linux_ifplugin.DefaultPlugin + ifplugin.DefaultPlugin.NsPlugin = &linux_nsplugin.DefaultPlugin + + vpp := DefaultVPP() + linux := DefaultLinux() + + return &VPPAgent{ + LogManager: &logmanager.DefaultPlugin, + Orchestrator: &orchestrator.DefaultPlugin, + ETCDDataSync: etcdDataSync, + ConsulDataSync: consulDataSync, + RedisDataSync: redisDataSync, + VPP: vpp, + Linux: linux, + Configurator: &configurator.DefaultPlugin, + RESTAPI: &restapi.DefaultPlugin, + Probe: &probe.DefaultPlugin, + Telemetry: &telemetry.DefaultPlugin, + } +} + +// Init initializes main plugin. +func (VPPAgent) Init() error { + return nil +} + +// AfterInit executes resync. +func (VPPAgent) AfterInit() error { + // manually start resync after all plugins started + resync.DefaultPlugin.DoResync() + //orchestrator.DefaultPlugin.InitialSync() + return nil +} + +// Close could close used resources. +func (VPPAgent) Close() error { + return nil +} + +// String returns name of the plugin. +func (VPPAgent) String() string { + return "VPPAgent" +} + +// VPP contains all VPP plugins. 
+type VPP struct { + ACLPlugin *aclplugin.ACLPlugin + IfPlugin *ifplugin.IfPlugin + IPSecPlugin *ipsecplugin.IPSecPlugin + L2Plugin *l2plugin.L2Plugin + L3Plugin *l3plugin.L3Plugin + NATPlugin *natplugin.NATPlugin + PuntPlugin *puntplugin.PuntPlugin + STNPlugin *stnplugin.STNPlugin +} + +func DefaultVPP() VPP { + return VPP{ + ACLPlugin: &aclplugin.DefaultPlugin, + IfPlugin: &ifplugin.DefaultPlugin, + IPSecPlugin: &ipsecplugin.DefaultPlugin, + L2Plugin: &l2plugin.DefaultPlugin, + L3Plugin: &l3plugin.DefaultPlugin, + NATPlugin: &natplugin.DefaultPlugin, + PuntPlugin: &puntplugin.DefaultPlugin, + STNPlugin: &stnplugin.DefaultPlugin, + } +} + +// Linux contains all Linux plugins. +type Linux struct { + IfPlugin *linux_ifplugin.IfPlugin + L3Plugin *linux_l3plugin.L3Plugin + NSPlugin *linux_nsplugin.NsPlugin +} + +func DefaultLinux() Linux { + return Linux{ + IfPlugin: &linux_ifplugin.DefaultPlugin, + L3Plugin: &linux_l3plugin.DefaultPlugin, + NSPlugin: &linux_nsplugin.DefaultPlugin, + } +} diff --git a/cmd/vpp-agent/main.go b/cmd/vpp-agent/main.go index 51b84ac192..fa444e7e61 100644 --- a/cmd/vpp-agent/main.go +++ b/cmd/vpp-agent/main.go @@ -17,18 +17,31 @@ package main import ( + "fmt" "os" "github.com/ligato/cn-infra/agent" "github.com/ligato/cn-infra/logging" log "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/vpp-agent/app" + "github.com/ligato/vpp-agent/cmd/vpp-agent/app" ) +const logo = ` __ + _ _____ ___ _______ ____ ____ ___ / /_ +| |/ / _ \/ _ /___/ _ '/ _ '/ -_/ _ / __/ +|___/ .__/ .__/ \_'_/\_' /\__/_//_\__/ %s + /_/ /_/ /___/ + +` + +var vppAgent = app.New() + func main() { - vppAgent := app.New() + fmt.Fprintf(os.Stdout, logo, agent.BuildVersion) - a := agent.NewAgent(agent.AllPlugins(vppAgent)) + a := agent.NewAgent( + agent.AllPlugins(vppAgent), + ) if err := a.Run(); err != nil { log.DefaultLogger().Fatal(err) diff --git a/docker/dev/Dockerfile b/docker/dev/Dockerfile index 6ecba04267..4b82eeafda 100644 --- a/docker/dev/Dockerfile +++ 
b/docker/dev/Dockerfile @@ -33,10 +33,10 @@ FROM ${BASE_IMG} as dev-stage RUN apt-get update \ && apt-get install -y --no-install-recommends \ autoconf automake build-essential ca-certificates curl gdb git \ - inetutils-traceroute iproute2 ipsec-tools iputils-ping \ + graphviz inetutils-traceroute iproute2 ipsec-tools iputils-ping \ libapr1 libmbedcrypto1 libmbedtls10 libmbedx509-0 libtool \ make mc nano netcat python software-properties-common sudo supervisor \ - telnet unzip wget \ + telnet unzip wget python-cffi \ && rm -rf /var/lib/apt/lists/* # install Protobuf @@ -71,7 +71,7 @@ RUN set -eux; \ make vpp_configure_args_vpp='--disable-japi' build-release pkg-deb; \ fi; \ cd build-root; \ - dpkg -i vpp_*.deb vpp-dev_*.deb libvppinfra*.deb vpp-plugin-core*.deb vpp-dbg_*.deb; \ + dpkg -i vpp_*.deb vpp-dev_*.deb vpp-lib_*.deb vpp-plugins_*.deb vpp-dbg_*.deb vpp-api-python_*.deb; \ rm -rf .ccache /var/lib/apt/lists/*; \ find . -type f -name '*.o' -exec rm -rf '{}' \; @@ -88,13 +88,14 @@ RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" \ && chmod -R 777 "$GOPATH" # install debugger -RUN go get -u github.com/go-delve/delve/cmd/dlv +RUN \ + go get -u github.com/go-delve/delve/cmd/dlv && dlv version; \ + curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh && dep version; # copy configs COPY \ docker/dev/etcd.conf \ docker/dev/kafka.conf \ - docker/dev/govpp.conf \ docker/dev/vpp-plugin.conf \ docker/dev/linux-plugin.conf \ docker/dev/logs.conf \ diff --git a/docker/dev/build.sh b/docker/dev/build.sh index 1fa83af136..3154a0e761 100755 --- a/docker/dev/build.sh +++ b/docker/dev/build.sh @@ -1,12 +1,13 @@ #!/bin/bash -# Before run of this script you can set environmental variables -# IMAGE_TAG, DOCKERFILE, BASE_IMG, GOLANG_OS_ARCH, .. 
then export them -# and to use defined values instead of default ones cd "$(dirname "$0")" set -e +# Before run of this script you can set environmental variables +# IMAGE_TAG, DOCKERFILE, BASE_IMG, GOLANG_OS_ARCH, then export them +# and to use defined values instead of default ones + IMAGE_TAG=${IMAGE_TAG:-'dev_vpp_agent'} DOCKERFILE=${DOCKERFILE:-'Dockerfile'} @@ -39,24 +40,26 @@ COMMIT=$(git rev-parse HEAD) DATE=$(git log -1 --format="%ct" | xargs -I{} date -d @{} +'%Y-%m-%dT%H:%M%:z') echo "==============================" +echo "Building dev image" +echo "==============================" +echo " base image: ${BASE_IMG}" +echo " image tag: ${IMAGE_TAG}" +echo " architecture: ${BUILDARCH}" echo +echo "-----------------------------" echo "VPP" echo "-----------------------------" echo " repo URL: ${VPP_REPO_URL}" echo " commit: ${VPP_COMMIT}" -echo "-----------------------------" echo +echo "-----------------------------" echo "Agent" echo "-----------------------------" echo " version: ${VERSION}" echo " commit: ${COMMIT}" echo " date: ${DATE}" -echo "-----------------------------" echo -echo "base image: ${BASE_IMG}" -echo "image tag: ${IMAGE_TAG}" -echo "architecture: ${BUILDARCH}" -echo "==============================" +echo "-----------------------------" docker build -f ${DOCKERFILE} \ --tag ${IMAGE_TAG} \ diff --git a/docker/dev/exec_agent.sh b/docker/dev/exec_agent.sh deleted file mode 120000 index b2f3c76bc9..0000000000 --- a/docker/dev/exec_agent.sh +++ /dev/null @@ -1 +0,0 @@ -../prod/exec_agent.sh \ No newline at end of file diff --git a/docker/dev/exec_agent.sh b/docker/dev/exec_agent.sh new file mode 100755 index 0000000000..f6a74b028c --- /dev/null +++ b/docker/dev/exec_agent.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +set -e + +if [ -n "$OMIT_AGENT" ]; then + echo "Start of vpp-agent disabled (unset OMIT_AGENT to enable it)" +else + echo "Starting vpp-agent.." 
+ exec vpp-agent --config-dir=/opt/vpp-agent/dev +fi diff --git a/docker/dev/govpp.conf b/docker/dev/govpp.conf deleted file mode 100644 index b05afa02ea..0000000000 --- a/docker/dev/govpp.conf +++ /dev/null @@ -1,4 +0,0 @@ -health-check-probe-interval: 1000000000 -health-check-reply-timeout: 100000000 -health-check-threshold: 1 -reply-timeout: 1000000000 diff --git a/docker/dev/supervisord_kill.py b/docker/dev/supervisord_kill.py deleted file mode 120000 index 4b3a7743b5..0000000000 --- a/docker/dev/supervisord_kill.py +++ /dev/null @@ -1 +0,0 @@ -../prod/supervisord_kill.py \ No newline at end of file diff --git a/docker/dev/supervisord_kill.py b/docker/dev/supervisord_kill.py new file mode 100644 index 0000000000..debf647cb0 --- /dev/null +++ b/docker/dev/supervisord_kill.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python + +import sys +import os +import signal + + +def write_stdout(msg): + # only eventlistener protocol messages may be sent to stdout + sys.stdout.write(msg) + sys.stdout.flush() + + +def write_stderr(msg): + sys.stderr.write(msg) + sys.stderr.flush() + + +def main(): + while 1: + # transition from ACKNOWLEDGED to READY + write_stdout('READY\n') + + # read header line and print it to stderr + line = sys.stdin.readline() + write_stderr('EVENT: ' + line) + + # read event payload and print it to stderr + headers = dict([x.split(':') for x in line.split()]) + data = sys.stdin.read(int(headers['len'])) + write_stderr('DATA: ' + data + '\n') + + # ignore non vpp events, skipping + parsed_data = dict([x.split(':') for x in data.split()]) + if parsed_data["processname"] not in ["vpp", "agent"]: + write_stderr('Ignoring event from ' + parsed_data["processname"] + '\n') + write_stdout('RESULT 2\nOK') + continue + + # ignore exits with expected exit codes + if parsed_data["expected"] == "1": + write_stderr('Exit state from ' + parsed_data["processname"] + ' was expected\n') + write_stdout('RESULT 2\nOK') + continue + + # do not kill supervisor if retained and exit + 
if 'RETAIN_SUPERVISOR' in os.environ and os.environ['RETAIN_SUPERVISOR'] != '': + write_stderr('Supervisord is configured to retain after unexpected exits (unset RETAIN_SUPERVISOR to disable it)\n') + write_stdout('RESULT 2\nOK') + continue + + try: + with open('/run/supervisord.pid', 'r') as pidfile: + pid = int(pidfile.readline()) + write_stderr('Killing supervisord with pid: ' + str(pid) + '\n') + os.kill(pid, signal.SIGQUIT) + except Exception as e: + write_stderr('Could not kill supervisor: ' + str(e) + '\n') + + # transition from READY to ACKNOWLEDGED + write_stdout('RESULT 2\nOK') + return + + +if __name__ == '__main__': + main() diff --git a/docker/dev/vpp.conf b/docker/dev/vpp.conf index f64ad00f52..13f617ab03 100644 --- a/docker/dev/vpp.conf +++ b/docker/dev/vpp.conf @@ -18,4 +18,11 @@ socksvr { } statseg { default -} \ No newline at end of file +} +nat { + endpoint-dependent + translation hash buckets 1048576 + translation hash memory 268435456 + user hash buckets 1024 + max translations per user 10000 +} diff --git a/docker/prod/Dockerfile b/docker/prod/Dockerfile index 837bb62798..ecca606456 100644 --- a/docker/prod/Dockerfile +++ b/docker/prod/Dockerfile @@ -21,6 +21,7 @@ RUN apt-get update \ ipsec-tools \ python \ supervisor \ + netcat \ && rm -rf /var/lib/apt/lists/* # install vpp @@ -48,7 +49,6 @@ COPY --from=devimg \ COPY \ etcd.conf \ kafka.conf \ - govpp.conf \ vpp-plugin.conf \ linux-plugin.conf \ /opt/vpp-agent/dev/ diff --git a/docker/prod/govpp.conf b/docker/prod/govpp.conf deleted file mode 100644 index b05afa02ea..0000000000 --- a/docker/prod/govpp.conf +++ /dev/null @@ -1,4 +0,0 @@ -health-check-probe-interval: 1000000000 -health-check-reply-timeout: 100000000 -health-check-threshold: 1 -reply-timeout: 1000000000 diff --git a/docker/prod/vpp.conf b/docker/prod/vpp.conf index e53d74401f..7f5e02811e 100644 --- a/docker/prod/vpp.conf +++ b/docker/prod/vpp.conf @@ -8,3 +8,6 @@ plugins { disable } } +statseg { + default +} \ No newline at end 
of file diff --git a/examples/custom_model/main.go b/examples/custom_model/main.go new file mode 100644 index 0000000000..1c230307ec --- /dev/null +++ b/examples/custom_model/main.go @@ -0,0 +1,289 @@ +// Copyright (c) 2017 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "context" + "fmt" + "log" + "net" + "sync" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/ligato/cn-infra/agent" + "github.com/ligato/cn-infra/infra" + "github.com/ligato/cn-infra/logging/logrus" + "github.com/ligato/vpp-agent/api/configurator" + "github.com/ligato/vpp-agent/client" + "github.com/ligato/vpp-agent/cmd/vpp-agent/app" + "github.com/ligato/vpp-agent/examples/custom_model/pb" + "github.com/ligato/vpp-agent/plugins/orchestrator" + "github.com/namsral/flag" + "google.golang.org/grpc" + + "github.com/ligato/vpp-agent/api/models/linux" + "github.com/ligato/vpp-agent/api/models/linux/interfaces" + "github.com/ligato/vpp-agent/api/models/linux/l3" + "github.com/ligato/vpp-agent/api/models/vpp" + interfaces "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + "github.com/ligato/vpp-agent/api/models/vpp/l2" +) + +var ( + address = flag.String("address", "172.17.0.2:9111", "address of GRPC server") + socketType = flag.String("socket-type", "tcp", "socket type [tcp, tcp4, tcp6, unix, unixpacket]") + + dialTimeout = time.Second * 2 +) + +var exampleFinished = make(chan struct{}) + +func main() { + ep := 
&ExamplePlugin{} + ep.Deps = Deps{ + VPP: app.DefaultVPP(), + Linux: app.DefaultLinux(), + Orchestrator: &orchestrator.DefaultPlugin, + } + ep.SetName("custom-model-example") + ep.Setup() + + a := agent.NewAgent( + agent.AllPlugins(ep), + agent.QuitOnClose(exampleFinished), + ) + if err := a.Run(); err != nil { + log.Fatal() + } +} + +// ExamplePlugin demonstrates the use of the remoteclient to locally transport example configuration into the default VPP plugins. +type ExamplePlugin struct { + Deps + + conn *grpc.ClientConn + + wg sync.WaitGroup + cancel context.CancelFunc +} + +type Deps struct { + infra.PluginDeps + app.VPP + app.Linux + Orchestrator *orchestrator.Plugin +} + +// Init initializes example plugin. +func (p *ExamplePlugin) Init() (err error) { + _, p.cancel = context.WithCancel(context.Background()) + + // Set up connection to the server. + p.conn, err = grpc.Dial("unix", + grpc.WithInsecure(), + grpc.WithDialer(dialer(*socketType, *address, dialTimeout)), + ) + if err != nil { + return err + } + + p.Log.Info("Init complete") + return nil +} + +// AfterInit executes client demo. +func (p *ExamplePlugin) AfterInit() (err error) { + go func() { + time.Sleep(time.Second) + + //c := remoteclient.NewClientGRPC(api.NewGenericManagerClient(conn)) + c := client.LocalClient + + demonstrateClient(c) + + time.Sleep(time.Second * 3) + + logrus.DefaultLogger().Info("Closing example") + close(exampleFinished) + }() + return nil +} + +// Close cleans up the resources. 
+func (p *ExamplePlugin) Close() error { + logrus.DefaultLogger().Info("Closing example") + + p.cancel() + p.wg.Wait() + + if err := p.conn.Close(); err != nil { + return err + } + + return nil +} + +func demonstrateClient(c client.ConfigClient) { + // ========================================== + // List known models + // ========================================== + knownModels, err := c.KnownModels() + if err != nil { + log.Fatalln(err) + } + fmt.Printf("Listing %d known models..\n", len(knownModels)) + for _, model := range knownModels { + fmt.Printf(" - %v\n", model.String()) + } + time.Sleep(time.Second * 3) + + // ========================================== + // Resync config + // ========================================== + fmt.Printf("Requesting config resync..\n") + err = c.ResyncConfig( + memif1, memif2, + veth1, veth2, + routeX, + ) + if err != nil { + log.Fatalln(err) + } + time.Sleep(time.Second * 5) + + // ========================================== + // Change config + // ========================================== + fmt.Printf("Requesting config change..\n") + memif1.Enabled = false + memif1.Mtu = 666 + + custom := &mymodel.MyModel{ + Name: "my1", + Mynum: 33, + } + + req := c.ChangeRequest() + req.Update(afp1, memif1, bd1, vppRoute1, custom) + req.Delete(memif2) + if err := req.Send(context.Background()); err != nil { + log.Fatalln(err) + } + time.Sleep(time.Second * 5) + + // ========================================== + // Get config + // ========================================== + fmt.Printf("Retrieving config..\n") + data := &configurator.Config{ + VppConfig: &vpp.ConfigData{}, + LinuxConfig: &linux.ConfigData{}, + } + if err := c.GetConfig(data.VppConfig, data.LinuxConfig); err != nil { + log.Fatalln(err) + } + fmt.Printf("Retrieved config:\n%+v\n", proto.MarshalTextString(data)) +} + +// Dialer for unix domain socket +func dialer(socket, address string, timeoutVal time.Duration) func(string, time.Duration) (net.Conn, error) { + return func(addr 
string, timeout time.Duration) (net.Conn, error) { + // Pass values + addr, timeout = address, timeoutVal + // Dial with timeout + return net.DialTimeout(socket, addr, timeoutVal) + } +} + +var ( + memif1 = &vpp.Interface{ + Name: "memif1", + Enabled: true, + IpAddresses: []string{"3.3.0.1/16"}, + Type: interfaces.Interface_MEMIF, + Link: &interfaces.Interface_Memif{ + Memif: &interfaces.MemifLink{ + Id: 1, + Master: true, + Secret: "secret", + SocketFilename: "/tmp/memif1.sock", + }, + }, + } + memif2 = &vpp.Interface{ + Name: "memif1.1", + Enabled: true, + Type: interfaces.Interface_SUB_INTERFACE, + IpAddresses: []string{"3.10.0.1/24"}, + Link: &interfaces.Interface_Sub{ + Sub: &interfaces.SubInterface{ + ParentName: "memif1", + SubId: 10, + }, + }, + } + bd1 = &vpp.BridgeDomain{ + Name: "bd1", + Interfaces: []*vpp_l2.BridgeDomain_Interface{ + {Name: "memif1"}, + }, + } + vppRoute1 = &vpp.Route{ + OutgoingInterface: "memif1", + DstNetwork: "4.4.10.0/24", + NextHopAddr: "3.10.0.5", + } + afp1 = &vpp.Interface{ + Name: "afp1", + Enabled: true, + Type: interfaces.Interface_AF_PACKET, + IpAddresses: []string{"10.10.3.5/24"}, + Link: &interfaces.Interface_Afpacket{ + Afpacket: &interfaces.AfpacketLink{ + HostIfName: "veth1", + }, + }, + } + veth1 = &linux.Interface{ + Name: "myVETH1", + Type: linux_interfaces.Interface_VETH, + Enabled: true, + HostIfName: "veth1", + IpAddresses: []string{"10.10.3.1/24"}, + Link: &linux_interfaces.Interface_Veth{ + Veth: &linux_interfaces.VethLink{ + PeerIfName: "myVETH2", + }, + }, + } + veth2 = &linux.Interface{ + Name: "myVETH2", + Type: linux_interfaces.Interface_VETH, + Enabled: true, + HostIfName: "veth2", + Link: &linux_interfaces.Interface_Veth{ + Veth: &linux_interfaces.VethLink{ + PeerIfName: "myVETH1", + }, + }, + } + routeX = &linux.Route{ + DstNetwork: "192.168.5.0/24", + OutgoingInterface: "myVETH1", + GwAddr: "10.10.3.254", + Scope: linux_l3.Route_GLOBAL, + } +) diff --git a/examples/custom_model/pb/model.go 
b/examples/custom_model/pb/model.go new file mode 100644 index 0000000000..7a422abd18 --- /dev/null +++ b/examples/custom_model/pb/model.go @@ -0,0 +1,13 @@ +package mymodel + +import ( + "github.com/ligato/vpp-agent/pkg/models" +) + +func init() { + models.Register(&MyModel{}, models.Spec{ + Module: "custom", + Type: "mymodel", + Version: "v2", + }) +} diff --git a/examples/custom_model/pb/model.pb.go b/examples/custom_model/pb/model.pb.go new file mode 100644 index 0000000000..c09ac9e23e --- /dev/null +++ b/examples/custom_model/pb/model.pb.go @@ -0,0 +1,92 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/ligato/vpp-agent/examples/custom_model/pb/model.proto + +package mymodel + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type MyModel struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Mynum int32 `protobuf:"varint,2,opt,name=mynum,proto3" json:"mynum,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MyModel) Reset() { *m = MyModel{} } +func (m *MyModel) String() string { return proto.CompactTextString(m) } +func (*MyModel) ProtoMessage() {} +func (*MyModel) Descriptor() ([]byte, []int) { + return fileDescriptor_model_19bb6e2a6c7fed00, []int{0} +} +func (m *MyModel) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MyModel.Unmarshal(m, b) +} +func (m *MyModel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MyModel.Marshal(b, m, deterministic) +} +func (dst *MyModel) XXX_Merge(src proto.Message) { + xxx_messageInfo_MyModel.Merge(dst, src) +} +func (m *MyModel) XXX_Size() int { + return xxx_messageInfo_MyModel.Size(m) +} +func (m *MyModel) XXX_DiscardUnknown() { + xxx_messageInfo_MyModel.DiscardUnknown(m) +} + +var xxx_messageInfo_MyModel proto.InternalMessageInfo + +func (m *MyModel) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MyModel) GetMynum() int32 { + if m != nil { + return m.Mynum + } + return 0 +} + +func (*MyModel) XXX_MessageName() string { + return "mymodel.MyModel" +} +func init() { + proto.RegisterType((*MyModel)(nil), "mymodel.MyModel") +} + +func init() { + proto.RegisterFile("github.com/ligato/vpp-agent/examples/custom_model/pb/model.proto", fileDescriptor_model_19bb6e2a6c7fed00) +} + +var fileDescriptor_model_19bb6e2a6c7fed00 = []byte{ + // 172 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4a, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0xcf, 0xc9, 0x4c, 0x4f, 0x2c, 0xc9, 0xd7, 0x2f, 0x2b, + 
0x28, 0xd0, 0x4d, 0x4c, 0x4f, 0xcd, 0x2b, 0xd1, 0x4f, 0xad, 0x48, 0xcc, 0x2d, 0xc8, 0x49, 0x2d, + 0xd6, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xca, 0x4c, 0xd6, 0x4d, 0xce, 0xc9, 0x04, 0x89, 0x17, 0x24, + 0xe9, 0xe7, 0xe6, 0xa7, 0xa4, 0xe6, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xb1, 0xe7, 0x56, + 0x82, 0xb9, 0x52, 0xba, 0x48, 0x86, 0xa5, 0xe7, 0xa7, 0xe7, 0xeb, 0x83, 0xe5, 0x93, 0x4a, 0xd3, + 0xc0, 0x3c, 0x30, 0x07, 0xcc, 0x82, 0xe8, 0x53, 0x32, 0xe6, 0x62, 0xf7, 0xad, 0xf4, 0x05, 0xe9, + 0x14, 0x12, 0xe2, 0x62, 0xc9, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0x02, + 0xb3, 0x85, 0x44, 0xb8, 0x58, 0x73, 0x2b, 0xf3, 0x4a, 0x73, 0x25, 0x98, 0x14, 0x18, 0x35, 0x58, + 0x83, 0x20, 0x1c, 0x27, 0x96, 0x13, 0x8f, 0xe5, 0x18, 0x93, 0xd8, 0xc0, 0x26, 0x18, 0x03, 0x02, + 0x00, 0x00, 0xff, 0xff, 0x46, 0x04, 0x9d, 0xe7, 0xbf, 0x00, 0x00, 0x00, +} diff --git a/examples/custom_model/pb/model.proto b/examples/custom_model/pb/model.proto new file mode 100644 index 0000000000..f3340cdf9f --- /dev/null +++ b/examples/custom_model/pb/model.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package mymodel; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option (gogoproto.messagename_all) = true; + +message MyModel { + string name = 1; + int32 mynum = 2; +} diff --git a/examples/govpp_call/main.go b/examples/govpp_call/main.go index 2ca11df64d..8f53fbef40 100644 --- a/examples/govpp_call/main.go +++ b/examples/govpp_call/main.go @@ -15,20 +15,18 @@ package main import ( + "log" "time" govppapi "git.fd.io/govpp.git/api" "github.com/ligato/cn-infra/agent" - "github.com/ligato/cn-infra/datasync" - "github.com/ligato/cn-infra/datasync/kvdbsync/local" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" "github.com/ligato/cn-infra/utils/safeclose" + l2 "github.com/ligato/vpp-agent/api/models/vpp/l2" + "github.com/ligato/vpp-agent/cmd/vpp-agent/app" "github.com/ligato/vpp-agent/plugins/govppmux" - "github.com/ligato/vpp-agent/plugins/vpp" l2Api 
"github.com/ligato/vpp-agent/plugins/vpp/binapi/l2" - "github.com/ligato/vpp-agent/plugins/vpp/model/l2" - "log" ) // ************************************************************************* @@ -47,22 +45,15 @@ import ( // required for the example are initialized. Agent is instantiated with generic plugins (etcd, Kafka, Status check, // HTTP and Log), and GOVPP, and resync plugin, and example plugin which demonstrates GOVPP call functionality. func main() { - //Init close channel to stop the example. - closeChannel := make(chan struct{}, 1) - // Prepare all the dependencies for example plugin - watcher := datasync.KVProtoWatchers{ - local.Get(), - } - vppPlugin := vpp.NewPlugin(vpp.UseDeps(func(deps *vpp.Deps) { - deps.Watcher = watcher - })) + // Init close channel to stop the example. + closeChannel := make(chan struct{}) // Inject dependencies to example plugin ep := &ExamplePlugin{ Log: logrus.DefaultLogger(), closeChannel: closeChannel, } - ep.Deps.VPP = vppPlugin + ep.Deps.VPP = app.DefaultVPP() ep.Deps.GoVppMux = &govppmux.DefaultPlugin // Start Agent @@ -92,7 +83,7 @@ type ExamplePlugin struct { // Deps is example plugin dependencies. type Deps struct { GoVppMux *govppmux.Plugin - VPP *vpp.Plugin + VPP app.VPP } // Init members of plugin. 
@@ -103,8 +94,6 @@ func (plugin *ExamplePlugin) Init() (err error) { plugin.Log.Info("Default plugin plugin ready") - plugin.VPP.DisableResync(l2.BdPrefix) - // Make VPP call go plugin.VppCall() @@ -168,38 +157,34 @@ func (plugin *ExamplePlugin) VppCall() { } // Auxiliary function to build bridge domain data -func buildData(name string) *l2.BridgeDomains { - return &l2.BridgeDomains{ - BridgeDomains: []*l2.BridgeDomains_BridgeDomain{ +func buildData(name string) *l2.BridgeDomain { + return &l2.BridgeDomain{ + Name: name, + Flood: false, + UnknownUnicastFlood: true, + Forward: true, + Learn: true, + ArpTermination: true, + MacAge: 0, + Interfaces: []*l2.BridgeDomain_Interface{ { - Name: name, - Flood: false, - UnknownUnicastFlood: true, - Forward: true, - Learn: true, - ArpTermination: true, - MacAge: 0, - Interfaces: []*l2.BridgeDomains_BridgeDomain_Interfaces{ - { - Name: "memif1", - }, - }, + Name: "memif1", }, }, } } // Auxiliary method to transform agent model data to binary api format -func buildBinapiMessage(data *l2.BridgeDomains, id uint32) *l2Api.BridgeDomainAddDel { +func buildBinapiMessage(data *l2.BridgeDomain, id uint32) *l2Api.BridgeDomainAddDel { req := &l2Api.BridgeDomainAddDel{} req.IsAdd = 1 req.BdID = id - req.Flood = boolToInt(data.BridgeDomains[0].Flood) - req.UuFlood = boolToInt(data.BridgeDomains[0].UnknownUnicastFlood) - req.Forward = boolToInt(data.BridgeDomains[0].Forward) - req.Learn = boolToInt(data.BridgeDomains[0].Learn) - req.ArpTerm = boolToInt(data.BridgeDomains[0].ArpTermination) - req.MacAge = uint8(data.BridgeDomains[0].MacAge) + req.Flood = boolToInt(data.Flood) + req.UuFlood = boolToInt(data.UnknownUnicastFlood) + req.Forward = boolToInt(data.Forward) + req.Learn = boolToInt(data.Learn) + req.ArpTerm = boolToInt(data.ArpTermination) + req.MacAge = uint8(data.MacAge) return req } diff --git a/examples/grpc_vpp/notifications/main.go b/examples/grpc_vpp/notifications/main.go index 1d9c7e5b9a..ff69168ac0 100644 --- 
a/examples/grpc_vpp/notifications/main.go +++ b/examples/grpc_vpp/notifications/main.go @@ -15,48 +15,34 @@ package main import ( - "fmt" "io" "log" "net" - "os" "time" "github.com/ligato/cn-infra/agent" - "github.com/ligato/cn-infra/logging" + "github.com/ligato/cn-infra/infra" "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/vpp-agent/plugins/vpp/model/rpc" "github.com/namsral/flag" "golang.org/x/net/context" "google.golang.org/grpc" -) -const ( - defaultAddress = "localhost:9111" - defaultSocket = "tcp" - requestPeriod = 3 + "github.com/ligato/vpp-agent/api/configurator" ) var ( - address = defaultAddress - socketType string - reqPer = requestPeriod + address = flag.String("address", "localhost:9111", "address of GRPC server") + socketType = flag.String("socket-type", "tcp", "[tcp, tcp4, tcp6, unix, unixpacket]") + reqPer = flag.Int("request-period", 3, "notification request period in seconds") ) -// init sets the default logging level -func init() { - logrus.DefaultLogger().SetOutput(os.Stdout) - logrus.DefaultLogger().SetLevel(logging.DebugLevel) -} - // Start Agent plugins selected for this example. func main() { - flag.StringVar(&address, "address", defaultAddress, "address of GRPC server") - flag.StringVar(&socketType, "socket-type", defaultSocket, "[tcp, tcp4, tcp6, unix, unixpacket]") - flag.IntVar(&reqPer, "request-period", requestPeriod, "notification request period in seconds") - // Inject dependencies to example plugin ep := &ExamplePlugin{} + ep.SetName("remote-client-example") + ep.Setup() + // Start Agent a := agent.NewAgent( agent.AllPlugins(ep), @@ -66,71 +52,56 @@ func main() { } } -// PluginName represents name of plugin. -const PluginName = "grpc-notification-example" - // ExamplePlugin demonstrates the use of grpc to watch on VPP notifications using vpp-agent. type ExamplePlugin struct { + infra.PluginDeps + conn *grpc.ClientConn } // Init initializes example plugin. 
-func (plugin *ExamplePlugin) Init() (err error) { +func (p *ExamplePlugin) Init() (err error) { // Set up connection to the server. - switch socketType { - case "tcp", "tcp4", "tcp6", "unix", "unixpacket": - plugin.conn, err = grpc.Dial("unix", grpc.WithInsecure(), - grpc.WithDialer(dialer(socketType, address, 2*time.Second))) - default: - return fmt.Errorf("unknown gRPC socket type: %s", socketType) - } + p.conn, err = grpc.Dial("unix", + grpc.WithInsecure(), + grpc.WithDialer(dialer(*socketType, *address, 2*time.Second))) if err != nil { return err } + client := configurator.NewConfiguratorClient(p.conn) + // Start notification watcher. - go plugin.watchNotifications() + go p.watchNotifications(client) logrus.DefaultLogger().Info("Initialization of the example plugin has completed") return err } -// Close does nothing -func (plugin *ExamplePlugin) Close() error { - return nil -} - -// String returns plugin name -func (plugin *ExamplePlugin) String() string { - return PluginName -} - // Get is an implementation of client-side statistics streaming. -func (plugin *ExamplePlugin) watchNotifications() { - var nextIdx uint32 = 1 +func (p *ExamplePlugin) watchNotifications(client configurator.ConfiguratorClient) { + var nextIdx uint32 + logrus.DefaultLogger().Info("Watching..") for { - // Get client for notification service - client := rpc.NewNotificationServiceClient(plugin.conn) // Prepare request with the initial index - request := &rpc.NotificationRequest{ + request := &configurator.NotificationRequest{ Idx: nextIdx, } // Get stream object - stream, err := client.Get(context.Background(), request) + stream, err := client.Notify(context.Background(), request) if err != nil { logrus.DefaultLogger().Error(err) return } // Receive all message from the stream - logrus.DefaultLogger().Info("Sending request ... 
") var recvNotifs int for { notif, err := stream.Recv() if err == io.EOF { if recvNotifs == 0 { - logrus.DefaultLogger().Info("No new notifications") + //logrus.DefaultLogger().Info("No new notifications") } else { logrus.DefaultLogger().Infof("%d new notifications received", recvNotifs) } @@ -141,14 +112,14 @@ func (plugin *ExamplePlugin) watchNotifications() { return } - logrus.DefaultLogger().Infof("(IDX: %d) Received notif: %v", - notif.NextIdx-1, notif.NIf) + logrus.DefaultLogger().Infof("Notification[%d]: %v", + notif.NextIdx-1, notif.Notification) nextIdx = notif.NextIdx recvNotifs++ } // Wait till next request - time.Sleep(time.Duration(reqPer) * time.Second) + time.Sleep(time.Duration(*reqPer) * time.Second) } } diff --git a/examples/grpc_vpp/remote_client/main.go b/examples/grpc_vpp/remote_client/main.go index deef0149c1..6046424623 100644 --- a/examples/grpc_vpp/remote_client/main.go +++ b/examples/grpc_vpp/remote_client/main.go @@ -16,58 +16,43 @@ package main import ( "context" + "fmt" "log" "net" - "os" "sync" "time" - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/cn-infra/utils/safeclose" - "github.com/ligato/vpp-agent/clientv1/vpp/remoteclient" - "github.com/ligato/vpp-agent/plugins/vpp/model/acl" - "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/model/l2" - "github.com/ligato/vpp-agent/plugins/vpp/model/l3" - "github.com/ligato/vpp-agent/plugins/vpp/model/rpc" - - "fmt" - + "github.com/gogo/protobuf/jsonpb" + "github.com/gogo/protobuf/proto" "github.com/ligato/cn-infra/agent" + "github.com/ligato/cn-infra/infra" + "github.com/ligato/cn-infra/logging/logrus" "github.com/namsral/flag" "google.golang.org/grpc" -) -const ( - defaultAddress = "localhost:9111" - defaultSocket = "tcp" + "github.com/ligato/vpp-agent/api/configurator" + "github.com/ligato/vpp-agent/api/models/linux" + "github.com/ligato/vpp-agent/api/models/linux/interfaces" + 
"github.com/ligato/vpp-agent/api/models/vpp" + interfaces "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + "github.com/ligato/vpp-agent/api/models/vpp/ipsec" + "github.com/ligato/vpp-agent/api/models/vpp/l3" ) -var address = defaultAddress -var socketType string +var ( + address = flag.String("address", "172.17.0.2:9111", "address of GRPC server") + socketType = flag.String("socket-type", "tcp", "socket type [tcp, tcp4, tcp6, unix, unixpacket]") -// init sets the default logging level -func init() { - logrus.DefaultLogger().SetOutput(os.Stdout) - logrus.DefaultLogger().SetLevel(logging.DebugLevel) -} + dialTimeout = time.Second * 2 +) -/******** - * Main * - ********/ +var exampleFinished = make(chan struct{}) -// Start Agent plugins selected for this example. func main() { - flag.StringVar(&address, "address", defaultAddress, "address of GRPC server") - flag.StringVar(&socketType, "socket-type", defaultSocket, "socket type [tcp, tcp4, tcp6, unix, unixpacket]") - - //Init close channel to stop the example. - exampleFinished := make(chan struct{}, 1) - - // Inject dependencies to example plugin ep := &ExamplePlugin{} - // Start Agent + ep.SetName("remote-client-example") + ep.Setup() + a := agent.NewAgent( agent.AllPlugins(ep), agent.QuitOnClose(exampleFinished), @@ -75,75 +60,138 @@ func main() { if err := a.Run(); err != nil { log.Fatal() } - - // End when the localhost example is finished. - go closeExample("localhost example finished", exampleFinished) - } -// Stop the agent with desired info message. -func closeExample(message string, exampleFinished chan struct{}) { - time.Sleep(25 * time.Second) - logrus.DefaultLogger().Info(message) - close(exampleFinished) -} - -/****************** - * Example plugin * - ******************/ - -// PluginName represents name of plugin. -const PluginName = "grpc-config-example" - // ExamplePlugin demonstrates the use of the remoteclient to locally transport example configuration into the default VPP plugins. 
type ExamplePlugin struct { + infra.PluginDeps + + conn *grpc.ClientConn + wg sync.WaitGroup cancel context.CancelFunc - conn *grpc.ClientConn } // Init initializes example plugin. -func (plugin *ExamplePlugin) Init() (err error) { +func (p *ExamplePlugin) Init() (err error) { // Set up connection to the server. - switch socketType { - case "tcp", "tcp4", "tcp6", "unix", "unixpacket": - plugin.conn, err = grpc.Dial("unix", grpc.WithInsecure(), - grpc.WithDialer(dialer(socketType, address, 2*time.Second))) - default: - return fmt.Errorf("unknown gRPC socket type: %s", socketType) + p.conn, err = grpc.Dial("unix", + grpc.WithInsecure(), + grpc.WithDialer(dialer(*socketType, *address, dialTimeout)), + ) + if err != nil { + return err } + client := configurator.NewConfiguratorClient(p.conn) + // Apply initial VPP configuration. - plugin.resyncVPP() + go p.demonstrateClient(client) // Schedule reconfiguration. var ctx context.Context - ctx, plugin.cancel = context.WithCancel(context.Background()) - plugin.wg.Add(1) - go plugin.reconfigureVPP(ctx) - go plugin.getConfiguration(ctx) + ctx, p.cancel = context.WithCancel(context.Background()) + _ = ctx + /*plugin.wg.Add(1) + go plugin.reconfigureVPP(ctx)*/ + + go func() { + time.Sleep(time.Second * 30) + close(exampleFinished) + }() - logrus.DefaultLogger().Info("Initialization of the example plugin has completed") return nil } // Close cleans up the resources. 
-func (plugin *ExamplePlugin) Close() error { - plugin.cancel() - plugin.wg.Wait() +func (p *ExamplePlugin) Close() error { + logrus.DefaultLogger().Info("Closing example plugin") - err := safeclose.Close(plugin.conn) - if err != nil { + p.cancel() + p.wg.Wait() + + if err := p.conn.Close(); err != nil { return err } - logrus.DefaultLogger().Info("Closed example plugin") return nil } -// String returns plugin name -func (plugin *ExamplePlugin) String() string { - return PluginName +// demonstrateClient propagates snapshot of the whole initial configuration to VPP plugins. +func (p *ExamplePlugin) demonstrateClient(client configurator.ConfiguratorClient) { + time.Sleep(time.Second * 2) + p.Log.Infof("Requesting resync..") + + config := &configurator.Config{ + VppConfig: &vpp.ConfigData{ + Interfaces: []*interfaces.Interface{ + memif1, + }, + IpscanNeighbor: ipScanNeigh, + IpsecSas: []*vpp_ipsec.SecurityAssociation{sa10}, + IpsecSpds: []*vpp_ipsec.SecurityPolicyDatabase{spd1}, + }, + LinuxConfig: &linux.ConfigData{ + Interfaces: []*linux_interfaces.Interface{ + veth1, veth2, + }, + }, + } + _, err := client.Update(context.Background(), &configurator.UpdateRequest{ + Update: config, + FullResync: true, + }) + if err != nil { + log.Fatalln(err) + } + + time.Sleep(time.Second * 5) + p.Log.Infof("Requesting change..") + + ifaces := []*interfaces.Interface{memif1, memif2, afpacket} + _, err = client.Update(context.Background(), &configurator.UpdateRequest{ + Update: &configurator.Config{ + VppConfig: &vpp.ConfigData{ + Interfaces: ifaces, + }, + }, + }) + if err != nil { + log.Fatalln(err) + } + time.Sleep(time.Second * 5) + p.Log.Infof("Requesting delete..") + + ifaces = []*interfaces.Interface{memif1} + _, err = client.Delete(context.Background(), &configurator.DeleteRequest{ + Delete: &configurator.Config{ + VppConfig: &vpp.ConfigData{ + Interfaces: ifaces, + }, + }, + }) + if err != nil { + log.Fatalln(err) + } + + time.Sleep(time.Second * 5) + p.Log.Infof("Requesting 
get..") + + cfg, err := client.Get(context.Background(), &configurator.GetRequest{}) + if err != nil { + log.Fatalln(err) + } + out, _ := (&jsonpb.Marshaler{Indent: " "}).MarshalToString(cfg) + fmt.Printf("Config:\n %+v\n", out) + + time.Sleep(time.Second * 5) + p.Log.Infof("Requesting dump..") + + dump, err := client.Dump(context.Background(), &configurator.DumpRequest{}) + if err != nil { + log.Fatalln(err) + } + fmt.Printf("Dump:\n %+v\n", proto.MarshalTextString(dump)) } // Dialer for unix domain socket @@ -156,8 +204,102 @@ func dialer(socket, address string, timeoutVal time.Duration) func(string, time. } } -// resyncVPP propagates snapshot of the whole initial configuration to VPP plugins. -func (plugin *ExamplePlugin) resyncVPP() { +var ( + sa10 = &vpp.IPSecSA{ + Index: "10", + Spi: 1001, + Protocol: 1, + CryptoAlg: 1, + CryptoKey: "4a506a794f574265564551694d653768", + IntegAlg: 2, + IntegKey: "4339314b55523947594d6d3547666b45764e6a58", + } + spd1 = &vpp.IPSecSPD{ + Index: "1", + PolicyEntries: []*vpp_ipsec.SecurityPolicyDatabase_PolicyEntry{ + { + Priority: 100, + IsOutbound: false, + Action: 0, + Protocol: 50, + SaIndex: "10", + }, + }, + } + memif1 = &vpp.Interface{ + Name: "memif1", + Enabled: true, + IpAddresses: []string{"3.3.0.1/16"}, + Type: interfaces.Interface_MEMIF, + Link: &interfaces.Interface_Memif{ + Memif: &interfaces.MemifLink{ + Id: 1, + Master: true, + Secret: "secret", + SocketFilename: "/tmp/memif1.sock", + }, + }, + } + memif2 = &vpp.Interface{ + Name: "memif2", + Enabled: true, + IpAddresses: []string{"4.3.0.1/16"}, + Type: interfaces.Interface_MEMIF, + Link: &interfaces.Interface_Memif{ + Memif: &interfaces.MemifLink{ + Id: 2, + Master: true, + Secret: "secret", + SocketFilename: "/tmp/memif2.sock", + }, + }, + } + ipScanNeigh = &vpp.IPScanNeigh{ + Mode: vpp_l3.IPScanNeighbor_BOTH, + } + veth1 = &linux.Interface{ + Name: "myVETH1", + Type: linux_interfaces.Interface_VETH, + Enabled: true, + HostIfName: "veth1", + IpAddresses: 
[]string{"10.10.3.1/24"}, + Link: &linux_interfaces.Interface_Veth{ + Veth: &linux_interfaces.VethLink{ + PeerIfName: "myVETH2", + }, + }, + } + veth2 = &linux.Interface{ + Name: "myVETH2", + Type: linux_interfaces.Interface_VETH, + Enabled: true, + HostIfName: "veth2", + Link: &linux_interfaces.Interface_Veth{ + Veth: &linux_interfaces.VethLink{ + PeerIfName: "myVETH1", + }, + }, + } + afpacket = &vpp.Interface{ + Name: "myAFpacket", + Type: interfaces.Interface_AF_PACKET, + Enabled: true, + PhysAddress: "a7:35:45:55:65:75", + IpAddresses: []string{ + "10.20.30.40/24", + }, + Mtu: 1800, + Link: &interfaces.Interface_Afpacket{ + Afpacket: &interfaces.AfpacketLink{ + HostIfName: "veth2", + }, + }, + } +) + +/* +// demonstrateClient propagates snapshot of the whole initial configuration to VPP plugins. +func (plugin *ExamplePlugin) demonstrateClient() { err := remoteclient.DataResyncRequestGRPC(rpc.NewDataResyncServiceClient(plugin.conn)). Interface(&memif1AsMaster). Interface(&tap1Disabled). @@ -173,6 +315,7 @@ func (plugin *ExamplePlugin) resyncVPP() { // reconfigureVPP simulates a set of changes in the configuration related to VPP plugins. func (plugin *ExamplePlugin) reconfigureVPP(ctx context.Context) { + return _, dstNetAddr, err := net.ParseCIDR("192.168.2.1/32") if err != nil { return @@ -181,18 +324,18 @@ func (plugin *ExamplePlugin) reconfigureVPP(ctx context.Context) { select { case <-time.After(3 * time.Second): - // Simulate configuration change several seconds after resync. + // Simulate configuration change exactly 15seconds after resync. err := remoteclient.DataChangeRequestGRPC(rpc.NewDataChangeServiceClient(plugin.conn)). Put(). - Interface(&memif1AsSlave). /* turn memif1 into slave, remove the IP address */ - Interface(&memif2). /* newly added memif interface */ - Interface(&tap1Enabled). /* enable tap1 interface */ - Interface(&loopback1WithAddr). /* assign IP address to loopback1 interface */ - ACL(&acl1). 
/* declare ACL for the traffic leaving tap1 interface */ - XConnect(&XConMemif1ToMemif2). /* xconnect memif interfaces */ - BD(&BDLoopback1ToTap1). /* put loopback and tap1 into the same bridge domain */ + Interface(&memif1AsSlave). + Interface(&memif2). + Interface(&tap1Enabled). + Interface(&loopback1WithAddr). + ACL(&acl1). + XConnect(&XConMemif1ToMemif2). + BD(&BDLoopback1ToTap1). Delete(). - StaticRoute(0, dstNetAddr.String(), nextHopAddr.String()). /* remove the route going through memif1 */ + StaticRoute(0, dstNetAddr.String(), nextHopAddr.String()). Send().ReceiveReply() if err != nil { logrus.DefaultLogger().Errorf("Failed to reconfigure VPP: %v", err) @@ -205,28 +348,7 @@ func (plugin *ExamplePlugin) reconfigureVPP(ctx context.Context) { } plugin.wg.Done() } - -func (plugin *ExamplePlugin) getConfiguration(ctx context.Context) { - select { - case <-time.After(6 * time.Second): - // Simulate get (dump) - reply, err := remoteclient.DataDumpRequestGRPC(rpc.NewDataDumpServiceClient(plugin.conn)). - Dump(). - ACLs(). - Interfaces(). - BDs(). - XConnects(). - Send(). - ReceiveReply() - if err != nil { - logrus.DefaultLogger().Errorf("Failed to dump data from the VPP: %v", err) - } else { - logrus.DefaultLogger().Infof("Data dumped from the VPP: ACLs: %d, interfaces: %d, bd: %d, xc: %d", - len(reply.GetACLs()), len(reply.GetInterfaces()), len(reply.GetBDs()), len(reply.GetXConnects())) - } - } -} - +*/ /************************* * Example plugin config * *************************/ @@ -268,7 +390,7 @@ func (plugin *ExamplePlugin) getConfiguration(ctx context.Context) { * +------------------------------------------------+ * * * ********************************************************/ - +/* var ( // memif1AsMaster is an example of a memory interface configuration. (Master=true, with IPv4 address). 
memif1AsMaster = interfaces.Interfaces_Interface{ @@ -394,7 +516,7 @@ var ( Forward: true, Learn: true, ArpTermination: false, - MacAge: 0, /* means disable aging */ + MacAge: 0, Interfaces: []*l2.BridgeDomains_BridgeDomain_Interfaces{ { Name: "loopback1", @@ -415,3 +537,4 @@ var ( Weight: 5, } ) +*/ diff --git a/examples/idx_bd_cache/doc.go b/examples/idx_bd_cache/doc.go deleted file mode 100644 index b4d4c85a2b..0000000000 --- a/examples/idx_bd_cache/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Example idx_bd_cache demonstrates the use of "bridge-domain name-to-index -// cache" to watch for bridge-domain config changes across agents. -package main diff --git a/examples/idx_bd_cache/main.go b/examples/idx_bd_cache/main.go deleted file mode 100644 index 9a7f94494b..0000000000 --- a/examples/idx_bd_cache/main.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "github.com/ligato/cn-infra/agent" - "github.com/ligato/cn-infra/datasync" - "github.com/ligato/cn-infra/datasync/kvdbsync" - "github.com/ligato/cn-infra/db/keyval/etcd" - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/servicelabel" - "github.com/ligato/cn-infra/utils/safeclose" - "github.com/ligato/vpp-agent/plugins/vpp" - "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/l2idx" - "github.com/ligato/vpp-agent/plugins/vpp/model/l2" - "log" -) - -const agent1, agent2 = "agent1", "agent2" - -// Start Agent plugins selected for this example. -func main() { - // Agent 1 datasync plugin - serviceLabel1 := servicelabel.NewPlugin(servicelabel.UseLabel(agent1)) - serviceLabel1.SetName(agent1) - etcdDataSyncAgent1 := kvdbsync.NewPlugin(kvdbsync.UseKV(&etcd.DefaultPlugin), kvdbsync.UseDeps(func(deps *kvdbsync.Deps) { - deps.Log = logging.ForPlugin(agent1) - deps.ServiceLabel = serviceLabel1 - })) - etcdDataSyncAgent1.SetName("etcd-datasync-" + agent1) - - // Agent 2 datasync plugin - serviceLabel2 := servicelabel.NewPlugin(servicelabel.UseLabel(agent2)) - serviceLabel2.SetName(agent2) - etcdDataSyncAgent2 := kvdbsync.NewPlugin(kvdbsync.UseKV(&etcd.DefaultPlugin), kvdbsync.UseDeps(func(deps *kvdbsync.Deps) { - deps.Log = logging.ForPlugin(agent2) - deps.ServiceLabel = serviceLabel2 - })) - etcdDataSyncAgent2.SetName("etcd-datasync-" + agent2) - - // Example plugin datasync - etcdDataSync := kvdbsync.NewPlugin(kvdbsync.UseKV(&etcd.DefaultPlugin)) - - // VPP plugin - watcher := datasync.KVProtoWatchers{ - etcdDataSync, - } - vppPlugin := vpp.NewPlugin(vpp.UseDeps(func(deps *vpp.Deps) { - deps.Watcher = watcher - })) - - // Inject dependencies to example plugin - ep := &ExamplePlugin{ - exampleFinished: make(chan struct{}), - Deps: Deps{ - Log: logging.DefaultLogger, - ETCDDataSync: etcdDataSync, - VPP: vppPlugin, - Agent1: etcdDataSyncAgent1, - Agent2: etcdDataSyncAgent2, - }, - } - - // Start Agent - a := agent.NewAgent( - 
agent.AllPlugins(ep), - agent.QuitOnClose(ep.exampleFinished), - ) - if err := a.Run(); err != nil { - log.Fatal() - } -} - -// PluginName represents name of plugin. -const PluginName = "idx-bd-cache-example" - -// ExamplePlugin is used for demonstration of Bridge Domain Indexes - see Init(). -type ExamplePlugin struct { - Deps - - bdIdxLocal l2idx.BDIndex - bdIdxAgent1 l2idx.BDIndex - bdIdxAgent2 l2idx.BDIndex - - // Fields below are used to properly finish the example. - exampleFinished chan struct{} -} - -// Deps is a helper struct which is grouping all dependencies injected to the plugin -type Deps struct { - Log logging.Logger - ETCDDataSync datasync.KeyProtoValWriter - VPP vpp.API - Agent1 *kvdbsync.Plugin - Agent2 *kvdbsync.Plugin -} - -// String returns plugin name -func (plugin *ExamplePlugin) String() string { - return PluginName -} - -// Init transport & bdIndexes, then watch, publish & lookup -func (plugin *ExamplePlugin) Init() (err error) { - // Get access to local bridge domain indexes. - plugin.bdIdxLocal = plugin.VPP.GetBDIndexes() - - // Run consumer. - go plugin.consume() - - // Cache other agent's bridge domain index mapping using injected plugin and local plugin name. - // /vnf-agent/agent1/vpp/config/v1/bd/ - plugin.bdIdxAgent1 = l2idx.Cache(plugin.Agent1) - // /vnf-agent/agent2/vpp/config/v1/bd/ - plugin.bdIdxAgent2 = l2idx.Cache(plugin.Agent2) - - return nil -} - -// AfterInit - call Cache() -func (plugin *ExamplePlugin) AfterInit() error { - // Publish test data - plugin.publish() - - return nil -} - -// Close is called by Agent Core when the Agent is shutting down. It is supposed -// to clean up resources that were allocated by the plugin during its lifetime. -func (plugin *ExamplePlugin) Close() error { - return safeclose.Close(plugin.Agent1, plugin.Agent2, plugin.ETCDDataSync, plugin.bdIdxLocal, plugin.bdIdxAgent1, - plugin.bdIdxAgent2) -} - -// Test data are published to different agents (including local). 
-func (plugin *ExamplePlugin) publish() (err error) { - // Create bridge domain in local agent. - br0 := newExampleBridgeDomain("bd0", "iface0") - err = plugin.ETCDDataSync.Put(l2.BridgeDomainKey(br0.Name), br0) - if err != nil { - return err - } - // Create bridge domain in agent1 - br1 := newExampleBridgeDomain("bd1", "iface1") - err = plugin.Agent1.Put(l2.BridgeDomainKey(br1.Name), br1) - if err != nil { - return err - } - // Create bridge domain in agent2 - br2 := newExampleBridgeDomain("bd2", "iface2") - err = plugin.Agent2.Put(l2.BridgeDomainKey(br2.Name), br2) - return err -} - -// Use the NameToIndexMapping to watch changes. -func (plugin *ExamplePlugin) consume() { - plugin.Log.Info("Watching started") - bdIdxChan := make(chan l2idx.BdChangeDto) - // Subscribe local bd-idx-mapping and both of cache mapping. - plugin.bdIdxLocal.WatchNameToIdx(PluginName, bdIdxChan) - plugin.bdIdxAgent1.WatchNameToIdx(PluginName, bdIdxChan) - plugin.bdIdxAgent2.WatchNameToIdx(PluginName, bdIdxChan) - - counter := 0 - - watching := true - for watching { - select { - case bdIdxEvent := <-bdIdxChan: - plugin.Log.Info("Event received: bridge domain ", bdIdxEvent.Name, " of ", bdIdxEvent.RegistryTitle) - counter++ - } - // Example is expecting 3 events. - if counter == 3 { - watching = false - } - } - - // Do a lookup whether all mappings were registered. - plugin.lookup() -} - -// Use the NameToIndexMapping to lookup local mapping and external cached mappings. 
-func (plugin *ExamplePlugin) lookup() { - plugin.Log.Info("Lookup in progress") - - if index, _, found := plugin.bdIdxLocal.LookupIdx("bd0"); found { - plugin.Log.Infof("Bridge domain bd0 (index %v) found in local mapping", index) - } - - if index, _, found := plugin.bdIdxAgent1.LookupIdx("bd1"); found { - plugin.Log.Infof("Bridge domain bd1 (index %v) found in local mapping", index) - } - - if index, _, found := plugin.bdIdxAgent2.LookupIdx("bd2"); found { - plugin.Log.Infof("Bridge domain bd2 (index %v) found in local mapping", index) - } - - // End the example. - plugin.Log.Infof("idx-bd-cache example finished, sending shutdown ...") - close(plugin.exampleFinished) -} - -func newExampleBridgeDomain(bdName, ifName string) *l2.BridgeDomains_BridgeDomain { - return &l2.BridgeDomains_BridgeDomain{ - Name: bdName, - Interfaces: []*l2.BridgeDomains_BridgeDomain_Interfaces{ - { - Name: ifName, - BridgedVirtualInterface: true, - }, - }, - } -} diff --git a/examples/idx_iface_cache/doc.go b/examples/idx_iface_cache/doc.go deleted file mode 100644 index cafcea476d..0000000000 --- a/examples/idx_iface_cache/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Example idx_iface_cache demonstrates the use of "interface name-to-index -// cache" to watch for VPP interface config changes across agents. -package main diff --git a/examples/idx_iface_cache/main.go b/examples/idx_iface_cache/main.go deleted file mode 100644 index 5fdc1fa707..0000000000 --- a/examples/idx_iface_cache/main.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "github.com/ligato/cn-infra/agent" - "github.com/ligato/cn-infra/datasync" - "github.com/ligato/cn-infra/datasync/kvdbsync" - "github.com/ligato/cn-infra/db/keyval/etcd" - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/servicelabel" - "github.com/ligato/cn-infra/utils/safeclose" - "github.com/ligato/vpp-agent/plugins/vpp" - "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" - "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "log" -) - -const agent1, agent2 = "agent1", "agent2" - -// Start Agent plugins selected for this example. 
-func main() { - // Agent 1 datasync plugin - serviceLabel1 := servicelabel.NewPlugin(servicelabel.UseLabel(agent1)) - serviceLabel1.SetName(agent1) - etcdDataSyncAgent1 := kvdbsync.NewPlugin(kvdbsync.UseKV(&etcd.DefaultPlugin), kvdbsync.UseDeps(func(deps *kvdbsync.Deps) { - deps.Log = logging.ForPlugin(agent1) - deps.ServiceLabel = serviceLabel1 - })) - etcdDataSyncAgent1.SetName("etcd-datasync-" + agent1) - - // Agent 2 datasync plugin - serviceLabel2 := servicelabel.NewPlugin(servicelabel.UseLabel(agent2)) - serviceLabel2.SetName(agent2) - etcdDataSyncAgent2 := kvdbsync.NewPlugin(kvdbsync.UseKV(&etcd.DefaultPlugin), kvdbsync.UseDeps(func(deps *kvdbsync.Deps) { - deps.Log = logging.ForPlugin(agent2) - deps.ServiceLabel = serviceLabel2 - })) - etcdDataSyncAgent2.SetName("etcd-datasync-" + agent2) - - // Example plugin datasync - etcdDataSync := kvdbsync.NewPlugin(kvdbsync.UseKV(&etcd.DefaultPlugin)) - - // VPP plugin - watcher := datasync.KVProtoWatchers{ - etcdDataSync, - } - vppPlugin := vpp.NewPlugin(vpp.UseDeps(func(deps *vpp.Deps) { - deps.Watcher = watcher - })) - - // Inject dependencies to example plugin - ep := &ExamplePlugin{ - exampleFinished: make(chan struct{}), - Deps: Deps{ - Log: logging.DefaultLogger, - ETCDDataSync: etcdDataSync, - VPP: vppPlugin, - Agent1: etcdDataSyncAgent1, - Agent2: etcdDataSyncAgent2, - }, - } - - // Start Agent - a := agent.NewAgent( - agent.AllPlugins(ep), - agent.QuitOnClose(ep.exampleFinished), - ) - if err := a.Run(); err != nil { - log.Fatal() - } -} - -// PluginName represents name of plugin. -const PluginName = "idx-iface-cache-example" - -// ExamplePlugin used for demonstration of SwIfIndexes - see Init() -type ExamplePlugin struct { - Deps - - swIfIdxLocal ifaceidx.SwIfIndex - swIfIdxAgent1 ifaceidx.SwIfIndex - swIfIdxAgent2 ifaceidx.SwIfIndex - - // Fields below are used to properly finish the example. 
- exampleFinished chan struct{} -} - -// Deps is a helper struct which is grouping all dependencies injected to the plugin -type Deps struct { - Log logging.Logger - ETCDDataSync *kvdbsync.Plugin - VPP vpp.API - Agent1 *kvdbsync.Plugin - Agent2 *kvdbsync.Plugin -} - -// String returns plugin name -func (plugin *ExamplePlugin) String() string { - return PluginName -} - -// Init initializes transport & SwIfIndexes then watch, publish & lookup. -func (plugin *ExamplePlugin) Init() (err error) { - // Get access to local interface indexes. - plugin.swIfIdxLocal = plugin.VPP.GetSwIfIndexes() - - // Run consumer - go plugin.consume() - - // Cache other agent's interface index mapping using injected plugin and local plugin name. - // /vnf-agent/agent1/vpp/config/v1/interface/ - plugin.swIfIdxAgent1 = ifaceidx.Cache(plugin.Agent1) - // /vnf-agent/agent2/vpp/config/v1/interface/ - plugin.swIfIdxAgent2 = ifaceidx.Cache(plugin.Agent2) - - return nil -} - -// AfterInit - call Cache() -func (plugin *ExamplePlugin) AfterInit() error { - // Publish test data. - plugin.publish() - - return nil -} - -// Close is called by Agent Core when the Agent is shutting down. It is supposed -// to clean up resources that were allocated by the plugin during its lifetime. -func (plugin *ExamplePlugin) Close() error { - return safeclose.Close(plugin.Agent1, plugin.Agent2, plugin.swIfIdxLocal, plugin.swIfIdxAgent1, - plugin.swIfIdxAgent2) -} - -// Test data are published to different agents (including local). -func (plugin *ExamplePlugin) publish() (err error) { - // Create interface in local agent. - iface0 := newExampleInterface("iface0", "192.168.0.1") - err = plugin.ETCDDataSync.Put(interfaces.InterfaceKey(iface0.Name), iface0) - if err != nil { - return err - } - // Create interface in agent1. - iface1 := newExampleInterface("iface1", "192.168.0.2") - err = plugin.Agent1.Put(interfaces.InterfaceKey(iface1.Name), iface1) - if err != nil { - return err - } - // Create interface in agent2. 
- iface2 := newExampleInterface("iface2", "192.168.0.3") - err = plugin.Agent2.Put(interfaces.InterfaceKey(iface2.Name), iface2) - return err -} - -// Use the NameToIndexMapping to watch changes. -func (plugin *ExamplePlugin) consume() { - plugin.Log.Info("Watching started") - swIfIdxChan := make(chan ifaceidx.SwIfIdxDto) - // Subscribe local iface-idx-mapping and both of cache mapping. - plugin.swIfIdxLocal.WatchNameToIdx(PluginName, swIfIdxChan) - plugin.swIfIdxAgent1.WatchNameToIdx(PluginName, swIfIdxChan) - plugin.swIfIdxAgent2.WatchNameToIdx(PluginName, swIfIdxChan) - - counter := 0 - - watching := true - for watching { - select { - case ifaceIdxEvent := <-swIfIdxChan: - plugin.Log.Info("Event received: interface ", ifaceIdxEvent.Name, " of ", ifaceIdxEvent.RegistryTitle) - counter++ - } - // Example is expecting 3 events - if counter == 3 { - watching = false - } - } - - // Do a lookup whether all mappings were registered. - plugin.lookup() -} - -// Use the NameToIndexMapping to lookup local mapping + external cached mappings. -func (plugin *ExamplePlugin) lookup() { - plugin.Log.Info("Lookup in progress") - - if index, _, found := plugin.swIfIdxLocal.LookupIdx("iface0"); found { - plugin.Log.Infof("interface iface0 (index %v) found in local mapping", index) - } - - if index, _, found := plugin.swIfIdxAgent1.LookupIdx("iface1"); found { - plugin.Log.Infof("interface iface1 (index %v) found in local mapping", index) - } - - if index, _, found := plugin.swIfIdxAgent2.LookupIdx("iface2"); found { - plugin.Log.Infof("interface iface2 (index %v) found in local mapping", index) - } - - // End the example. 
- plugin.Log.Infof("idx-iface-cache example finished, sending shutdown ...") - close(plugin.exampleFinished) -} - -func newExampleInterface(ifName, ipAddr string) *interfaces.Interfaces_Interface { - return &interfaces.Interfaces_Interface{ - Name: ifName, - Enabled: true, - IpAddresses: []string{ipAddr}, - } -} diff --git a/examples/idx_mapping_lookup/doc.go b/examples/idx_mapping_lookup/doc.go deleted file mode 100644 index bffd5bde4e..0000000000 --- a/examples/idx_mapping_lookup/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Example idx_mapping_lookup shows how to use the name-to-index mapping -// registry to create a new mapping and how to add and lookup mapping items. -package main diff --git a/examples/idx_mapping_lookup/main.go b/examples/idx_mapping_lookup/main.go deleted file mode 100644 index 2bb93c5765..0000000000 --- a/examples/idx_mapping_lookup/main.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "log" - - "github.com/ligato/cn-infra/agent" - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/vpp-agent/idxvpp" - "github.com/ligato/vpp-agent/idxvpp/nametoidx" -) - -// ************************************************************************* -// This file contains an example of the use of the name-to-index mapping registry -// to register items with unique names, and indexes, and metadata -// and to read these values. -// ************************************************************************/ - -// Main allows running Example Plugin as a statically linked binary with Agent Core Plugins. Close channel and plugins -// required for the example are initialized. Agent is instantiated with generic plugins (etcd, Kafka, Status check, -// HTTP and Log) and example plugin which demonstrates index mapping lookup functionality. -func main() { - ep := &ExamplePlugin{ - Log: logging.DefaultLogger, - exampleFinished: make(chan struct{}), - } - - // Start Agent - a := agent.NewAgent( - agent.AllPlugins(ep), - agent.QuitOnClose(ep.exampleFinished), - ) - if err := a.Run(); err != nil { - log.Fatal() - } -} - -// PluginName represents name of plugin. -const PluginName = "idx-mapping-lookup" - -// ExamplePlugin implements Plugin interface which is used to pass custom plugin instances to the Agent. -type ExamplePlugin struct { - exampleIdx idxvpp.NameToIdxRW // Name to index mapping registry - exampleIDSeq uint32 // Provides unique ID for every item stored in mapping - // Fields below are used to properly finish the example. - exampleFinished chan struct{} - - Log logging.Logger -} - -// Init is the entry point into the plugin that is called by Agent Core when the Agent is coming up. -// The Go native plugin mechanism that was introduced in Go 1.8 -func (plugin *ExamplePlugin) Init() (err error) { - // Init new name-to-index mapping. 
- plugin.exampleIdx = nametoidx.NewNameToIdx(logrus.DefaultLogger(), "example_index", nil) - - // Set the initial ID. After every registration, this ID has to be incremented - // so new mapping is registered under a unique number. - plugin.exampleIDSeq = 1 - - plugin.Log.Info("Initialization of the custom plugin for the idx-mapping lookup example is completed") - - // Demonstrate mapping lookup functionality. - plugin.exampleMappingUsage() - - // End the example. - plugin.Log.Infof("idx-mapping-lookup example finished, sending shutdown ...") - close(plugin.exampleFinished) - - return err -} - -// Close cleans up the resources. -func (plugin *ExamplePlugin) Close() error { - return nil -} - -// String returns plugin name -func (plugin *ExamplePlugin) String() string { - return PluginName -} - -// Meta structure. It can contain any number of fields of different types. Metadata is optional and can be nil. -type Meta struct { - ip string - prefix uint32 -} - -// Illustration of index-mapping lookup usage. -func (plugin *ExamplePlugin) exampleMappingUsage() { - // Random name used to registration. Every registered name should be unique. - name := "example-entity" - - // Register name, and unique ID, and metadata to the example index map. Metadata - // are optional, can be nil. Name and ID have to be unique, otherwise the mapping will be overridden. - plugin.exampleIdx.RegisterName(name, plugin.exampleIDSeq, &Meta{}) - plugin.Log.Infof("Name %v registered", name) - - // Find the registered mapping using lookup index (name has to be known). The function - // returns an index related to the provided name, and metadata (nil if there are no metadata - // or mapping was not found), and a bool flag saying whether the mapping with provided name was found or not. 
- _, meta, found := plugin.exampleIdx.LookupIdx(name) - if found && meta != nil { - plugin.Log.Infof("Name %v stored in mapping", name) - } else { - plugin.Log.Errorf("Name %v not found", name) - } - - // Find the registered mapping using lookup name (index has to be known). The function - // returns a name related to provided index, and metadata (nil if there are no metadata - // or mapping was not found), and a bool flag saying whether the mapping with provided index was found or not. - _, meta, found = plugin.exampleIdx.LookupName(plugin.exampleIDSeq) - if found && meta != nil { - plugin.Log.Infof("Index %v stored in mapping", plugin.exampleIDSeq) - } else { - plugin.Log.Errorf("Index %v not found", plugin.exampleIDSeq) - } - - // This is how to remove mapping from registry. Other plugins can be notified about this change. - plugin.exampleIdx.UnregisterName(name) - plugin.Log.Infof("Name %v unregistered", name) -} diff --git a/examples/idx_mapping_watcher/doc.go b/examples/idx_mapping_watcher/doc.go deleted file mode 100644 index c7f568157c..0000000000 --- a/examples/idx_mapping_watcher/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Example idx_mapping_watcher shows how to watch on changes done in name-to-index -// mapping registry. -package main diff --git a/examples/idx_mapping_watcher/main.go b/examples/idx_mapping_watcher/main.go deleted file mode 100644 index bb1fe3b595..0000000000 --- a/examples/idx_mapping_watcher/main.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "strconv" - - "log" - - "github.com/ligato/cn-infra/agent" - "github.com/ligato/cn-infra/datasync" - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/cn-infra/utils/safeclose" - "github.com/ligato/vpp-agent/idxvpp" - "github.com/ligato/vpp-agent/idxvpp/nametoidx" -) - -// ************************************************************************* -// This file contains the example of how to watch on changes done in name-to-index -// mapping registry. -// The procedure requires a subscriber channel used in the watcher to listen on -// created, modified or removed items in the registry. -// ************************************************************************/ - -const expectedEvents = 5 - -/******** - * Main * - ********/ - -// Main allows running Example Plugin as a statically linked binary with Agent Core Plugins. Close channel and plugins -// required for the example are initialized. The Agent is instantiated with generic plugins (etcd, Kafka, Status check, -// HTTP and Log) and example plugin which demonstrates index mapping watcher functionality. -func main() { - ep := &ExamplePlugin{ - Log: logging.DefaultLogger, - exampleFinished: make(chan struct{}), - } - - // Start Agent - a := agent.NewAgent( - agent.AllPlugins(ep), - agent.QuitOnClose(ep.exampleFinished), - ) - if err := a.Run(); err != nil { - log.Fatal() - } -} - -// PluginName represents name of plugin. -const PluginName = "idx-mapping-watcher" - -// ExamplePlugin implements Plugin interface which is used to pass custom plugin instances to the Agent. 
-type ExamplePlugin struct { - exampleIdx idxvpp.NameToIdxRW // Name-to-index mapping - exampleIDSeq uint32 // Unique ID - exIdxWatchChannel chan idxvpp.NameToIdxDto // Channel to watch changes in mapping - watchDataReg datasync.WatchRegistration // To subscribe to mapping change events - // Fields below are used to properly finish the example - eventCounter uint8 - exampleFinished chan struct{} - Log logging.Logger -} - -// Init is the entry point into the plugin that is called by Agent Core when the Agent is coming up. -// The Go native plugin mechanism was introduced in Go 1.8. -func (plugin *ExamplePlugin) Init() (err error) { - // Init new name-to-index mapping - plugin.exampleIdx = nametoidx.NewNameToIdx(logrus.DefaultLogger(), "example_index", nil) - - // Mapping channel is used to notify about changes in the mapping registry. - plugin.exIdxWatchChannel = make(chan idxvpp.NameToIdxDto, 100) - - plugin.Log.Info("Initialization of the custom plugin for the idx-mapping watcher example is completed") - - // Start watcher before plugin init. - go plugin.watchEvents() - - go func() { - // This function registers several name-to-index items to registry owned by the plugin. - for i := 1; i <= 5; i++ { - plugin.RegisterTestData(i) - } - }() - - // Subscribe name-to-index watcher. - plugin.exampleIdx.Watch(PluginName, nametoidx.ToChan(plugin.exIdxWatchChannel)) - - return err -} - -// Close cleans up the resources. -func (plugin *ExamplePlugin) Close() error { - return safeclose.Close(plugin.exIdxWatchChannel) -} - -// String returns plugin name -func (plugin *ExamplePlugin) String() string { - return PluginName -} - -/************ - * Register * - ************/ - -// RegisterTestData registers item to the name-to-index registry. -func (plugin *ExamplePlugin) RegisterTestData(index int) { - // Generate name used in registration. In the example, an index is added to the name to make it unique. 
- name := "example-entity-" + strconv.Itoa(index) - // Register name-to-index mapping with name and index. In this example, - // no metadata is used so the last is nil. Metadata are optional. - plugin.exampleIdx.RegisterName(name, plugin.exampleIDSeq, nil) - plugin.exampleIDSeq++ - plugin.Log.Infof("Name %v registered", name) -} - -/*********** - * Watcher * - ***********/ - -// Watch on name-to-index mapping changes created in plugin. -func (plugin *ExamplePlugin) watchEvents() { - plugin.Log.Info("Watcher started") - for { - select { - case exIdx := <-plugin.exIdxWatchChannel: - // Just for example purposes - plugin.eventCounter++ - - plugin.Log.Infof("Index event arrived to watcher, key %v", exIdx.Idx) - if exIdx.IsDelete() { - // IsDelete flag recognizes what kind of event arrived (put or delete). - } - // Done is used to signal to the event producer that the event consumer - // has processed the event. User of the API is supposed to clear event with Done(). - exIdx.Done() - - // End the example when it is done (5 events are expected). - if plugin.eventCounter == expectedEvents { - plugin.Log.Infof("idx-watch-lookup example finished, sending shutdown ...") - close(plugin.exampleFinished) - } - } - } -} diff --git a/examples/idx_veth_cache/doc.go b/examples/idx_veth_cache/doc.go deleted file mode 100644 index 5a42986e6b..0000000000 --- a/examples/idx_veth_cache/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Example idx_veth_cache demonstrates the use of the "VETH name-to-index cache" -// to watch Linux VETH interface config changes across agents. -package main diff --git a/examples/idx_veth_cache/main.go b/examples/idx_veth_cache/main.go deleted file mode 100644 index 82bf263d67..0000000000 --- a/examples/idx_veth_cache/main.go +++ /dev/null @@ -1,374 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "log" - "os" - "sync" - "time" - - "github.com/ligato/cn-infra/agent" - "github.com/ligato/cn-infra/datasync" - "github.com/ligato/cn-infra/datasync/kvdbsync" - "github.com/ligato/cn-infra/datasync/resync" - "github.com/ligato/cn-infra/db/keyval/etcd" - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/cn-infra/servicelabel" - "github.com/ligato/cn-infra/utils/safeclose" - "github.com/ligato/vpp-agent/plugins/linux" - linux_if "github.com/ligato/vpp-agent/plugins/linux/ifplugin/ifaceidx" - linux_intf "github.com/ligato/vpp-agent/plugins/linux/model/interfaces" -) - -// ************************************************************************* -// This file contains an examples of linux plugin name-to-index cache operations. -// -// Two more transport adapters for different agents are registered using -// OfDifferentAgent() and their interface name-to-idx mapping is cached -// with linux_if.Cache() as a new map. -// -// These new maps are watched and all new events are logged. -// -// VETH interfaces are then created on agents using both the transports and -// data presence in cached name-to-idx map is verified. -// ************************************************************************/ - -// Init sets the default logging level. -func init() { - logrus.DefaultLogger().SetOutput(os.Stdout) - logrus.DefaultLogger().SetLevel(logging.InfoLevel) -} - -const agent1, agent2 = "agent1", "agent2" - -// Start Agent plugins selected for this example. 
-func main() { - // Agent 1 datasync plugin - serviceLabel1 := servicelabel.NewPlugin(servicelabel.UseLabel(agent1)) - serviceLabel1.SetName(agent1) - etcdDataSyncAgent1 := kvdbsync.NewPlugin(kvdbsync.UseKV(&etcd.DefaultPlugin), kvdbsync.UseDeps(func(deps *kvdbsync.Deps) { - deps.Log = logging.ForPlugin(agent1) - deps.ServiceLabel = serviceLabel1 - })) - etcdDataSyncAgent1.SetName("etcd-datasync-" + agent1) - - // Agent 2 datasync plugin - serviceLabel2 := servicelabel.NewPlugin(servicelabel.UseLabel(agent2)) - serviceLabel2.SetName(agent2) - etcdDataSyncAgent2 := kvdbsync.NewPlugin(kvdbsync.UseKV(&etcd.DefaultPlugin), kvdbsync.UseDeps(func(deps *kvdbsync.Deps) { - deps.Log = logging.ForPlugin(agent2) - deps.ServiceLabel = serviceLabel2 - })) - etcdDataSyncAgent2.SetName("etcd-datasync-" + agent2) - - // Example plugin datasync - etcdDataSync := kvdbsync.NewPlugin(kvdbsync.UseKV(&etcd.DefaultPlugin)) - - // Linux plugin - watcher := datasync.KVProtoWatchers{ - etcdDataSync, - } - linuxPlugin := linux.NewPlugin(linux.UseDeps(func(deps *linux.Deps) { - deps.Watcher = watcher - })) - - // Inject dependencies to example plugin - ep := &ExamplePlugin{ - exampleFinished: make(chan struct{}), - Deps: Deps{ - Log: logging.DefaultLogger, - ETCDDataSync: etcdDataSync, - Linux: linuxPlugin, - Agent1: etcdDataSyncAgent1, - Agent2: etcdDataSyncAgent2, - }, - } - - // Start Agent - a := agent.NewAgent( - agent.AllPlugins(ep), - agent.QuitOnClose(ep.exampleFinished), - ) - if err := a.Run(); err != nil { - log.Fatal() - } -} - -// PluginName represents name of plugin. -const PluginName = "idx-veth-cache-example" - -// ExamplePlugin demonstrates the use of the name-to-idx cache in linux plugin. -type ExamplePlugin struct { - Deps - - linuxIfIdxLocal linux_if.LinuxIfIndex - linuxIfIdxAgent1 linux_if.LinuxIfIndex - linuxIfIdxAgent2 linux_if.LinuxIfIndex - wg sync.WaitGroup - - // Fields below are used to properly finish the example. 
- exampleFinished chan struct{} -} - -// Deps is a helper struct which is grouping all dependencies injected to the plugin -type Deps struct { - Log logging.Logger - ETCDDataSync *kvdbsync.Plugin - Linux *linux.Plugin - Agent1 *kvdbsync.Plugin - Agent2 *kvdbsync.Plugin -} - -// String returns plugin name -func (plugin *ExamplePlugin) String() string { - return PluginName -} - -// Init initializes example plugin. -func (plugin *ExamplePlugin) Init() error { - // Receive linux interfaces mapping. - if plugin.Linux != nil { - plugin.linuxIfIdxLocal = plugin.Linux.GetLinuxIfIndexes() - } else { - return fmt.Errorf("linux plugin not initialized") - } - - // Run consumer. - go plugin.consume() - - // Cache the agent1/agent2 name-to-idx mapping to separate mapping within plugin example. - plugin.linuxIfIdxAgent1 = linux_if.Cache(plugin.Agent1) - plugin.linuxIfIdxAgent2 = linux_if.Cache(plugin.Agent2) - - logrus.DefaultLogger().Info("Initialization of the example plugin has completed") - - return nil -} - -// AfterInit - call Cache() -func (plugin *ExamplePlugin) AfterInit() error { - // Manually start resync (simulate vpp-agent default behaviour) - resync.DefaultPlugin.DoResync() - - // Publish test data. - plugin.publish() - - return nil -} - -// Close cleans up the resources. -func (plugin *ExamplePlugin) Close() error { - plugin.wg.Wait() - - return safeclose.Close(plugin.Agent1, plugin.Agent2, plugin.Agent1, plugin.Agent2, - plugin.linuxIfIdxLocal, plugin.linuxIfIdxAgent1, plugin.linuxIfIdxAgent2) -} - -// publish propagates example configuration to etcd. -func (plugin *ExamplePlugin) publish() error { - logrus.DefaultLogger().Infof("Putting interfaces to ETCD") - - // VETH pair in default namespace - vethDef := &veth11DefaultNs - vethDefPeer := &veth12DefaultNs - - // Publish VETH pair to agent1. 
- err := plugin.Agent1.Put(linux_intf.InterfaceKey(vethDef.Name), vethDef) - err = plugin.Agent1.Put(linux_intf.InterfaceKey(vethDefPeer.Name), vethDefPeer) - - // VETH pair in custom namespace - vethNs1 := &veth21Ns1 - vethNs2Peer := &veth22Ns2 - - // Publish VETH pair to agent2. - err = plugin.Agent2.Put(linux_intf.InterfaceKey(vethNs1.Name), vethDef) - err = plugin.Agent2.Put(linux_intf.InterfaceKey(vethNs2Peer.Name), vethNs2Peer) - - if err != nil { - logrus.DefaultLogger().Errorf("Failed to apply initial Linux&VPP configuration: %v", err) - return err - } - logrus.DefaultLogger().Info("Successfully applied initial Linux&VPP configuration") - - return err -} - -// Use the NameToIndexMapping to watch changes. -func (plugin *ExamplePlugin) consume() (err error) { - plugin.Log.Info("Watching started") - // Init chan to sent watch updates. - linuxIfIdxChan := make(chan linux_if.LinuxIfIndexDto) - // Register all agents (incl. local) to watch linux name-to-idx mapping changes. - plugin.linuxIfIdxLocal.WatchNameToIdx(PluginName, linuxIfIdxChan) - plugin.linuxIfIdxAgent1.WatchNameToIdx(PluginName, linuxIfIdxChan) - plugin.linuxIfIdxAgent2.WatchNameToIdx(PluginName, linuxIfIdxChan) - - counter := 0 - - watching := true - for watching { - select { - case ifaceIdxEvent := <-linuxIfIdxChan: - plugin.Log.Info("Event received: VETH interface ", ifaceIdxEvent.Name, - " of ", ifaceIdxEvent.RegistryTitle) - counter++ - } - // Example is expecting 3 events. - if counter == 4 { - watching = false - } - } - - // Do a lookup whether all mappings were registered. - success := plugin.lookup() - if !success { - return fmt.Errorf("idx_veth_cache example failed; one or more VETH interfaces are missing") - } - - // End the example. - plugin.Log.Infof("idx-iface-cache example finished, sending shutdown ...") - close(plugin.exampleFinished) - - return nil -} - -// Use the NameToIndexMapping to lookup changes. 
-func (plugin *ExamplePlugin) lookup() bool { - var ( - loopback bool - veth11 bool - veth12 bool - veth21 bool - veth22 bool - ) - - // Look for loopback interface. - if _, _, loopback = plugin.linuxIfIdxLocal.LookupIdx("lo"); loopback { - logrus.DefaultLogger().Info("Interface found: loopback") - } else { - logrus.DefaultLogger().Warn("Interface not found: loopback") - } - // Look for VETH 11 default namespace interface on agent1. - for i := 0; i <= 10; i++ { - if _, _, veth11 = plugin.linuxIfIdxAgent1.LookupIdx(veth11DefaultNs.Name); veth11 { - logrus.DefaultLogger().Info("Interface found on agent1: veth11Def") - break - } else if i == 3 { - logrus.DefaultLogger().Warn("Interface not found on agent1: veth11Def") - } else { - // Try several times in case cache is not updated yet. - time.Sleep(1 * time.Second) - continue - } - } - // Look for VETH 12 default namespace interface on agent1. - for i := 0; i <= 3; i++ { - if _, _, veth12 = plugin.linuxIfIdxAgent1.LookupIdx(veth12DefaultNs.Name); veth12 { - logrus.DefaultLogger().Info("Interface found on agent1: veth12Def") - break - } else if i == 3 { - logrus.DefaultLogger().Warn("Interface not found on agent1: veth12Def") - } else { - // Try several times in case cache is not updated yet. - time.Sleep(1 * time.Second) - continue - } - } - // Look for VETH 21 ns1 namespace interface on agent2. - for i := 0; i <= 3; i++ { - if _, _, veth21 = plugin.linuxIfIdxAgent2.LookupIdx(veth21Ns1.Name); veth21 { - logrus.DefaultLogger().Info("Interface found on agent2: veth21ns1") - break - } else if i == 3 { - logrus.DefaultLogger().Warn("Interface not found on agent2 : veth21ns1") - } else { - // Try several times in case cache is not updated yet. - time.Sleep(1 * time.Second) - continue - } - } - // Look for VETH 22 ns2 namespace interface on agent2. 
- for i := 0; i <= 3; i++ { - if _, _, veth22 = plugin.linuxIfIdxAgent2.LookupIdx(veth22Ns2.Name); veth22 { - logrus.DefaultLogger().Info("Interface found on agent2: veth22ns2") - break - } else if i == 3 { - logrus.DefaultLogger().Warn("Interface not found on agent2: veth22ns2") - } else { - // Try several times in case cache is not updated yet. - time.Sleep(1 * time.Second) - continue - } - } - - if loopback && veth11 && veth12 && veth21 && veth22 { - return true - } - return false -} - -// Interface data -var ( - // veth11DefaultNs is one member of the veth11-veth12DefaultNs VETH pair, put into the default namespace - veth11DefaultNs = linux_intf.LinuxInterfaces_Interface{ - Name: "veth1", - Type: linux_intf.LinuxInterfaces_VETH, - Enabled: true, - Veth: &linux_intf.LinuxInterfaces_Interface_Veth{ - PeerIfName: "veth12DefaultNs", - }, - IpAddresses: []string{"10.0.0.1/24"}, - } - // veth12DefaultNs is one member of the veth11-veth12DefaultNs VETH pair, put into the default namespace - veth12DefaultNs = linux_intf.LinuxInterfaces_Interface{ - Name: "veth12DefaultNs", - Type: linux_intf.LinuxInterfaces_VETH, - Enabled: true, - Veth: &linux_intf.LinuxInterfaces_Interface_Veth{ - PeerIfName: "veth11", - }, - } - // veth11DefaultNs is one member of the veth21-veth22 VETH pair, put into the ns1. - veth21Ns1 = linux_intf.LinuxInterfaces_Interface{ - Name: "veth11", - Type: linux_intf.LinuxInterfaces_VETH, - Enabled: true, - Veth: &linux_intf.LinuxInterfaces_Interface_Veth{ - PeerIfName: "veth12DefaultNs", - }, - IpAddresses: []string{"10.0.0.1/24"}, - Namespace: &linux_intf.LinuxInterfaces_Interface_Namespace{ - Type: linux_intf.LinuxInterfaces_Interface_Namespace_NAMED_NS, - Name: "ns1", - }, - } - // veth22Ns2 is one member of the veth21-veth22 VETH pair, put into the namespace "ns2". 
- veth22Ns2 = linux_intf.LinuxInterfaces_Interface{ - Name: "veth21", - Type: linux_intf.LinuxInterfaces_VETH, - Enabled: true, - Veth: &linux_intf.LinuxInterfaces_Interface_Veth{ - PeerIfName: "veth22", - }, - IpAddresses: []string{"10.0.0.2/24"}, - Namespace: &linux_intf.LinuxInterfaces_Interface_Namespace{ - Type: linux_intf.LinuxInterfaces_Interface_Namespace_NAMED_NS, - Name: "ns2", - }, - } -) diff --git a/examples/kvscheduler/acl/main.go b/examples/kvscheduler/acl/main.go new file mode 100644 index 0000000000..70b8dba6b9 --- /dev/null +++ b/examples/kvscheduler/acl/main.go @@ -0,0 +1,183 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "fmt" + "log" + "time" + + "github.com/ligato/cn-infra/agent" + + acl "github.com/ligato/vpp-agent/api/models/vpp/acl" + interfaces "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + "github.com/ligato/vpp-agent/clientv2/vpp/localclient" + "github.com/ligato/vpp-agent/plugins/orchestrator" + vpp_aclplugin "github.com/ligato/vpp-agent/plugins/vpp/aclplugin" + vpp_ifplugin "github.com/ligato/vpp-agent/plugins/vpp/ifplugin" +) + +/* + This example demonstrates KVScheduler-based ACLPlugin. 
+*/ + +func main() { + ep := &ExamplePlugin{ + Orchestrator: &orchestrator.DefaultPlugin, + VPPIfPlugin: &vpp_ifplugin.DefaultPlugin, + VPPACLPlugin: &vpp_aclplugin.DefaultPlugin, + } + + a := agent.NewAgent( + agent.AllPlugins(ep), + ) + if err := a.Run(); err != nil { + log.Fatal(err) + } +} + +// ExamplePlugin is the main plugin which +// handles resync and changes in this example. +type ExamplePlugin struct { + VPPIfPlugin *vpp_ifplugin.IfPlugin + VPPACLPlugin *vpp_aclplugin.ACLPlugin + Orchestrator *orchestrator.Plugin +} + +// String returns plugin name +func (p *ExamplePlugin) String() string { + return "acl-example" +} + +// Init handles initialization phase. +func (p *ExamplePlugin) Init() error { + return nil +} + +// AfterInit handles phase after initialization. +func (p *ExamplePlugin) AfterInit() error { + go testLocalClientWithScheduler() + return nil +} + +// Close cleans up the resources. +func (p *ExamplePlugin) Close() error { + return nil +} + +func testLocalClientWithScheduler() { + // initial resync + time.Sleep(time.Second * 2) + fmt.Println("=== RESYNC ===") + + txn := localclient.DataResyncRequest("example") + err := txn. + Interface(memif0). + ACL(acl0). + ACL(acl1). + ACL(acl3). + Send().ReceiveReply() + if err != nil { + fmt.Println(err) + return + } + + // data change + time.Sleep(time.Second * 10) + fmt.Println("=== CHANGE ===") + + acl1.Interfaces = nil + acl0.Interfaces.Egress = nil + acl3.Rules[0].IpRule.Ip.SourceNetwork = "0.0.0.0/0" // this is actually equivalent to unspecified field + + txn2 := localclient.DataChangeRequest("example") + err = txn2.Put(). + ACL(acl0). + ACL(acl1). + ACL(acl3). 
+ Send().ReceiveReply() + if err != nil { + fmt.Println(err) + return + } +} + +var ( + memif0 = &interfaces.Interface{ + Name: "memif0", + Enabled: true, + Type: interfaces.Interface_MEMIF, + Link: &interfaces.Interface_Memif{ + Memif: &interfaces.MemifLink{ + Id: 1, + Master: true, + Secret: "secret", + SocketFilename: "/tmp/memif1.sock", + }, + }, + } + acl0 = &acl.ACL{ + Name: "acl0", + Rules: []*acl.ACL_Rule{ + { + Action: acl.ACL_Rule_PERMIT, + IpRule: &acl.ACL_Rule_IpRule{ + Ip: &acl.ACL_Rule_IpRule_Ip{ + SourceNetwork: "10.0.0.0/24", + DestinationNetwork: "20.0.0.0/24", + }, + }, + }, + }, + Interfaces: &acl.ACL_Interfaces{ + Ingress: []string{"memif0"}, + Egress: []string{"memif0"}, + }, + } + acl1 = &acl.ACL{ + Name: "acl1", + Rules: []*acl.ACL_Rule{ + { + Action: acl.ACL_Rule_PERMIT, + MacipRule: &acl.ACL_Rule_MacIpRule{ + SourceAddress: "192.168.0.1", + SourceAddressPrefix: 16, + SourceMacAddress: "b2:74:8c:12:67:d2", + SourceMacAddressMask: "ff:ff:ff:ff:00:00", + }, + }, + }, + Interfaces: &acl.ACL_Interfaces{ + Ingress: []string{"memif0"}, + }, + } + acl3 = &acl.ACL{ + Name: "acl3", + Rules: []*acl.ACL_Rule{ + { + Action: acl.ACL_Rule_DENY, + IpRule: &acl.ACL_Rule_IpRule{ + Ip: &acl.ACL_Rule_IpRule_Ip{ + // SourceNetwork is unspecified (ANY) + DestinationNetwork: "30.0.0.0/8", + }, + }, + }, + }, + Interfaces: &acl.ACL_Interfaces{ + Egress: []string{"memif0"}, + }, + } +) diff --git a/examples/kvscheduler/interconnect/main.go b/examples/kvscheduler/interconnect/main.go new file mode 100644 index 0000000000..a880f868a4 --- /dev/null +++ b/examples/kvscheduler/interconnect/main.go @@ -0,0 +1,311 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "fmt" + "log" + "time" + + "github.com/ligato/cn-infra/agent" + "github.com/ligato/vpp-agent/clientv2/linux/localclient" + "github.com/ligato/vpp-agent/plugins/orchestrator" + + "github.com/ligato/vpp-agent/api/models/linux/interfaces" + "github.com/ligato/vpp-agent/api/models/linux/l3" + linux_ns "github.com/ligato/vpp-agent/api/models/linux/namespace" + "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + linux_ifplugin "github.com/ligato/vpp-agent/plugins/linux/ifplugin" + linuxifaceidx "github.com/ligato/vpp-agent/plugins/linux/ifplugin/ifaceidx" + linux_l3plugin "github.com/ligato/vpp-agent/plugins/linux/l3plugin" + linux_nsplugin "github.com/ligato/vpp-agent/plugins/linux/nsplugin" + vpp_ifplugin "github.com/ligato/vpp-agent/plugins/vpp/ifplugin" + vppifaceidx "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/ifaceidx" +) + +/* + This example demonstrates KVScheduler-based VPP ifplugin, Linux ifplugin and Linux l3plugin. 
+*/ + +func main() { + // Set inter-dependency between VPP & Linux plugins + vpp_ifplugin.DefaultPlugin.LinuxIfPlugin = &linux_ifplugin.DefaultPlugin + vpp_ifplugin.DefaultPlugin.NsPlugin = &linux_nsplugin.DefaultPlugin + linux_ifplugin.DefaultPlugin.VppIfPlugin = &vpp_ifplugin.DefaultPlugin + + ep := &ExamplePlugin{ + Orchestrator: &orchestrator.DefaultPlugin, + LinuxIfPlugin: &linux_ifplugin.DefaultPlugin, + LinuxL3Plugin: &linux_l3plugin.DefaultPlugin, + VPPIfPlugin: &vpp_ifplugin.DefaultPlugin, + } + + a := agent.NewAgent( + agent.AllPlugins(ep), + ) + if err := a.Run(); err != nil { + log.Fatal(err) + } +} + +// ExamplePlugin is the main plugin which +// handles resync and changes in this example. +type ExamplePlugin struct { + LinuxIfPlugin *linux_ifplugin.IfPlugin + LinuxL3Plugin *linux_l3plugin.L3Plugin + VPPIfPlugin *vpp_ifplugin.IfPlugin + Orchestrator *orchestrator.Plugin +} + +// String returns plugin name +func (p *ExamplePlugin) String() string { + return "vpp-linux-example" +} + +// Init handles initialization phase. +func (p *ExamplePlugin) Init() error { + return nil +} + +// AfterInit handles phase after initialization. +func (p *ExamplePlugin) AfterInit() error { + go testLocalClientWithScheduler( + p.VPPIfPlugin.GetInterfaceIndex(), + p.LinuxIfPlugin.GetInterfaceIndex(), + ) + return nil +} + +// Close cleans up the resources. +func (p *ExamplePlugin) Close() error { + return nil +} + +func testLocalClientWithScheduler( + vppIfIndex vppifaceidx.IfaceMetadataIndex, + linuxIfIndex linuxifaceidx.LinuxIfMetadataIndex, +) { + // initial resync + time.Sleep(time.Second * 2) + fmt.Println("=== RESYNC ===") + + txn := localclient.DataResyncRequest("example") + err := txn. + LinuxInterface(veth2). + LinuxInterface(veth1). + LinuxInterface(linuxTap). + LinuxArpEntry(arpForVeth1). + LinuxArpEntry(arpForLinuxTap). + LinuxRoute(linkRouteToMs1). + LinuxRoute(routeToMs1). + LinuxRoute(linkRouteToMs2). + LinuxRoute(routeToMs2). + VppInterface(afpacket). 
+ VppInterface(vppTap). + Send().ReceiveReply() + if err != nil { + fmt.Println(err) + return + } + + // data change + time.Sleep(time.Second * 10) + fmt.Println("=== CHANGE ===") + + veth1.Enabled = false + + txn2 := localclient.DataChangeRequest("example") + err = txn2. + Put(). + LinuxInterface(veth1). + /*Delete(). + VppInterface(vppTap.Name).*/ + Send().ReceiveReply() + if err != nil { + fmt.Println(err) + return + } + + // test Linux interface metadata map + linuxIfMeta, exists := linuxIfIndex.LookupByName(veth1LogicalName) + fmt.Printf("Linux interface %s: found=%t, meta=%v\n", veth1LogicalName, exists, linuxIfMeta) + linuxIfMeta, exists = linuxIfIndex.LookupByName(linuxTapLogicalName) + fmt.Printf("Linux interface %s: found=%t, meta=%v\n", linuxTapLogicalName, exists, linuxIfMeta) + + // test VPP interface metadata map + vppIfMeta, exists := vppIfIndex.LookupByName(afPacketLogicalName) + fmt.Printf("VPP interface %s: found=%t, meta=%v\n", afPacketLogicalName, exists, vppIfMeta) + vppIfMeta, exists = vppIfIndex.LookupByName(vppTapLogicalName) + fmt.Printf("VPP interface %s: found=%t, meta=%v\n", vppTapLogicalName, exists, vppIfMeta) +} + +const ( + veth1LogicalName = "myVETH1" + + veth2LogicalName = "myVETH2" + veth2HostName = "veth2" + + afPacketLogicalName = "myAFPacket" + + afPacketIPAddr = "10.11.1.2" + + vppTapLogicalName = "myVPPTap" + vppTapIPAddr = "10.11.2.2" + vppTapHwAddr = "b3:12:12:45:A7:B7" + + linuxTapLogicalName = "myLinuxTAP" + + linuxTapIPAddr = "10.11.2.1" + linuxTapHwAddr = "88:88:88:88:88:88" + + microserviceNetMask = "/30" + mycroservice1 = "microservice1" + mycroservice2 = "microservice2" + microservice1Net = "10.11.1.0" + microserviceNetMask + microservice2Net = "10.11.2.0" + microserviceNetMask + + mycroservice2Mtu = 1700 + + routeMetric = 50 +) + +var ( + /* microservice1 <-> VPP */ + veth1 = &linux_interfaces.Interface{ + Name: veth1LogicalName, + Type: linux_interfaces.Interface_VETH, + Enabled: true, + PhysAddress: 
"66:66:66:66:66:66", + IpAddresses: []string{ + ("10.11.1.1") + microserviceNetMask, + }, + Mtu: 1800, + HostIfName: "veth1", + Link: &linux_interfaces.Interface_Veth{ + Veth: &linux_interfaces.VethLink{PeerIfName: veth2LogicalName}, + }, + Namespace: &linux_ns.NetNamespace{ + Type: linux_ns.NetNamespace_MICROSERVICE, + Reference: mycroservice1, + }, + } + + arpForVeth1 = &linux_l3.ARPEntry{ + Interface: veth1LogicalName, + IpAddress: vppTapIPAddr, + HwAddress: vppTapHwAddr, + } + + linkRouteToMs2 = &linux_l3.Route{ + OutgoingInterface: veth1LogicalName, + Scope: linux_l3.Route_LINK, + DstNetwork: vppTapIPAddr + "/32", + } + + routeToMs2 = &linux_l3.Route{ + OutgoingInterface: veth1LogicalName, + Scope: linux_l3.Route_GLOBAL, + DstNetwork: microservice2Net, + GwAddr: vppTapIPAddr, + Metric: routeMetric, + } + + veth2 = &linux_interfaces.Interface{ + Name: veth2LogicalName, + Type: linux_interfaces.Interface_VETH, + Enabled: true, + Mtu: 1800, + HostIfName: veth2HostName, + Link: &linux_interfaces.Interface_Veth{ + Veth: &linux_interfaces.VethLink{PeerIfName: veth1LogicalName}, + }, + } + + afpacket = &vpp_interfaces.Interface{ + Name: afPacketLogicalName, + Type: vpp_interfaces.Interface_AF_PACKET, + Enabled: true, + PhysAddress: "a7:35:45:55:65:75", + IpAddresses: []string{ + afPacketIPAddr + microserviceNetMask, + }, + Mtu: 1800, + Link: &vpp_interfaces.Interface_Afpacket{ + Afpacket: &vpp_interfaces.AfpacketLink{ + HostIfName: veth2HostName, + }, + }, + } + + /* microservice2 <-> VPP */ + + linuxTap = &linux_interfaces.Interface{ + Name: linuxTapLogicalName, + Type: linux_interfaces.Interface_TAP_TO_VPP, + Enabled: true, + PhysAddress: linuxTapHwAddr, + IpAddresses: []string{ + linuxTapIPAddr + microserviceNetMask, + }, + Mtu: mycroservice2Mtu, + HostIfName: "tap_to_vpp", + Link: &linux_interfaces.Interface_Tap{ + Tap: &linux_interfaces.TapLink{ + VppTapIfName: vppTapLogicalName, + }, + }, + /*Namespace: &linux_ns.NetNamespace{ + Type: 
linux_ns.NetNamespace_MICROSERVICE, + Reference: mycroservice2, + },*/ + } + + vppTap = &vpp_interfaces.Interface{ + Name: vppTapLogicalName, + Type: vpp_interfaces.Interface_TAP, + Enabled: true, + PhysAddress: vppTapHwAddr, + IpAddresses: []string{ + vppTapIPAddr + microserviceNetMask, + }, + Mtu: mycroservice2Mtu, + Link: &vpp_interfaces.Interface_Tap{ + Tap: &vpp_interfaces.TapLink{ + Version: 2, + //ToMicroservice: mycroservice2, + }, + }, + } + + arpForLinuxTap = &linux_l3.ARPEntry{ + Interface: linuxTapLogicalName, + IpAddress: afPacketIPAddr, + HwAddress: "a7:35:45:55:65:75", + } + + linkRouteToMs1 = &linux_l3.Route{ + OutgoingInterface: linuxTapLogicalName, + Scope: linux_l3.Route_LINK, + DstNetwork: afPacketIPAddr + "/32", + } + + routeToMs1 = &linux_l3.Route{ + OutgoingInterface: linuxTapLogicalName, + Scope: linux_l3.Route_GLOBAL, + DstNetwork: microservice1Net, + GwAddr: afPacketIPAddr, + Metric: routeMetric, + } +) diff --git a/examples/kvscheduler/l2/main.go b/examples/kvscheduler/l2/main.go new file mode 100644 index 0000000000..fce586a792 --- /dev/null +++ b/examples/kvscheduler/l2/main.go @@ -0,0 +1,330 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "fmt" + "log" + "time" + + "github.com/ligato/cn-infra/agent" + + linux_interfaces "github.com/ligato/vpp-agent/api/models/linux/interfaces" + linux_ns "github.com/ligato/vpp-agent/api/models/linux/namespace" + vpp_interfaces "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + vpp_l2 "github.com/ligato/vpp-agent/api/models/vpp/l2" + "github.com/ligato/vpp-agent/clientv2/linux/localclient" + linux_ifplugin "github.com/ligato/vpp-agent/plugins/linux/ifplugin" + linux_l3plugin "github.com/ligato/vpp-agent/plugins/linux/l3plugin" + "github.com/ligato/vpp-agent/plugins/orchestrator" + vpp_ifplugin "github.com/ligato/vpp-agent/plugins/vpp/ifplugin" + vpp_l2plugin "github.com/ligato/vpp-agent/plugins/vpp/l2plugin" +) + +/* + This example demonstrates L2 Plugin v.2. +*/ + +func main() { + // Set inter-dependency between VPP & Linux plugins + vpp_ifplugin.DefaultPlugin.LinuxIfPlugin = &linux_ifplugin.DefaultPlugin + linux_ifplugin.DefaultPlugin.VppIfPlugin = &vpp_ifplugin.DefaultPlugin + + ep := &ExamplePlugin{ + Orchestrator: &orchestrator.DefaultPlugin, + LinuxIfPlugin: &linux_ifplugin.DefaultPlugin, + LinuxL3Plugin: &linux_l3plugin.DefaultPlugin, + VPPIfPlugin: &vpp_ifplugin.DefaultPlugin, + VPPL2Plugin: &vpp_l2plugin.DefaultPlugin, + } + + a := agent.NewAgent( + agent.AllPlugins(ep), + ) + if err := a.Run(); err != nil { + log.Fatal(err) + } +} + +// ExamplePlugin is the main plugin which +// handles resync and changes in this example. +type ExamplePlugin struct { + LinuxIfPlugin *linux_ifplugin.IfPlugin + LinuxL3Plugin *linux_l3plugin.L3Plugin + VPPIfPlugin *vpp_ifplugin.IfPlugin + VPPL2Plugin *vpp_l2plugin.L2Plugin + Orchestrator *orchestrator.Plugin +} + +// String returns plugin name +func (p *ExamplePlugin) String() string { + return "l2-example" +} + +// Init handles initialization phase. +func (p *ExamplePlugin) Init() error { + return nil +} + +// AfterInit handles phase after initialization. 
+func (p *ExamplePlugin) AfterInit() error { + go testLocalClientWithScheduler() + return nil +} + +// Close cleans up the resources. +func (p *ExamplePlugin) Close() error { + return nil +} + +func testLocalClientWithScheduler() { + // initial resync + time.Sleep(time.Second * 2) + fmt.Println("=== RESYNC (using bridge domain) ===") + + txn := localclient.DataResyncRequest("example") + err := txn. + LinuxInterface(veth2). + LinuxInterface(veth1). + LinuxInterface(linuxTap). + VppInterface(afpacket). + VppInterface(vppTap). + VppInterface(bviLoop). + VppInterface(loop2). + BD(bd). + BDFIB(fibForLoop). + BDFIB(fibForTAP). + BDFIB(fibForVETH). + BDFIB(dropFIB). + Send().ReceiveReply() + if err != nil { + fmt.Println(err) + return + } + + // data changes + + time.Sleep(time.Second * 10) + fmt.Printf("=== CHANGE (switching to XConnect) ===\n") + + txn3 := localclient.DataChangeRequest("example") + err = txn3.Delete(). + BD(bd.Name). // FIBs will be pending + Put(). + XConnect(xConnectMs1ToMs2). + XConnect(xConnectMs2ToMs1). + Send().ReceiveReply() + if err != nil { + fmt.Println(err) + return + } +} + +const ( + bdNetPrefix = "10.11.1." 
+ bdNetMask = "/24" + + veth1LogicalName = "myVETH1" + veth1HostName = "veth1" + veth1IPAddr = bdNetPrefix + "1" + veth1HwAddr = "66:66:66:66:66:66" + + veth2LogicalName = "myVETH2" + veth2HostName = "veth2" + + afPacketLogicalName = "myAFPacket" + afPacketHwAddr = "a7:35:45:55:65:75" + + vppTapLogicalName = "myVPPTap" + vppTapHwAddr = "b3:12:12:45:A7:B7" + vppTapVersion = 2 + + linuxTapLogicalName = "myLinuxTAP" + linuxTapHostName = "tap_to_vpp" + linuxTapIPAddr = bdNetPrefix + "2" + linuxTapHwAddr = "88:88:88:88:88:88" + + mycroservice1 = "microservice1" + mycroservice2 = "microservice2" + + bviLoopName = "myLoopback1" + bviLoopIP = bdNetPrefix + "3" + bviLoopHwAddr = "cd:cd:cd:cd:cd:cd" + + loop2Name = "myLoopback2" + loop2HwAddr = "ef:ef:ef:ef:ef:ef" + + bdName = "myBridgeDomain" + bdFlood = true + bdUnknownUnicastFlood = true + bdForward = true + bdLearn = false /* Learning turned off, FIBs are needed for connectivity */ + bdArpTermination = true + bdMacAge = 0 +) + +var ( + /* microservice1 <-> VPP */ + + veth1 = &linux_interfaces.Interface{ + Name: veth1LogicalName, + Type: linux_interfaces.Interface_VETH, + Enabled: true, + PhysAddress: veth1HwAddr, + IpAddresses: []string{ + veth1IPAddr + bdNetMask, + }, + HostIfName: veth1HostName, + Link: &linux_interfaces.Interface_Veth{ + Veth: &linux_interfaces.VethLink{PeerIfName: veth2LogicalName}, + }, + Namespace: &linux_ns.NetNamespace{ + Type: linux_ns.NetNamespace_MICROSERVICE, + Reference: mycroservice1, + }, + } + veth2 = &linux_interfaces.Interface{ + Name: veth2LogicalName, + Type: linux_interfaces.Interface_VETH, + Enabled: true, + HostIfName: veth2HostName, + Link: &linux_interfaces.Interface_Veth{ + Veth: &linux_interfaces.VethLink{PeerIfName: veth1LogicalName}, + }, + } + afpacket = &vpp_interfaces.Interface{ + Name: afPacketLogicalName, + Type: vpp_interfaces.Interface_AF_PACKET, + Enabled: true, + PhysAddress: afPacketHwAddr, + Link: &vpp_interfaces.Interface_Afpacket{ + Afpacket: 
&vpp_interfaces.AfpacketLink{ + HostIfName: veth2HostName, + }, + }, + } + + /* microservice2 <-> VPP */ + + linuxTap = &linux_interfaces.Interface{ + Name: linuxTapLogicalName, + Type: linux_interfaces.Interface_TAP_TO_VPP, + Enabled: true, + PhysAddress: linuxTapHwAddr, + IpAddresses: []string{ + linuxTapIPAddr + bdNetMask, + }, + HostIfName: linuxTapHostName, + Link: &linux_interfaces.Interface_Tap{ + Tap: &linux_interfaces.TapLink{ + VppTapIfName: vppTapLogicalName, + }, + }, + Namespace: &linux_ns.NetNamespace{ + Type: linux_ns.NetNamespace_MICROSERVICE, + Reference: mycroservice2, + }, + } + vppTap = &vpp_interfaces.Interface{ + Name: vppTapLogicalName, + Type: vpp_interfaces.Interface_TAP, + Enabled: true, + PhysAddress: vppTapHwAddr, + Link: &vpp_interfaces.Interface_Tap{ + Tap: &vpp_interfaces.TapLink{ + Version: vppTapVersion, + ToMicroservice: mycroservice2, + }, + }, + } + + /* Bridge domain */ + + bd = &vpp_l2.BridgeDomain{ + Name: bdName, + Flood: bdFlood, + UnknownUnicastFlood: bdUnknownUnicastFlood, + Forward: bdForward, + Learn: bdLearn, + ArpTermination: bdArpTermination, + MacAge: bdMacAge, + Interfaces: []*vpp_l2.BridgeDomain_Interface{ + { + Name: vppTapLogicalName, + }, + { + Name: afPacketLogicalName, + }, + { + Name: bviLoopName, + BridgedVirtualInterface: true, + }, + }, + } + bviLoop = &vpp_interfaces.Interface{ + Name: bviLoopName, + Type: vpp_interfaces.Interface_SOFTWARE_LOOPBACK, + Enabled: true, + PhysAddress: bviLoopHwAddr, + IpAddresses: []string{ + bviLoopIP + bdNetMask, + }, + } + loop2 = &vpp_interfaces.Interface{ + Name: loop2Name, + Type: vpp_interfaces.Interface_SOFTWARE_LOOPBACK, + Enabled: true, + PhysAddress: loop2HwAddr, + } + + /* FIB entries */ + + fibForLoop = &vpp_l2.FIBEntry{ + PhysAddress: bviLoopHwAddr, + BridgeDomain: bdName, + Action: vpp_l2.FIBEntry_FORWARD, + OutgoingInterface: bviLoopName, + BridgedVirtualInterface: true, + StaticConfig: true, + } + fibForVETH = &vpp_l2.FIBEntry{ + PhysAddress: veth1HwAddr, + 
BridgeDomain: bdName, + Action: vpp_l2.FIBEntry_FORWARD, + OutgoingInterface: afPacketLogicalName, + } + fibForTAP = &vpp_l2.FIBEntry{ + PhysAddress: linuxTapHwAddr, + BridgeDomain: bdName, + Action: vpp_l2.FIBEntry_FORWARD, + OutgoingInterface: vppTapLogicalName, + } + dropFIB = &vpp_l2.FIBEntry{ + PhysAddress: loop2HwAddr, + BridgeDomain: bdName, + Action: vpp_l2.FIBEntry_DROP, + } + + /* XConnect */ + + xConnectMs1ToMs2 = &vpp_l2.XConnectPair{ + ReceiveInterface: afPacketLogicalName, + TransmitInterface: vppTapLogicalName, + } + xConnectMs2ToMs1 = &vpp_l2.XConnectPair{ + ReceiveInterface: vppTapLogicalName, + TransmitInterface: afPacketLogicalName, + } +) diff --git a/examples/kvscheduler/nat/main.go b/examples/kvscheduler/nat/main.go new file mode 100644 index 0000000000..4e2595f62b --- /dev/null +++ b/examples/kvscheduler/nat/main.go @@ -0,0 +1,679 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "fmt" + "log" + "time" + + "github.com/ligato/cn-infra/agent" + "github.com/ligato/vpp-agent/plugins/orchestrator" + + "github.com/ligato/vpp-agent/api/models/linux/interfaces" + "github.com/ligato/vpp-agent/api/models/linux/l3" + linux_ns "github.com/ligato/vpp-agent/api/models/linux/namespace" + "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + "github.com/ligato/vpp-agent/api/models/vpp/nat" + "github.com/ligato/vpp-agent/clientv2/linux/localclient" + linux_ifplugin "github.com/ligato/vpp-agent/plugins/linux/ifplugin" + linux_l3plugin "github.com/ligato/vpp-agent/plugins/linux/l3plugin" + linux_nsplugin "github.com/ligato/vpp-agent/plugins/linux/nsplugin" + vpp_ifplugin "github.com/ligato/vpp-agent/plugins/vpp/ifplugin" + vpp_natplugin "github.com/ligato/vpp-agent/plugins/vpp/natplugin" +) + +/* + This example demonstrates natplugin v.2 + + Deploy microservices with servers: + + host-term1$ docker run -it --rm -e MICROSERVICE_LABEL=microservice-server1 lencomilan/ubuntu /bin/bash + host-term1$ nc -l -p 8080 & + host-term1$ nc -u -l -p 9090 & + + host-term2$ docker run -it --rm -e MICROSERVICE_LABEL=microservice-server2 lencomilan/ubuntu /bin/bash + host-term2$ nc -l -p 8081 & + host-term2$ nc -u -l -p 9091 & + + Test DNATs from microservice-client: + + host-term3$ docker run -it --rm -e MICROSERVICE_LABEL=microservice-client lencomilan/ubuntu /bin/bash + # TCP Service: + host-term3$ nc 10.36.10.1 80 + host-term3$ nc 10.36.10.2 80 + host-term3$ nc 10.36.10.3 80 + # UDP Service: + host-term3$ nc -u 10.36.10.10 90 + host-term3$ nc -u 10.36.10.11 90 + host-term3$ nc -u 10.36.10.12 90 + + Run server in the host: + + host-term4$ nc -l -p 8080 & + + # Accessing server 192.168.13.10:8080 running in the host should trigger + # source-NAT in the post-routing, i.e. 
no need to route microservices from the host: + host-term3$ nc 192.168.13.10 8080 # host-term3 = microservice-client +*/ + +func main() { + // Set inter-dependency between VPP & Linux plugins + vpp_ifplugin.DefaultPlugin.LinuxIfPlugin = &linux_ifplugin.DefaultPlugin + vpp_ifplugin.DefaultPlugin.NsPlugin = &linux_nsplugin.DefaultPlugin + linux_ifplugin.DefaultPlugin.VppIfPlugin = &vpp_ifplugin.DefaultPlugin + + ep := &ExamplePlugin{ + Orchestrator: &orchestrator.DefaultPlugin, + LinuxIfPlugin: &linux_ifplugin.DefaultPlugin, + LinuxL3Plugin: &linux_l3plugin.DefaultPlugin, + VPPIfPlugin: &vpp_ifplugin.DefaultPlugin, + VPPNATPlugin: &vpp_natplugin.DefaultPlugin, + } + + a := agent.NewAgent( + agent.AllPlugins(ep), + ) + if err := a.Run(); err != nil { + log.Fatal(err) + } +} + +// ExamplePlugin is the main plugin which +// handles resync and changes in this example. +type ExamplePlugin struct { + LinuxIfPlugin *linux_ifplugin.IfPlugin + LinuxL3Plugin *linux_l3plugin.L3Plugin + VPPIfPlugin *vpp_ifplugin.IfPlugin + VPPNATPlugin *vpp_natplugin.NATPlugin + Orchestrator *orchestrator.Plugin +} + +// String returns plugin name +func (p *ExamplePlugin) String() string { + return "vpp-nat-example" +} + +// Init handles initialization phase. +func (p *ExamplePlugin) Init() error { + return nil +} + +// AfterInit handles phase after initialization. +func (p *ExamplePlugin) AfterInit() error { + go testLocalClientWithScheduler() + return nil +} + +// Close cleans up the resources. +func (p *ExamplePlugin) Close() error { + return nil +} + +func testLocalClientWithScheduler() { + // initial resync + time.Sleep(time.Second * 2) + fmt.Println("=== RESYNC ===") + + txn := localclient.DataResyncRequest("example") + err := txn. + LinuxInterface(hostLinuxTap). + LinuxInterface(clientLinuxTap). + LinuxInterface(server1LinuxTap). + LinuxInterface(server2LinuxTap). + LinuxRoute(hostRouteToServices). + LinuxRoute(clientRouteToServices). + LinuxRoute(clientRouteToHost). 
+ LinuxRoute(server1RouteToServices). + LinuxRoute(server1RouteToHost). + LinuxRoute(server1RouteToClient). + LinuxRoute(server2RouteToServices). + LinuxRoute(server2RouteToHost). + LinuxRoute(server2RouteToClient). + VppInterface(hostVPPTap). + VppInterface(clientVPPTap). + VppInterface(server1VPPTap). + VppInterface(server2VPPTap). + NAT44Global(natGlobal). + DNAT44(tcpServiceDNAT). + DNAT44(udpServiceDNAT). + DNAT44(idDNAT). + DNAT44(externalIfaceDNAT). + DNAT44(emptyDNAT). + DNAT44(addrFromPoolDNAT). + Send().ReceiveReply() + if err != nil { + fmt.Println(err) + return + } + + // data change + time.Sleep(time.Second * 10) + fmt.Println("=== CHANGE ===") + + txn2 := localclient.DataChangeRequest("example") + err = txn2.Put(). + Delete(). + NAT44Global(). + DNAT44(udpServiceDNAT.Label). + Send().ReceiveReply() + if err != nil { + fmt.Println(err) + return + } + +} + +const ( + mycroserviceClient = "microservice-client" + microserviceClientNetPrefix = "10.11.1." + mycroserviceServer1 = "microservice-server1" + microserviceServer1NetPrefix = "10.11.2." + mycroserviceServer2 = "microservice-server2" + microserviceServer2NetPrefix = "10.11.3." + microserviceNetMask = "/30" + + hostNetPrefix = "192.168.13." 
+ hostNetMask = "/24" + + vppTapHostLogicalName = "vpp-tap-host" + vppTapHostIPAddr = hostNetPrefix + "10" + vppTapHostVersion = 2 + + vppTapClientLogicalName = "vpp-tap-client" + vppTapClientIPAddr = microserviceClientNetPrefix + "1" + vppTapClientVersion = 2 + + vppTapServer1LogicalName = "vpp-tap-server1" + vppTapServer1IPAddr = microserviceServer1NetPrefix + "1" + vppTapServer1Version = 1 + + vppTapServer2LogicalName = "vpp-tap-server2" + vppTapServer2IPAddr = microserviceServer2NetPrefix + "1" + vppTapServer2Version = 1 + + linuxTapHostLogicalName = "linux-tap-host" + linuxTapHostIPAddr = hostNetPrefix + "20" + + linuxTapClientLogicalName = "linux-tap-client" + linuxTapClientIPAddr = microserviceClientNetPrefix + "2" + + linuxTapServer1LogicalName = "linux-tap-server1" + linuxTapServer1IPAddr = microserviceServer1NetPrefix + "2" + + linuxTapServer2LogicalName = "linux-tap-server2" + linuxTapServer2IPAddr = microserviceServer2NetPrefix + "2" + + linuxTapHostName = "tap_to_vpp" + + serviceNetPrefix = "10.36.10." 
+ serviceNetMask = "/24" + + tcpServiceLabel = "tcp-service" + tcpServiceExternalIP1 = serviceNetPrefix + "1" + tcpServiceExternalIP2 = serviceNetPrefix + "2" + tcpServiceExternalIP3 = serviceNetPrefix + "3" + tcpServiceExternalPort = 80 + tcpServiceLocalPortServer1 = 8080 + tcpServiceLocalPortServer2 = 8081 + + udpServiceLabel = "udp-service" + udpServiceExternalIP1 = serviceNetPrefix + "10" + udpServiceExternalIP2 = serviceNetPrefix + "11" + udpServiceExternalIP3 = serviceNetPrefix + "12" + udpServiceExternalPort = 90 + udpServiceLocalPortServer1 = 9090 + udpServiceLocalPortServer2 = 9091 + + idDNATLabel = "id-dnat" + idDNATPort = 7777 + + extIfaceDNATLabel = "external-interfaces" + extIfaceDNATExternalPort = 3333 + extIfaceDNATLocalPort = 4444 + + addrFromPoolDNATLabel = "external-address-from-pool" + addrFromPoolDNATPort = 6000 + + emptyDNATLabel = "empty-dnat" + + natPoolAddr1 = hostNetPrefix + "100" + natPoolAddr2 = hostNetPrefix + "200" + natPoolAddr3 = hostNetPrefix + "250" +) + +var ( + /* host <-> VPP */ + + hostLinuxTap = &linux_interfaces.Interface{ + Name: linuxTapHostLogicalName, + Type: linux_interfaces.Interface_TAP_TO_VPP, + Enabled: true, + IpAddresses: []string{ + linuxTapHostIPAddr + hostNetMask, + }, + HostIfName: linuxTapHostName, + Link: &linux_interfaces.Interface_Tap{ + Tap: &linux_interfaces.TapLink{ + VppTapIfName: vppTapHostLogicalName, + }, + }, + } + hostVPPTap = &vpp_interfaces.Interface{ + Name: vppTapHostLogicalName, + Type: vpp_interfaces.Interface_TAP, + Enabled: true, + IpAddresses: []string{ + vppTapHostIPAddr + hostNetMask, + }, + Link: &vpp_interfaces.Interface_Tap{ + Tap: &vpp_interfaces.TapLink{ + Version: vppTapHostVersion, + }, + }, + } + hostRouteToServices = &linux_l3.Route{ + OutgoingInterface: linuxTapHostLogicalName, + Scope: linux_l3.Route_GLOBAL, + DstNetwork: serviceNetPrefix + "0" + serviceNetMask, + GwAddr: vppTapHostIPAddr, + } + + /* microservice-client <-> VPP */ + + clientLinuxTap = 
&linux_interfaces.Interface{ + Name: linuxTapClientLogicalName, + Type: linux_interfaces.Interface_TAP_TO_VPP, + Enabled: true, + IpAddresses: []string{ + linuxTapClientIPAddr + microserviceNetMask, + }, + HostIfName: linuxTapHostName, + Link: &linux_interfaces.Interface_Tap{ + Tap: &linux_interfaces.TapLink{ + VppTapIfName: vppTapClientLogicalName, + }, + }, + Namespace: &linux_ns.NetNamespace{ + Type: linux_ns.NetNamespace_MICROSERVICE, + Reference: mycroserviceClient, + }, + } + clientVPPTap = &vpp_interfaces.Interface{ + Name: vppTapClientLogicalName, + Type: vpp_interfaces.Interface_TAP, + Enabled: true, + IpAddresses: []string{ + vppTapClientIPAddr + microserviceNetMask, + }, + Link: &vpp_interfaces.Interface_Tap{ + Tap: &vpp_interfaces.TapLink{ + Version: vppTapClientVersion, + ToMicroservice: mycroserviceClient, + }, + }, + } + clientRouteToServices = &linux_l3.Route{ + OutgoingInterface: linuxTapClientLogicalName, + Scope: linux_l3.Route_GLOBAL, + DstNetwork: serviceNetPrefix + "0" + serviceNetMask, + GwAddr: vppTapClientIPAddr, + } + clientRouteToHost = &linux_l3.Route{ + OutgoingInterface: linuxTapClientLogicalName, + Scope: linux_l3.Route_GLOBAL, + DstNetwork: hostNetPrefix + "0" + hostNetMask, + GwAddr: vppTapClientIPAddr, + } + + /* microservice-server1 <-> VPP */ + + server1LinuxTap = &linux_interfaces.Interface{ + Name: linuxTapServer1LogicalName, + Type: linux_interfaces.Interface_TAP_TO_VPP, + Enabled: true, + IpAddresses: []string{ + linuxTapServer1IPAddr + microserviceNetMask, + }, + HostIfName: linuxTapHostName, + Link: &linux_interfaces.Interface_Tap{ + Tap: &linux_interfaces.TapLink{ + VppTapIfName: vppTapServer1LogicalName, + }, + }, + Namespace: &linux_ns.NetNamespace{ + Type: linux_ns.NetNamespace_MICROSERVICE, + Reference: mycroserviceServer1, + }, + } + server1VPPTap = &vpp_interfaces.Interface{ + Name: vppTapServer1LogicalName, + Type: vpp_interfaces.Interface_TAP, + Enabled: true, + IpAddresses: []string{ + vppTapServer1IPAddr + 
microserviceNetMask, + }, + Link: &vpp_interfaces.Interface_Tap{ + Tap: &vpp_interfaces.TapLink{ + Version: vppTapServer1Version, + ToMicroservice: mycroserviceServer1, + }, + }, + } + server1RouteToServices = &linux_l3.Route{ + OutgoingInterface: linuxTapServer1LogicalName, + Scope: linux_l3.Route_GLOBAL, + DstNetwork: serviceNetPrefix + "0" + serviceNetMask, + GwAddr: vppTapServer1IPAddr, + } + server1RouteToHost = &linux_l3.Route{ + OutgoingInterface: linuxTapServer1LogicalName, + Scope: linux_l3.Route_GLOBAL, + DstNetwork: hostNetPrefix + "0" + hostNetMask, + GwAddr: vppTapServer1IPAddr, + } + server1RouteToClient = &linux_l3.Route{ + OutgoingInterface: linuxTapServer1LogicalName, + Scope: linux_l3.Route_GLOBAL, + DstNetwork: linuxTapClientIPAddr + "/32", + GwAddr: vppTapServer1IPAddr, + } + + /* microservice-server2 <-> VPP */ + server2LinuxTap = &linux_interfaces.Interface{ + Name: linuxTapServer2LogicalName, + Type: linux_interfaces.Interface_TAP_TO_VPP, + Enabled: true, + IpAddresses: []string{ + linuxTapServer2IPAddr + microserviceNetMask, + }, + HostIfName: linuxTapHostName, + Link: &linux_interfaces.Interface_Tap{ + Tap: &linux_interfaces.TapLink{ + VppTapIfName: vppTapServer2LogicalName, + }, + }, + Namespace: &linux_ns.NetNamespace{ + Type: linux_ns.NetNamespace_MICROSERVICE, + Reference: mycroserviceServer2, + }, + } + server2VPPTap = &vpp_interfaces.Interface{ + Name: vppTapServer2LogicalName, + Type: vpp_interfaces.Interface_TAP, + Enabled: true, + IpAddresses: []string{ + vppTapServer2IPAddr + microserviceNetMask, + }, + Link: &vpp_interfaces.Interface_Tap{ + Tap: &vpp_interfaces.TapLink{ + Version: vppTapServer2Version, + ToMicroservice: mycroserviceServer2, + }, + }, + } + server2RouteToServices = &linux_l3.Route{ + OutgoingInterface: linuxTapServer2LogicalName, + Scope: linux_l3.Route_GLOBAL, + DstNetwork: serviceNetPrefix + "0" + serviceNetMask, + GwAddr: vppTapServer2IPAddr, + } + server2RouteToHost = &linux_l3.Route{ + OutgoingInterface: 
linuxTapServer2LogicalName, + Scope: linux_l3.Route_GLOBAL, + DstNetwork: hostNetPrefix + "0" + hostNetMask, + GwAddr: vppTapServer2IPAddr, + } + server2RouteToClient = &linux_l3.Route{ + OutgoingInterface: linuxTapServer2LogicalName, + Scope: linux_l3.Route_GLOBAL, + DstNetwork: linuxTapClientIPAddr + "/32", + GwAddr: vppTapServer2IPAddr, + } + + /* NAT44 global config */ + + natGlobal = &vpp_nat.Nat44Global{ + Forwarding: true, + VirtualReassembly: &vpp_nat.VirtualReassembly{ + Timeout: 4, + MaxReassemblies: 2048, + MaxFragments: 10, + DropFragments: true, + }, + NatInterfaces: []*vpp_nat.Nat44Global_Interface{ + { + Name: vppTapHostLogicalName, + IsInside: false, + OutputFeature: true, + }, + { + Name: vppTapClientLogicalName, + IsInside: false, + OutputFeature: false, + }, + { + Name: vppTapClientLogicalName, + IsInside: true, // just to test in & out together + OutputFeature: false, + }, + { + Name: vppTapServer1LogicalName, + IsInside: true, + OutputFeature: false, + }, + { + Name: vppTapServer2LogicalName, + IsInside: true, + OutputFeature: false, + }, + }, + AddressPool: []*vpp_nat.Nat44Global_Address{ + { + Address: natPoolAddr1, + }, + { + Address: natPoolAddr2, + }, + { + Address: natPoolAddr3, + TwiceNat: true, + }, + }, + } + + /* TCP service */ + + tcpServiceDNAT = &vpp_nat.DNat44{ + Label: tcpServiceLabel, + StMappings: []*vpp_nat.DNat44_StaticMapping{ + { + ExternalIp: tcpServiceExternalIP1, // with LB + ExternalPort: tcpServiceExternalPort, + Protocol: vpp_nat.DNat44_TCP, + LocalIps: []*vpp_nat.DNat44_StaticMapping_LocalIP{ + { + LocalIp: linuxTapServer1IPAddr, + LocalPort: tcpServiceLocalPortServer1, + Probability: 1, + }, + { + LocalIp: linuxTapServer2IPAddr, + LocalPort: tcpServiceLocalPortServer2, + Probability: 2, /* twice more likely */ + }, + }, + }, + { + ExternalIp: tcpServiceExternalIP2, // server 1 only + ExternalPort: tcpServiceExternalPort, + Protocol: vpp_nat.DNat44_TCP, + LocalIps: []*vpp_nat.DNat44_StaticMapping_LocalIP{ + { + 
LocalIp: linuxTapServer1IPAddr, + LocalPort: tcpServiceLocalPortServer1, + }, + }, + }, + { + ExternalIp: tcpServiceExternalIP3, // server 2 only + ExternalPort: tcpServiceExternalPort, + Protocol: vpp_nat.DNat44_TCP, + LocalIps: []*vpp_nat.DNat44_StaticMapping_LocalIP{ + { + LocalIp: linuxTapServer2IPAddr, + LocalPort: tcpServiceLocalPortServer2, + }, + }, + }, + }, + } + + /* UDP service */ + + udpServiceDNAT = &vpp_nat.DNat44{ + Label: udpServiceLabel, + StMappings: []*vpp_nat.DNat44_StaticMapping{ + { + ExternalIp: udpServiceExternalIP1, // with LB + ExternalPort: udpServiceExternalPort, + Protocol: vpp_nat.DNat44_UDP, + LocalIps: []*vpp_nat.DNat44_StaticMapping_LocalIP{ + { + LocalIp: linuxTapServer1IPAddr, + LocalPort: udpServiceLocalPortServer1, + Probability: 1, + }, + { + LocalIp: linuxTapServer2IPAddr, + LocalPort: udpServiceLocalPortServer2, + Probability: 2, /* twice more likely */ + }, + }, + }, + { + ExternalIp: udpServiceExternalIP2, // server 1 only + ExternalPort: udpServiceExternalPort, + Protocol: vpp_nat.DNat44_UDP, + LocalIps: []*vpp_nat.DNat44_StaticMapping_LocalIP{ + { + LocalIp: linuxTapServer1IPAddr, + LocalPort: udpServiceLocalPortServer1, + }, + }, + }, + { + ExternalIp: udpServiceExternalIP3, // server 2 only + ExternalPort: udpServiceExternalPort, + Protocol: vpp_nat.DNat44_UDP, + LocalIps: []*vpp_nat.DNat44_StaticMapping_LocalIP{ + { + LocalIp: linuxTapServer2IPAddr, + LocalPort: udpServiceLocalPortServer2, + }, + }, + }, + }, + } + + /* identity mapping */ + + idDNAT = &vpp_nat.DNat44{ + Label: idDNATLabel, + IdMappings: []*vpp_nat.DNat44_IdentityMapping{ + { + Interface: vppTapClientLogicalName, + Port: idDNATPort, + Protocol: vpp_nat.DNat44_TCP, + }, + { + IpAddress: natPoolAddr2, + Port: idDNATPort, + Protocol: vpp_nat.DNat44_TCP, + }, + }, + } + + /* DNAT with external interfaces */ + + externalIfaceDNAT = &vpp_nat.DNat44{ + Label: extIfaceDNATLabel, + StMappings: []*vpp_nat.DNat44_StaticMapping{ + { + ExternalInterface: 
vppTapServer1LogicalName, + ExternalPort: extIfaceDNATExternalPort, + Protocol: vpp_nat.DNat44_TCP, + LocalIps: []*vpp_nat.DNat44_StaticMapping_LocalIP{ + { + LocalIp: linuxTapServer1IPAddr, + LocalPort: extIfaceDNATLocalPort, + }, + }, + }, + { + ExternalInterface: vppTapServer2LogicalName, + ExternalPort: extIfaceDNATExternalPort, + Protocol: vpp_nat.DNat44_TCP, + LocalIps: []*vpp_nat.DNat44_StaticMapping_LocalIP{ + { + LocalIp: linuxTapServer2IPAddr, + LocalPort: extIfaceDNATLocalPort, + }, + }, + }, + }, + } + + /* empty DNAT */ + emptyDNAT = &vpp_nat.DNat44{ + Label: emptyDNATLabel, + } + + /* DNAT with address from the pool */ + + addrFromPoolDNAT = &vpp_nat.DNat44{ + Label: addrFromPoolDNATLabel, + StMappings: []*vpp_nat.DNat44_StaticMapping{ + // Without LB + { + ExternalIp: natPoolAddr1, + ExternalPort: addrFromPoolDNATPort, + Protocol: vpp_nat.DNat44_TCP, + LocalIps: []*vpp_nat.DNat44_StaticMapping_LocalIP{ + { + LocalIp: linuxTapServer1IPAddr, + LocalPort: addrFromPoolDNATPort, + }, + }, + }, + // With LB + { + ExternalIp: natPoolAddr2, + ExternalPort: addrFromPoolDNATPort, + Protocol: vpp_nat.DNat44_TCP, + LocalIps: []*vpp_nat.DNat44_StaticMapping_LocalIP{ + { + LocalIp: linuxTapServer1IPAddr, + LocalPort: addrFromPoolDNATPort, + }, + { + LocalIp: linuxTapServer2IPAddr, + LocalPort: addrFromPoolDNATPort, + }, + }, + }, + }, + } +) diff --git a/examples/kvscheduler/vpp-l3/main.go b/examples/kvscheduler/vpp-l3/main.go new file mode 100644 index 0000000000..88344d672c --- /dev/null +++ b/examples/kvscheduler/vpp-l3/main.go @@ -0,0 +1,188 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "fmt" + "log" + "time" + + "github.com/ligato/cn-infra/agent" + + interfaces "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + l3 "github.com/ligato/vpp-agent/api/models/vpp/l3" + "github.com/ligato/vpp-agent/clientv2/linux/localclient" + "github.com/ligato/vpp-agent/plugins/orchestrator" + "github.com/ligato/vpp-agent/plugins/vpp/ifplugin" + "github.com/ligato/vpp-agent/plugins/vpp/l3plugin" +) + +/* + This example demonstrates example with VPP L3Plugin using KVScheduler. +*/ + +func main() { + ep := &ExamplePlugin{ + Orchestrator: &orchestrator.DefaultPlugin, + VPPIfPlugin: &ifplugin.DefaultPlugin, + VPPL3Plugin: &l3plugin.DefaultPlugin, + } + + a := agent.NewAgent( + agent.AllPlugins(ep), + ) + if err := a.Run(); err != nil { + log.Fatal(err) + } +} + +// ExamplePlugin is the main plugin which +// handles resync and changes in this example. +type ExamplePlugin struct { + VPPIfPlugin *ifplugin.IfPlugin + VPPL3Plugin *l3plugin.L3Plugin + Orchestrator *orchestrator.Plugin +} + +// String returns plugin name +func (p *ExamplePlugin) String() string { + return "vpp-l3-example" +} + +// Init handles initialization phase. +func (p *ExamplePlugin) Init() error { + return nil +} + +// AfterInit handles phase after initialization. +func (p *ExamplePlugin) AfterInit() error { + go testLocalClientWithScheduler() + return nil +} + +// Close cleans up the resources. 
+func (p *ExamplePlugin) Close() error { + return nil +} + +func testLocalClientWithScheduler() { + // initial resync + time.Sleep(time.Second * 2) + fmt.Println("=== RESYNC ===") + + txn := localclient.DataResyncRequest("example") + err := txn. + VppInterface(memif0). + VppInterface(memif0_10). + StaticRoute(route0). + StaticRoute(route1). + Arp(arp0). + ProxyArp(proxyArp). + IPScanNeighbor(ipScanNeighbor). + Send().ReceiveReply() + if err != nil { + fmt.Println(err) + return + } + + // data change + time.Sleep(time.Second * 10) + fmt.Println("=== CHANGE ===") + + route0.OutgoingInterface = "" + arp0.PhysAddress = "22:22:22:22:22:22" + proxyArp.Ranges = append(proxyArp.Ranges, &l3.ProxyARP_Range{ + FirstIpAddr: "10.10.2.1", LastIpAddr: "10.10.2.255", + }) + proxyArp.Interfaces = nil + + txn2 := localclient.DataChangeRequest("example") + err = txn2. + Put(). + VppInterface(memif0_10). + StaticRoute(route0). + Delete(). + VppInterface(memif0.Name). + StaticRoute(route1.VrfId, route1.DstNetwork, route1.NextHopAddr). + Put(). + Arp(arp0). + ProxyArp(proxyArp). 
+ Send().ReceiveReply() + if err != nil { + fmt.Println(err) + return + } +} + +var ( + memif0 = &interfaces.Interface{ + Name: "memif0", + Enabled: true, + Type: interfaces.Interface_MEMIF, + IpAddresses: []string{"3.3.0.1/16"}, + Link: &interfaces.Interface_Memif{ + Memif: &interfaces.MemifLink{ + Id: 1, + Master: true, + Secret: "secret", + SocketFilename: "/tmp/memif1.sock", + }, + }, + } + memif0_10 = &interfaces.Interface{ + Name: "memif0/10", + Enabled: true, + Type: interfaces.Interface_SUB_INTERFACE, + IpAddresses: []string{"3.10.0.10/32"}, + Link: &interfaces.Interface_Sub{ + Sub: &interfaces.SubInterface{ + ParentName: "memif0", + SubId: 10, + }, + }, + } + route0 = &l3.Route{ + DstNetwork: "10.10.1.0/24", + OutgoingInterface: "memif0", + Weight: 200, + } + route1 = &l3.Route{ + DstNetwork: "2001:DB8::0001/32", + OutgoingInterface: "memif0", + Weight: 100, + } + arp0 = &l3.ARPEntry{ + Interface: "memif0", + PhysAddress: "33:33:33:33:33:33", + IpAddress: "3.3.3.3", + Static: true, + } + proxyArp = &l3.ProxyARP{ + Ranges: []*l3.ProxyARP_Range{ + {FirstIpAddr: "10.10.1.1", LastIpAddr: "10.10.1.255"}, + }, + Interfaces: []*l3.ProxyARP_Interface{ + {Name: "memif0"}, + }, + } + ipScanNeighbor = &l3.IPScanNeighbor{ + Mode: l3.IPScanNeighbor_IPv4, + ScanInterval: 1, + ScanIntDelay: 1, + MaxProcTime: 20, + MaxUpdate: 0, + StaleThreshold: 4, + } +) diff --git a/examples/localclient_linux/tap/main.go b/examples/localclient_linux/tap/main.go index 2104d5baf8..afba04c1b1 100644 --- a/examples/localclient_linux/tap/main.go +++ b/examples/localclient_linux/tap/main.go @@ -21,16 +21,18 @@ import ( "time" "github.com/ligato/cn-infra/agent" - "github.com/ligato/cn-infra/datasync" - "github.com/ligato/cn-infra/datasync/kvdbsync/local" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/vpp-agent/clientv1/linux/localclient" - "github.com/ligato/vpp-agent/plugins/linux" - linux_intf 
"github.com/ligato/vpp-agent/plugins/linux/model/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp" - vpp_intf "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - vpp_l2 "github.com/ligato/vpp-agent/plugins/vpp/model/l2" + linux_intf "github.com/ligato/vpp-agent/api/models/linux/interfaces" + "github.com/ligato/vpp-agent/api/models/linux/namespace" + vpp_intf "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + vpp_l2 "github.com/ligato/vpp-agent/api/models/vpp/l2" + "github.com/ligato/vpp-agent/clientv2/linux/localclient" + "github.com/ligato/vpp-agent/cmd/vpp-agent/app" + linux_ifplugin "github.com/ligato/vpp-agent/plugins/linux/ifplugin" + linux_nsplugin "github.com/ligato/vpp-agent/plugins/linux/nsplugin" + "github.com/ligato/vpp-agent/plugins/orchestrator" + vpp_ifplugin "github.com/ligato/vpp-agent/plugins/vpp/ifplugin" "github.com/namsral/flag" ) @@ -65,7 +67,7 @@ var ( // Modify sets IP address for tap1, moves linux host to namespace ns1 and configures second TAP interface with linux // host in namespace ns2 /************************************************ - * Initial Data * + * Modified Data * * * * +----------------------------------------+ * * | +-- Bridge Domain --+ | * @@ -90,31 +92,21 @@ var ( // Start Agent plugins selected for this example. func main() { - //Init close channel to stop the example. + // Set inter-dependency between VPP & Linux plugins + vpp_ifplugin.DefaultPlugin.LinuxIfPlugin = &linux_ifplugin.DefaultPlugin + vpp_ifplugin.DefaultPlugin.NsPlugin = &linux_nsplugin.DefaultPlugin + linux_ifplugin.DefaultPlugin.VppIfPlugin = &vpp_ifplugin.DefaultPlugin + + // Init close channel to stop the example. 
exampleFinished := make(chan struct{}) - // Prepare all the dependencies for example plugin - watcher := datasync.KVProtoWatchers{ - local.Get(), - } - vppPlugin := vpp.NewPlugin(vpp.UseDeps(func(deps *vpp.Deps) { - deps.Watcher = watcher - })) - linuxPlugin := linux.NewPlugin(linux.UseDeps(func(deps *linux.Deps) { - deps.VPP = vppPlugin - deps.Watcher = watcher - })) - vppPlugin.Deps.Linux = linuxPlugin - - var watchEventsMutex sync.Mutex - vppPlugin.Deps.WatchEventsMutex = &watchEventsMutex - linuxPlugin.Deps.WatchEventsMutex = &watchEventsMutex // Inject dependencies to example plugin ep := &TapExamplePlugin{ - Log: logging.DefaultLogger, + Log: logging.DefaultLogger, + VPP: app.DefaultVPP(), + Linux: app.DefaultLinux(), + Orchestrator: &orchestrator.DefaultPlugin, } - ep.Deps.VPP = vppPlugin - ep.Deps.Linux = linuxPlugin // Start Agent a := agent.NewAgent( @@ -122,7 +114,7 @@ func main() { agent.QuitOnClose(exampleFinished), ) if err := a.Run(); err != nil { - log.Fatal() + log.Fatal(err) } go closeExample("localhost example finished", exampleFinished) @@ -140,80 +132,81 @@ func closeExample(message string, exampleFinished chan struct{}) { // TapExamplePlugin uses localclient to transport example tap and its linux end // configuration to linuxplugin or VPP plugins type TapExamplePlugin struct { - Deps + Log logging.Logger + app.VPP + app.Linux + Orchestrator *orchestrator.Plugin - Log logging.Logger wg sync.WaitGroup cancel context.CancelFunc } -// Deps is example plugin dependencies. Keep order of fields. -type Deps struct { - VPP *vpp.Plugin - Linux *linux.Plugin -} - // PluginName represents name of plugin. const PluginName = "tap-example" // Init initializes example plugin. 
-func (plugin *TapExamplePlugin) Init() error { +func (p *TapExamplePlugin) Init() error { // Logger - plugin.Log = logrus.DefaultLogger() - plugin.Log.SetLevel(logging.DebugLevel) - plugin.Log.Info("Initializing Tap example") + p.Log = logrus.DefaultLogger() + p.Log.SetLevel(logging.DebugLevel) + p.Log.Info("Initializing Tap example") // Flags flag.Parse() - plugin.Log.Infof("Timeout between create and modify set to %d", *timeout) + p.Log.Infof("Timeout between create and modify set to %d", *timeout) + + p.Log.Info("Tap example initialization done") + return nil +} +// AfterInit initializes example plugin. +func (p *TapExamplePlugin) AfterInit() error { // Apply initial Linux/VPP configuration. - plugin.putInitialData() + p.putInitialData() // Schedule reconfiguration. var ctx context.Context - ctx, plugin.cancel = context.WithCancel(context.Background()) - plugin.wg.Add(1) - go plugin.putModifiedData(ctx, *timeout) + ctx, p.cancel = context.WithCancel(context.Background()) + p.wg.Add(1) + go p.putModifiedData(ctx, *timeout) - plugin.Log.Info("Tap example initialization done") return nil } // Close cleans up the resources. -func (plugin *TapExamplePlugin) Close() error { - plugin.cancel() - plugin.wg.Wait() +func (p *TapExamplePlugin) Close() error { + p.cancel() + p.wg.Wait() - plugin.Log.Info("Closed Tap plugin") + p.Log.Info("Closed Tap plugin") return nil } // String returns plugin name -func (plugin *TapExamplePlugin) String() string { +func (p *TapExamplePlugin) String() string { return PluginName } // Configure initial data -func (plugin *TapExamplePlugin) putInitialData() { - plugin.Log.Infof("Applying initial configuration") +func (p *TapExamplePlugin) putInitialData() { + p.Log.Infof("Applying initial configuration") err := localclient.DataResyncRequest(PluginName). VppInterface(initialTap1()). LinuxInterface(initialLinuxTap1()). BD(bridgeDomain()). 
Send().ReceiveReply() if err != nil { - plugin.Log.Errorf("Initial configuration failed: %v", err) + p.Log.Errorf("Initial configuration failed: %v", err) } else { - plugin.Log.Info("Initial configuration successful") + p.Log.Info("Initial configuration successful") } } // Configure modified data -func (plugin *TapExamplePlugin) putModifiedData(ctx context.Context, timeout int) { +func (p *TapExamplePlugin) putModifiedData(ctx context.Context, timeout int) { select { case <-time.After(time.Duration(timeout) * time.Second): - plugin.Log.Infof("Applying modified configuration") + p.Log.Infof("Applying modified configuration") // Simulate configuration change after timeout err := localclient.DataChangeRequest(PluginName). Put(). @@ -223,112 +216,134 @@ func (plugin *TapExamplePlugin) putModifiedData(ctx context.Context, timeout int LinuxInterface(linuxTap2()). Send().ReceiveReply() if err != nil { - plugin.Log.Errorf("Modified configuration failed: %v", err) + p.Log.Errorf("Modified configuration failed: %v", err) } else { - plugin.Log.Info("Modified configuration successful") + p.Log.Info("Modified configuration successful") } case <-ctx.Done(): // Cancel the scheduled re-configuration. 
- plugin.Log.Info("Modification of configuration canceled") + p.Log.Info("Modification of configuration canceled") } - plugin.wg.Done() + p.wg.Done() } /* Example Data */ -func initialTap1() *vpp_intf.Interfaces_Interface { - return &vpp_intf.Interfaces_Interface{ +func initialTap1() *vpp_intf.Interface { + return &vpp_intf.Interface{ Name: "tap1", - Type: vpp_intf.InterfaceType_TAP_INTERFACE, + Type: vpp_intf.Interface_TAP, Enabled: true, - Tap: &vpp_intf.Interfaces_Interface_Tap{ - HostIfName: "linux-tap1", + Link: &vpp_intf.Interface_Tap{ + Tap: &vpp_intf.TapLink{ + Version: 2, + }, }, } } -func modifiedTap1() *vpp_intf.Interfaces_Interface { - return &vpp_intf.Interfaces_Interface{ +func modifiedTap1() *vpp_intf.Interface { + return &vpp_intf.Interface{ Name: "tap1", - Type: vpp_intf.InterfaceType_TAP_INTERFACE, + Type: vpp_intf.Interface_TAP, Enabled: true, PhysAddress: "12:E4:0E:D5:BC:DC", IpAddresses: []string{ "10.0.0.11/24", }, - Tap: &vpp_intf.Interfaces_Interface_Tap{ - HostIfName: "linux-tap1", + Link: &vpp_intf.Interface_Tap{ + Tap: &vpp_intf.TapLink{ + Version: 2, + }, }, } } -func tap2() *vpp_intf.Interfaces_Interface { - return &vpp_intf.Interfaces_Interface{ +func tap2() *vpp_intf.Interface { + return &vpp_intf.Interface{ Name: "tap2", - Type: vpp_intf.InterfaceType_TAP_INTERFACE, + Type: vpp_intf.Interface_TAP, Enabled: true, PhysAddress: "D5:BC:DC:12:E4:0E", IpAddresses: []string{ "20.0.0.11/24", }, - Tap: &vpp_intf.Interfaces_Interface_Tap{ - HostIfName: "linux-tap2", + Link: &vpp_intf.Interface_Tap{ + Tap: &vpp_intf.TapLink{ + Version: 2, + }, }, } } -func initialLinuxTap1() *linux_intf.LinuxInterfaces_Interface { - return &linux_intf.LinuxInterfaces_Interface{ - +func initialLinuxTap1() *linux_intf.Interface { + return &linux_intf.Interface{ Name: "linux-tap1", - Type: linux_intf.LinuxInterfaces_AUTO_TAP, + Type: linux_intf.Interface_TAP_TO_VPP, Enabled: true, - PhysAddress: "BC:FE:E9:5E:07:04", - Mtu: 1500, + PhysAddress: "88:88:88:88:88:88", 
IpAddresses: []string{ - "10.0.0.12/24", + "10.0.0.2/24", + }, + HostIfName: "tap_to_vpp1", + Link: &linux_intf.Interface_Tap{ + Tap: &linux_intf.TapLink{ + VppTapIfName: "tap1", + }, }, } } -func modifiedLinuxTap1() *linux_intf.LinuxInterfaces_Interface { - return &linux_intf.LinuxInterfaces_Interface{ +func modifiedLinuxTap1() *linux_intf.Interface { + return &linux_intf.Interface{ Name: "linux-tap1", - Type: linux_intf.LinuxInterfaces_AUTO_TAP, + Type: linux_intf.Interface_TAP_TO_VPP, Enabled: true, PhysAddress: "BC:FE:E9:5E:07:04", - Namespace: &linux_intf.LinuxInterfaces_Interface_Namespace{ - Name: "ns1", - Type: linux_intf.LinuxInterfaces_Interface_Namespace_NAMED_NS, + Namespace: &linux_namespace.NetNamespace{ + Reference: "ns1", + Type: linux_namespace.NetNamespace_NSID, }, Mtu: 1500, IpAddresses: []string{ "10.0.0.12/24", }, + HostIfName: "tap_to_vpp1", + Link: &linux_intf.Interface_Tap{ + Tap: &linux_intf.TapLink{ + VppTapIfName: "tap1", + }, + }, } } -func linuxTap2() *linux_intf.LinuxInterfaces_Interface { - return &linux_intf.LinuxInterfaces_Interface{ +func linuxTap2() *linux_intf.Interface { + return &linux_intf.Interface{ Name: "linux-tap2", - Type: linux_intf.LinuxInterfaces_AUTO_TAP, + Type: linux_intf.Interface_TAP_TO_VPP, Enabled: true, PhysAddress: "5E:07:04:BC:FE:E9", - Namespace: &linux_intf.LinuxInterfaces_Interface_Namespace{ - Name: "ns2", - Type: linux_intf.LinuxInterfaces_Interface_Namespace_NAMED_NS, + Namespace: &linux_namespace.NetNamespace{ + Reference: "ns2", + Type: linux_namespace.NetNamespace_NSID, }, Mtu: 1500, IpAddresses: []string{ "20.0.0.12/24", }, + HostIfName: "tap_to_vpp2", + Link: &linux_intf.Interface_Tap{ + Tap: &linux_intf.TapLink{ + VppTapIfName: "tap2", + }, + }, } } -func bridgeDomain() *vpp_l2.BridgeDomains_BridgeDomain { - return &vpp_l2.BridgeDomains_BridgeDomain{ +func bridgeDomain() *vpp_l2.BridgeDomain { + return &vpp_l2.BridgeDomain{ Name: "br1", Flood: true, UnknownUnicastFlood: true, @@ -336,7 +351,7 @@ 
func bridgeDomain() *vpp_l2.BridgeDomains_BridgeDomain { Learn: true, ArpTermination: false, MacAge: 0, /* means disable aging */ - Interfaces: []*vpp_l2.BridgeDomains_BridgeDomain_Interfaces{ + Interfaces: []*vpp_l2.BridgeDomain_Interface{ { Name: "tap1", BridgedVirtualInterface: false, diff --git a/examples/localclient_linux/veth/main.go b/examples/localclient_linux/veth/main.go index ad0ea83fc0..f1d4161ff9 100644 --- a/examples/localclient_linux/veth/main.go +++ b/examples/localclient_linux/veth/main.go @@ -22,16 +22,18 @@ import ( "log" "github.com/ligato/cn-infra/agent" - "github.com/ligato/cn-infra/datasync" - "github.com/ligato/cn-infra/datasync/kvdbsync/local" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" - localclient2 "github.com/ligato/vpp-agent/clientv1/linux/localclient" - "github.com/ligato/vpp-agent/plugins/linux" - linux_intf "github.com/ligato/vpp-agent/plugins/linux/model/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp" - vpp_intf "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - vpp_l2 "github.com/ligato/vpp-agent/plugins/vpp/model/l2" + linux_intf "github.com/ligato/vpp-agent/api/models/linux/interfaces" + "github.com/ligato/vpp-agent/api/models/linux/namespace" + vpp_intf "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + vpp_l2 "github.com/ligato/vpp-agent/api/models/vpp/l2" + localclient2 "github.com/ligato/vpp-agent/clientv2/linux/localclient" + "github.com/ligato/vpp-agent/cmd/vpp-agent/app" + linux_ifplugin "github.com/ligato/vpp-agent/plugins/linux/ifplugin" + linux_nsplugin "github.com/ligato/vpp-agent/plugins/linux/nsplugin" + "github.com/ligato/vpp-agent/plugins/orchestrator" + vpp_ifplugin "github.com/ligato/vpp-agent/plugins/vpp/ifplugin" "github.com/namsral/flag" ) @@ -103,31 +105,21 @@ const PluginName = "veth-example" // Start Agent plugins selected for this example. func main() { - //Init close channel to stop the example. 
- exampleFinished := make(chan struct{}, 1) - // Prepare all the dependencies for example plugin - watcher := datasync.KVProtoWatchers{ - local.Get(), - } - vppPlugin := vpp.NewPlugin(vpp.UseDeps(func(deps *vpp.Deps) { - deps.Watcher = watcher - })) - linuxPlugin := linux.NewPlugin(linux.UseDeps(func(deps *linux.Deps) { - deps.VPP = vppPlugin - deps.Watcher = watcher - })) - vppPlugin.Deps.Linux = linuxPlugin - - var watchEventsMutex sync.Mutex - vppPlugin.Deps.WatchEventsMutex = &watchEventsMutex - linuxPlugin.Deps.WatchEventsMutex = &watchEventsMutex + // Set inter-dependency between VPP & Linux plugins + vpp_ifplugin.DefaultPlugin.LinuxIfPlugin = &linux_ifplugin.DefaultPlugin + vpp_ifplugin.DefaultPlugin.NsPlugin = &linux_nsplugin.DefaultPlugin + linux_ifplugin.DefaultPlugin.VppIfPlugin = &vpp_ifplugin.DefaultPlugin + + // Init close channel to stop the example. + exampleFinished := make(chan struct{}) // Inject dependencies to example plugin ep := &VethExamplePlugin{ - Log: logging.DefaultLogger, + Log: logging.DefaultLogger, + VPP: app.DefaultVPP(), + Linux: app.DefaultLinux(), + Orchestrator: &orchestrator.DefaultPlugin, } - ep.Deps.VPP = vppPlugin - ep.Deps.Linux = linuxPlugin // Start Agent a := agent.NewAgent( @@ -153,60 +145,61 @@ func closeExample(message string, exampleFinished chan struct{}) { // VethExamplePlugin uses localclient to transport example veth and af-packet // configuration to linuxplugin, eventually VPP plugins type VethExamplePlugin struct { - Deps + Log logging.Logger + app.VPP + app.Linux + Orchestrator *orchestrator.Plugin - Log logging.Logger wg sync.WaitGroup cancel context.CancelFunc } -// Deps is example plugin dependencies. Keep order of fields. -type Deps struct { - VPP *vpp.Plugin - Linux *linux.Plugin -} - // String returns plugin name -func (plugin *VethExamplePlugin) String() string { +func (p *VethExamplePlugin) String() string { return PluginName } // Init initializes example plugin. 
-func (plugin *VethExamplePlugin) Init() error { +func (p *VethExamplePlugin) Init() error { // Logger - plugin.Log = logrus.DefaultLogger() - plugin.Log.SetLevel(logging.DebugLevel) - plugin.Log.Info("Initializing Veth example") + p.Log = logrus.DefaultLogger() + p.Log.SetLevel(logging.DebugLevel) + p.Log.Info("Initializing Veth example") // Flags flag.Parse() - plugin.Log.Infof("Timeout between create and modify set to %d", *timeout) + p.Log.Infof("Timeout between create and modify set to %d", *timeout) + p.Log.Info("Veth example initialization done") + return nil +} + +// AfterInit initializes example plugin. +func (p *VethExamplePlugin) AfterInit() error { // Apply initial Linux/VPP configuration. - plugin.putInitialData() + p.putInitialData() // Schedule reconfiguration. var ctx context.Context - ctx, plugin.cancel = context.WithCancel(context.Background()) - plugin.wg.Add(1) - go plugin.putModifiedData(ctx, *timeout) + ctx, p.cancel = context.WithCancel(context.Background()) + p.wg.Add(1) + go p.putModifiedData(ctx, *timeout) - plugin.Log.Info("Veth example initialization done") return nil } // Close cleans up the resources. -func (plugin *VethExamplePlugin) Close() error { - plugin.cancel() - plugin.wg.Wait() +func (p *VethExamplePlugin) Close() error { + p.cancel() + p.wg.Wait() - plugin.Log.Info("Closed Veth plugin") + p.Log.Info("Closed Veth plugin") return nil } // Configure initial data -func (plugin *VethExamplePlugin) putInitialData() { - plugin.Log.Infof("Applying initial configuration") +func (p *VethExamplePlugin) putInitialData() { + p.Log.Infof("Applying initial configuration") err := localclient2.DataResyncRequest(PluginName). LinuxInterface(initialVeth11()). LinuxInterface(initialVeth12()). @@ -214,17 +207,17 @@ func (plugin *VethExamplePlugin) putInitialData() { BD(bridgeDomain()). 
Send().ReceiveReply() if err != nil { - plugin.Log.Errorf("Initial configuration failed: %v", err) + p.Log.Errorf("Initial configuration failed: %v", err) } else { - plugin.Log.Info("Initial configuration successful") + p.Log.Info("Initial configuration successful") } } // Configure modified data -func (plugin *VethExamplePlugin) putModifiedData(ctx context.Context, timeout int) { +func (p *VethExamplePlugin) putModifiedData(ctx context.Context, timeout int) { select { case <-time.After(time.Duration(timeout) * time.Second): - plugin.Log.Infof("Applying modified configuration") + p.Log.Infof("Applying modified configuration") // Simulate configuration change after timeout err := localclient2.DataChangeRequest(PluginName). Put(). @@ -235,122 +228,126 @@ func (plugin *VethExamplePlugin) putModifiedData(ctx context.Context, timeout in VppInterface(afPacket2()). Send().ReceiveReply() if err != nil { - plugin.Log.Errorf("Modified configuration failed: %v", err) + p.Log.Errorf("Modified configuration failed: %v", err) } else { - plugin.Log.Info("Modified configuration successful") + p.Log.Info("Modified configuration successful") } case <-ctx.Done(): // Cancel the scheduled re-configuration. 
- plugin.Log.Info("Modification of configuration canceled") + p.Log.Info("Modification of configuration canceled") } - plugin.wg.Done() + p.wg.Done() } /* Example Data */ -func initialVeth11() *linux_intf.LinuxInterfaces_Interface { - return &linux_intf.LinuxInterfaces_Interface{ +func initialVeth11() *linux_intf.Interface { + return &linux_intf.Interface{ Name: "veth11", - Type: linux_intf.LinuxInterfaces_VETH, + Type: linux_intf.Interface_VETH, Enabled: true, - Veth: &linux_intf.LinuxInterfaces_Interface_Veth{ - PeerIfName: "veth12", + Link: &linux_intf.Interface_Veth{ + Veth: &linux_intf.VethLink{PeerIfName: "veth12"}, }, } } -func modifiedVeth11() *linux_intf.LinuxInterfaces_Interface { - return &linux_intf.LinuxInterfaces_Interface{ +func modifiedVeth11() *linux_intf.Interface { + return &linux_intf.Interface{ Name: "veth11", - Type: linux_intf.LinuxInterfaces_VETH, + Type: linux_intf.Interface_VETH, Enabled: true, - Veth: &linux_intf.LinuxInterfaces_Interface_Veth{ - PeerIfName: "veth12", + Link: &linux_intf.Interface_Veth{ + Veth: &linux_intf.VethLink{PeerIfName: "veth12"}, }, Mtu: 1000, } } -func initialVeth12() *linux_intf.LinuxInterfaces_Interface { - return &linux_intf.LinuxInterfaces_Interface{ +func initialVeth12() *linux_intf.Interface { + return &linux_intf.Interface{ Name: "veth12", - Type: linux_intf.LinuxInterfaces_VETH, + Type: linux_intf.Interface_VETH, Enabled: true, - Veth: &linux_intf.LinuxInterfaces_Interface_Veth{ - PeerIfName: "veth11", + Link: &linux_intf.Interface_Veth{ + Veth: &linux_intf.VethLink{PeerIfName: "veth11"}, }, } } -func modifiedVeth12() *linux_intf.LinuxInterfaces_Interface { - return &linux_intf.LinuxInterfaces_Interface{ +func modifiedVeth12() *linux_intf.Interface { + return &linux_intf.Interface{ Name: "veth12", - Type: linux_intf.LinuxInterfaces_VETH, + Type: linux_intf.Interface_VETH, Enabled: true, - Veth: &linux_intf.LinuxInterfaces_Interface_Veth{ - PeerIfName: "veth11", + Link: &linux_intf.Interface_Veth{ + Veth: 
&linux_intf.VethLink{PeerIfName: "veth11"}, }, IpAddresses: []string{"10.0.0.1/24"}, PhysAddress: "D2:74:8C:12:67:D2", - Namespace: &linux_intf.LinuxInterfaces_Interface_Namespace{ - Type: linux_intf.LinuxInterfaces_Interface_Namespace_NAMED_NS, - Name: "ns1", + Namespace: &linux_namespace.NetNamespace{ + Reference: "ns1", + Type: linux_namespace.NetNamespace_NSID, }, } } -func veth21() *linux_intf.LinuxInterfaces_Interface { - return &linux_intf.LinuxInterfaces_Interface{ +func veth21() *linux_intf.Interface { + return &linux_intf.Interface{ Name: "veth21", - Type: linux_intf.LinuxInterfaces_VETH, + Type: linux_intf.Interface_VETH, Enabled: true, - Veth: &linux_intf.LinuxInterfaces_Interface_Veth{ - PeerIfName: "veth22", + Link: &linux_intf.Interface_Veth{ + Veth: &linux_intf.VethLink{PeerIfName: "veth22"}, }, } } -func veth22() *linux_intf.LinuxInterfaces_Interface { - return &linux_intf.LinuxInterfaces_Interface{ +func veth22() *linux_intf.Interface { + return &linux_intf.Interface{ Name: "veth22", - Type: linux_intf.LinuxInterfaces_VETH, + Type: linux_intf.Interface_VETH, Enabled: true, - Veth: &linux_intf.LinuxInterfaces_Interface_Veth{ - PeerIfName: "veth21", + Link: &linux_intf.Interface_Veth{ + Veth: &linux_intf.VethLink{PeerIfName: "veth21"}, }, IpAddresses: []string{"10.0.0.2/24"}, PhysAddress: "92:C7:42:67:AB:CD", - Namespace: &linux_intf.LinuxInterfaces_Interface_Namespace{ - Type: linux_intf.LinuxInterfaces_Interface_Namespace_NAMED_NS, - Name: "ns2", + Namespace: &linux_namespace.NetNamespace{ + Reference: "ns2", + Type: linux_namespace.NetNamespace_NSID, }, } } -func afPacket1() *vpp_intf.Interfaces_Interface { - return &vpp_intf.Interfaces_Interface{ +func afPacket1() *vpp_intf.Interface { + return &vpp_intf.Interface{ Name: "afpacket1", - Type: vpp_intf.InterfaceType_AF_PACKET_INTERFACE, + Type: vpp_intf.Interface_AF_PACKET, Enabled: true, - Afpacket: &vpp_intf.Interfaces_Interface_Afpacket{ - HostIfName: "veth11", + Link: 
&vpp_intf.Interface_Afpacket{ + Afpacket: &vpp_intf.AfpacketLink{ + HostIfName: "veth11", + }, }, } } -func afPacket2() *vpp_intf.Interfaces_Interface { - return &vpp_intf.Interfaces_Interface{ +func afPacket2() *vpp_intf.Interface { + return &vpp_intf.Interface{ Name: "afpacket2", - Type: vpp_intf.InterfaceType_AF_PACKET_INTERFACE, + Type: vpp_intf.Interface_AF_PACKET, Enabled: true, - Afpacket: &vpp_intf.Interfaces_Interface_Afpacket{ - HostIfName: "veth21", + Link: &vpp_intf.Interface_Afpacket{ + Afpacket: &vpp_intf.AfpacketLink{ + HostIfName: "veth21", + }, }, } } -func bridgeDomain() *vpp_l2.BridgeDomains_BridgeDomain { - return &vpp_l2.BridgeDomains_BridgeDomain{ +func bridgeDomain() *vpp_l2.BridgeDomain { + return &vpp_l2.BridgeDomain{ Name: "br1", Flood: true, UnknownUnicastFlood: true, @@ -358,7 +355,7 @@ func bridgeDomain() *vpp_l2.BridgeDomains_BridgeDomain { Learn: true, ArpTermination: false, MacAge: 0, /* means disable aging */ - Interfaces: []*vpp_l2.BridgeDomains_BridgeDomain_Interfaces{ + Interfaces: []*vpp_l2.BridgeDomain_Interface{ { Name: "afpacket1", BridgedVirtualInterface: false, diff --git a/examples/localclient_vpp/nat/main.go b/examples/localclient_vpp/nat/main.go index 0ff60eafdb..9614230ce1 100644 --- a/examples/localclient_vpp/nat/main.go +++ b/examples/localclient_vpp/nat/main.go @@ -21,14 +21,13 @@ import ( "time" "github.com/ligato/cn-infra/agent" - "github.com/ligato/cn-infra/datasync" - "github.com/ligato/cn-infra/datasync/kvdbsync/local" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/vpp-agent/clientv1/vpp/localclient" - "github.com/ligato/vpp-agent/plugins/vpp" - vpp_intf "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/model/nat" + vpp_intf "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + nat "github.com/ligato/vpp-agent/api/models/vpp/nat" + "github.com/ligato/vpp-agent/clientv2/vpp/localclient" + 
"github.com/ligato/vpp-agent/cmd/vpp-agent/app" + "github.com/ligato/vpp-agent/plugins/orchestrator" "github.com/namsral/flag" ) @@ -108,20 +107,15 @@ vpp# // Start Agent plugins selected for this example. func main() { - //Init close channel to stop the example. - exampleFinished := make(chan struct{}, 1) - // Prepare all the dependencies for example plugin - watcher := datasync.KVProtoWatchers{ - local.Get(), - } - vppPlugin := vpp.NewPlugin(vpp.UseDeps(func(deps *vpp.Deps) { - deps.Watcher = watcher - })) + // Init close channel to stop the example. + exampleFinished := make(chan struct{}) // Inject dependencies to example plugin - ep := &NatExamplePlugin{} - ep.Deps.Log = logging.DefaultLogger - ep.Deps.VPP = vppPlugin + ep := &NatExamplePlugin{ + Log: logging.DefaultLogger, + VPP: app.DefaultVPP(), + Orchestrator: &orchestrator.DefaultPlugin, + } // Start Agent a := agent.NewAgent( @@ -147,62 +141,63 @@ func closeExample(message string, exampleFinished chan struct{}) { // NatExamplePlugin uses localclient to transport example global NAT and DNAT and af-packet // configuration to NAT VPP plugin type NatExamplePlugin struct { - Deps + Log logging.Logger + app.VPP + Orchestrator *orchestrator.Plugin wg sync.WaitGroup cancel context.CancelFunc } -// Deps is example plugin dependencies. -type Deps struct { - Log logging.Logger - VPP *vpp.Plugin -} - // PluginName represents name of plugin. const PluginName = "nat-example" // Init initializes example plugin. 
-func (plugin *NatExamplePlugin) Init() error { +func (p *NatExamplePlugin) Init() error { // Logger - plugin.Log = logrus.DefaultLogger() - plugin.Log.SetLevel(logging.DebugLevel) - plugin.Log.Info("Initializing NAT44 example") + p.Log = logrus.DefaultLogger() + p.Log.SetLevel(logging.DebugLevel) + p.Log.Info("Initializing NAT44 example") // Flags flag.Parse() - plugin.Log.Infof("Timeout between configuring NAT global and DNAT set to %d", *timeout) + p.Log.Infof("Timeout between configuring NAT global and DNAT set to %d", *timeout) + p.Log.Info("NAT example initialization done") + return nil +} + +// AfterInit initializes example plugin. +func (p *NatExamplePlugin) AfterInit() error { // Apply initial VPP configuration. - plugin.putGlobalConfig() + p.putGlobalConfig() // Schedule reconfiguration. var ctx context.Context - ctx, plugin.cancel = context.WithCancel(context.Background()) - plugin.wg.Add(1) - go plugin.putDNAT(ctx, *timeout) + ctx, p.cancel = context.WithCancel(context.Background()) + p.wg.Add(1) + go p.putDNAT(ctx, *timeout) - plugin.Log.Info("NAT example initialization done") return nil } // Close cleans up the resources. -func (plugin *NatExamplePlugin) Close() error { - plugin.cancel() - plugin.wg.Wait() +func (p *NatExamplePlugin) Close() error { + p.cancel() + p.wg.Wait() logrus.DefaultLogger().Info("Closed NAT example plugin") return nil } // String returns plugin name -func (plugin *NatExamplePlugin) String() string { +func (p *NatExamplePlugin) String() string { return PluginName } // Configure NAT44 Global config -func (plugin *NatExamplePlugin) putGlobalConfig() { - plugin.Log.Infof("Applying NAT44 global configuration") +func (p *NatExamplePlugin) putGlobalConfig() { + p.Log.Infof("Applying NAT44 global configuration") err := localclient.DataResyncRequest(PluginName). Interface(interface1()). Interface(interface2()). @@ -210,85 +205,91 @@ func (plugin *NatExamplePlugin) putGlobalConfig() { NAT44Global(globalNat()). 
Send().ReceiveReply() if err != nil { - plugin.Log.Errorf("NAT44 global configuration failed: %v", err) + p.Log.Errorf("NAT44 global configuration failed: %v", err) } else { - plugin.Log.Info("NAT44 global configuration successful") + p.Log.Info("NAT44 global configuration successful") } } // Configure DNAT -func (plugin *NatExamplePlugin) putDNAT(ctx context.Context, timeout int) { +func (p *NatExamplePlugin) putDNAT(ctx context.Context, timeout int) { select { case <-time.After(time.Duration(timeout) * time.Second): - plugin.Log.Infof("Applying DNAT configuration") + p.Log.Infof("Applying DNAT configuration") err := localclient.DataChangeRequest(PluginName). Put(). - NAT44DNat(dNat()). + DNAT44(dNat()). Send().ReceiveReply() if err != nil { - plugin.Log.Errorf("DNAT configuration failed: %v", err) + p.Log.Errorf("DNAT configuration failed: %v", err) } else { - plugin.Log.Info("DNAT configuration successful") + p.Log.Info("DNAT configuration successful") } case <-ctx.Done(): // Cancel the scheduled DNAT configuration. 
- plugin.Log.Info("DNAT configuration canceled") + p.Log.Info("DNAT configuration canceled") } - plugin.wg.Done() + p.wg.Done() } /* Example Data */ -func interface1() *vpp_intf.Interfaces_Interface { - return &vpp_intf.Interfaces_Interface{ +func interface1() *vpp_intf.Interface { + return &vpp_intf.Interface{ Name: "memif1", - Type: vpp_intf.InterfaceType_MEMORY_INTERFACE, + Type: vpp_intf.Interface_MEMIF, Enabled: true, Mtu: 1478, IpAddresses: []string{ "172.125.40.1/24", }, - Memif: &vpp_intf.Interfaces_Interface_Memif{ - Id: 1, - Secret: "secret1", - Master: false, - SocketFilename: "/tmp/memif1.sock", + Link: &vpp_intf.Interface_Memif{ + Memif: &vpp_intf.MemifLink{ + Id: 1, + Secret: "secret1", + Master: false, + SocketFilename: "/tmp/memif1.sock", + }, }, } } -func interface2() *vpp_intf.Interfaces_Interface { - return &vpp_intf.Interfaces_Interface{ +func interface2() *vpp_intf.Interface { + return &vpp_intf.Interface{ Name: "memif2", - Type: vpp_intf.InterfaceType_MEMORY_INTERFACE, + Type: vpp_intf.Interface_MEMIF, Enabled: true, Mtu: 1478, IpAddresses: []string{ "192.47.21.1/24", }, - Memif: &vpp_intf.Interfaces_Interface_Memif{ - Id: 2, - Secret: "secret2", - Master: false, - SocketFilename: "/tmp/memif1.sock", + Link: &vpp_intf.Interface_Memif{ + Memif: &vpp_intf.MemifLink{ + Id: 2, + Secret: "secret2", + Master: false, + SocketFilename: "/tmp/memif1.sock", + }, }, } } -func interface3() *vpp_intf.Interfaces_Interface { - return &vpp_intf.Interfaces_Interface{ +func interface3() *vpp_intf.Interface { + return &vpp_intf.Interface{ Name: "memif3", - Type: vpp_intf.InterfaceType_MEMORY_INTERFACE, + Type: vpp_intf.Interface_MEMIF, Enabled: true, Mtu: 1478, IpAddresses: []string{ "94.18.21.1/24", }, - Memif: &vpp_intf.Interfaces_Interface_Memif{ - Id: 3, - Secret: "secret3", - Master: false, - SocketFilename: "/tmp/memif1.sock", + Link: &vpp_intf.Interface_Memif{ + Memif: &vpp_intf.MemifLink{ + Id: 3, + Secret: "secret3", + Master: false, + SocketFilename: 
"/tmp/memif1.sock", + }, }, } } @@ -296,7 +297,7 @@ func interface3() *vpp_intf.Interfaces_Interface { func globalNat() *nat.Nat44Global { return &nat.Nat44Global{ Forwarding: false, - NatInterfaces: []*nat.Nat44Global_NatInterface{ + NatInterfaces: []*nat.Nat44Global_Interface{ { Name: "memif1", IsInside: false, @@ -313,38 +314,38 @@ func globalNat() *nat.Nat44Global { OutputFeature: false, }, }, - AddressPools: []*nat.Nat44Global_AddressPool{ + AddressPool: []*nat.Nat44Global_Address{ { - VrfId: 0, - FirstSrcAddress: "192.168.0.1", - TwiceNat: false, + VrfId: 0, + Address: "192.168.0.1", + TwiceNat: false, }, { - VrfId: 0, - FirstSrcAddress: "175.124.0.1", - LastSrcAddress: "175.124.0.3", - TwiceNat: false, + VrfId: 0, + Address: "175.124.0.1", + //LastSrcAddress: "175.124.0.3", + TwiceNat: false, }, { - VrfId: 0, - FirstSrcAddress: "10.10.0.1", - LastSrcAddress: "10.10.0.2", - TwiceNat: false, + VrfId: 0, + Address: "10.10.0.1", + //LastSrcAddress: "10.10.0.2", + TwiceNat: false, }, }, } } -func dNat() *nat.Nat44DNat_DNatConfig { - return &nat.Nat44DNat_DNatConfig{ +func dNat() *nat.DNat44 { + return &nat.DNat44{ Label: "dnat1", - StMappings: []*nat.Nat44DNat_DNatConfig_StaticMapping{ + StMappings: []*nat.DNat44_StaticMapping{ { // DNAT static mapping with load balancer (multiple local addresses) ExternalInterface: "memif1", ExternalIp: "192.168.0.1", ExternalPort: 8989, - LocalIps: []*nat.Nat44DNat_DNatConfig_StaticMapping_LocalIP{ + LocalIps: []*nat.DNat44_StaticMapping_LocalIP{ { VrfId: 0, LocalIp: "172.124.0.2", @@ -359,14 +360,14 @@ func dNat() *nat.Nat44DNat_DNatConfig { }, }, Protocol: 1, - TwiceNat: nat.TwiceNatMode_ENABLED, + //TwiceNat: nat.DNat44_StaticMapping_ENABLED, }, { // DNAT static mapping without load balancer (single local address) ExternalInterface: "memif2", ExternalIp: "192.168.0.2", ExternalPort: 8989, - LocalIps: []*nat.Nat44DNat_DNatConfig_StaticMapping_LocalIP{ + LocalIps: []*nat.DNat44_StaticMapping_LocalIP{ { VrfId: 0, LocalIp: 
"172.124.0.3", @@ -375,10 +376,10 @@ func dNat() *nat.Nat44DNat_DNatConfig { }, }, Protocol: 1, - TwiceNat: nat.TwiceNatMode_ENABLED, + //TwiceNat: nat.DNat44_StaticMapping_ENABLED, }, }, - IdMappings: []*nat.Nat44DNat_DNatConfig_IdentityMapping{ + IdMappings: []*nat.DNat44_IdentityMapping{ { VrfId: 0, IpAddress: "10.10.0.1", diff --git a/examples/localclient_vpp/plugins/main.go b/examples/localclient_vpp/plugins/main.go index 5c261978d7..8f15125a58 100644 --- a/examples/localclient_vpp/plugins/main.go +++ b/examples/localclient_vpp/plugins/main.go @@ -23,16 +23,15 @@ import ( "log" "github.com/ligato/cn-infra/agent" - "github.com/ligato/cn-infra/datasync" - "github.com/ligato/cn-infra/datasync/kvdbsync/local" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/vpp-agent/clientv1/vpp/localclient" - "github.com/ligato/vpp-agent/plugins/vpp" - "github.com/ligato/vpp-agent/plugins/vpp/model/acl" - "github.com/ligato/vpp-agent/plugins/vpp/model/interfaces" - "github.com/ligato/vpp-agent/plugins/vpp/model/l2" - "github.com/ligato/vpp-agent/plugins/vpp/model/l3" + acl "github.com/ligato/vpp-agent/api/models/vpp/acl" + interfaces "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + l2 "github.com/ligato/vpp-agent/api/models/vpp/l2" + l3 "github.com/ligato/vpp-agent/api/models/vpp/l3" + "github.com/ligato/vpp-agent/clientv2/vpp/localclient" + "github.com/ligato/vpp-agent/cmd/vpp-agent/app" + "github.com/ligato/vpp-agent/plugins/orchestrator" ) // init sets the default logging level. @@ -47,22 +46,15 @@ func init() { // Start Agent plugins selected for this example. func main() { - //Init close channel to stop the example. 
- exampleFinished := make(chan struct{}, 1) - // Prepare all the dependencies for example plugin - watcher := datasync.KVProtoWatchers{ - local.Get(), - } - vppPlugin := vpp.NewPlugin(vpp.UseDeps(func(deps *vpp.Deps) { - deps.Watcher = watcher - })) - - var watchEventsMutex sync.Mutex - vppPlugin.Deps.WatchEventsMutex = &watchEventsMutex + // Init close channel to stop the example. + exampleFinished := make(chan struct{}) // Inject dependencies to example plugin - ep := &ExamplePlugin{} - ep.Deps.VPP = vppPlugin + ep := &ExamplePlugin{ + Log: logging.DefaultLogger, + VPP: app.DefaultVPP(), + Orchestrator: &orchestrator.DefaultPlugin, + } // Start Agent a := agent.NewAgent( @@ -89,51 +81,58 @@ func closeExample(message string, exampleFinished chan struct{}) { // ExamplePlugin demonstrates the use of the localclient to locally transport example configuration into the default VPP plugins. type ExamplePlugin struct { - Deps + Log logging.Logger + app.VPP + Orchestrator *orchestrator.Plugin wg sync.WaitGroup cancel context.CancelFunc } -// Deps is example plugin dependencies. -type Deps struct { - VPP *vpp.Plugin -} - // PluginName represents name of plugin. const PluginName = "plugin-example" // Init initializes example plugin. -func (plugin *ExamplePlugin) Init() error { +func (p *ExamplePlugin) Init() error { + // Logger + p.Log = logrus.DefaultLogger() + p.Log.SetLevel(logging.DebugLevel) + p.Log.Info("Initializing VPP example") + + logrus.DefaultLogger().Info("Initialization of the example plugin has completed") + return nil +} + +// AfterInit initializes example plugin. +func (p *ExamplePlugin) AfterInit() error { // Apply initial VPP configuration. - plugin.resyncVPP() + p.resyncVPP() // Schedule reconfiguration. 
var ctx context.Context - ctx, plugin.cancel = context.WithCancel(context.Background()) - plugin.wg.Add(1) - go plugin.reconfigureVPP(ctx) + ctx, p.cancel = context.WithCancel(context.Background()) + p.wg.Add(1) + go p.reconfigureVPP(ctx) - logrus.DefaultLogger().Info("Initialization of the example plugin has completed") return nil } // Close cleans up the resources. -func (plugin *ExamplePlugin) Close() error { - plugin.cancel() - plugin.wg.Wait() +func (p *ExamplePlugin) Close() error { + p.cancel() + p.wg.Wait() logrus.DefaultLogger().Info("Closed example plugin") return nil } // String returns plugin name -func (plugin *ExamplePlugin) String() string { +func (p *ExamplePlugin) String() string { return PluginName } // resyncVPP propagates snapshot of the whole initial configuration to VPP plugins. -func (plugin *ExamplePlugin) resyncVPP() { +func (p *ExamplePlugin) resyncVPP() { err := localclient.DataResyncRequest(PluginName). Interface(&memif1AsMaster). Interface(&tap1Disabled). @@ -148,7 +147,7 @@ func (plugin *ExamplePlugin) resyncVPP() { } // reconfigureVPP simulates a set of changes in the configuration related to VPP plugins. -func (plugin *ExamplePlugin) reconfigureVPP(ctx context.Context) { +func (p *ExamplePlugin) reconfigureVPP(ctx context.Context) { select { case <-time.After(15 * time.Second): // Simulate configuration change exactly 15seconds after resync. @@ -173,7 +172,7 @@ func (plugin *ExamplePlugin) reconfigureVPP(ctx context.Context) { // cancel the scheduled re-configuration logrus.DefaultLogger().Info("Planned VPP re-configuration was canceled") } - plugin.wg.Done() + p.wg.Done() } /************************* @@ -220,115 +219,125 @@ func (plugin *ExamplePlugin) reconfigureVPP(ctx context.Context) { var ( // memif1AsMaster is an example of a memory interface configuration. (Master=true, with IPv4 address). 
- memif1AsMaster = interfaces.Interfaces_Interface{ - Name: "memif1", - Type: interfaces.InterfaceType_MEMORY_INTERFACE, - Enabled: true, - Memif: &interfaces.Interfaces_Interface_Memif{ - Id: 1, - Master: true, - SocketFilename: "/tmp/memif1.sock", - }, + memif1AsMaster = interfaces.Interface{ + Name: "memif1", + Type: interfaces.Interface_MEMIF, + Enabled: true, Mtu: 1500, IpAddresses: []string{"192.168.1.1/24"}, + Link: &interfaces.Interface_Memif{ + Memif: &interfaces.MemifLink{ + Id: 1, + Master: true, + SocketFilename: "/tmp/memif1.sock", + }, + }, } // memif1AsSlave is the original memif1 turned into slave and stripped of the IP address. - memif1AsSlave = interfaces.Interfaces_Interface{ + memif1AsSlave = interfaces.Interface{ Name: "memif1", - Type: interfaces.InterfaceType_MEMORY_INTERFACE, + Type: interfaces.Interface_MEMIF, Enabled: true, - Memif: &interfaces.Interfaces_Interface_Memif{ - Id: 1, - Master: false, - SocketFilename: "/tmp/memif1.sock", + Mtu: 1500, + Link: &interfaces.Interface_Memif{ + Memif: &interfaces.MemifLink{ + Id: 1, + Master: false, + SocketFilename: "/tmp/memif1.sock", + }, }, - Mtu: 1500, } // Memif2 is a slave memif without IP address and to be xconnected with memif1. - memif2 = interfaces.Interfaces_Interface{ + memif2 = interfaces.Interface{ Name: "memif2", - Type: interfaces.InterfaceType_MEMORY_INTERFACE, + Type: interfaces.Interface_MEMIF, Enabled: true, - Memif: &interfaces.Interfaces_Interface_Memif{ - Id: 2, - Master: false, - SocketFilename: "/tmp/memif2.sock", + Mtu: 1500, + Link: &interfaces.Interface_Memif{ + Memif: &interfaces.MemifLink{ + Id: 2, + Master: false, + SocketFilename: "/tmp/memif2.sock", + }, }, - Mtu: 1500, } + // XConMemif1ToMemif2 defines xconnect between memifs. - XConMemif1ToMemif2 = l2.XConnectPairs_XConnectPair{ + XConMemif1ToMemif2 = l2.XConnectPair{ ReceiveInterface: memif1AsSlave.Name, TransmitInterface: memif2.Name, } // tap1Disabled is a disabled tap interface. 
- tap1Disabled = interfaces.Interfaces_Interface{ + tap1Disabled = interfaces.Interface{ Name: "tap1", - Type: interfaces.InterfaceType_TAP_INTERFACE, + Type: interfaces.Interface_TAP, Enabled: false, - Tap: &interfaces.Interfaces_Interface_Tap{ - HostIfName: "linux-tap1", + Link: &interfaces.Interface_Tap{ + Tap: &interfaces.TapLink{ + Version: 2, + HostIfName: "linux-tap1", + }, }, Mtu: 1500, } // tap1Enabled is an enabled tap1 interface. - tap1Enabled = interfaces.Interfaces_Interface{ + tap1Enabled = interfaces.Interface{ Name: "tap1", - Type: interfaces.InterfaceType_TAP_INTERFACE, + Type: interfaces.Interface_TAP, Enabled: true, - Tap: &interfaces.Interfaces_Interface_Tap{ - HostIfName: "linux-tap1", + Link: &interfaces.Interface_Tap{ + Tap: &interfaces.TapLink{ + Version: 2, + HostIfName: "linux-tap1", + }, }, Mtu: 1500, } - acl1 = acl.AccessLists_Acl{ - AclName: "acl1", - Rules: []*acl.AccessLists_Acl_Rule{ + acl1 = acl.ACL{ + Name: "acl1", + Rules: []*acl.ACL_Rule{ { - RuleName: "rule1", - AclAction: acl.AclAction_DENY, - Match: &acl.AccessLists_Acl_Rule_Match{ - IpRule: &acl.AccessLists_Acl_Rule_Match_IpRule{ - Ip: &acl.AccessLists_Acl_Rule_Match_IpRule_Ip{ - DestinationNetwork: "10.1.1.0/24", - SourceNetwork: "10.1.2.0/24", + Action: acl.ACL_Rule_DENY, + IpRule: &acl.ACL_Rule_IpRule{ + Ip: &acl.ACL_Rule_IpRule_Ip{ + DestinationNetwork: "10.1.1.0/24", + SourceNetwork: "10.1.2.0/24", + }, + Tcp: &acl.ACL_Rule_IpRule_Tcp{ + DestinationPortRange: &acl.ACL_Rule_IpRule_PortRange{ + LowerPort: 50, + UpperPort: 150, }, - Tcp: &acl.AccessLists_Acl_Rule_Match_IpRule_Tcp{ - DestinationPortRange: &acl.AccessLists_Acl_Rule_Match_IpRule_PortRange{ - LowerPort: 50, - UpperPort: 150, - }, - SourcePortRange: &acl.AccessLists_Acl_Rule_Match_IpRule_PortRange{ - LowerPort: 1000, - UpperPort: 2000, - }, + SourcePortRange: &acl.ACL_Rule_IpRule_PortRange{ + LowerPort: 1000, + UpperPort: 2000, }, }, }, }, }, - Interfaces: &acl.AccessLists_Acl_Interfaces{ + Interfaces: 
&acl.ACL_Interfaces{ Egress: []string{"tap1"}, }, } // loopback1 is an example of a loopback interface configuration (without IP address assigned). - loopback1 = interfaces.Interfaces_Interface{ + loopback1 = interfaces.Interface{ Name: "loopback1", - Type: interfaces.InterfaceType_SOFTWARE_LOOPBACK, + Type: interfaces.Interface_SOFTWARE_LOOPBACK, Enabled: true, Mtu: 1500, } // loopback1WithAddr extends loopback1 definition with an IP address. - loopback1WithAddr = interfaces.Interfaces_Interface{ + loopback1WithAddr = interfaces.Interface{ Name: "loopback1", - Type: interfaces.InterfaceType_SOFTWARE_LOOPBACK, + Type: interfaces.Interface_SOFTWARE_LOOPBACK, Enabled: true, Mtu: 1500, IpAddresses: []string{"10.0.0.1/24"}, @@ -336,7 +345,7 @@ var ( // BDLoopback1ToTap1 is a bridge domain with tap1 and loopback1 interfaces in it. // Loopback is set to be BVI. - BDLoopback1ToTap1 = l2.BridgeDomains_BridgeDomain{ + BDLoopback1ToTap1 = l2.BridgeDomain{ Name: "br1", Flood: false, UnknownUnicastFlood: false, @@ -344,7 +353,7 @@ var ( Learn: true, ArpTermination: false, MacAge: 0, /* means disable aging */ - Interfaces: []*l2.BridgeDomains_BridgeDomain_Interfaces{ + Interfaces: []*l2.BridgeDomain_Interface{ { Name: "loopback1", BridgedVirtualInterface: true, @@ -356,10 +365,9 @@ var ( } // routeThroughMemif1 is an example route configuration with memif1 being the next hop. - routeThroughMemif1 = l3.StaticRoutes_Route{ - Description: "Description", + routeThroughMemif1 = l3.Route{ VrfId: 0, - DstIpAddr: "192.168.2.1/32", + DstNetwork: "192.168.2.1/32", NextHopAddr: "192.168.1.1", // Memif1AsMaster Weight: 5, } diff --git a/idxvpp/README.md b/idxvpp/README.md deleted file mode 100644 index 0f9f1055e1..0000000000 --- a/idxvpp/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# NameToIdx - -The NameToIdx mapping is an extension of the NamedMapping mapping. 
It is -used by VPP Agent plugins that interact with VPP to map between VPP -interface handles (the `sw_if_index` entities) and the string-based -object identifiers used by northbound clients of the Agent. - -The mappings are used to implement the re-configuration and state -re-synchronization after failures. Furthermore, a mapping registry may -be shared between plugins. For example, `ifplugin` exposes the -`sw_if_index->name` mapping so that other plugins may reference interfaces -from objects that depend on them, such as bridge domains or IP routes. - -**API** - -*Mapping* - -Every plugin is allowed to allocate a new mapping using the function -`NewNameToIdxRW(logger, owner, title, indexfunction)`, giving in-memory-only -storage capabilities. Specifying an indexFunction allows to query mappings -by secondary indices computed from metadata. - -The `NameToIdxRW` interface supports read and write operations. While the -registry owner is allowed to do both reads and writes, only the read -interface `NameToIdx` is typically exposed to the other plugins. See for -example the `sw_if_index->name` mapping defined in `ifplugin`. Its read-only -interface supports index-by-name and name-by-index look-ups using the -`LookupIdx` and `LookupName` functions. Additionally, a client can use the -`Watch` function to watch for changes in the registry. The registry owner -can register a new mapping using the `RegisterName` function and remove -an existing mapping using the `UnregisterName` function. - -**Example** - -Here is a simplified code snippet from `ifplugin` showing how to use the -`sw_if_index->name` mapping: - -``` -// Plugin allocates new registries by its name and automatically becomes -// their owner. -const PluginID pluginapi.PluginName = "ifplugin" - -// InterfaceMeta defines the attributes of metadata as used by the -// interface plugin. 
-type InterfaceMeta struct { - InterfaceType intf.InterfaceType -} - -// Init initializes the interface plugin -func (plugin *InterfaceConfigurator) Init() { - // Allocate registry for sw_if_index to name mappings. - plugin.swIfIndexes, err = idxmap.NewNameToIdx(logger, PluginID, "sw_if_indexes", nil) - if err != nil { - // handle error - } - - // Continue with the initialization... -} - -// ConfigureInterface configures a new VPP or Linux interface. -func (plugin *InterfaceConfigurator) ConfigureInterface(iface *intf.Interfaces_Interface) { - // Create the interface ... - // ifIdx := ... - - - // Once a new interface is created in VPP/Linux, add new mapping into the registry - // if it doesn't exist yet - _, _, found := plugin.SwIfIndexes.LookupName(ifIdx) - if !found { - plugin.SwIfIndexes.RegisterName(iface.Name, ifIdx, &InterfaceMeta{iface.Type}) - } -} - -// DeleteInterface removes an existing VPP or Linux interface. -func (plugin *InterfaceConfigurator) DeleteInterface(iface *intf.Interfaces_Interface) { - // Delete the interface ... - - // When the interface gets deleted from VPP/Linux, the mapping must be removed as well. - plugin.SwIfIndexes.UnregisterName(iface.Name) -} -``` diff --git a/idxvpp/api.go b/idxvpp/api.go deleted file mode 100644 index 3f4452860e..0000000000 --- a/idxvpp/api.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package idxvpp - -import ( - "errors" - - "github.com/ligato/cn-infra/idxmap" -) - -// NameToIdxDto defines the Data Transfer Object (DTO) that carries a -// mapping between a logical object name defined at Agent's Northbound -// API and an sw_if_index defined at the VPP binary API. -type NameToIdxDto struct { - NameToIdxDtoWithoutMeta - // Auxiliary data related to mapping - Metadata interface{} -} - -// NameToIdxDtoWithoutMeta is the part of NameToIdxDto that can be reused -// by indices with typed metadata. -type NameToIdxDtoWithoutMeta struct { - idxmap.NamedMappingEvent - - Idx uint32 -} - -// Done is used to signal to the event producer that the event consumer -// has processed the event. -func (dto *NameToIdxDtoWithoutMeta) Done() error { - // TODO Consumer of the channel must signal that it processed the event. - return errors.New("Unimplemented") -} - -// IsDelete returns true if the mapping was deleted. -func (dto *NameToIdxDtoWithoutMeta) IsDelete() bool { // similarity to other APIs - return dto.Del -} - -// IsUpdate returns true if mapping metadata was updated -func (dto *NameToIdxDtoWithoutMeta) IsUpdate() bool { - return dto.Update -} - -// NameToIdxRW is the "owner API" to the NameToIdx registry. Using this -// API the owner adds (registers) new mappings to the registry or deletes -// (unregisters) existing mappings from the registry. -type NameToIdxRW interface { - NameToIdx - - // RegisterName registers a new name-to-index mapping. After - // registration, other plugins can use the "user's API" to lookup the - // index (by providing the name) or the name (by providing the index), - // and/or can be notified when the mapping is changed. Plugins will - // typically use the change notifications to modify the part of VPP - // configuration relevant to them and use the VPP binary API to push it - // to VPP. - RegisterName(name string, idx uint32, metadata interface{}) - - // UnregisterName removes a mapping from the registry. 
Other plugins - // can be notified and remove the relevant parts of their own respective - // VPP configurations and use the VPP binary API to clean it up from - // VPP. - UnregisterName(name string) (idx uint32, metadata interface{}, exists bool) - - // UpdateMetadata replaces metadata value in existing name-to-index - // mapping entry. If mapping associated with the name does not - // exist, it is not created. - UpdateMetadata(name string, metadata interface{}) (success bool) - - // Clear removes all entries present in the name-to-index mapping. - // This action does not trigger any notification. - Clear() -} - -// NameToIdx is the "user API" to the NameToIdx registry. It provides -// read-only access to name-to-index mappings stored in the registry. It -// is intended for plugins that need to lookup the mappings. -// -// For example, a static L2 FIB table entry refers to an underlying network -// interface, which is specified as a logical interface name at the L2 -// plugin's NB API. During configuration, the LookupIdx() function must -// be called to determine which VPP if index corresponds to the -// specified logical name. -type NameToIdx interface { - // GetRegistryTitle returns the title assigned to the registry. - GetRegistryTitle() string - - // LookupIdx retrieves a previously stored index for a particular - // name. Metadata can be nil. If the 'exists' flag is set to false - // upon return, the init value is undefined and it should be ignored. - LookupIdx(name string) (idx uint32, metadata interface{}, exists bool) - - // LookupName retrieves a previously stored name by particular index. - // Metadata can be nil. Name contains nonempty value only if exists==true. - // - // Principle: - // A. Registry stores mappings between names and indexes. API can optionally - // attach metadata to a particular name. TBD index can be 0... - // B. Metadata is needed for example in the ifplugin. 
This metadata is used in the following scenarios: - // - for caching of data (even data that belong to a different agent), - // - for remembering the last processed object, - // - for indexing the BD to which a particular interface belongs to (see bd_configurator or fib_configurator). - LookupName(idx uint32) (name string, metadata interface{}, exists bool) - - // LookupNameByMetadata returns all indexes that contain particular meta field with the provided value. - LookupNameByMetadata(key string, value string) []string - - // ListNames returns all names in the mapping. - ListNames() (names []string) - - // Watch subscribes to watching changes to NameToIndex mappings. - // NOTE: Watching NameToIndex mapping can have negative impact on performance in case - // the events are handled slowly. - // - // Example: - // - // func (plugin *Plugin) watchEvents(ctx context.Context) { - // Watch(PluginID, plugin.isubscriberChan) - // ... - // select { - // case ifIdxEv := <-plugin.isubscriberChan: - // - // if ifIdxEv.IsDelete() { - // plugin.ResolveDeletedInterface(ifIdxEv.Name) - // } else { - // plugin.ResolveDeletedInterface(ifIdxEv.Name) - // } - // ifIdxEv.Done() - // ... - // } - Watch(subscriber string, callback func(NameToIdxDto)) -} diff --git a/idxvpp/cacheutil/cache_helper_idx.go b/idxvpp/cacheutil/cache_helper_idx.go deleted file mode 100644 index a9d34c6a23..0000000000 --- a/idxvpp/cacheutil/cache_helper_idx.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package cacheutil - -import ( - "github.com/gogo/protobuf/proto" - "github.com/ligato/cn-infra/datasync" - "github.com/ligato/vpp-agent/idxvpp" -) - -// CacheHelper is a helper the implementation of which is reused among multiple typesafe Caches. -// Beware: index stored in cached mapping is not valid. The meaningful values are the name and metadata. -type CacheHelper struct { - IDX idxvpp.NameToIdxRW - Prefix string - DataPrototype proto.Message - ParseName func(key string) (name string, err error) -} - -const placeHolderIndex uint32 = 0 - -// DoWatching is supposed to be used as a go routine. It selects the data from the channels in arguments. -func (helper *CacheHelper) DoWatching(resyncName string, watcher datasync.KeyValProtoWatcher) { - changeChan := make(chan datasync.ChangeEvent, 100) - resyncChan := make(chan datasync.ResyncEvent, 100) - - watcher.Watch(resyncName, changeChan, resyncChan, helper.Prefix) - - for { - select { - case resyncEv := <-resyncChan: - err := helper.DoResync(resyncEv) - resyncEv.Done(err) - case dataChng := <-changeChan: - err := helper.DoChange(dataChng) - dataChng.Done(err) - } - } -} - -// DoChange calls: -// - RegisterName in case of db.Put -// - UnregisterName in case of data.Del -func (helper *CacheHelper) DoChange(dataChng datasync.ChangeEvent) error { - var err error - switch dataChng.GetChangeType() { - case datasync.Put: - current := proto.Clone(helper.DataPrototype) - dataChng.GetValue(current) - name, err := helper.ParseName(dataChng.GetKey()) - if err == nil { - helper.IDX.RegisterName(name, placeHolderIndex, current) - } - case datasync.Delete: - name, err := helper.ParseName(dataChng.GetKey()) - if err == nil { - helper.IDX.UnregisterName(name) - } - } - return err -} - -// DoResync lists keys&values in ResyncEvent and then: -// - RegisterName (for names that are a part of ResyncEvent) -// - UnregisterName (for 
names that are not a part of ResyncEvent) -func (helper *CacheHelper) DoResync(resyncEv datasync.ResyncEvent) error { - var wasError error - - ifaces, found := resyncEv.GetValues()[helper.Prefix] - if found { - // Step 1: fill the existing items. - resyncNames := map[string]interface{}{} - for { - item, stop := ifaces.GetNext() - if stop { - break - } - ifaceName, err := helper.ParseName(item.GetKey()) - if err != nil { - wasError = err - } else { - current := proto.Clone(helper.DataPrototype) - item.GetValue(current) - helper.IDX.RegisterName(ifaceName, placeHolderIndex, current) - resyncNames[ifaceName] = nil - } - } - - // Step 2: - existingNames := []string{} //TODO - for _, existingName := range existingNames { - if _, found := resyncNames[existingName]; !found { - helper.IDX.UnregisterName(existingName) - } - } - } - return wasError -} - -func (helper *CacheHelper) String() string { - return helper.Prefix -} diff --git a/idxvpp/cacheutil/doc.go b/idxvpp/cacheutil/doc.go deleted file mode 100644 index 77f362cf26..0000000000 --- a/idxvpp/cacheutil/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package cacheutil is a base implementation of name-to-index cache on the -// top of which all typesafe caches are built. -package cacheutil diff --git a/idxvpp/doc.go b/idxvpp/doc.go deleted file mode 100644 index a2a2e0e993..0000000000 --- a/idxvpp/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package idxvpp implements name-to-index mapping used by VPP plugins to keep -// a map between VPP interface handles and northbound string-based -// identifiers. -package idxvpp diff --git a/idxvpp/nametoidx/config.go b/idxvpp/nametoidx/config.go deleted file mode 100644 index e93e663489..0000000000 --- a/idxvpp/nametoidx/config.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nametoidx - -import ( - "io/ioutil" - "time" - - "github.com/ghodss/yaml" -) - -// PersistentStorageConfig defines the configuration section dedicated for persistent storage. -type PersistentStorageConfig struct { - Location string `json:"location"` - SyncInterval time.Duration `json:"sync-interval"` - MaxSyncStartDelay time.Duration `json:"max-sync-start-delay"` -} - -// Config defines configuration for index-to-name maps. -type Config struct { - PersistentStorage PersistentStorageConfig `json:"persistent-storage"` -} - -const ( - /* Default location for the persistent storage of index-name maps */ - defaultPersistentStorageLocation = "/var/vnf-agent/idxmap" - - /* This is the default value for how often (in nanoseconds) to flush the underlying registry into the persistent storage. */ - defaultSyncInterval = 300 * time.Millisecond - - /* To evenly distribute I/O load, the start of the periodic synchronization for a given - index-name map gets delayed by a random time duration. This constant defines the maximum - allowed delay in nanoseconds as used by default. */ - defaultMaxSyncStartDelay = 3 * time.Second -) - -// ConfigFromFile loads the idxmap configuration from the specified file. -// If the specified file exists and contains valid configuration, the parsed configuration is returned. -// In case of an error, the default configuration is returned instead. 
-func ConfigFromFile(fpath string) (*Config, error) { - // default configuration - persistentStorageConfig := PersistentStorageConfig{} - persistentStorageConfig.Location = defaultPersistentStorageLocation - persistentStorageConfig.SyncInterval = defaultSyncInterval - persistentStorageConfig.MaxSyncStartDelay = defaultMaxSyncStartDelay - config := &Config{} - config.PersistentStorage = persistentStorageConfig - - if fpath == "" { - return config, nil - } - - b, err := ioutil.ReadFile(fpath) - if err != nil { - return config, err - } - - yamlConfig := Config{} - err = yaml.Unmarshal(b, &yamlConfig) - if err != nil { - return config, err - } - - if yamlConfig.PersistentStorage.Location != "" { - config.PersistentStorage.Location = yamlConfig.PersistentStorage.Location - } - if yamlConfig.PersistentStorage.SyncInterval != 0 { - config.PersistentStorage.SyncInterval = yamlConfig.PersistentStorage.SyncInterval - } - if yamlConfig.PersistentStorage.MaxSyncStartDelay != 0 { - config.PersistentStorage.MaxSyncStartDelay = yamlConfig.PersistentStorage.MaxSyncStartDelay - } - return config, nil -} diff --git a/idxvpp/nametoidx/doc.go b/idxvpp/nametoidx/doc.go deleted file mode 100644 index ca1f7497eb..0000000000 --- a/idxvpp/nametoidx/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package nametoidx is an in-memory implementation of the name-to-index -// mapping registry. -package nametoidx diff --git a/idxvpp/nametoidx/inmemory_name_to_idx_mapping.go b/idxvpp/nametoidx/inmemory_name_to_idx_mapping.go deleted file mode 100644 index ef8d1f938b..0000000000 --- a/idxvpp/nametoidx/inmemory_name_to_idx_mapping.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nametoidx - -import ( - "strconv" - "time" - - "github.com/ligato/cn-infra/idxmap" - "github.com/ligato/cn-infra/idxmap/mem" - "github.com/ligato/cn-infra/infra" - "github.com/ligato/cn-infra/logging" - "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/vpp-agent/idxvpp" -) - -const idxKey = "idxKey" - -type nameToIdxMeta struct { - // added index - idx uint32 - // original user's meta data - meta interface{} -} - -type nameToIdxMem struct { - logging.Logger - internal idxmap.NamedMappingRW -} - -// NewNameToIdx creates a new instance implementing NameToIdxRW. -// Argument indexFunction may be nil if you do not want to use secondary indexes. -func NewNameToIdx(logger logging.Logger, title string, - indexFunction func(interface{}) map[string][]string) idxvpp.NameToIdxRW { - m := nameToIdxMem{} - m.Logger = logger - m.internal = mem.NewNamedMapping(logger, title, func(meta interface{}) map[string][]string { - var idxs map[string][]string - - internalMeta, ok := meta.(*nameToIdxMeta) - if !ok { - return nil - } - if indexFunction != nil { - idxs = indexFunction(internalMeta.meta) - } - if idxs == nil { - idxs = map[string][]string{} - } - internal := indexInternalMetadata(meta) - for k, v := range internal { - idxs[k] = v - } - return idxs - }) - return &m -} - -// RegisterName inserts or updates index and metadata for the given name. 
-func (mem *nameToIdxMem) RegisterName(name string, idx uint32, metadata interface{}) { - mem.internal.Put(name, &nameToIdxMeta{idx, metadata}) -} - -// UnregisterName removes data associated with the given name. -func (mem *nameToIdxMem) UnregisterName(name string) (idx uint32, metadata interface{}, found bool) { - meta, found := mem.internal.Delete(name) - if found { - if internalMeta, ok := meta.(*nameToIdxMeta); ok { - return internalMeta.idx, internalMeta.meta, found - } - } - return -} - -// Update metadata in mapping entry associated with the provided name. -func (mem *nameToIdxMem) UpdateMetadata(name string, metadata interface{}) (success bool) { - meta, found := mem.internal.GetValue(name) - if found { - if internalMeta, ok := meta.(*nameToIdxMeta); ok { - return mem.internal.Update(name, &nameToIdxMeta{internalMeta.idx, metadata}) - } - } - return false -} - -// Clear removes all entries from the mapping -func (mem *nameToIdxMem) Clear() { - mem.internal.Clear() -} - -// GetRegistryTitle returns a name assigned to mapping. -func (mem *nameToIdxMem) GetRegistryTitle() string { - return mem.internal.GetRegistryTitle() -} - -// LookupIdx allows to retrieve previously stored index for particular name. -func (mem *nameToIdxMem) LookupIdx(name string) (uint32, interface{}, bool) { - meta, found := mem.internal.GetValue(name) - if found { - if internalMeta, ok := meta.(*nameToIdxMeta); ok { - return internalMeta.idx, internalMeta.meta, found - } - } - return 0, nil, false -} - -// LookupName looks up the name associated with the given softwareIfIndex. 
-func (mem *nameToIdxMem) LookupName(idx uint32) (name string, metadata interface{}, exists bool) { - res := mem.internal.ListNames(idxKey, strconv.FormatUint(uint64(idx), 10)) - if len(res) != 1 { - return - } - m, found := mem.internal.GetValue(res[0]) - if found { - if internalMeta, ok := m.(*nameToIdxMeta); ok { - return res[0], internalMeta.meta, found - } - } - return -} - -func (mem *nameToIdxMem) LookupNameByMetadata(key string, value string) []string { - return mem.internal.ListNames(key, value) -} - -// ListNames returns all names in the mapping. -func (mem *nameToIdxMem) ListNames() (names []string) { - return mem.internal.ListAllNames() -} - -// Watch starts monitoring a change in the mapping. When yhe change occurs, the callback is called. -// ToChan utility can be used to receive changes through channel. -func (mem *nameToIdxMem) Watch(subscriber string, callback func(idxvpp.NameToIdxDto)) { - watcher := func(dto idxmap.NamedMappingGenericEvent) { - internalMeta, ok := dto.Value.(*nameToIdxMeta) - if !ok { - return - } - msg := idxvpp.NameToIdxDto{ - NameToIdxDtoWithoutMeta: idxvpp.NameToIdxDtoWithoutMeta{ - NamedMappingEvent: dto.NamedMappingEvent, - Idx: internalMeta.idx}, - Metadata: internalMeta.meta, - } - callback(msg) - } - mem.internal.Watch(infra.PluginName(subscriber), watcher) -} - -// ToChan is an utility that allows to receive notification through a channel. -// If a notification can not be delivered until timeout, it is dropped. 
-func ToChan(ch chan idxvpp.NameToIdxDto) func(dto idxvpp.NameToIdxDto) { - return func(dto idxvpp.NameToIdxDto) { - select { - case ch <- dto: - case <-time.After(idxmap.DefaultNotifTimeout): - logrus.DefaultLogger().Warn("Unable to deliver notification") - } - } -} - -func indexInternalMetadata(metaData interface{}) map[string][]string { - indexes := map[string][]string{} - internalMeta, ok := metaData.(*nameToIdxMeta) - if !ok || internalMeta == nil { - return indexes - } - - idx := internalMeta.idx - indexes[idxKey] = []string{strconv.FormatUint(uint64(idx), 10)} - - return indexes -} diff --git a/idxvpp/nametoidx/inmemory_scenario_test.go b/idxvpp/nametoidx/inmemory_scenario_test.go deleted file mode 100644 index 5ae8c6d61d..0000000000 --- a/idxvpp/nametoidx/inmemory_scenario_test.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nametoidx - -import ( - "testing" - - "strconv" - - "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/vpp-agent/idxvpp" - "github.com/onsi/gomega" -) - -const ( - idx1 = 1 - idx2 = 2 - idx3 = 3 -) - -var ( - eth0 MappingName = "eth0" - eth1 MappingName = "eth1" - eth2 MappingName = "eth2" -) - -func InMemory(reloaded bool) (idxvpp.NameToIdxRW, error) { - return NewNameToIdx(logrus.DefaultLogger(), "test", nil), nil -} - -func Test01UnregisteredMapsToNothing(t *testing.T) { - Given(t).NameToIdx(InMemory, nil). 
- When().Name(eth1).IsUnRegistered(). - Then().Name(eth1).MapsToNothing(). - And().Notification(eth1, Write).IsNotExpected() -} - -func Test02RegisteredReturnsIdx(t *testing.T) { - Given(t).NameToIdx(InMemory, nil). - When().Name(eth1).IsRegistered(idx1). - Then().Name(eth1).MapsTo(idx1). - And().Notification(eth1, Write).IsExpectedFor(idx1) -} - -func Test03RegFirstThenUnreg(t *testing.T) { - Given(t).NameToIdx(InMemory, map[MappingName]MappingIdx{eth1: idx1}). - When().Name(eth1).IsUnRegistered(). - Then().Name(eth1).MapsToNothing(). - And().Notification(eth1, Del).IsExpectedFor(idx1) -} - -func Test03Eth0RegPlusEth1Unreg(t *testing.T) { - Given(t).NameToIdx(InMemory, map[MappingName]MappingIdx{eth0: idx1, eth1: idx2}). - When().Name(eth1).IsUnRegistered(). - Then().Name(eth1).MapsToNothing(). - And().Notification(eth1, Del).IsExpectedFor(idx2). - And().Name(eth0).MapsTo(idx1). - And().Notification(eth0, Write).IsNotExpected() //because watch is registered after given keyword -} - -func Test04RegTwiceSameNameWithDifferentIdx(t *testing.T) { - Given(t).NameToIdx(InMemory, nil). - When().Name(eth1).IsRegistered(idx1). - Then().Name(eth1).MapsTo(idx1). //Notif eth1, idx1 - And().Notification(eth1, Write).IsExpectedFor(idx1). - When().Name(eth1).IsRegistered(idx2). - Then().Name(eth1).MapsTo(idx2). 
//Notif eth1, idx1 - And().Notification(eth1, Write).IsExpectedFor(idx2) -} - -const ( - flagMetaKey = "flag" - valsMetaKey = "vals" -) - -type metaInformation struct { - flag bool - vals []string -} - -func createIdx(meta interface{}) map[string][]string { - typed, ok := meta.(*metaInformation) - if !ok { - return nil - } - - return map[string][]string{ - flagMetaKey: {strconv.FormatBool(typed.flag)}, - valsMetaKey: typed.vals, - } -} - -func TestIndexedMetadata(t *testing.T) { - gomega.RegisterTestingT(t) - idxm := NewNameToIdx(logrus.DefaultLogger(), "title", createIdx) - - res := idxm.LookupNameByMetadata(flagMetaKey, "true") - gomega.Expect(res).To(gomega.BeNil()) - - meta1 := &metaInformation{ - flag: true, - vals: []string{"abc", "def", "xyz"}, - } - meta2 := &metaInformation{ - flag: false, - vals: []string{"abc", "klm", "opq"}, - } - meta3 := &metaInformation{ - flag: true, - vals: []string{"jkl"}, - } - - idxm.RegisterName(string(eth0), idx1, meta1) - idxm.RegisterName(string(eth1), idx2, meta2) - idxm.RegisterName(string(eth2), idx3, meta3) - - res = idxm.LookupNameByMetadata(flagMetaKey, "false") - gomega.Expect(res).NotTo(gomega.BeNil()) - gomega.Expect(res[0]).To(gomega.BeEquivalentTo(eth1)) - - res = idxm.LookupNameByMetadata(flagMetaKey, "true") - gomega.Expect(len(res)).To(gomega.BeEquivalentTo(2)) - gomega.Expect(res).To(gomega.ContainElement(string(eth0))) - gomega.Expect(res).To(gomega.ContainElement(string(eth2))) - - res = idxm.LookupNameByMetadata(valsMetaKey, "abc") - gomega.Expect(len(res)).To(gomega.BeEquivalentTo(2)) - gomega.Expect(res).To(gomega.ContainElement(string(eth0))) - gomega.Expect(res).To(gomega.ContainElement(string(eth1))) - - res = idxm.LookupNameByMetadata(valsMetaKey, "jkl") - gomega.Expect(len(res)).To(gomega.BeEquivalentTo(1)) - gomega.Expect(res[0]).To(gomega.BeEquivalentTo(eth2)) - - idxm.UnregisterName(string(eth0)) - res = idxm.LookupNameByMetadata(flagMetaKey, "true") - 
gomega.Expect(len(res)).To(gomega.BeEquivalentTo(1)) - gomega.Expect(res[0]).To(gomega.BeEquivalentTo(eth2)) - -} - -func TestOldIndexRemove(t *testing.T) { - gomega.RegisterTestingT(t) - idxm := NewNameToIdx(logrus.DefaultLogger(), "title", nil) - - idxm.RegisterName(string(eth0), idx1, nil) - - idx, _, found := idxm.LookupIdx(string(eth0)) - gomega.Expect(found).To(gomega.BeTrue()) - gomega.Expect(idx).To(gomega.BeEquivalentTo(idx1)) - - name, _, found := idxm.LookupName(idx1) - gomega.Expect(found).To(gomega.BeTrue()) - gomega.Expect(name).To(gomega.BeEquivalentTo(string(name))) - - idxm.RegisterName(string(eth0), idx2, nil) - - idx, _, found = idxm.LookupIdx(string(eth0)) - gomega.Expect(found).To(gomega.BeTrue()) - gomega.Expect(idx).To(gomega.BeEquivalentTo(idx2)) - - name, _, found = idxm.LookupName(idx2) - gomega.Expect(found).To(gomega.BeTrue()) - gomega.Expect(name).To(gomega.BeEquivalentTo(string(name))) - - name, _, found = idxm.LookupName(idx1) - gomega.Expect(found).To(gomega.BeFalse()) - gomega.Expect(name).To(gomega.BeEquivalentTo("")) -} - -func TestUpdateMetadata(t *testing.T) { - gomega.RegisterTestingT(t) - idxm := NewNameToIdx(logrus.DefaultLogger(), "title", nil) - - idxm.RegisterName(string(eth0), idx1, nil) - - idx, meta, found := idxm.LookupIdx(string(eth0)) - gomega.Expect(found).To(gomega.BeTrue()) - gomega.Expect(idx).To(gomega.BeEquivalentTo(idx1)) - gomega.Expect(meta).To(gomega.BeNil()) - - success := idxm.UpdateMetadata(string(eth0), "dummy-meta") - gomega.Expect(success).To(gomega.BeTrue()) - - idx, meta, found = idxm.LookupIdx(string(eth0)) - gomega.Expect(found).To(gomega.BeTrue()) - gomega.Expect(idx).To(gomega.BeEquivalentTo(idx1)) - gomega.Expect(meta).ToNot(gomega.BeNil()) -} - -func TestClearMapping(t *testing.T) { - gomega.RegisterTestingT(t) - idxm := NewNameToIdx(logrus.DefaultLogger(), "title", nil) - - idxm.RegisterName(string(eth0), idx1, nil) - idxm.RegisterName(string(eth1), idx2, nil) - 
idxm.RegisterName(string(eth2), idx3, nil) - - idx, _, found := idxm.LookupIdx(string(eth0)) - gomega.Expect(found).To(gomega.BeTrue()) - gomega.Expect(idx).To(gomega.BeEquivalentTo(idx1)) - - idx, _, found = idxm.LookupIdx(string(eth1)) - gomega.Expect(found).To(gomega.BeTrue()) - gomega.Expect(idx).To(gomega.BeEquivalentTo(idx2)) - - idx, _, found = idxm.LookupIdx(string(eth2)) - gomega.Expect(found).To(gomega.BeTrue()) - gomega.Expect(idx).To(gomega.BeEquivalentTo(idx3)) - - idxm.Clear() - - _, _, found = idxm.LookupIdx(string(eth0)) - gomega.Expect(found).To(gomega.BeFalse()) - - _, _, found = idxm.LookupIdx(string(eth1)) - gomega.Expect(found).To(gomega.BeFalse()) - - _, _, found = idxm.LookupIdx(string(eth2)) - gomega.Expect(found).To(gomega.BeFalse()) -} diff --git a/idxvpp/nametoidx/testing.go b/idxvpp/nametoidx/testing.go deleted file mode 100644 index f397f5f3ac..0000000000 --- a/idxvpp/nametoidx/testing.go +++ /dev/null @@ -1,257 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nametoidx - -import ( - "github.com/ligato/vpp-agent/idxvpp" - - "testing" - "time" - - . "github.com/onsi/gomega" -) - -// Factory defines type of a function used to create new instances of a name-to-index mapping. -type Factory func(reloaded bool) (idxvpp.NameToIdxRW, error) - -// MappingName defines type for names in the mappings. 
-type MappingName string - -// MappingIdx defines type for indexes in the mappings. -type MappingIdx uint32 - -// GivenKW defines the initial state of a testing scenario. -type GivenKW struct { - nameToIdxFactory Factory - plug1NameIdx idxvpp.NameToIdxRW - plug1NameIdxChan chan idxvpp.NameToIdxDto -} - -// When defines the actions/changes done to the tested registry. -type When struct { - given *GivenKW -} - -// Then defines the actions/changes expected from the tested registry. -type Then struct { - when *When -} - -// WhenName defines the actions/changes done to a registry for a given name. -type WhenName struct { - when *When - name MappingName -} - -// ThenName defines actions/changes expected from the registry for a given name. -type ThenName struct { - then *Then - name MappingName -} - -// Given prepares the initial state of a testing scenario. -func Given(t *testing.T) *GivenKW { - RegisterTestingT(t) - - return &GivenKW{} -} - -// When starts when-clause. -func (given *GivenKW) When() *When { - return &When{given: given} -} - -// NameToIdx sets up a given registry for the tested scenario. -func (given *GivenKW) NameToIdx(idxMapFactory Factory, reg map[MappingName]MappingIdx) *GivenKW { - Expect(given.nameToIdxFactory).Should(BeNil()) - Expect(given.plug1NameIdx).Should(BeNil()) - var err error - given.nameToIdxFactory = idxMapFactory - given.plug1NameIdx, err = idxMapFactory(false) - Expect(err).Should(BeNil()) - - for name, idx := range reg { - n := string(name) - given.plug1NameIdx.RegisterName(n, uint32(idx), nil) - } - - // Registration of given mappings is done before watch (therefore there will be no notifications). 
- given.watchNameIdx() - return given -} - -func (given *GivenKW) watchNameIdx() { - plug1NameIdxChan := make(chan idxvpp.NameToIdxDto, 1000) - given.plug1NameIdx.Watch("plugin2", ToChan(plug1NameIdxChan)) - given.plug1NameIdxChan = make(chan idxvpp.NameToIdxDto, 1000) - go func() { - for { - v := <-plug1NameIdxChan - given.plug1NameIdxChan <- v - v.Done() // We can mark event as processed by calling Done() because we want to have events buffered in given.plug1NameIdxChan (because of assertions). - } - }() - -} - -// Then starts a then-clause. -func (when *When) Then() *Then { - return &Then{when: when} -} - -// NameToIdxIsReloaded simulates a full registry reload. -func (when *When) NameToIdxIsReloaded() *When { - Expect(when.given.nameToIdxFactory).ShouldNot(BeNil()) - Expect(when.given.plug1NameIdx).ShouldNot(BeNil()) - - when.given.plug1NameIdx = nil - - var err error - when.given.plug1NameIdx, err = when.given.nameToIdxFactory(true) - Expect(err).Should(BeNil()) - return when -} - -// Name associates when-clause with a given name in the registry. -func (when *When) Name(name MappingName) *WhenName { - return &WhenName{when: when, name: name} -} - -// IsUnRegistered un-registers a given name from the registry. -func (whenName *WhenName) IsUnRegistered() *WhenName { - name := string(whenName.name) - whenName.when.given.plug1NameIdx.UnregisterName(name) - - return whenName -} - -// Then starts a then-clause. -func (whenName *WhenName) Then() *Then { - return &Then{when: whenName.when} -} - -// IsRegistered registers a given name-index pair into the registry. -func (whenName *WhenName) IsRegistered(idx MappingIdx) *WhenName { - name := string(whenName.name) - whenName.when.given.plug1NameIdx.RegisterName(name, uint32(idx), nil) - return whenName -} - -// And connects two when-clauses. -func (whenName *WhenName) And() *When { - return whenName.when -} - -// Name associates then-clause with a given name in the registry. 
-func (then *Then) Name(name MappingName) *ThenName { - return &ThenName{then: then, name: name} -} - -// MapsToNothing verifies that a given name really maps to nothing. -func (thenName *ThenName) MapsToNothing() *ThenName { - name := string(thenName.name) - _, _, exist := thenName.then.when.given.plug1NameIdx.LookupIdx(name) - Expect(exist).Should(BeFalse()) - - return thenName -} - -//MapsTo asserts the response of LookupIdx, LookupName and message in the channel. -func (thenName *ThenName) MapsTo(expectedIdx MappingIdx) *ThenName { - name := string(thenName.name) - retIdx, _, exist := thenName.then.when.given.plug1NameIdx.LookupIdx(name) - Expect(exist).Should(BeTrue()) - Expect(retIdx).Should(Equal(uint32(expectedIdx))) - - retName, _, exist := thenName.then.when.given.plug1NameIdx.LookupName(retIdx) - Expect(exist).Should(BeTrue()) - Expect(retName).ShouldNot(BeNil()) - Expect(retName).Should(Equal(name)) - - return thenName -} - -// Name associates then-clause with a given name in the registry. -func (thenName *ThenName) Name(name MappingName) *ThenName { - return &ThenName{then: thenName.then, name: name} -} - -// And connects two then-clauses. -func (thenName *ThenName) And() *Then { - return thenName.then -} - -// When starts a when-clause. -func (thenName *ThenName) When() *When { - return thenName.then.when -} - -// ThenNotification defines notification parameters for a then-clause. -type ThenNotification struct { - then *Then - name MappingName - del DelWriteEnum -} - -// DelWriteEnum defines type for the flag used to tell if a mapping was removed or not. -type DelWriteEnum bool - -// Del defines the value of a notification flag used when a mapping was removed. -const Del DelWriteEnum = true - -// Write defines the value of a notification flag used when a mapping was created. -const Write DelWriteEnum = false - -// Notification starts a section of then-clause referring to a given notification. 
-func (then *Then) Notification(name MappingName, del DelWriteEnum) *ThenNotification { - return &ThenNotification{then: then, name: name, del: del} -} - -// IsNotExpected verifies that a given notification was indeed NOT received. -func (thenNotif *ThenNotification) IsNotExpected() *ThenNotification { - _, exist := thenNotif.receiveChan() - Expect(exist).Should(BeFalse()) - return thenNotif -} - -// IsExpectedFor verifies that a given notification was really received. -func (thenNotif *ThenNotification) IsExpectedFor(idx MappingIdx) *ThenNotification { - notif, exist := thenNotif.receiveChan() - Expect(exist).Should(BeTrue()) - //Expect(notif.Idx).Should(BeEquivalentTo(uint32(int))) - Expect(notif.Del).Should(BeEquivalentTo(bool(thenNotif.del))) - return thenNotif -} - -// And connects two then-clauses. -func (thenNotif *ThenNotification) And() *Then { - return thenNotif.then -} - -// When starts a when-clause. -func (thenNotif *ThenNotification) When() *When { - return thenNotif.then.when -} - -func (thenNotif *ThenNotification) receiveChan() (*idxvpp.NameToIdxDto, bool) { - ch := thenNotif.then.when.given.plug1NameIdxChan - var x idxvpp.NameToIdxDto - select { - case x = <-ch: - return &x, true - case <-time.After(time.Second * 1): - return nil, false - } -} diff --git a/idxvpp/persist/persistent_name_mapping.go b/idxvpp/persist/persistent_name_mapping.go deleted file mode 100644 index 5d45bfad26..0000000000 --- a/idxvpp/persist/persistent_name_mapping.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package persist asynchronously writes changes in the map (name->idx) to -// file. -package persist - -import ( - "encoding/json" - "flag" - "io/ioutil" - "math/rand" - "os" - "path" - "sync" - "time" - - "github.com/ligato/cn-infra/logging" - log "github.com/ligato/cn-infra/logging/logrus" - "github.com/ligato/vpp-agent/idxvpp" - "github.com/ligato/vpp-agent/idxvpp/nametoidx" -) - -var ( - // configuration file path - idxMapConfigFile string - - // configuration common to all mappings - gConfig *nametoidx.Config -) - -// init serves for parsing the program's arguments. -func init() { - flag.StringVar(&idxMapConfigFile, "idxmap-config", "", - "Location of the configuration file for index-to-name maps; also set via 'IDXMAP_CONFIG' env variable.") - - if gConfig == nil { - var err error - gConfig, err = nametoidx.ConfigFromFile(idxMapConfigFile) - if err != nil { - log.DefaultLogger().WithFields(logging.Fields{"filepath": idxMapConfigFile, "err": err}).Warn( - "Failed to load idxmap configuration file") - } else { - log.DefaultLogger().WithFields(logging.Fields{"filepath": idxMapConfigFile}).Debug( - "Loaded idxmap configuration file") - } - } -} - -// Marshalling loads the config and starts watching for changes. 
-func Marshalling(agentLabel string, idxMap idxvpp.NameToIdx, loadedFromFile idxvpp.NameToIdxRW) error { - log.DefaultLogger().Debug("Persistence") - - changes := make(chan idxvpp.NameToIdxDto, 1000) - fileName := idxMap.GetRegistryTitle() + ".json" - persist := NewNameToIdxPersist(fileName, gConfig, agentLabel, changes) - err := persist.Init() - if err != nil { - return err - } - - idxMap.Watch("idxpersist", nametoidx.ToChan(changes)) - - err = persist.loadIdxMapFile(loadedFromFile) - if err != nil { - return err - } - - return nil -} - -// NameToIdxPersist is a decorator for NameToIdxRW implementing persistent storage. -type NameToIdxPersist struct { - // registrations notifies about the changes made in the mapping to be persisted - registrations chan idxvpp.NameToIdxDto - - // Configuration associated with this mapping. - // Unless this is a unit test, it is the same as the global configuration. - config *nametoidx.Config - - // (de)serialization of mapping data - nameToIdx map[string]uint32 - - // persistent storage location - fileDir string - filePath string - - // Synchronization between the underlying registry and the persistent storage. - syncLock sync.Mutex - syncCh chan bool - syncAckCh chan error -} - -// NewNameToIdxPersist initializes decorator for persistent storage of index-to-name mapping. -func NewNameToIdxPersist(fileName string, config *nametoidx.Config, namespace string, - registrations chan idxvpp.NameToIdxDto) *NameToIdxPersist { - - persist := NameToIdxPersist{} - persist.config = config - persist.nameToIdx = map[string]uint32{} - - persist.fileDir = path.Join(persist.config.PersistentStorage.Location, namespace) - persist.filePath = path.Join(persist.fileDir, fileName) - - persist.registrations = registrations - - return &persist -} - -// Init starts Go routine that watches chan idxvpp.NameToIdxDto. 
-func (persist *NameToIdxPersist) Init() error { - persist.syncCh = make(chan bool) - persist.syncAckCh = make(chan error) - - offset := rand.Int63n(int64(persist.config.PersistentStorage.MaxSyncStartDelay)) - go persist.periodicIdxMapSync(time.Duration(offset)) - - return nil -} - -// loadIdxMapFile loads persistently stored entries of the associated registry. -func (persist *NameToIdxPersist) loadIdxMapFile(loadedFromFile idxvpp.NameToIdxRW) error { - if _, err := os.Stat(persist.filePath); os.IsNotExist(err) { - log.DefaultLogger().WithFields(logging.Fields{"Filepath": persist.filePath}).Debug( - "Persistent storage for name-to-index mapping doesn't exist yet") - return nil - } - idxMapData, err := ioutil.ReadFile(persist.filePath) - if err != nil { - return err - } - - err = json.Unmarshal(idxMapData, &persist.nameToIdx) - if err != nil { - return err - } - - for name, idx := range persist.nameToIdx { - loadedFromFile.RegisterName(name, idx, nil) - } - return nil -} - -// periodicIdxMapSync periodically synchronizes the underlying registry with the persistent storage. -func (persist *NameToIdxPersist) periodicIdxMapSync(offset time.Duration) error { - for { - select { - case reg := <-persist.registrations: - if reg.Del { - persist.unregisterName(reg.Name) - } else { - persist.registerName(reg.Name, reg.Idx) - } - case <-persist.syncCh: - persist.syncAckCh <- persist.syncMapping() - case <-time.After(persist.config.PersistentStorage.SyncInterval + offset): - offset = 0 - err := persist.syncMapping() - if err != nil { - log.DefaultLogger().WithFields(logging.Fields{"Error": err, "Filepath": persist.filePath}).Error( - "Failed to sync idxMap with the persistent storage") - } - } - } -} - -// syncMapping updates the persistent storage with the new mappings. -// Current implementation simply re-builds the file content from the scratch. 
-// TODO: NICE-TO-HAVE incremental update -func (persist *NameToIdxPersist) syncMapping() error { - persist.syncLock.Lock() - defer persist.syncLock.Unlock() - - idxMapData, err := json.Marshal(persist.nameToIdx) - if err != nil { - return err - } - - err = os.MkdirAll(persist.fileDir, 0777) - if err != nil { - return err - } - - //log.Debug("Persist len=", len(persist.nameToIdx)," ", persist.filePath) - - return ioutil.WriteFile(persist.filePath, idxMapData, 0644) -} - -// RegisterName from NameToIdxPersist allows to add a name-to-index mapping into both the underlying registry and -// the persistent storage (with some delay in synchronization). -func (persist *NameToIdxPersist) registerName(name string, idx uint32) { - persist.nameToIdx[name] = idx -} - -// UnregisterName from NameToIdxPersist allows to remove mapping from both -// the underlying registry and the persistent storage. -func (persist *NameToIdxPersist) unregisterName(name string) { - delete(persist.nameToIdx, name) -} - -// Close triggers explicit synchronization and closes the underlying mapping. -func (persist *NameToIdxPersist) Close() error { - err := persist.Sync() - if err != nil { - return err - } - return nil -} - -// Sync triggers immediate synchronization between the underlying registry and the persistent storage. -// The function doesn't return until the operation has fully finished. -func (persist *NameToIdxPersist) Sync() error { - persist.syncCh <- true - return <-persist.syncAckCh -} diff --git a/pkg/idxvpp/README.md b/pkg/idxvpp/README.md new file mode 100644 index 0000000000..dbe06560f7 --- /dev/null +++ b/pkg/idxvpp/README.md @@ -0,0 +1,103 @@ +# NameToIndex + +Note: idxvpp package will completely replace idxvpp once all plugins are based on +`KVScheduler`. + +The NameToIndex mapping is an extension of the NamedMapping mapping. 
It is +used by VPP Agent plugins that interact with VPP/Linux to map between items +with integer handles and the string-based object identifiers used by northbound +clients of the Agent. + +The mappings are primarily used to match VPP dumps with the northbound +configuration. This is essential for the re-configuration and state +re-synchronization after failures. +Furthermore, a mapping registry may be shared between plugins. +For example, `ifplugin` exposes a `sw_if_index->iface_meta` mapping (extended +`NameToIndex`) so that other plugins may reference interfaces from objects +that depend on them, such as bridge domains or IP routes. + +**API** + +Every plugin is allowed to allocate a new mapping using the function +`NewNameToIndex(logger, title, indexfunction)`, giving in-memory-only +storage capabilities. Specifying `indexFunction` allows to add user-defined +secondary indices. + +The `NameToIndexRW` interface supports read and write operations. While the +registry owner is allowed to do both reads and writes, only the read +interface `NameToIndex` is typically exposed to other plugins. + +The read-only interface provides item-by-name and item-by-index look-ups using +the `LookupByName` and `LookupByIndex` functions, respectively. Additionally, +a client can use the `WatchItems` function to watch for changes in the registry +related to items with integer handles. The registry owner can change the mapping +content using the `Put/Delete/Update` functions from the underlying NamedMapping. + +**KVScheduler-owned mapping** + +Plugins configuring VPP items via `KVScheduler` (`ligato/cn-infra/kvscheduler`), +are able to let the scheduler to keep the mapping of item metadata up-to-date. +`WithMetadata()` function of `KVDescriptor` is used to enable/disable +the scheduler-managed mapping for item metadata. Normally, the scheduler uses +the basic `NamedMapping` to keep the association between item name and item +metadata. 
Descriptor, however, may provide a mapping factory, building mapping +with customized secondary indexes - like `NameToIndex` or its extensions. +The mapping is then available for reading to everyone via scheduler's method +`GetMetadataMap(descriptor)`. For mappings customized using the factory, +the returned `NamedMapping` can be then further casted to interface exposing +the extra look-ups, but keeping the access read-only. + +*Example* + +Here are some simplified code snippets from `ifplugin` showing how descriptor +can define mapping factory for the scheduler, and how the plugin then propagates +a read-only access to the mapping, including the extra secondary indexes: + +``` +// ifaceidx extends NameToIndex with IP lookups (for full code see plugins/vpp/ifplugin/ifaceidx2): + +type IfaceMetadataIndex interface { + LookupByName(name string) (metadata *IfaceMetadata, exists bool) + LookupBySwIfIndex(swIfIndex uint32) (name string, metadata *IfaceMetadata, exists bool) + LookupByIP(ip string) []string /* name */ + WatchInterfaces(subscriber string, channel chan<- IfaceMetadataDto) +} + +type IfaceMetadata struct { + SwIfIndex uint32 + IpAddresses []string +} + +// In descriptor: + +func (intfd *IntfDescriptorImpl) WithMetadata() (withMeta bool, customMapFactory kvscheduler.MetadataMapFactory) { + return true, func() idxmap.NamedMappingRW { + return ifaceidx.NewIfaceIndex(logrus.DefaultLogger(), "interface-index") + } +} + +// In ifplugin API: + +type IfPlugin struct { + Deps + + intfIndex ifaceidx.IfaceMetadataIndex +} + +func (p *IfPlugin) Init() error { + descriptor := adapter.NewIntfDescriptor(&descriptor.IntfDescriptorImpl{}) + p.Deps.Scheduler.RegisterKVDescriptor(descriptor) + + var withIndex bool + metadataMap := p.Deps.Scheduler.GetMetadataMap(descriptor.GetName()) + p.intfIndex, withIndex = metadataMap.(ifaceidx.IfaceMetadataIndex) + if !withIndex { + return errors.New("missing index with interface metadata") + } + return nil +} + +func (p *IfPlugin) 
GetInterfaceIndex() ifaceidx.IfaceMetadataIndex { + return p.intfIndex +} +``` diff --git a/pkg/idxvpp/clauses_test.go b/pkg/idxvpp/clauses_test.go new file mode 100644 index 0000000000..d3013e6937 --- /dev/null +++ b/pkg/idxvpp/clauses_test.go @@ -0,0 +1,226 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package idxvpp + +import ( + "testing" + "time" + + . "github.com/onsi/gomega" +) + +// Factory defines type of a function used to create new instances of a NameToIndex mapping. +type Factory func() (NameToIndexRW, error) + +// GivenKW defines the initial state of a testing scenario. +type GivenKW struct { + nameToIndexFactory Factory + nameToIndex NameToIndexRW + nameToIndexChan chan NameToIndexDto +} + +// When defines the actions/changes done to the tested registry. +type When struct { + given *GivenKW +} + +// Then defines the actions/changes expected from the tested registry. +type Then struct { + when *When +} + +// WhenName defines the actions/changes done to a registry for a given name. +type WhenName struct { + when *When + name string +} + +// ThenName defines actions/changes expected from the registry for a given name. +type ThenName struct { + then *Then + name string +} + +// Given prepares the initial state of a testing scenario. +func Given(t *testing.T) *GivenKW { + RegisterTestingT(t) + + return &GivenKW{} +} + +// When starts when-clause. 
+func (given *GivenKW) When() *When { + return &When{given: given} +} + +// NameToIdx sets up a given registry for the tested scenario. +func (given *GivenKW) NameToIdx(idxMapFactory Factory, reg map[string]uint32) *GivenKW { + Expect(given.nameToIndexFactory).Should(BeNil()) + Expect(given.nameToIndex).Should(BeNil()) + var err error + given.nameToIndexFactory = idxMapFactory + given.nameToIndex, err = idxMapFactory() + Expect(err).Should(BeNil()) + + for name, idx := range reg { + given.nameToIndex.Put(name, &OnlyIndex{idx}) + } + + // Registration of given mappings is done before watch (therefore there will be no notifications). + given.watchNameIdx() + return given +} + +func (given *GivenKW) watchNameIdx() { + given.nameToIndexChan = make(chan NameToIndexDto, 1000) + given.nameToIndex.WatchItems("plugin2", given.nameToIndexChan) +} + +// Then starts a then-clause. +func (when *When) Then() *Then { + return &Then{when: when} +} + +// Name associates when-clause with a given name in the registry. +func (when *When) Name(name string) *WhenName { + return &WhenName{when: when, name: name} +} + +// IsDeleted removes a given name from the registry. +func (whenName *WhenName) IsDeleted() *WhenName { + name := string(whenName.name) + whenName.when.given.nameToIndex.Delete(name) + + return whenName +} + +// Then starts a then-clause. +func (whenName *WhenName) Then() *Then { + return &Then{when: whenName.when} +} + +// IsAdded adds a given name-index pair into the registry. +func (whenName *WhenName) IsAdded(idx uint32) *WhenName { + name := string(whenName.name) + whenName.when.given.nameToIndex.Put(name, &OnlyIndex{idx}) + return whenName +} + +// And connects two when-clauses. +func (whenName *WhenName) And() *When { + return whenName.when +} + +// Name associates then-clause with a given name in the registry. 
+func (then *Then) Name(name string) *ThenName { + return &ThenName{then: then, name: name} +} + +// MapsToNothing verifies that a given name really maps to nothing. +func (thenName *ThenName) MapsToNothing() *ThenName { + name := string(thenName.name) + _, exist := thenName.then.when.given.nameToIndex.LookupByName(name) + Expect(exist).Should(BeFalse()) + + return thenName +} + +//MapsTo asserts the response of LookupIdx, LookupName and message in the channel. +func (thenName *ThenName) MapsTo(expectedIdx uint32) *ThenName { + name := string(thenName.name) + item, exist := thenName.then.when.given.nameToIndex.LookupByName(name) + Expect(exist).Should(BeTrue()) + Expect(item.GetIndex()).Should(Equal(uint32(expectedIdx))) + + retName, _, exist := thenName.then.when.given.nameToIndex.LookupByIndex(item.GetIndex()) + Expect(exist).Should(BeTrue()) + Expect(retName).ShouldNot(BeNil()) + Expect(retName).Should(Equal(name)) + + return thenName +} + +// Name associates then-clause with a given name in the registry. +func (thenName *ThenName) Name(name string) *ThenName { + return &ThenName{then: thenName.then, name: name} +} + +// And connects two then-clauses. +func (thenName *ThenName) And() *Then { + return thenName.then +} + +// When starts a when-clause. +func (thenName *ThenName) When() *When { + return thenName.then.when +} + +// ThenNotification defines notification parameters for a then-clause. +type ThenNotification struct { + then *Then + name string + del DelWriteEnum +} + +// DelWriteEnum defines type for the flag used to tell if a mapping was removed or not. +type DelWriteEnum bool + +// Del defines the value of a notification flag used when a mapping was removed. +const Del DelWriteEnum = true + +// Write defines the value of a notification flag used when a mapping was created. +const Write DelWriteEnum = false + +// Notification starts a section of then-clause referring to a given notification. 
+func (then *Then) Notification(name string, del DelWriteEnum) *ThenNotification { + return &ThenNotification{then: then, name: name, del: del} +} + +// IsNotExpected verifies that a given notification was indeed NOT received. +func (thenNotif *ThenNotification) IsNotExpected() *ThenNotification { + _, exist := thenNotif.receiveChan() + Expect(exist).Should(BeFalse()) + return thenNotif +} + +// IsExpectedFor verifies that a given notification was really received. +func (thenNotif *ThenNotification) IsExpectedFor(idx uint32) *ThenNotification { + notif, exist := thenNotif.receiveChan() + Expect(exist).Should(BeTrue()) + Expect(notif.Item.GetIndex()).Should(BeEquivalentTo(uint32(idx))) + Expect(notif.Del).Should(BeEquivalentTo(bool(thenNotif.del))) + return thenNotif +} + +// And connects two then-clauses. +func (thenNotif *ThenNotification) And() *Then { + return thenNotif.then +} + +// When starts a when-clause. +func (thenNotif *ThenNotification) When() *When { + return thenNotif.then.when +} + +func (thenNotif *ThenNotification) receiveChan() (*NameToIndexDto, bool) { + ch := thenNotif.then.when.given.nameToIndexChan + var x NameToIndexDto + select { + case x = <-ch: + return &x, true + case <-time.After(time.Second * 1): + return nil, false + } +} diff --git a/pkg/idxvpp/doc.go b/pkg/idxvpp/doc.go new file mode 100644 index 0000000000..e2cdb8b48f --- /dev/null +++ b/pkg/idxvpp/doc.go @@ -0,0 +1,4 @@ +// Package idxvpp extends NamedMapping from cn-infra to provide a map between +// VPP/Linux items with integer handles and northbound string-based identifiers +// (logical names). +package idxvpp diff --git a/pkg/idxvpp/idxvpp.go b/pkg/idxvpp/idxvpp.go new file mode 100644 index 0000000000..269fee8602 --- /dev/null +++ b/pkg/idxvpp/idxvpp.go @@ -0,0 +1,173 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package idxvpp + +import ( + "strconv" + "time" + + "github.com/ligato/cn-infra/idxmap" + "github.com/ligato/cn-infra/idxmap/mem" + "github.com/ligato/cn-infra/logging" +) + +// WithIndex is interface that items with integer handle must implement to get +// indexed by NameToIndex. +type WithIndex interface { + // GetIndex should return integer handle assigned to the item. + GetIndex() uint32 +} + +// NameToIndex is the "user API" to the registry of items with integer handles. +// It provides read-only access intended for plugins that need to do the conversions +// between logical names from NB and VPP/Linux item IDs. +type NameToIndex interface { + // LookupByName retrieves a previously stored item identified by + // . If there is no item associated with the give name in the mapping, + // the is returned as *false* and as *nil*. + LookupByName(name string) (item WithIndex, exists bool) + + // LookupByIndex retrieves a previously stored item identified in VPP/Linux + // by the given . + // If there is no item associated with the given index, is returned + // as *false* with and both set to empty values. + LookupByIndex(index uint32) (name string, item WithIndex, exists bool) + + // WatchItems subscribes to receive notifications about the changes in the + // mapping related to items with integer handles. + WatchItems(subscriber string, channel chan<- NameToIndexDto) +} + +// NameToIndexRW is the "owner API" to the NameToIndex registry. 
Using this +// API the owner is able to add/update and delete associations between logical +// names and VPP/Linux items identified by integer handles. +type NameToIndexRW interface { + NameToIndex + idxmap.NamedMappingRW +} + +// OnlyIndex can be used to add items into NameToIndex with the integer handle +// as the only information associated with each item. +type OnlyIndex struct { + Index uint32 +} + +// GetIndex returns index assigned to the item. +func (item *OnlyIndex) GetIndex() uint32 { + return item.Index +} + +// NameToIndexDto represents an item sent through watch channel in NameToIndex. +// In contrast to NamedMappingGenericEvent, it contains item casted to WithIndex. +type NameToIndexDto struct { + idxmap.NamedMappingEvent + Item WithIndex +} + +// nameToIndex implements NamedMapping for items with integer handles. +type nameToIndex struct { + idxmap.NamedMappingRW + log logging.Logger +} + +const ( + // indexKey is a secondary index used to create association between + // item name and the integer handle. + indexKey = "index" +) + +// NewNameToIndex creates a new instance implementing NameToIndexRW. +// User can optionally extend the secondary indexes through . +func NewNameToIndex(logger logging.Logger, title string, + indexFunction mem.IndexFunction) NameToIndexRW { + return &nameToIndex{ + NamedMappingRW: mem.NewNamedMapping(logger, title, + func(item interface{}) map[string][]string { + idxs := internalIndexFunction(item) + + if indexFunction != nil { + userIdxs := indexFunction(item) + for k, v := range userIdxs { + idxs[k] = v + } + } + return idxs + }), + } +} + +// LookupByName retrieves a previously stored item identified by +// . If there is no item associated with the give name in the mapping, +// the is returned as *false* and as *nil*. 
+func (idx *nameToIndex) LookupByName(name string) (item WithIndex, exists bool) { + value, found := idx.GetValue(name) + if found { + if itemWithIndex, ok := value.(WithIndex); ok { + return itemWithIndex, found + } + } + return nil, false +} + +// LookupByIndex retrieves a previously stored item identified in VPP/Linux +// by the given . +// If there is no item associated with the given index, is returned +// as *false* with and both set to empty values. +func (idx *nameToIndex) LookupByIndex(index uint32) (name string, item WithIndex, exists bool) { + res := idx.ListNames(indexKey, strconv.FormatUint(uint64(index), 10)) + if len(res) != 1 { + return + } + value, found := idx.GetValue(res[0]) + if found { + if itemWithIndex, ok := value.(WithIndex); ok { + return res[0], itemWithIndex, found + } + } + return +} + +// WatchItems subscribes to receive notifications about the changes in the +// mapping related to items with integer handles. +func (idx *nameToIndex) WatchItems(subscriber string, channel chan<- NameToIndexDto) { + watcher := func(dto idxmap.NamedMappingGenericEvent) { + itemWithIndex, ok := dto.Value.(WithIndex) + if !ok { + return + } + msg := NameToIndexDto{ + NamedMappingEvent: dto.NamedMappingEvent, + Item: itemWithIndex, + } + select { + case channel <- msg: + case <-time.After(idxmap.DefaultNotifTimeout): + idx.log.Warn("Unable to deliver notification") + } + } + idx.Watch(subscriber, watcher) +} + +// internalIndexFunction is an index function used internally for nameToIndex. 
+func internalIndexFunction(item interface{}) map[string][]string { + indexes := map[string][]string{} + itemWithIndex, ok := item.(WithIndex) + if !ok || itemWithIndex == nil { + return indexes + } + + indexes[indexKey] = []string{strconv.FormatUint(uint64(itemWithIndex.GetIndex()), 10)} + return indexes +} diff --git a/pkg/idxvpp/idxvpp_test.go b/pkg/idxvpp/idxvpp_test.go new file mode 100644 index 0000000000..34d84e9abd --- /dev/null +++ b/pkg/idxvpp/idxvpp_test.go @@ -0,0 +1,240 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package idxvpp + +import ( + "testing" + + "strconv" + + "github.com/ligato/cn-infra/logging/logrus" + "github.com/onsi/gomega" +) + +const ( + idx1 = 1 + idx2 = 2 + idx3 = 3 +) + +var ( + eth0 = "eth0" + eth1 = "eth1" + eth2 = "eth2" +) + +func IndexFactory() (NameToIndexRW, error) { + return NewNameToIndex(logrus.DefaultLogger(), "test", nil), nil +} + +func Test01UnregisteredMapsToNothing(t *testing.T) { + Given(t).NameToIdx(IndexFactory, nil). + When().Name(eth1).IsDeleted(). + Then().Name(eth1).MapsToNothing(). + And().Notification(eth1, Write).IsNotExpected() +} + +func Test02RegisteredReturnsIdx(t *testing.T) { + Given(t).NameToIdx(IndexFactory, nil). + When().Name(eth1).IsAdded(idx1). + Then().Name(eth1).MapsTo(idx1). 
+ And().Notification(eth1, Write).IsExpectedFor(idx1) +} + +func Test03RegFirstThenUnreg(t *testing.T) { + Given(t).NameToIdx(IndexFactory, map[string]uint32{eth1: idx1}). + When().Name(eth1).IsDeleted(). + Then().Name(eth1).MapsToNothing(). + And().Notification(eth1, Del).IsExpectedFor(idx1) +} + +func Test03Eth0RegPlusEth1Unreg(t *testing.T) { + Given(t).NameToIdx(IndexFactory, map[string]uint32{eth0: idx1, eth1: idx2}). + When().Name(eth1).IsDeleted(). + Then().Name(eth1).MapsToNothing(). + And().Notification(eth1, Del).IsExpectedFor(idx2). + And().Name(eth0).MapsTo(idx1). + And().Notification(eth0, Write).IsNotExpected() //because watch is registered after given keyword +} + +func Test04RegTwiceSameNameWithDifferentIdx(t *testing.T) { + Given(t).NameToIdx(IndexFactory, nil). + When().Name(eth1).IsAdded(idx1). + Then().Name(eth1).MapsTo(idx1). //Notif eth1, idx1 + And().Notification(eth1, Write).IsExpectedFor(idx1). + When().Name(eth1).IsAdded(idx2). + Then().Name(eth1).MapsTo(idx2). 
//Notif eth1, idx1 + And().Notification(eth1, Write).IsExpectedFor(idx2) +} + +const ( + flagKey = "flag" + valsKey = "vals" +) + +type Item struct { + index uint32 + flag bool + vals []string +} + +func (item *Item) GetIndex() uint32 { + return item.index +} + +func createIdx(item interface{}) map[string][]string { + typed, ok := item.(*Item) + if !ok { + return nil + } + + return map[string][]string{ + flagKey: {strconv.FormatBool(typed.flag)}, + valsKey: typed.vals, + } +} + +func TestIndexedMetadata(t *testing.T) { + gomega.RegisterTestingT(t) + idxm := NewNameToIndex(logrus.DefaultLogger(), "title", createIdx) + + res := idxm.ListNames(flagKey, "true") + gomega.Expect(res).To(gomega.BeNil()) + + item1 := &Item{ + index: idx1, + flag: true, + vals: []string{"abc", "def", "xyz"}, + } + item2 := &Item{ + index: idx2, + flag: false, + vals: []string{"abc", "klm", "opq"}, + } + item3 := &Item{ + index: idx3, + flag: true, + vals: []string{"jkl"}, + } + + idxm.Put(eth0, item1) + idxm.Put(eth1, item2) + idxm.Put(eth2, item3) + + res = idxm.ListNames(flagKey, "false") + gomega.Expect(res).NotTo(gomega.BeNil()) + gomega.Expect(res[0]).To(gomega.BeEquivalentTo(eth1)) + + res = idxm.ListNames(flagKey, "true") + gomega.Expect(len(res)).To(gomega.BeEquivalentTo(2)) + gomega.Expect(res).To(gomega.ContainElement(string(eth0))) + gomega.Expect(res).To(gomega.ContainElement(string(eth2))) + + res = idxm.ListNames(valsKey, "abc") + gomega.Expect(len(res)).To(gomega.BeEquivalentTo(2)) + gomega.Expect(res).To(gomega.ContainElement(string(eth0))) + gomega.Expect(res).To(gomega.ContainElement(string(eth1))) + + res = idxm.ListNames(valsKey, "jkl") + gomega.Expect(len(res)).To(gomega.BeEquivalentTo(1)) + gomega.Expect(res[0]).To(gomega.BeEquivalentTo(eth2)) + + idxm.Delete(eth0) + res = idxm.ListNames(flagKey, "true") + gomega.Expect(len(res)).To(gomega.BeEquivalentTo(1)) + gomega.Expect(res[0]).To(gomega.BeEquivalentTo(eth2)) + +} + +func TestOldIndexRemove(t *testing.T) { + 
gomega.RegisterTestingT(t) + idxm := NewNameToIndex(logrus.DefaultLogger(), "title", nil) + + idxm.Put(eth0, &OnlyIndex{idx1}) + + item, found := idxm.LookupByName(eth0) + gomega.Expect(found).To(gomega.BeTrue()) + gomega.Expect(item.GetIndex()).To(gomega.BeEquivalentTo(idx1)) + + name, _, found := idxm.LookupByIndex(idx1) + gomega.Expect(found).To(gomega.BeTrue()) + gomega.Expect(name).To(gomega.BeEquivalentTo(eth0)) + + idxm.Put(eth0, &OnlyIndex{idx2}) + + item, found = idxm.LookupByName(eth0) + gomega.Expect(found).To(gomega.BeTrue()) + gomega.Expect(item.GetIndex()).To(gomega.BeEquivalentTo(idx2)) + + name, item, found = idxm.LookupByIndex(idx2) + gomega.Expect(found).To(gomega.BeTrue()) + gomega.Expect(name).To(gomega.BeEquivalentTo(string(eth0))) + gomega.Expect(item).ToNot(gomega.BeNil()) + + name, item, found = idxm.LookupByIndex(idx1) + gomega.Expect(found).To(gomega.BeFalse()) + gomega.Expect(name).To(gomega.BeEquivalentTo("")) + gomega.Expect(item).To(gomega.BeNil()) +} + +func TestUpdateIndex(t *testing.T) { + gomega.RegisterTestingT(t) + idxm := NewNameToIndex(logrus.DefaultLogger(), "title", nil) + + idxm.Put(eth0, &OnlyIndex{idx1}) + + item, found := idxm.LookupByName(eth0) + gomega.Expect(found).To(gomega.BeTrue()) + gomega.Expect(item.GetIndex()).To(gomega.BeEquivalentTo(idx1)) + + success := idxm.Update(eth0, &OnlyIndex{idx2}) + gomega.Expect(success).To(gomega.BeTrue()) + + item, found = idxm.LookupByName(eth0) + gomega.Expect(found).To(gomega.BeTrue()) + gomega.Expect(item.GetIndex()).To(gomega.BeEquivalentTo(idx2)) +} + +func TestClearMapping(t *testing.T) { + gomega.RegisterTestingT(t) + idxm := NewNameToIndex(logrus.DefaultLogger(), "title", nil) + + idxm.Put(eth0, &OnlyIndex{idx1}) + idxm.Put(eth1, &OnlyIndex{idx2}) + idxm.Put(eth2, &OnlyIndex{idx3}) + + item, found := idxm.LookupByName(eth0) + gomega.Expect(found).To(gomega.BeTrue()) + gomega.Expect(item.GetIndex()).To(gomega.BeEquivalentTo(idx1)) + + item, found = idxm.LookupByName(eth1) + 
gomega.Expect(found).To(gomega.BeTrue()) + gomega.Expect(item.GetIndex()).To(gomega.BeEquivalentTo(idx2)) + + item, found = idxm.LookupByName(eth2) + gomega.Expect(found).To(gomega.BeTrue()) + gomega.Expect(item.GetIndex()).To(gomega.BeEquivalentTo(idx3)) + + idxm.Clear() + + _, found = idxm.LookupByName(eth0) + gomega.Expect(found).To(gomega.BeFalse()) + + _, found = idxm.LookupByName(eth1) + gomega.Expect(found).To(gomega.BeFalse()) + + _, found = idxm.LookupByName(eth2) + gomega.Expect(found).To(gomega.BeFalse()) +} diff --git a/pkg/models/encoding.go b/pkg/models/encoding.go new file mode 100644 index 0000000000..5cc30a684e --- /dev/null +++ b/pkg/models/encoding.go @@ -0,0 +1,193 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package models + +import ( + "fmt" + "reflect" + + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" + + api "github.com/ligato/vpp-agent/api/genericmanager" + "github.com/ligato/cn-infra/datasync" +) + +// This constant is used as prefix for TypeUrl when marshalling to Any. 
+const ligatoModels = "models.ligato.io/"
+
+func UnmarshalLazyValue(key string, lazy datasync.LazyValue) (proto.Message, error) {
+	for _, model := range registeredModels {
+		if !model.IsKeyValid(key) {
+			continue
+		}
+		valueType := proto.MessageType(model.ProtoName())
+		if valueType == nil {
+			return nil, fmt.Errorf("unknown proto message defined for key %s", key)
+		}
+		value := reflect.New(valueType.Elem()).Interface().(proto.Message)
+		// try to deserialize the value
+		err := lazy.GetValue(value)
+		if err != nil {
+			return nil, err
+		}
+		return value, nil
+	}
+	return nil, fmt.Errorf("no model registered for key %s", key)
+}
+
+// UnmarshalItem is a helper function for unmarshalling model items into proto messages.
+func UnmarshalItem(m *api.Item) (proto.Message, error) {
+	protoName, err := types.AnyMessageName(m.GetData().GetAny())
+	if err != nil {
+		return nil, err
+	}
+	model, found := registeredModels[protoName]
+	if !found {
+		return nil, fmt.Errorf("message %s is not registered as model", protoName)
+	}
+
+	itemModel := m.Id.Model
+	if itemModel.Module != model.Module ||
+		itemModel.Version != model.Version ||
+		itemModel.Type != model.Type {
+		return nil, fmt.Errorf("item model does not match the one registered (%+v)", itemModel)
+	}
+
+	var any types.DynamicAny
+	if err := types.UnmarshalAny(m.GetData().GetAny(), &any); err != nil {
+		return nil, err
+	}
+	return any.Message, nil
+}
+
+// MarshalItem is a helper function for marshalling proto messages into model items.
+func MarshalItem(pb proto.Message) (*api.Item, error) { + id, err := getItemID(pb) + if err != nil { + return nil, err + } + + any, err := types.MarshalAny(pb) + if err != nil { + return nil, err + } + any.TypeUrl = ligatoModels + proto.MessageName(pb) + + /*name, err := model.nameFunc(pb) + if err != nil { + return nil, err + } + path := path.Join(model.Path(), name)*/ + + item := &api.Item{ + /*Ref: &api.ItemRef{ + Path: model.modelPath, + Name: name, + },*/ + Id: id, + //Key: path, + Data: &api.Data{ + Any: any, + }, + } + return item, nil +} + +func getItemID(pb proto.Message) (*api.Item_ID, error) { + protoName := proto.MessageName(pb) + model, found := registeredModels[protoName] + if !found { + return nil, fmt.Errorf("message %s is not registered as model", protoName) + } + + name, err := model.nameFunc(pb) + if err != nil { + return nil, err + } + + return &api.Item_ID{ + Name: name, + Model: &api.Model{ + Module: model.Module, + Version: model.Version, + Type: model.Type, + }, + }, nil +} + +type model interface { + GetModule() string + GetVersion() string + GetType() string +} + +func getModelPath(m model) string { + return buildModelPath(m.GetVersion(), m.GetModule(), m.GetType()) +} + +// ModelForItem +func ModelForItem(item *api.Item) (registeredModel, error) { + if data := item.GetData(); data != nil { + return GetModel(data) + } + if id := item.GetId(); id != nil { + modelPath := getModelPath(id.Model) + protoName, found := modelPaths[modelPath] + if found { + model, _ := registeredModels[protoName] + return *model, nil + } + } + + return registeredModel{}, fmt.Errorf("no model found for item %v", item) +} + +func ItemKey(item *api.Item) (string, error) { + if data := item.GetData(); data != nil { + return GetKey(data) + } + if id := item.GetId(); id != nil { + modelPath := getModelPath(id.Model) + protoName, found := modelPaths[modelPath] + if found { + model, _ := registeredModels[protoName] + key := model.KeyPrefix() + id.Name + return key, nil + 
} + } + + return "", fmt.Errorf("invalid item: %v", item) +} + +// RegisteredModels returns all registered modules. +func RegisteredModels() (models []*api.ModelInfo) { + for _, s := range registeredModels { + models = append(models, &api.ModelInfo{ + Model: &api.Model{ + Module: s.Module, + Type: s.Type, + Version: s.Version, + }, + Info: map[string]string{ + "nameTemplate": s.nameTemplate, + "protoName": s.protoName, + "modelPath": s.modelPath, + "keyPrefix": s.keyPrefix, + }, + }) + } + return +} diff --git a/pkg/models/keys_test.go b/pkg/models/keys_test.go new file mode 100644 index 0000000000..ac44f30e79 --- /dev/null +++ b/pkg/models/keys_test.go @@ -0,0 +1,171 @@ +// Copyright (c) 2019 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package models_test + +import ( + "testing" + + "github.com/gogo/protobuf/proto" + "github.com/ligato/vpp-agent/api/models/linux/interfaces" + "github.com/ligato/vpp-agent/pkg/models" +) + +func TestEncoding(t *testing.T) { + in := &linux_interfaces.Interface{ + Name: "testName", + Type: linux_interfaces.Interface_VETH, + } + + item, err := models.MarshalItem(in) + if err != nil { + t.Fatalf("marshal error: %v", err) + } + t.Logf("marshalled:\n%+v", proto.MarshalTextString(item)) + + out, err := models.UnmarshalItem(item) + if err != nil { + t.Fatalf("unmarshal error: %v", err) + } + t.Logf("unmarshalled:\n%+v", proto.MarshalTextString(out)) +} + +/*func TestKeys(t *testing.T) { + tests := []struct { + name string + model proto.Message + expectedKey string + }{ + { + name: "linux iface", + model: &linux_interfaces.Interface{ + Name: "testName", + Type: linux_interfaces.Interface_VETH, + }, + expectedKey: "linux/config/v2/interface/testName", + }, + { + name: "linux route", + model: &linux_l3.Route{ + DstNetwork: "1.1.1.1/24", + OutgoingInterface: "eth0", + GwAddr: "9.9.9.9", + }, + expectedKey: "linux/config/v2/route/1.1.1.0/24/eth0", + }, + { + name: "linux arp", + model: &linux_l3.ARPEntry{ + Interface: "if1", + IpAddress: "1.2.3.4", + HwAddress: "11:22:33:44:55:66", + }, + expectedKey: "linux/config/v2/arp/if1/1.2.3.4", + }, + { + name: "vpp acl", + model: &vpp_acl.Acl{ + Name: "myacl5", + }, + expectedKey: "vpp/config/v2/acl/myacl5", + }, + { + name: "vpp bd", + model: &vpp_l2.BridgeDomain{ + Name: "bd3", + }, + expectedKey: "vpp/config/v2/bd/bd3", + }, + { + name: "vpp nat global", + model: &vpp_nat.Nat44Global{ + Forwarding: true, + }, + expectedKey: "vpp/config/v2/nat44/GLOBAL", + }, + { + name: "vpp dnat", + model: &vpp_nat.DNat44{ + Label: "mynat1", + }, + expectedKey: "vpp/config/v2/nat44/dnat/mynat1", + }, + { + name: "vpp arp", + model: &vpp_l3.ARPEntry{ + Interface: "if1", + IpAddress: "1.2.3.4", + PhysAddress: "11:22:33:44:55:66", + }, + 
expectedKey: "vpp/config/v2/arp/if1/1.2.3.4", + }, + { + name: "vpp route", + model: &vpp_l3.Route{ + VrfId: 0, + DstNetwork: "10.10.0.10/24", + NextHopAddr: "0.0.0.0", + }, + expectedKey: "vpp/config/v2/route/vrf/0/dst/10.10.0.0/24/gw/0.0.0.0", + }, + { + name: "vpp stn", + model: &vpp_stn.Rule{ + Interface: "eth0", + IpAddress: "1.1.1.1", + }, + expectedKey: "vpp/config/v2/stn/rule/eth0/ip/1.1.1.1", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + key := models.Key(test.model) + + if key != test.expectedKey { + t.Errorf("expected key: \n%q\ngot: \n%q", test.expectedKey, key) + } else { + spec := models.Model(test.model) + t.Logf("key: %q (%v)\n", key, spec) + } + }) + } +}*/ + +/*func TestParseKeys(t *testing.T) { + tests := []struct { + name string + key string + expectedParts map[string]string + }{ + { + name: "vpp arp", + key: "vpp/config/v2/arp/if1/1.2.3.4", + expectedParts: map[string]string{ + "Interface": "if1", + "IpAddress": "1.2.3.4", + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + + parts := models.ParseKey(test.key) + t.Logf("parts: %q", parts) + + if len(parts) != len(test.expectedParts) { + t.Errorf("expected parts: %v, got: %v", test.expectedParts, parts) + } + }) + } +}*/ diff --git a/pkg/models/models.go b/pkg/models/models.go new file mode 100644 index 0000000000..0c6a158f3d --- /dev/null +++ b/pkg/models/models.go @@ -0,0 +1,107 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package models
+
+import (
+	"fmt"
+	"path"
+
+	"github.com/gogo/protobuf/proto"
+)
+
+// Key is a shorthand for GetKey that panics instead of returning an error.
+func Key(x proto.Message) string {
+	key, err := GetKey(x)
+	if err != nil {
+		panic(err)
+	}
+	return key
+}
+
+// Name is a shorthand for GetName that panics instead of returning an error.
+func Name(x proto.Message) string {
+	name, err := GetName(x)
+	if err != nil {
+		panic(err)
+	}
+	return name
+}
+
+// Path is a shorthand for GetPath that panics instead of returning an error.
+func Path(x proto.Message) string {
+	path, err := GetPath(x)
+	if err != nil {
+		panic(err)
+	}
+	return path
+}
+
+// Model returns the registered model for the given proto message, panicking when it is not registered.
+func Model(x proto.Message) registeredModel {
+	model, err := GetModel(x)
+	if err != nil {
+		panic(err)
+	}
+	return model
+}
+
+// GetKey returns the complete key for the given model,
+// including key prefix defined by model specification.
+// It returns error if given model is not registered.
+func GetKey(x proto.Message) (string, error) {
+	model, err := GetModel(x)
+	if err != nil {
+		return "", err
+	}
+	name, err := model.nameFunc(x)
+	if err != nil {
+		return "", err
+	}
+	key := path.Join(model.keyPrefix, name)
+	return key, nil
+}
+
+// GetName returns the instance name generated for the given model data.
+func GetName(x proto.Message) (string, error) {
+	model, err := GetModel(x)
+	if err != nil {
+		return "", err
+	}
+	name, err := model.nameFunc(x)
+	if err != nil {
+		return "", err
+	}
+	return name, nil
+}
+
+// GetPath returns the model path for the given model.
+// It returns error if given model is not registered.
+func GetPath(x proto.Message) (string, error) {
+	model, err := GetModel(x)
+	if err != nil {
+		return "", err
+	}
+	return model.Path(), nil
+}
+
+// GetModel returns registered model for the given proto message.
+func GetModel(x proto.Message) (registeredModel, error) { + protoName := proto.MessageName(x) + model, found := registeredModels[protoName] + if !found { + return registeredModel{}, fmt.Errorf("no model registered for %s", protoName) + } + return *model, nil +} diff --git a/pkg/models/spec.go b/pkg/models/spec.go new file mode 100644 index 0000000000..f776beb0a5 --- /dev/null +++ b/pkg/models/spec.go @@ -0,0 +1,200 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package models + +import ( + "fmt" + "net" + "os" + "regexp" + "strings" + "text/template" + + "github.com/gogo/protobuf/proto" + api "github.com/ligato/vpp-agent/api/genericmanager" +) + +var ( + validModule = regexp.MustCompile(`^[-a-z0-9_]+(?:\.[-a-z0-9_]+)?$`) + validType = regexp.MustCompile(`^[-a-z0-9_]+$`) +) + +// Spec defines model specification used for registering model. +type Spec api.Model + +type registeredModel struct { + Spec + + protoName string + keyPrefix string + modelPath string + + modelOptions +} + +type modelOptions struct { + nameTemplate string + nameFunc NameFunc +} + +// ModelOption defines function type which sets model options. +type ModelOption func(*modelOptions) + +// WithNameTemplate returns option for models which sets function +// for generating name of instances using custom template. 
+func WithNameTemplate(t string) ModelOption { + return func(opts *modelOptions) { + opts.nameFunc = NameTemplate(t) + opts.nameTemplate = t + } +} + +// ProtoName returns proto message name registered with the model. +func (m registeredModel) ProtoName() string { + return m.protoName +} + +// Path returns path for the model. +func (m registeredModel) Path() string { + return m.modelPath +} + +// KeyPrefix returns key prefix for the model. +func (m registeredModel) KeyPrefix() string { + return m.keyPrefix +} + +// ParseKey parses the given key and returns item name +// or returns empty name and valid as false if the key is not valid. +func (m registeredModel) ParseKey(key string) (name string, valid bool) { + name = strings.TrimPrefix(key, m.keyPrefix) + if name == key || name == "" { + name = strings.TrimPrefix(key, m.modelPath) + } + if name != key && name != "" { + // TODO: validate name? + return name, true + } + return "", false +} + +// IsKeyValid returns true if given key is valid for this model. +func (m registeredModel) IsKeyValid(key string) bool { + _, valid := m.ParseKey(key) + return valid +} + +// StripKeyPrefix returns key with prefix stripped. +func (m registeredModel) StripKeyPrefix(key string) string { + if name, valid := m.ParseKey(key); valid { + return name + } + return key +} + +var ( + registeredModels = make(map[string]*registeredModel) + modelPaths = make(map[string]string) + + debugRegister = strings.Contains(os.Getenv("DEBUG_MODELS"), "register") +) + +// Register registers the protobuf message with given model specification. 
+func Register(pb proto.Message, spec Spec, opts ...ModelOption) *registeredModel {
+	model := &registeredModel{
+		Spec:      spec,
+		protoName: proto.MessageName(pb),
+	}
+
+	// Refuse duplicate registration of the same proto message
+	if _, ok := registeredModels[model.protoName]; ok {
+		panic(fmt.Sprintf("proto message %q already registered", model.protoName))
+	}
+
+	// Validate model spec before deriving any keys from it
+	if !validModule.MatchString(spec.Module) {
+		panic(fmt.Sprintf("module for model %s is invalid", model.protoName))
+	}
+	if !validType.MatchString(spec.Type) {
+		panic(fmt.Sprintf("model type for %s is invalid", model.protoName))
+	}
+	if !strings.HasPrefix(spec.Version, "v") {
+		panic(fmt.Sprintf("model version for %s is invalid", model.protoName))
+	}
+
+	// Generate keys & paths
+	model.modelPath = buildModelPath(spec.Version, spec.Module, spec.Type)
+	if pn, ok := modelPaths[model.modelPath]; ok {
+		panic(fmt.Sprintf("path prefix %q already used by: %s", model.modelPath, pn))
+	}
+	modulePath := strings.Replace(spec.Module, ".", "/", -1)
+	model.keyPrefix = fmt.Sprintf("config/%s/%s/%s/", modulePath, spec.Version, spec.Type)
+
+	// Use GetName as fallback for generating name
+	if _, ok := pb.(named); ok {
+		model.nameFunc = func(obj interface{}) (s string, e error) {
+			return obj.(named).GetName(), nil
+		}
+	}
+
+	// Apply custom options
+	for _, opt := range opts {
+		opt(&model.modelOptions)
+	}
+
+	registeredModels[model.protoName] = model
+	modelPaths[model.modelPath] = model.protoName
+
+	if debugRegister {
+		fmt.Printf("- registered model: %+v\t%q\n", model, model.modelPath)
+	}
+
+	return model
+}
+
+func buildModelPath(version, module, typ string) string {
+	return fmt.Sprintf("%s.%s.%s", module, version, typ)
+}
+
+type named interface {
+	GetName() string
+}
+
+// NameFunc represents function which can name model instance.
+type NameFunc func(obj interface{}) (string, error) + +func NameTemplate(t string) NameFunc { + tmpl := template.Must( + template.New("name").Funcs(funcMap).Option("missingkey=error").Parse(t), + ) + return func(obj interface{}) (string, error) { + var s strings.Builder + if err := tmpl.Execute(&s, obj); err != nil { + return "", err + } + return s.String(), nil + } +} + +var funcMap = template.FuncMap{ + "ipnet": func(s string) map[string]interface{} { + _, ipNet, _ := net.ParseCIDR(s) + maskSize, _ := ipNet.Mask.Size() + return map[string]interface{}{ + "IP": ipNet.IP.String(), + "MaskSize": maskSize, + } + }, +} diff --git a/pkg/util/proto.go b/pkg/util/proto.go new file mode 100644 index 0000000000..12368b5217 --- /dev/null +++ b/pkg/util/proto.go @@ -0,0 +1,76 @@ +// Copyright (c) 2019 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package util + +import ( + "reflect" + + "github.com/gogo/protobuf/proto" +) + +func ExtractProtos(from ...interface{}) (protos []proto.Message) { + for _, v := range from { + if reflect.ValueOf(v).IsNil() { + continue + } + val := reflect.ValueOf(v).Elem() + typ := val.Type() + if typ.Kind() != reflect.Struct { + return + } + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + if field.Kind() == reflect.Slice { + for idx := 0; idx < field.Len(); idx++ { + elem := field.Index(idx) + if msg, ok := elem.Interface().(proto.Message); ok { + protos = append(protos, msg) + } + } + } else if field.Kind() == reflect.Ptr && !field.IsNil() { + if msg, ok := field.Interface().(proto.Message); ok && !field.IsNil() { + protos = append(protos, msg) + } + } + } + } + return +} + +func PlaceProtos(protos map[string]proto.Message, dsts ...interface{}) { + for _, prot := range protos { + protTyp := reflect.TypeOf(prot) + for _, dst := range dsts { + dstVal := reflect.ValueOf(dst).Elem() + dstTyp := dstVal.Type() + if dstTyp.Kind() != reflect.Struct { + return + } + for i := 0; i < dstTyp.NumField(); i++ { + field := dstVal.Field(i) + if field.Kind() == reflect.Slice { + if protTyp.AssignableTo(field.Type().Elem()) { + field.Set(reflect.Append(field, reflect.ValueOf(prot))) + } + } else { + if field.Type() == protTyp { + field.Set(reflect.ValueOf(prot)) + } + } + } + } + } + return +} diff --git a/plugins/configurator/configurator.go b/plugins/configurator/configurator.go new file mode 100644 index 0000000000..62a311c67c --- /dev/null +++ b/plugins/configurator/configurator.go @@ -0,0 +1,97 @@ +package configurator + +import ( + "github.com/gogo/status" + "github.com/ligato/cn-infra/logging" + "github.com/ligato/vpp-agent/api/models/linux" + "github.com/ligato/vpp-agent/api/models/vpp" + "github.com/ligato/vpp-agent/pkg/util" + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + + rpc "github.com/ligato/vpp-agent/api/configurator" + 
"github.com/ligato/vpp-agent/pkg/models" + kvs "github.com/ligato/vpp-agent/plugins/kvscheduler/api" + "github.com/ligato/vpp-agent/plugins/orchestrator" +) + +// configuratorServer implements DataSyncer service. +type configuratorServer struct { + dumpService + notifyService + + log logging.Logger + dispatch orchestrator.Dispatcher +} + +// Get retrieves actual configuration data. +func (svc *configuratorServer) Get(context.Context, *rpc.GetRequest) (*rpc.GetResponse, error) { + config := newConfig() + + util.PlaceProtos(svc.dispatch.ListData(), config.LinuxConfig, config.VppConfig) + + return &rpc.GetResponse{Config: config}, nil +} + +// Update adds configuration data present in data request to the VPP/Linux +func (svc *configuratorServer) Update(ctx context.Context, req *rpc.UpdateRequest) (*rpc.UpdateResponse, error) { + protos := util.ExtractProtos(req.Update.VppConfig, req.Update.LinuxConfig) + + var kvPairs []orchestrator.KeyVal + for _, p := range protos { + key, err := models.GetKey(p) + if err != nil { + svc.log.Debug("models.GetKey failed: %s", err) + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + kvPairs = append(kvPairs, orchestrator.KeyVal{ + Key: key, + Val: p, + }) + } + + if req.FullResync { + ctx = kvs.WithResync(ctx, kvs.FullResync, true) + } + + ctx = orchestrator.DataSrcContext(ctx, "grpc") + if _, err := svc.dispatch.PushData(ctx, kvPairs); err != nil { + st := status.New(codes.FailedPrecondition, err.Error()) + return nil, st.Err() + } + + return &rpc.UpdateResponse{}, nil +} + +// Delete removes configuration data present in data request from the VPP/linux +func (svc *configuratorServer) Delete(ctx context.Context, req *rpc.DeleteRequest) (*rpc.DeleteResponse, error) { + protos := util.ExtractProtos(req.Delete.VppConfig, req.Delete.LinuxConfig) + + var kvPairs []orchestrator.KeyVal + for _, p := range protos { + key, err := models.GetKey(p) + if err != nil { + svc.log.Debug("models.GetKey failed: %s", err) + return nil, 
status.Error(codes.InvalidArgument, err.Error()) + } + kvPairs = append(kvPairs, orchestrator.KeyVal{ + Key: key, + Val: nil, + }) + } + + ctx = orchestrator.DataSrcContext(ctx, "grpc") + if _, err := svc.dispatch.PushData(ctx, kvPairs); err != nil { + st := status.New(codes.FailedPrecondition, err.Error()) + return nil, st.Err() + } + + return &rpc.DeleteResponse{}, nil +} + +func newConfig() *rpc.Config { + return &rpc.Config{ + LinuxConfig: &linux.ConfigData{}, + VppConfig: &vpp.ConfigData{}, + } +} diff --git a/plugins/configurator/dump.go b/plugins/configurator/dump.go new file mode 100644 index 0000000000..e40df9ec04 --- /dev/null +++ b/plugins/configurator/dump.go @@ -0,0 +1,279 @@ +package configurator + +import ( + "github.com/ligato/cn-infra/logging" + "golang.org/x/net/context" + + rpc "github.com/ligato/vpp-agent/api/configurator" + "github.com/ligato/vpp-agent/api/models/vpp/acl" + "github.com/ligato/vpp-agent/api/models/vpp/interfaces" + "github.com/ligato/vpp-agent/api/models/vpp/ipsec" + "github.com/ligato/vpp-agent/api/models/vpp/l2" + "github.com/ligato/vpp-agent/api/models/vpp/l3" + "github.com/ligato/vpp-agent/api/models/vpp/punt" + iflinuxcalls "github.com/ligato/vpp-agent/plugins/linux/ifplugin/linuxcalls" + l3linuxcalls "github.com/ligato/vpp-agent/plugins/linux/l3plugin/linuxcalls" + aclvppcalls "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppcalls" + ifvppcalls "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" + ipsecvppcalls "github.com/ligato/vpp-agent/plugins/vpp/ipsecplugin/vppcalls" + l2vppcalls "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" + l3vppcalls "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppcalls" + natvppcalls "github.com/ligato/vpp-agent/plugins/vpp/natplugin/vppcalls" + "github.com/ligato/vpp-agent/plugins/vpp/puntplugin/vppcalls" +) + +type dumpService struct { + log logging.Logger + + // VPP Handlers + aclHandler aclvppcalls.ACLVppRead + ifHandler ifvppcalls.IfVppRead + natHandler 
natvppcalls.NatVppRead + bdHandler l2vppcalls.BridgeDomainVppRead + fibHandler l2vppcalls.FIBVppRead + xcHandler l2vppcalls.XConnectVppRead + arpHandler l3vppcalls.ArpVppRead + pArpHandler l3vppcalls.ProxyArpVppRead + rtHandler l3vppcalls.RouteVppRead + ipsecHandler ipsecvppcalls.IPSecVPPRead + puntHandler vppcalls.PuntVPPRead + // Linux handlers + linuxIfHandler iflinuxcalls.NetlinkAPIRead + linuxL3Handler l3linuxcalls.NetlinkAPIRead +} + +func (svc *dumpService) Dump(context.Context, *rpc.DumpRequest) (*rpc.DumpResponse, error) { + dump := newConfig() + + dump.VppConfig.Interfaces, _ = svc.DumpInterfaces() + dump.VppConfig.Acls, _ = svc.DumpAcls() + dump.VppConfig.IpsecSpds, _ = svc.DumpIPSecSPDs() + dump.VppConfig.IpsecSas, _ = svc.DumpIPSecSAs() + dump.VppConfig.BridgeDomains, _ = svc.DumpBDs() + dump.VppConfig.Routes, _ = svc.DumpRoutes() + dump.VppConfig.Arps, _ = svc.DumpARPs() + dump.VppConfig.Fibs, _ = svc.DumpFIBs() + dump.VppConfig.XconnectPairs, _ = svc.DumpXConnects() + dump.VppConfig.PuntTohosts, _ = svc.DumpPunt() + + // FIXME: linux interface handler should return known proto instead of netlink + // state.LinuxData.Interfaces, _ = svc.DumpLinuxInterfaces() + + return &rpc.DumpResponse{Dump: dump}, nil +} + +// DumpAcls reads IP/MACIP access lists and returns them as an *AclResponse. If reading ends up with error, +// only error is send back in response +func (svc *dumpService) DumpAcls() ([]*vpp_acl.ACL, error) { + var acls []*vpp_acl.ACL + ipACLs, err := svc.aclHandler.DumpACL() + if err != nil { + return nil, err + } + macIPACLs, err := svc.aclHandler.DumpMACIPACL() + if err != nil { + return nil, err + } + for _, aclDetails := range ipACLs { + acls = append(acls, aclDetails.ACL) + } + for _, aclDetails := range macIPACLs { + acls = append(acls, aclDetails.ACL) + } + + return acls, nil +} + +// DumpInterfaces reads interfaces and returns them as an *InterfaceResponse. 
If reading ends up with error, +// only error is send back in response +func (svc *dumpService) DumpInterfaces() ([]*vpp_interfaces.Interface, error) { + var ifs []*vpp_interfaces.Interface + ifDetails, err := svc.ifHandler.DumpInterfaces() + if err != nil { + return nil, err + } + for _, iface := range ifDetails { + ifs = append(ifs, iface.Interface) + } + + return ifs, nil +} + +// DumpIPSecSPDs reads IPSec SPD and returns them as an *IPSecSPDResponse. If reading ends up with error, +// only error is send back in response +func (svc *dumpService) DumpIPSecSPDs() ([]*vpp_ipsec.SecurityPolicyDatabase, error) { + var spds []*vpp_ipsec.SecurityPolicyDatabase + spdDetails, err := svc.ipsecHandler.DumpIPSecSPD() + if err != nil { + return nil, err + } + for _, spd := range spdDetails { + spds = append(spds, spd.Spd) + } + + return spds, nil +} + +// DumpIPSecSAs reads IPSec SA and returns them as an *IPSecSAResponse. If reading ends up with error, +// only error is send back in response +func (svc *dumpService) DumpIPSecSAs() ([]*vpp_ipsec.SecurityAssociation, error) { + var sas []*vpp_ipsec.SecurityAssociation + saDetails, err := svc.ipsecHandler.DumpIPSecSA() + if err != nil { + return nil, err + } + for _, sa := range saDetails { + sas = append(sas, sa.Sa) + } + + return sas, nil +} + +// DumpIPSecTunnels reads IPSec tunnels and returns them as an *IPSecTunnelResponse. If reading ends up with error, +// only error is send back in response +/*func (svc *dumpService) DumpIPSecTunnels() (*rpc.IPSecTunnelResponse, error) { + var tuns []*vpp_ipsec. + tunDetails, err := svc.ipSecHandler.DumpIPSecTunnelInterfaces() + if err != nil { + return nil, err + } + for _, tun := range tunDetails { + tuns = append(tuns, tun.Tunnel) + } + + return &rpc.IPSecTunnelResponse{Tunnels: tuns}, nil +}*/ + +// DumpBDs reads bridge domains and returns them as an *BDResponse. 
If reading ends up with error, +// only error is send back in response +func (svc *dumpService) DumpBDs() ([]*vpp_l2.BridgeDomain, error) { + var bds []*vpp_l2.BridgeDomain + bdDetails, err := svc.bdHandler.DumpBridgeDomains() + if err != nil { + return nil, err + } + for _, bd := range bdDetails { + bds = append(bds, bd.Bd) + } + + return bds, nil +} + +// DumpFIBs reads FIBs and returns them as an *FibResponse. If reading ends up with error, +// only error is send back in response +func (svc *dumpService) DumpFIBs() ([]*vpp_l2.FIBEntry, error) { + var fibs []*vpp_l2.FIBEntry + fibDetails, err := svc.fibHandler.DumpL2FIBs() + if err != nil { + return nil, err + } + for _, fib := range fibDetails { + fibs = append(fibs, fib.Fib) + } + + return fibs, nil +} + +// DumpXConnects reads cross connects and returns them as an *XcResponse. If reading ends up with error, +// only error is send back in response +func (svc *dumpService) DumpXConnects() ([]*vpp_l2.XConnectPair, error) { + var xcs []*vpp_l2.XConnectPair + xcDetails, err := svc.xcHandler.DumpXConnectPairs() + if err != nil { + return nil, err + } + for _, xc := range xcDetails { + xcs = append(xcs, xc.Xc) + } + + return xcs, nil +} + +// DumpRoutes reads VPP routes and returns them as an *RoutesResponse. If reading ends up with error, +// only error is send back in response +func (svc *dumpService) DumpRoutes() ([]*vpp_l3.Route, error) { + var routes []*vpp_l3.Route + rtDetails, err := svc.rtHandler.DumpRoutes() + if err != nil { + return nil, err + } + for _, rt := range rtDetails { + routes = append(routes, rt.Route) + } + + return routes, nil +} + +// DumpARPs reads VPP ARPs and returns them as an *ARPsResponse. 
If reading ends up with error, +// only error is send back in response +func (svc *dumpService) DumpARPs() ([]*vpp_l3.ARPEntry, error) { + var arps []*vpp_l3.ARPEntry + arpDetails, err := svc.arpHandler.DumpArpEntries() + if err != nil { + return nil, err + } + for _, arp := range arpDetails { + arps = append(arps, arp.Arp) + } + + return arps, nil +} + +// DumpPunt reads VPP Punt socket registrations and returns them as an *PuntResponse. +func (svc *dumpService) DumpPunt() (punts []*vpp_punt.ToHost, err error) { + dump, err := svc.puntHandler.DumpPuntRegisteredSockets() + if err != nil { + return nil, err + } + for _, puntDetails := range dump { + punts = append(punts, puntDetails.PuntData) + } + + return punts, nil +} + +// DumpLinuxInterfaces reads linux interfaces and returns them as an *LinuxInterfaceResponse. If reading ends up with error, +// only error is send back in response +/*func (svc *dumpService) DumpLinuxInterfaces() ([]*linux_interfaces.Interface, error) { + var linuxIfs []*linux_interfaces.Interface + ifDetails, err := svc.linuxIfHandler.GetLinkList() + if err != nil { + return nil, err + } + for _, iface := range ifDetails { + linuxIfs = append(linuxIfs, ) + } + + return linuxIfs, nil +} + +// DumpLinuxARPs reads linux ARPs and returns them as an *LinuxARPsResponse. If reading ends up with error, +// only error is send back in response +func (svc *dumpService) DumpLinuxARPs(ctx context.Context, request *rpc.DumpRequest) (*rpc.LinuxARPsResponse, error) { + var linuxArps []*linuxL3.LinuxStaticArpEntries_ArpEntry + arpDetails, err := svc.linuxL3Handler.DumpArpEntries() + if err != nil { + return nil, err + } + for _, arp := range arpDetails { + linuxArps = append(linuxArps, arp.Arp) + } + + return &rpc.LinuxARPsResponse{LinuxArpEntries: linuxArps}, nil +} + +// DumpLinuxRoutes reads linux routes and returns them as an *LinuxRoutesResponse. 
If reading ends up with error, +// only error is send back in response +func (svc *dumpService) DumpLinuxRoutes(ctx context.Context, request *rpc.DumpRequest) (*rpc.LinuxRoutesResponse, error) { + var linuxRoutes []*linuxL3.LinuxStaticRoutes_Route + rtDetails, err := svc.linuxL3Handler.DumpRoutes() + if err != nil { + return nil, err + } + for _, rt := range rtDetails { + linuxRoutes = append(linuxRoutes, rt.Route) + } + + return &rpc.LinuxRoutesResponse{LinuxRoutes: linuxRoutes}, nil +} +*/ diff --git a/plugins/configurator/notify.go b/plugins/configurator/notify.go new file mode 100644 index 0000000000..d24fbd974f --- /dev/null +++ b/plugins/configurator/notify.go @@ -0,0 +1,79 @@ +// Copyright (c) 2019 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configurator + +import ( + "sync" + + "github.com/gogo/protobuf/proto" + "github.com/ligato/cn-infra/logging" + rpc "github.com/ligato/vpp-agent/api/configurator" +) + +// Maximum number of messages stored in the buffer. Buffer is always filled from left +// to right (it means that if the buffer is full, a new entry is written to the index 0) +const bufferSize = 1000 + +// notifyService forwards GRPC messages to external servers. 
+type notifyService struct { + log logging.Logger + + // VPP notifications available for clients + mx sync.RWMutex + buffer [bufferSize]rpc.NotificationResponse + curIdx uint32 +} + +// Notify returns all required VPP notifications (or those available in the buffer) in the same order as they were received +func (svc *notifyService) Notify(from *rpc.NotificationRequest, server rpc.Configurator_NotifyServer) error { + svc.mx.RLock() + defer svc.mx.RUnlock() + + // Copy requested index locally + fromIdx := from.Idx + + // Check if requested index overflows buffer length + if svc.curIdx-from.Idx > bufferSize { + fromIdx = svc.curIdx - bufferSize + } + + // Start from requested index until the most recent entry + for i := fromIdx; i < svc.curIdx; i++ { + entry := svc.buffer[i%bufferSize] + if err := server.Send(&entry); err != nil { + svc.log.Error("Send notification error: %v", err) + return err + } + } + + return nil +} + +// Pushes new notification to the buffer. The order of notifications is preserved. +func (svc *notifyService) pushNotification(notification *rpc.Notification) { + // notification is cloned to ensure it does not get changed after storing + notifCopy := proto.Clone(notification).(*rpc.Notification) + + svc.mx.Lock() + defer svc.mx.Unlock() + + // Notification index starts with 1 + notif := rpc.NotificationResponse{ + NextIdx: svc.curIdx + 1, + Notification: notifCopy, + } + svc.buffer[svc.curIdx%bufferSize] = notif + svc.curIdx++ +} diff --git a/plugins/configurator/options.go b/plugins/configurator/options.go new file mode 100644 index 0000000000..4b2ed3d451 --- /dev/null +++ b/plugins/configurator/options.go @@ -0,0 +1,56 @@ +// Copyright (c) 2019 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configurator + +import ( + "github.com/ligato/cn-infra/rpc/grpc" + "github.com/ligato/vpp-agent/plugins/govppmux" + "github.com/ligato/vpp-agent/plugins/orchestrator" + "github.com/ligato/vpp-agent/plugins/vpp/ifplugin" + "github.com/ligato/vpp-agent/plugins/vpp/l2plugin" +) + +// DefaultPlugin is default instance of Plugin +var DefaultPlugin = *NewPlugin() + +// NewPlugin creates a new Plugin with the provides Options +func NewPlugin(opts ...Option) *Plugin { + p := &Plugin{} + + p.PluginName = "configurator" + p.GRPCServer = &grpc.DefaultPlugin + p.Dispatch = &orchestrator.DefaultPlugin + p.GoVppmux = &govppmux.DefaultPlugin + p.VPPIfPlugin = &ifplugin.DefaultPlugin + p.VPPL2Plugin = &l2plugin.DefaultPlugin + + for _, o := range opts { + o(p) + } + + p.PluginDeps.Setup() + + return p +} + +// Option is a function that acts on a Plugin to inject Dependencies or configuration +type Option func(*Plugin) + +// UseDeps returns Option that can inject custom dependencies. +func UseDeps(cb func(*Deps)) Option { + return func(p *Plugin) { + cb(&p.Deps) + } +} diff --git a/plugins/configurator/plugin.go b/plugins/configurator/plugin.go new file mode 100644 index 0000000000..0e4384595c --- /dev/null +++ b/plugins/configurator/plugin.go @@ -0,0 +1,127 @@ +// Copyright (c) 2019 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configurator + +import ( + "git.fd.io/govpp.git/api" + "github.com/ligato/vpp-agent/api/models/vpp" + "github.com/ligato/vpp-agent/plugins/orchestrator" + ipsecvppcalls "github.com/ligato/vpp-agent/plugins/vpp/ipsecplugin/vppcalls" + puntvppcalls "github.com/ligato/vpp-agent/plugins/vpp/puntplugin/vppcalls" + + "github.com/ligato/cn-infra/infra" + "github.com/ligato/cn-infra/rpc/grpc" + + rpc "github.com/ligato/vpp-agent/api/configurator" + "github.com/ligato/vpp-agent/plugins/govppmux" + iflinuxcalls "github.com/ligato/vpp-agent/plugins/linux/ifplugin/linuxcalls" + l3linuxcalls "github.com/ligato/vpp-agent/plugins/linux/l3plugin/linuxcalls" + aclvppcalls "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppcalls" + "github.com/ligato/vpp-agent/plugins/vpp/ifplugin" + ifvppcalls "github.com/ligato/vpp-agent/plugins/vpp/ifplugin/vppcalls" + "github.com/ligato/vpp-agent/plugins/vpp/l2plugin" + l2vppcalls "github.com/ligato/vpp-agent/plugins/vpp/l2plugin/vppcalls" + l3vppcalls "github.com/ligato/vpp-agent/plugins/vpp/l3plugin/vppcalls" + natvppcalls "github.com/ligato/vpp-agent/plugins/vpp/natplugin/vppcalls" +) + +// Plugin registers VPP GRPC services in *grpc.Server. 
+type Plugin struct { + Deps + + configurator configuratorServer + + // Channels + vppChan api.Channel + dumpChan api.Channel +} + +// Deps - dependencies of Plugin +type Deps struct { + infra.PluginDeps + GRPCServer grpc.Server + Dispatch orchestrator.Dispatcher + GoVppmux govppmux.TraceAPI + VPPIfPlugin ifplugin.API + VPPL2Plugin *l2plugin.L2Plugin +} + +// Init sets plugin child loggers +func (p *Plugin) Init() error { + p.configurator.log = p.Log.NewLogger("configurator") + p.configurator.notifyService.log = p.Log.NewLogger("configurator-notify") + p.configurator.dispatch = p.Dispatch + + if err := p.initHandlers(); err != nil { + return err + } + + grpcServer := p.GRPCServer.GetServer() + if grpcServer != nil { + rpc.RegisterConfiguratorServer(grpcServer, &p.configurator) + } + + if p.VPPIfPlugin != nil { + p.VPPIfPlugin.SetNotifyService(func(vppNotification *vpp.Notification) { + p.configurator.notifyService.pushNotification(&rpc.Notification{ + Notification: &rpc.Notification_VppNotification{ + VppNotification: vppNotification, + }, + }) + }) + } + + return nil +} + +// Close does nothing. 
+func (p *Plugin) Close() error { + return nil +} + +// helper method initializes all VPP/Linux plugin handlers +func (p *Plugin) initHandlers() (err error) { + // VPP channels + if p.vppChan, err = p.GoVppmux.NewAPIChannel(); err != nil { + return err + } + if p.dumpChan, err = p.GoVppmux.NewAPIChannel(); err != nil { + return err + } + + // VPP Indexes + ifIndexes := p.VPPIfPlugin.GetInterfaceIndex() + bdIndexes := p.VPPL2Plugin.GetBDIndex() + dhcpIndexes := p.VPPIfPlugin.GetDHCPIndex() + + // Initialize VPP handlers + p.configurator.aclHandler = aclvppcalls.NewACLVppHandler(p.vppChan, p.dumpChan, ifIndexes) + p.configurator.ifHandler = ifvppcalls.NewIfVppHandler(p.vppChan, p.Log) + p.configurator.natHandler = natvppcalls.NewNatVppHandler(p.vppChan, ifIndexes, dhcpIndexes, p.Log) + p.configurator.bdHandler = l2vppcalls.NewBridgeDomainVppHandler(p.vppChan, ifIndexes, p.Log) + p.configurator.fibHandler = l2vppcalls.NewFIBVppHandler(p.vppChan, ifIndexes, bdIndexes, p.Log) + p.configurator.xcHandler = l2vppcalls.NewXConnectVppHandler(p.vppChan, ifIndexes, p.Log) + p.configurator.arpHandler = l3vppcalls.NewArpVppHandler(p.vppChan, ifIndexes, p.Log) + p.configurator.pArpHandler = l3vppcalls.NewProxyArpVppHandler(p.vppChan, ifIndexes, p.Log) + p.configurator.rtHandler = l3vppcalls.NewRouteVppHandler(p.vppChan, ifIndexes, p.Log) + p.configurator.ipsecHandler = ipsecvppcalls.NewIPsecVppHandler(p.vppChan, ifIndexes, p.Log) + p.configurator.puntHandler = puntvppcalls.NewPuntVppHandler(p.vppChan, ifIndexes, p.Log) + + // Linux indexes and handlers + p.configurator.linuxIfHandler = iflinuxcalls.NewNetLinkHandler() + p.configurator.linuxL3Handler = l3linuxcalls.NewNetLinkHandler() + + return nil +} diff --git a/plugins/govppmux/adapter_stubs.go b/plugins/govppmux/adapter_stubs.go index 649621dfe9..62499cd7e1 100644 --- a/plugins/govppmux/adapter_stubs.go +++ b/plugins/govppmux/adapter_stubs.go @@ -29,4 +29,4 @@ func NewVppAdapter(shmPrefix string) adapter.VppAPI { // 
NewStatsAdapter returns stats vpp api adapter, used for reading statistics with vppapiclient library. func NewStatsAdapter(socketName string) adapter.StatsAPI { return govppmock.NewStatsAdapter() -} \ No newline at end of file +} diff --git a/plugins/govppmux/mock/mock_govppmux.go b/plugins/govppmux/mock/mock_govppmux.go index 9ccc050037..2aab9ca180 100644 --- a/plugins/govppmux/mock/mock_govppmux.go +++ b/plugins/govppmux/mock/mock_govppmux.go @@ -1,7 +1,22 @@ +// Copyright (c) 2019 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package mock import ( "fmt" + "git.fd.io/govpp.git/adapter" "git.fd.io/govpp.git/adapter/mock" govppapi "git.fd.io/govpp.git/api" @@ -12,7 +27,7 @@ import ( // GoVPPMux implements GoVPP Mux API with stats type GoVPPMux struct { connection *core.Connection - stats *mock.StatsAdapter + stats *mock.StatsAdapter } // NewMockGoVPPMux prepares new mock of GoVPP multiplexor with given context @@ -24,7 +39,7 @@ func NewMockGoVPPMux(ctx *vppcallmock.TestCtx) (*GoVPPMux, error) { return &GoVPPMux{ connection: connection, - stats: ctx.MockStats, + stats: ctx.MockStats, }, nil } @@ -45,7 +60,7 @@ func (p *GoVPPMux) NewAPIChannelBuffered(reqChanBufSize, replyChanBufSize int) ( } // ListStats lists stats from mocked stats API -func (p *GoVPPMux) ListStats(patterns... 
string) ([]string, error) { +func (p *GoVPPMux) ListStats(patterns ...string) ([]string, error) { if p.stats == nil { return nil, fmt.Errorf("failed to list VPP stats, nil stats adapter") } @@ -53,7 +68,7 @@ func (p *GoVPPMux) ListStats(patterns... string) ([]string, error) { } // DumpStats dumps stats from mocked stats API -func (p *GoVPPMux) DumpStats(patterns... string) ([]*adapter.StatEntry, error) { +func (p *GoVPPMux) DumpStats(patterns ...string) ([]*adapter.StatEntry, error) { if p.stats == nil { return nil, fmt.Errorf("failed to dump VPP stats, nil stats adapter") } diff --git a/plugins/govppmux/plugin_api_govppmux.go b/plugins/govppmux/plugin_api_govppmux.go index 6e2282e369..e65bd44721 100644 --- a/plugins/govppmux/plugin_api_govppmux.go +++ b/plugins/govppmux/plugin_api_govppmux.go @@ -34,7 +34,7 @@ type StatsAPI interface { // ListStats returns all stats names present on the VPP. Patterns can be used as a prefix // to filter the output - ListStats(patterns... string) ([]string, error) + ListStats(patterns ...string) ([]string, error) // ListStats returns all stats names, types and values from the VPP. Patterns can be used as a prefix // to filter the output. Stats are divided between workers. Example: @@ -48,7 +48,7 @@ type StatsAPI interface { // 20 for sw_if_index 1 // 40 for sw_if_index 2 (sum of stats from all workers) // - DumpStats(patterns... string) ([]*adapter.StatEntry, error) + DumpStats(patterns ...string) ([]*adapter.StatEntry, error) } // API for other plugins to get connectivity to VPP. 
@@ -69,5 +69,3 @@ type API interface { // ch.SendRequest(req).ReceiveReply NewAPIChannelBuffered(reqChanBufSize, replyChanBufSize int) (govppapi.Channel, error) } - - diff --git a/plugins/govppmux/plugin_impl_govppmux.go b/plugins/govppmux/plugin_impl_govppmux.go index dbcdf95b69..065b994204 100644 --- a/plugins/govppmux/plugin_impl_govppmux.go +++ b/plugins/govppmux/plugin_impl_govppmux.go @@ -16,14 +16,9 @@ package govppmux import ( "context" - "reflect" "sync" "time" - "github.com/go-errors/errors" - "github.com/ligato/cn-infra/logging/measure" - "github.com/ligato/cn-infra/logging/measure/model/apitrace" - "git.fd.io/govpp.git/adapter" govppapi "git.fd.io/govpp.git/api" govpp "git.fd.io/govpp.git/core" @@ -32,10 +27,14 @@ import ( "github.com/ligato/cn-infra/infra" "github.com/ligato/cn-infra/logging" "github.com/ligato/cn-infra/logging/logrus" + "github.com/ligato/cn-infra/logging/measure" + "github.com/ligato/cn-infra/logging/measure/model/apitrace" + "github.com/pkg/errors" + "github.com/ligato/vpp-agent/plugins/govppmux/vppcalls" - aclvppcalls "github.com/ligato/vpp-agent/plugins/vpp/aclplugin/vppcalls" ) +// Default path to socket for VPP stats const defaultStatsSocket = "/run/vpp/stats.sock" // Plugin implements the govppmux plugin interface. @@ -72,6 +71,7 @@ type Deps struct { // Config groups the configurable parameter of GoVpp. type Config struct { TraceEnabled bool `json:"trace-enabled"` + ReconnectResync bool `json:"resync-after-reconnect"` HealthCheckProbeInterval time.Duration `json:"health-check-probe-interval"` HealthCheckReplyTimeout time.Duration `json:"health-check-reply-timeout"` HealthCheckThreshold int `json:"health-check-threshold"` @@ -80,7 +80,6 @@ type Config struct { // shared memory segments are created directly in the SHM directory /dev/shm. 
ShmPrefix string `json:"shm-prefix"` StatsSocketName string `json:"stats-socket-name"` - ReconnectResync bool `json:"resync-after-reconnect"` // How many times can be request resent in case vpp is suddenly disconnected. RetryRequestCount int `json:"retry-request-count"` // Time between request resend attempts. Default is 500ms. @@ -90,95 +89,115 @@ type Config struct { func defaultConfig() *Config { return &Config{ HealthCheckProbeInterval: time.Second, - HealthCheckReplyTimeout: 100 * time.Millisecond, + HealthCheckReplyTimeout: 250 * time.Millisecond, HealthCheckThreshold: 1, ReplyTimeout: time.Second, RetryRequestTimeout: 500 * time.Millisecond, } } +func (p *Plugin) loadConfig() (*Config, error) { + cfg := defaultConfig() + + found, err := p.Cfg.LoadValue(cfg) + if err != nil { + return nil, err + } else if found { + p.Log.Debugf("config loaded from file %q", p.Cfg.GetConfigName()) + } else { + p.Log.Debugf("config file %q not found, using default config", p.Cfg.GetConfigName()) + } + + return cfg, nil +} + // Init is the entry point called by Agent Core. A single binary-API connection to VPP is established. 
-func (plugin *Plugin) Init() error { +func (p *Plugin) Init() error { var err error - govppLogger := plugin.Deps.Log.NewLogger("GoVpp") + govppLogger := p.Deps.Log.NewLogger("govpp") if govppLogger, ok := govppLogger.(*logrus.Logger); ok { govppLogger.SetLevel(logging.InfoLevel) govpp.SetLogger(govppLogger.StandardLogger()) } - plugin.PluginName = plugin.Deps.PluginName - - plugin.config = defaultConfig() - found, err := plugin.Cfg.LoadValue(plugin.config) - if err != nil { + if p.config, err = p.loadConfig(); err != nil { return err } - if found { - govpp.HealthCheckProbeInterval = plugin.config.HealthCheckProbeInterval - govpp.HealthCheckReplyTimeout = plugin.config.HealthCheckReplyTimeout - govpp.HealthCheckThreshold = plugin.config.HealthCheckThreshold - if plugin.config.TraceEnabled { - plugin.tracer = measure.NewTracer("govpp-mux") - plugin.Log.Info("VPP API trace enabled") - } + + p.Log.Debugf("config: %+v", p.config) + govpp.HealthCheckProbeInterval = p.config.HealthCheckProbeInterval + govpp.HealthCheckReplyTimeout = p.config.HealthCheckReplyTimeout + govpp.HealthCheckThreshold = p.config.HealthCheckThreshold + govpp.DefaultReplyTimeout = p.config.ReplyTimeout + if p.config.TraceEnabled { + p.tracer = measure.NewTracer("govpp-mux") + p.Log.Info("VPP API trace enabled") } - if plugin.vppAdapter == nil { - plugin.vppAdapter = NewVppAdapter(plugin.config.ShmPrefix) + if p.vppAdapter == nil { + p.vppAdapter = NewVppAdapter(p.config.ShmPrefix) } else { - plugin.Log.Info("Reusing existing vppAdapter") //this is used for testing purposes + // this is used for testing purposes + p.Log.Info("Reusing existing vppAdapter") } startTime := time.Now() - plugin.vppConn, plugin.vppConChan, err = govpp.AsyncConnect(plugin.vppAdapter) + p.vppConn, p.vppConChan, err = govpp.AsyncConnect(p.vppAdapter) if err != nil { return err } // TODO: Async connect & automatic reconnect support is not yet implemented in the agent, // so synchronously wait until connected to VPP. 
- status := <-plugin.vppConChan + status := <-p.vppConChan if status.State != govpp.Connected { return errors.New("unable to connect to VPP") } vppConnectTime := time.Since(startTime) - plugin.Log.Info("Connecting to VPP took ", vppConnectTime) - plugin.retrieveVersion() + info, err := p.retrieveVpeInfo() + if err != nil { + p.Log.Errorf("retrieving vpe info failed: %v", err) + return err + } + p.Log.Infof("Connected to VPP [PID:%d] (took %s)", + info.PID, vppConnectTime.Truncate(time.Millisecond)) + p.retrieveVersion() // Register providing status reports (push mode) - plugin.StatusCheck.Register(plugin.PluginName, nil) - plugin.StatusCheck.ReportStateChange(plugin.PluginName, statuscheck.OK, nil) + p.StatusCheck.Register(p.PluginName, nil) + p.StatusCheck.ReportStateChange(p.PluginName, statuscheck.OK, nil) var ctx context.Context - ctx, plugin.cancel = context.WithCancel(context.Background()) - go plugin.handleVPPConnectionEvents(ctx) + ctx, p.cancel = context.WithCancel(context.Background()) + go p.handleVPPConnectionEvents(ctx) // Connect to VPP status socket - if plugin.config.StatsSocketName != "" { - plugin.statsAdapter = NewStatsAdapter(plugin.config.StatsSocketName) + if p.config.StatsSocketName != "" { + p.statsAdapter = NewStatsAdapter(p.config.StatsSocketName) } else { - plugin.statsAdapter = NewStatsAdapter(defaultStatsSocket) + p.statsAdapter = NewStatsAdapter(defaultStatsSocket) } - if err := plugin.statsAdapter.Connect(); err != nil { - plugin.Log.Errorf("Unable to connect to VPP statistics socket, %v", err) + if err := p.statsAdapter.Connect(); err != nil { + p.Log.Warnf("Unable to connect to VPP statistics socket, %v", err) + p.statsAdapter = nil } return nil } // Close cleans up the resources allocated by the govppmux plugin. 
-func (plugin *Plugin) Close() error { - plugin.cancel() - plugin.wg.Wait() +func (p *Plugin) Close() error { + p.cancel() + p.wg.Wait() defer func() { - if plugin.vppConn != nil { - plugin.vppConn.Disconnect() + if p.vppConn != nil { + p.vppConn.Disconnect() } - if plugin.statsAdapter != nil { - if err := plugin.statsAdapter.Disconnect(); err != nil { - plugin.Log.Errorf("VPP statistics socket adapter disconnect error: %v", err) + if p.statsAdapter != nil { + if err := p.statsAdapter.Disconnect(); err != nil { + p.Log.Errorf("VPP statistics socket adapter disconnect error: %v", err) } } }() @@ -192,19 +211,16 @@ func (plugin *Plugin) Close() error { // Example of binary API call from some plugin using GOVPP: // ch, _ := govpp_mux.NewAPIChannel() // ch.SendRequest(req).ReceiveReply -func (plugin *Plugin) NewAPIChannel() (govppapi.Channel, error) { - ch, err := plugin.vppConn.NewAPIChannel() +func (p *Plugin) NewAPIChannel() (govppapi.Channel, error) { + ch, err := p.vppConn.NewAPIChannel() if err != nil { return nil, err } - if plugin.config.ReplyTimeout > 0 { - ch.SetReplyTimeout(plugin.config.ReplyTimeout) - } retryCfg := retryConfig{ - plugin.config.RetryRequestCount, - plugin.config.RetryRequestTimeout, + p.config.RetryRequestCount, + p.config.RetryRequestTimeout, } - return &goVppChan{ch, retryCfg, plugin.tracer}, nil + return &goVppChan{ch, retryCfg, p.tracer}, nil } // NewAPIChannelBuffered returns a new API channel for communication with VPP via govpp core. 
@@ -213,69 +229,67 @@ func (plugin *Plugin) NewAPIChannel() (govppapi.Channel, error) { // Example of binary API call from some plugin using GOVPP: // ch, _ := govpp_mux.NewAPIChannelBuffered(100, 100) // ch.SendRequest(req).ReceiveReply -func (plugin *Plugin) NewAPIChannelBuffered(reqChanBufSize, replyChanBufSize int) (govppapi.Channel, error) { - ch, err := plugin.vppConn.NewAPIChannelBuffered(reqChanBufSize, replyChanBufSize) +func (p *Plugin) NewAPIChannelBuffered(reqChanBufSize, replyChanBufSize int) (govppapi.Channel, error) { + ch, err := p.vppConn.NewAPIChannelBuffered(reqChanBufSize, replyChanBufSize) if err != nil { return nil, err } - if plugin.config.ReplyTimeout > 0 { - ch.SetReplyTimeout(plugin.config.ReplyTimeout) - } retryCfg := retryConfig{ - plugin.config.RetryRequestCount, - plugin.config.RetryRequestTimeout, + p.config.RetryRequestCount, + p.config.RetryRequestTimeout, } - return &goVppChan{ch, retryCfg, plugin.tracer}, nil + return &goVppChan{ch, retryCfg, p.tracer}, nil } // GetTrace returns all trace entries measured so far -func (plugin *Plugin) GetTrace() *apitrace.Trace { - if !plugin.config.TraceEnabled { - plugin.Log.Warnf("VPP API trace is disabled") +func (p *Plugin) GetTrace() *apitrace.Trace { + if !p.config.TraceEnabled { + p.Log.Warnf("VPP API trace is disabled") return nil } - return plugin.tracer.Get() + return p.tracer.Get() } // ListStats returns all stats names -func (plugin *Plugin) ListStats(prefixes... string) ([]string, error) { - if reflect.ValueOf(plugin.statsAdapter).IsNil() { +func (p *Plugin) ListStats(prefixes ...string) ([]string, error) { + if p.statsAdapter == nil { return nil, nil } - return plugin.statsAdapter.ListStats(prefixes...) + return p.statsAdapter.ListStats(prefixes...) } // DumpStats returns all stats with name, type and value -func (plugin *Plugin) DumpStats(prefixes... 
string) ([]*adapter.StatEntry, error) { - if reflect.ValueOf(plugin.statsAdapter).IsNil() { +func (p *Plugin) DumpStats(prefixes ...string) ([]*adapter.StatEntry, error) { + if p.statsAdapter == nil { return nil, nil } - return plugin.statsAdapter.DumpStats(prefixes...) + return p.statsAdapter.DumpStats(prefixes...) } // handleVPPConnectionEvents handles VPP connection events. -func (plugin *Plugin) handleVPPConnectionEvents(ctx context.Context) { - plugin.wg.Add(1) - defer plugin.wg.Done() +func (p *Plugin) handleVPPConnectionEvents(ctx context.Context) { + p.wg.Add(1) + defer p.wg.Done() for { select { - case status := <-plugin.vppConChan: + case status := <-p.vppConChan: if status.State == govpp.Connected { - plugin.retrieveVersion() - if plugin.config.ReconnectResync && plugin.lastConnErr != nil { - plugin.Log.Info("Starting resync after VPP reconnect") - if plugin.Resync != nil { - plugin.Resync.DoResync() - plugin.lastConnErr = nil + p.retrieveVpeInfo() + p.retrieveVersion() + if p.config.ReconnectResync && p.lastConnErr != nil { + p.Log.Info("Starting resync after VPP reconnect") + if p.Resync != nil { + p.Resync.DoResync() + p.lastConnErr = nil } else { - plugin.Log.Warn("Expected resync after VPP reconnect could not start because of missing Resync plugin") + p.Log.Warn("Expected resync after VPP reconnect could not start because of missing Resync plugin") } } - plugin.StatusCheck.ReportStateChange(plugin.PluginName, statuscheck.OK, nil) + p.StatusCheck.ReportStateChange(p.PluginName, statuscheck.OK, nil) } else { - plugin.lastConnErr = errors.New("VPP disconnected") - plugin.StatusCheck.ReportStateChange(plugin.PluginName, statuscheck.Error, plugin.lastConnErr) + p.lastConnErr = errors.New("VPP disconnected") + p.StatusCheck.ReportStateChange(p.PluginName, statuscheck.Error, p.lastConnErr) } case <-ctx.Done(): @@ -284,28 +298,46 @@ func (plugin *Plugin) handleVPPConnectionEvents(ctx context.Context) { } } -func (plugin *Plugin) retrieveVersion() { - 
vppAPIChan, err := plugin.vppConn.NewAPIChannel() +func (p *Plugin) retrieveVpeInfo() (*vppcalls.VpeInfo, error) { + vppAPIChan, err := p.vppConn.NewAPIChannel() + if err != nil { + p.Log.Error("getting new api channel failed:", err) + return nil, err + } + defer vppAPIChan.Close() + + info, err := vppcalls.GetVpeInfo(vppAPIChan) + if err != nil { + p.Log.Warn("getting version info failed:", err) + return nil, err + } + p.Log.Debugf("connection info: %+v", info) + + return info, nil +} + +func (p *Plugin) retrieveVersion() { + vppAPIChan, err := p.vppConn.NewAPIChannel() if err != nil { - plugin.Log.Error("getting new api channel failed:", err) + p.Log.Error("getting new api channel failed:", err) return } defer vppAPIChan.Close() - info, err := vppcalls.GetVersionInfo(vppAPIChan) + version, err := vppcalls.GetVersionInfo(vppAPIChan) if err != nil { - plugin.Log.Warn("getting version info failed:", err) + p.Log.Warn("getting version info failed:", err) return } - plugin.Log.Debugf("version info: %+v", info) - plugin.Log.Infof("VPP version: %q (%v)", info.Version, info.BuildDate) + p.Log.Debugf("version info: %+v", version) + p.Log.Infof("VPP version: %q (%v)", version.Version, version.BuildDate) // Get VPP ACL plugin version var aclVersion string - if aclVersion, err = aclvppcalls.GetACLPluginVersion(vppAPIChan); err != nil { - plugin.Log.Warn("getting acl version info failed:", err) + if aclVersion, err = vppcalls.GetACLPluginVersion(vppAPIChan); err != nil { + p.Log.Warn("getting acl version info failed:", err) return } - plugin.Log.Infof("VPP ACL plugin version: %q", aclVersion) + p.Log.Infof("VPP ACL plugin version: %q", aclVersion) } diff --git a/plugins/govppmux/vppcalls/acl_vppcalls.go b/plugins/govppmux/vppcalls/acl_vppcalls.go new file mode 100644 index 0000000000..13473555ee --- /dev/null +++ b/plugins/govppmux/vppcalls/acl_vppcalls.go @@ -0,0 +1,36 @@ +// Copyright (c) 2017 Cisco and/or its affiliates. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vppcalls + +import ( + "fmt" + + govppapi "git.fd.io/govpp.git/api" + aclapi "github.com/ligato/vpp-agent/plugins/vpp/binapi/acl" +) + +// GetACLPluginVersion retrieves ACL plugin version. +func GetACLPluginVersion(ch govppapi.Channel) (string, error) { + req := &aclapi.ACLPluginGetVersion{} + reply := &aclapi.ACLPluginGetVersionReply{} + + if err := ch.SendRequest(req).ReceiveReply(reply); err != nil { + return "", fmt.Errorf("failed to get VPP ACL plugin version: %v", err) + } + + version := fmt.Sprintf("%d.%d", reply.Major, reply.Minor) + + return version, nil +} \ No newline at end of file diff --git a/plugins/govppmux/vppcalls/vpe_vppcalls.go b/plugins/govppmux/vppcalls/vpe_vppcalls.go index bad3eae90c..7532e44b03 100644 --- a/plugins/govppmux/vppcalls/vpe_vppcalls.go +++ b/plugins/govppmux/vppcalls/vpe_vppcalls.go @@ -22,9 +22,61 @@ import ( "strings" govppapi "git.fd.io/govpp.git/api" + "github.com/ligato/vpp-agent/plugins/vpp/binapi/memclnt" "github.com/ligato/vpp-agent/plugins/vpp/binapi/vpe" ) +// VpeInfo contains information about VPP connection and process. +type VpeInfo struct { + PID uint32 + ClientIdx uint32 + ModuleVersions map[string]ModuleVersion +} + +type ModuleVersion struct { + Name string + Major uint32 + Minor uint32 + Patch uint32 +} + +// GetVpeInfo retrieves vpe information. 
+func GetVpeInfo(vppChan govppapi.Channel) (*VpeInfo, error) { + req := &vpe.ControlPing{} + reply := &vpe.ControlPingReply{} + + if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + return nil, err + } + + info := &VpeInfo{ + PID: reply.VpePID, + ClientIdx: reply.ClientIndex, + ModuleVersions: make(map[string]ModuleVersion), + } + + { + req := &memclnt.APIVersions{} + reply := &memclnt.APIVersionsReply{} + + if err := vppChan.SendRequest(req).ReceiveReply(reply); err != nil { + return nil, err + } + + for _, v := range reply.APIVersions { + name := string(cleanBytes(v.Name)) + info.ModuleVersions[name] = ModuleVersion{ + Name: name, + Major: v.Major, + Minor: v.Minor, + Patch: v.Patch, + } + } + } + + return info, nil +} + // VersionInfo contains values returned from ShowVersion type VersionInfo struct { Program string @@ -45,10 +97,10 @@ func GetVersionInfo(vppChan govppapi.Channel) (*VersionInfo, error) { } info := &VersionInfo{ - Program: reply.Program, - Version: reply.Version, - BuildDate: reply.BuildDate, - BuildDirectory: reply.BuildDirectory, + Program: string(cleanBytes(reply.Program)), + Version: string(cleanBytes(reply.Version)), + BuildDate: string(cleanBytes(reply.BuildDate)), + BuildDirectory: string(cleanBytes(reply.BuildDirectory)), } return info, nil @@ -57,7 +109,8 @@ func GetVersionInfo(vppChan govppapi.Channel) (*VersionInfo, error) { // RunCliCommand executes CLI command and returns output func RunCliCommand(vppChan govppapi.Channel, cmd string) (string, error) { req := &vpe.CliInband{ - Cmd: cmd, + Cmd: []byte(cmd), + Length: uint32(len(cmd)), } reply := &vpe.CliInbandReply{} @@ -67,7 +120,7 @@ func RunCliCommand(vppChan govppapi.Channel, cmd string) (string, error) { return "", fmt.Errorf("%s returned %d", reply.GetMessageName(), reply.Retval) } - return reply.Reply, nil + return string(cleanBytes(reply.Reply)), nil } // MemoryInfo contains values returned from 'show memory' @@ -266,7 +319,7 @@ func GetRuntimeInfo(vppChan 
govppapi.Channel) (*RuntimeInfo, error) { VectorRatesPunt: strToFloat64(fields[10]), } - itemMatches := runtimeItemsRe.FindAllStringSubmatch(string(fields[11]), -1) + itemMatches := runtimeItemsRe.FindAllStringSubmatch(fields[11], -1) for _, matches := range itemMatches { fields := matches[1:] if len(fields) != 7 { diff --git a/plugins/govppmux/vppcalls/vpe_vppcalls_test.go b/plugins/govppmux/vppcalls/vpe_vppcalls_test.go index 0cb2fe8523..b1d5187999 100644 --- a/plugins/govppmux/vppcalls/vpe_vppcalls_test.go +++ b/plugins/govppmux/vppcalls/vpe_vppcalls_test.go @@ -38,7 +38,7 @@ func TestGetBuffers(t *testing.T) { 0 replication-recycle 7 1024 0 0 0 0 0 default 8 2048 0 0 0 0 ` ctx.MockVpp.MockReply(&vpe.CliInbandReply{ - Reply: reply, + Reply: []byte(reply), }) info, err := vppcalls.GetBuffersInfo(ctx.MockChannel) @@ -135,7 +135,7 @@ Time 21.5, average vectors/node 0.00, last 128 main loops 0.00 per node 0.00 unix-epoll-input polling 20563870 0 0 3.56e3 0.00 ` ctx.MockVpp.MockReply(&vpe.CliInbandReply{ - Reply: reply, + Reply: []byte(reply), }) info, err := vppcalls.GetRuntimeInfo(ctx.MockChannel) @@ -166,7 +166,7 @@ Thread 2 vpp_wk_1 22991 objects, 19199k of 24937k used, 5196k free, 5168k reclaimed, 361k overhead, 1048572k capacity ` ctx.MockVpp.MockReply(&vpe.CliInbandReply{ - Reply: reply, + Reply: []byte(reply), }) info, err := vppcalls.GetMemory(ctx.MockChannel) @@ -214,7 +214,7 @@ func TestGetNodeCounters(t *testing.T) { 1 cdp-input good cdp packets (processed) ` ctx.MockVpp.MockReply(&vpe.CliInbandReply{ - Reply: reply, + Reply: []byte(reply), }) info, err := vppcalls.GetNodeCounters(ctx.MockChannel) diff --git a/plugins/kvscheduler/api/errors.go b/plugins/kvscheduler/api/errors.go new file mode 100644 index 0000000000..7c0cde70f3 --- /dev/null +++ b/plugins/kvscheduler/api/errors.go @@ -0,0 +1,213 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "fmt" + "strings" + + "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" +) + +var ( + // ErrCombinedDownstreamResyncWithChange is returned when transaction combines downstream-resync with data changes. + ErrCombinedDownstreamResyncWithChange = errors.New("downstream resync combined with data changes in one transaction") + + // ErrRevertNotSupportedWithResync is returned when transaction combines resync with revert. + ErrRevertNotSupportedWithResync = errors.New("it is not supported to combine resync with revert") + + // ErrClosedScheduler is returned when scheduler is closed during transaction execution. + ErrClosedScheduler = errors.New("scheduler was closed") + + // ErrTxnWaitCanceled is returned when waiting for result of blocking transaction is canceled. + ErrTxnWaitCanceled = errors.New("waiting for result of blocking transaction was canceled") + + // ErrTxnQueueFull is returned when the queue of pending transactions is full. + ErrTxnQueueFull = errors.New("transaction queue is full") + + // ErrUnimplementedCreate is returned when NB transaction attempts to Create value + // for which there is a descriptor, but Create operation is not implemented. 
+ ErrUnimplementedCreate = errors.New("operation Create is not implemented") + + // ErrUnimplementedDelete is returned when NB transaction attempts to Delete value + // for which there is a descriptor, but Delete operation is not implemented. + ErrUnimplementedDelete = errors.New("operation Delete is not implemented") +) + +// ErrInvalidValueType is returned to scheduler by auto-generated descriptor adapter +// when value does not match expected type. +func ErrInvalidValueType(key string, value proto.Message) error { + if key == "" { + return errors.Errorf("value (%v) has invalid type", value) + } + return errors.Errorf("value (%v) has invalid type for key: %s", value, key) +} + +// ErrInvalidMetadataType is returned to scheduler by auto-generated descriptor adapter +// when value metadata does not match expected type. +func ErrInvalidMetadataType(key string) error { + if key == "" { + return errors.Errorf("metadata has invalid type") + } + return errors.Errorf("metadata has invalid type for key: %s", key) +} + +/****************************** Transaction Error *****************************/ + +// TransactionError implements Error interface, wrapping all errors encountered +// during the processing of a single transaction. +type TransactionError struct { + txnInitError error + kvErrors []KeyWithError +} + +// NewTransactionError is a constructor for transaction error. +func NewTransactionError(txnInitError error, kvErrors []KeyWithError) *TransactionError { + return &TransactionError{txnInitError: txnInitError, kvErrors: kvErrors} +} + +// Error returns a string representation of all errors encountered during +// the transaction processing. 
+func (e *TransactionError) Error() string { + if e == nil { + return "" + } + if e.txnInitError != nil { + return e.txnInitError.Error() + } + if len(e.kvErrors) > 0 { + var kvErrMsgs []string + for _, kvError := range e.kvErrors { + kvErrMsgs = append(kvErrMsgs, fmt.Sprintf( + "%s (%v): %v", kvError.Key, kvError.TxnOperation, kvError.Error, + )) + } + return fmt.Sprintf("KeyErrors: [%s]", strings.Join(kvErrMsgs, ", ")) + } + return "" +} + +// GetKVErrors returns errors for key-value pairs that failed to get applied. +func (e *TransactionError) GetKVErrors() (kvErrors []KeyWithError) { + if e == nil { + return kvErrors + } + return e.kvErrors +} + +// GetTxnInitError returns error thrown during the transaction initialization. +// If the transaction initialization fails, the other stages of the transaction +// processing are not even started, therefore either GetTxnInitError or GetKVErrors +// may return some errors, but not both. +func (e *TransactionError) GetTxnInitError() error { + if e == nil { + return nil + } + return e.txnInitError +} + +/******************************** Invalid Value *******************************/ + +// InvalidValueError can be used by descriptor for the Validate method to return +// validation error together with a list of invalid fields for further +// clarification. +type InvalidValueError struct { + err error + invalidFields []string +} + +// NewInvalidValueError is a constructor for invalid-value error. +func NewInvalidValueError(err error, invalidFields ...string) *InvalidValueError { + return &InvalidValueError{err: err, invalidFields: invalidFields} +} + +// Error returns a string representation of all errors encountered during +// the transaction processing. 
+func (e *InvalidValueError) Error() string { + if e == nil || e.err == nil { + return "" + } + if len(e.invalidFields) == 0 { + return e.err.Error() + } + if len(e.invalidFields) == 1 { + return fmt.Sprintf("field %v is invalid: %v", e.invalidFields[0], e.err) + } + return fmt.Sprintf("fields %v are invalid: %v", e.invalidFields, e.err) +} + +// GetValidationError returns internally stored validation error. +func (e *InvalidValueError) GetValidationError() error { + return e.err +} + +// GetInvalidFields returns internally stored slice of invalid fields. +func (e *InvalidValueError) GetInvalidFields() []string { + return e.invalidFields +} + +/***************************** Verification Failure ****************************/ + +type VerificationErrorType int + +const ( + // ExpectedToExist marks verification error returned when configured (non-nil) + // value is not found by the refresh. + ExpectedToExist VerificationErrorType = iota + + // ExpectedToNotExist marks verification error returned when removed (nil) + // value is found by the refresh to still exist. + ExpectedToNotExist + + // NotEquivalent marks verification error returned when applied value is not + // equivalent with the refreshed value. + NotEquivalent +) + +// VerificationError is returned by the scheduler for a transaction when an applied +// value does not match with the refreshed value. +type VerificationError struct { + key string + errType VerificationErrorType +} + +// NewVerificationError is constructor for a verification error. +func NewVerificationError(key string, errType VerificationErrorType) *VerificationError { + return &VerificationError{key: key, errType: errType} +} + +// Error returns a string representation of the error. 
+func (e *VerificationError) Error() string { + switch e.errType { + case ExpectedToExist: + return "value is not actually configured" + case ExpectedToNotExist: + return "value is not actually removed" + case NotEquivalent: + return "applied value is not equivalent with the refreshed value" + } + return "" +} + +// Key returns the key of the value for which the verification failed. +func (e *VerificationError) Key() string { + return e.key +} + +// Type returns the verification error type. +func (e *VerificationError) Type() VerificationErrorType { + return e.errType +} diff --git a/plugins/kvscheduler/api/kv_descriptor_api.go b/plugins/kvscheduler/api/kv_descriptor_api.go new file mode 100644 index 0000000000..e78c2c3009 --- /dev/null +++ b/plugins/kvscheduler/api/kv_descriptor_api.go @@ -0,0 +1,248 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "github.com/gogo/protobuf/proto" + "github.com/ligato/cn-infra/idxmap" +) + +// Dependency references another kv pair that must exist before the associated +// value can be created. +type Dependency struct { + // Label should be a short human-readable string labeling the dependency. + // Must be unique in the list of dependencies for a value. + Label string + + // Key of another kv pair that the associated value depends on. + // If empty, AnyOf must be defined instead. 
+ Key string + + // AnyOf, if not nil, must return true for at least one of the already created + // keys for the dependency to be considered satisfied. + // Either Key or AnyOf should be defined, but not both at the same time. + // Note: AnyOf comes with more overhead than a static key dependency, + // so prefer to use the latter whenever possible. + AnyOf KeySelector +} + +// MetadataMapFactory can be used by descriptor to define a custom map associating +// value labels with value metadata, potentially extending the basic in-memory +// implementation (memNamedMapping) with secondary indexes, type-safe watch, etc. +// If metadata are enabled (by WithMetadata method), the scheduler will create +// an instance of the map using the provided factory during the descriptor +// registration (RegisterKVDescriptor). Immediately afterwards, the mapping +// is available read-only via scheduler's method GetMetadataMap. The returned +// map can be then casted to the customized implementation, but it should remain +// read-only (i.e. define read-only interface for the customized implementation). +type MetadataMapFactory func() idxmap.NamedMappingRW + +// ValueOrigin is one of: FromNB, FromSB, UnknownOrigin. +type ValueOrigin int + +const ( + // UnknownOrigin is given to a retrieved value when it cannot be determined + // if the value was previously created by NB or not. + // Scheduler will then look into its history to find out if the value was + // ever managed by NB to determine the origin heuristically. + UnknownOrigin ValueOrigin = iota + + // FromNB marks value created via NB transaction. + FromNB + + // FromSB marks value not managed by NB - i.e. created automatically or + // externally in SB. + FromSB +) + +// String converts ValueOrigin to string. 
+func (vo ValueOrigin) String() string { + switch vo { + case FromNB: + return "from-NB" + case FromSB: + return "from-SB" + default: + return "unknown" + } +} + +// KVDescriptor teaches KVScheduler how to CRUD values under keys matched +// by KeySelector(). +// +// Every SB component should define one or more descriptors to cover all +// (non-property) keys under its management. The descriptor is what in essence +// gives meaning to individual key-value pairs. The list of available keys and +// their purpose should be properly documented so that clients from the NB plane +// can use them correctly. The scheduler does not care what CRUD methods do, +// it only needs to call the right callbacks at the right time. +// +// Every key-value pair must have at most one descriptor associated with it. +// NB base value without descriptor is considered unimplemented and will never +// be created. +// On the other hand, derived value is allowed to have no descriptor associated +// with it. Typically, properties of base values are implemented as derived +// (often empty) values without attached SB operations, used as targets for +// dependencies. +type KVDescriptor struct { + // Name of the descriptor unique across all registered descriptors. + Name string + + // TODO: replace KeySelector, KeyLabel & NBKeyPrefix with methods from + // models.Spec. + + // KeySelector selects keys described by this descriptor. + KeySelector KeySelector + + // TODO: obsolete, remove once Orchestrator is completed + // ValueTypeName defines name of the proto.Message type used to represent + // described values. This attribute is mandatory, otherwise LazyValue-s + // received from NB (e.g. datasync package) cannot be un-marshalled. + // Note: proto Messages are registered against this type name in the generated + // code using proto.RegisterType(). 
+ ValueTypeName string + + // KeyLabel can be *optionally* defined to provide a *shorter* value + // identifier, that, unlike the original key, only needs to be unique in the + // key scope of the descriptor and not necessarily in the entire key space. + // If defined, key label will be used as value identifier in the metadata map + // and in the non-verbose logs. + KeyLabel func(key string) string + + // NBKeyPrefix is a key prefix that the scheduler should watch + // in NB to receive all NB-values described by this descriptor. + // The key space defined by NBKeyPrefix may cover more than KeySelector + // selects - the scheduler will filter the received values and pass + // to the descriptor only those that are really chosen by KeySelector. + // The opposite may be true as well - KeySelector may select some extra + // SB-only values, which the scheduler will not watch for in NB. Furthermore, + // the keys may already be requested for watching by another descriptor + // within the same plugin and in such case it is not needed to mention the + // same prefix again. + NBKeyPrefix string + + // ValueComparator can be *optionally* provided to customize comparision + // of values for equality. + // Scheduler compares values to determine if Update operation is really + // needed. + // For NB values, was either previously set by NB or refreshed + // from SB, whereas is a new value to be applied by NB. + ValueComparator func(key string, oldValue, newValue proto.Message) bool + + // WithMetadata tells scheduler whether to enable metadata - run-time, + // descriptor-owned, scheduler-opaque, data carried alongside a created + // (non-derived) value. + // If enabled, the scheduler will maintain a map between key (-label, if + // KeyLabel is defined) and the associated metadata. + // If is false, metadata returned by Create will be ignored + // and other methods will receive nil metadata. 
+ WithMetadata bool + + // MetadataMapFactory can be used to provide a customized map implementation + // for value metadata, possibly extended with secondary lookups. + // If not defined, the scheduler will use the bare NamedMapping from + // the idxmap package. + MetadataMapFactory MetadataMapFactory + + // Validate value handler (optional). + // Validate is called for every new value before it is Created or Updated. + // If the validations fails (returned is non-nil), the scheduler will + // mark the value as invalid and will not attempt to apply it. + // The descriptor can further specify which field(s) are not valid + // by wrapping the validation error together with a slice of invalid fields + // using the error InvalidValueError (see errors.go). + Validate func(key string, value proto.Message) error + + // Create new value handler. + // For non-derived values, descriptor may return metadata to associate with + // the value. + // For derived values, Create+Delete+Update are optional. Typically, properties + // of base values are implemented as derived (often empty) values without + // attached SB operations, used as targets for dependencies. + Create func(key string, value proto.Message) (metadata Metadata, err error) + + // Delete value handler. + // If Create is defined, Delete handler must be provided as well. + Delete func(key string, value proto.Message, metadata Metadata) error + + // Update value handler. + // The handler is optional - if not defined, value change will be carried out + // via full re-creation (Delete followed by Create with the new value). + // can re-use the . + Update func(key string, oldValue, newValue proto.Message, oldMetadata Metadata) (newMetadata Metadata, err error) + + // UpdateWithRecreate can be defined to tell the scheduler if going from + // to requires the value to be completely re-created + // with Delete+Create handlers. 
+ // If not defined, KVScheduler will decide based on the (un)availability + // of the Update operation - if provided, it is assumed that any change + // can be applied incrementally, otherwise a full re-creation is the only way + // to go. + UpdateWithRecreate func(key string, oldValue, newValue proto.Message, metadata Metadata) bool + + // Retrieve should return all non-derived values described by this descriptor + // that *really* exist in the southbound plane (and not what the current + // scheduler's view of SB is). Derived value will get automatically created + // using the method DerivedValues(). If some non-derived value doesn't + // actually exist, it shouldn't be returned by DerivedValues() for the + // retrieved base value! + // represents the non-derived values currently created + // as viewed from the northbound/scheduler point of view: + // -> startup resync: = values received from NB to be applied + // -> run-time/downstream resync: = values applied according + // to the in-memory kv-store (scheduler's view of SB) + // + // The callback is optional - if not defined, it is assumed that descriptor + // is not able to read the current SB state and thus refresh cannot be + // performed for its kv-pairs. + Retrieve func(correlate []KVWithMetadata) ([]KVWithMetadata, error) + + // IsRetriableFailure tells scheduler if the given error, returned by one + // of Create/Delete/Update handlers, will always be returned for the + // the same value (non-retriable) or if the value can be theoretically + // fixed merely by repeating the operation. + // If the callback is not defined, every error will be considered retriable. + IsRetriableFailure func(err error) bool + + // DerivedValues returns ("derived") values solely inferred from the current + // state of this ("base") value. Derived values cannot be changed by NB + // transaction. + // While their state and existence is bound to the state of their base value, + // they are allowed to have their own descriptors. 
+ // + // Typically, derived value represents the base value's properties (that + // other kv pairs may depend on), or extra actions taken when additional + // dependencies are met, but otherwise not blocking the base + // value from being created. + // + // The callback is optional - if not defined, there will be no values derived + // from kv-pairs of the descriptor. + DerivedValues func(key string, value proto.Message) []KeyValuePair + + // Dependencies are keys that must already exist for the value to be created. + // Conversely, if a dependency is to be removed, all values that depend on it + // are deleted first and cached for a potential future re-creation. + // Dependencies returned in the list are AND-ed. + // The callback is optional - if not defined, the kv-pairs of the descriptor + // are assumed to have no dependencies. + Dependencies func(key string, value proto.Message) []Dependency + + // RetrieveDependencies is a list of descriptors whose values are needed + // and should be already retrieved prior to calling Retrieve for this + // descriptor. + // Metadata for values already retrieved are available via GetMetadataMap(). + // TODO: define dependencies as a slice of models, not descriptors. + RetrieveDependencies []string /* descriptor name */ +} diff --git a/plugins/kvscheduler/api/kv_scheduler_api.go b/plugins/kvscheduler/api/kv_scheduler_api.go new file mode 100644 index 0000000000..1129a02fb7 --- /dev/null +++ b/plugins/kvscheduler/api/kv_scheduler_api.go @@ -0,0 +1,286 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate protoc --proto_path=. --gogo_out=. value_status.proto + +package api + +import ( + "context" + + "github.com/gogo/protobuf/proto" + "github.com/ligato/cn-infra/idxmap" + "time" +) + +// KeySelector is used to filter keys. +type KeySelector func(key string) bool + +// KeyValuePair groups key with value. +type KeyValuePair struct { + // Key identifies value. + Key string + + // Value may represent some object, action or property. + // + // Value can be created either via northbound transaction (NB-value, + // ValueOrigin = FromNB) or pushed (as already created) through SB notification + // (SB-value, ValueOrigin = FromSB). Values from NB take priority as they + // overwrite existing SB values (via Modify operation), whereas notifications + // for existing NB values are ignored. For values retrieved with unknown + // origin the scheduler reviews the value's history to determine where it came + // from. + // + // For descriptors the values are mutable objects - Create, Update and Delete + // methods should reflect the value content without changing it. + // To add and maintain extra (runtime) attributes alongside the value, descriptor + // can use the value metadata. + Value proto.Message +} + +// Metadata are extra information carried alongside non-derived (base) value +// that descriptor may use for runtime attributes, secondary lookups, etc. This +// data are opaque for the scheduler and fully owned by the descriptor. 
+// Descriptor is supposed to create/edit (and use) metadata inside the Create, +// Update, Delete methods and return the latest state with Retrieve. +// Metadata, however, should not be used to determine the list of derived values +// and dependencies for a value - this needs to be fixed for a given value +// (Update is effectively replace) and known even before the value is created. +// +// The only way how scheduler can learn anything from metadata, is if MetadataMap +// is enabled by descriptor (using WithMetadata method) and a custom NamedMapping +// implementation is provided that defines secondary indexes (over metadata). +// The scheduler exposes the current snapshot of secondary indexes, but otherwise +// is not familiar with their semantics. +type Metadata interface{} + +// KeyWithError stores error for a key whose value failed to get updated. +type KeyWithError struct { + Key string + TxnOperation TxnOperation + Error error +} + +// KVWithMetadata encapsulates key-value pair with metadata and the origin mark. +type KVWithMetadata struct { + Key string + Value proto.Message + Metadata Metadata + Origin ValueOrigin +} + +// View chooses from which point of view to look at the key-value space when +// retrieving values. +type View int + +const ( + // SBView means to look directly into SB via Retrieve methods of descriptors + // to learn the real and up-to-date state of the system. + SBView View = iota + + // NBView means to look at the key-value space from NB point of view, i.e. + // what key-values were requested and are assumed by NB to be applied. + NBView + + // CachedView means to obtain the kvscheduler's current view of SB. + CachedView +) + +// String converts View to string. 
+func (v View) String() string { + switch v { + case SBView: + return "SB" + case NBView: + return "NB" + default: + return "cached" + } +} + +// KVScheduler synchronizes the *desired* system state described by northbound +// (NB) components via transactions with the *actual* state of the southbound (SB). +// The system state is represented as a set of inter-dependent key-value pairs +// that can be created, updated, deleted from within NB transactions or be notified +// about via notifications from the SB plane. +// The scheduling basically implements "state reconciliation" - periodically and +// on any change the scheduler attempts to update every value which has satisfied +// dependencies but is out-of-sync with the desired state given by NB. +// +// For the scheduler, the key-value pairs are just abstract items that need +// to be managed in a synchronized fashion according to the described relations. +// It is up to the SB components to assign actual meaning to the individual +// values via provided implementations for CRUD operations. +// +// The idea behind scheduler is based on the Mediator pattern - SB components +// do not communicate directly, but instead interact through the mediator. +// This reduces the dependencies between communicating objects, thereby reducing +// coupling. +// +// The values are described for scheduler by registered KVDescriptor-s. +// The scheduler learns two kinds of relations between values that have to be +// respected by the scheduling algorithm: +// -> A depends on B: +// - A cannot exist without B +// - request to create A without B existing must be postponed by storing +// A into the cache of values with unmet dependencies (a.k.a. 
pending) +// - if B is to be removed and A exists, A must be removed first +// and cached in case B is restored in the future +// - Note: values pushed from SB are not checked for dependencies +// -> B is derived from A: +// - value B is not created directly (by NB or SB) but gets derived +// from base value A (using the DerivedValues() method of the base +// value's descriptor) +// - derived value exists only as long as its base does and gets removed +// (without caching) once the base value goes away +// - derived value may be described by a different descriptor than +// the base and usually represents property of the base value (that +// other values may depend on) or an extra action to be taken +// when additional dependencies are met. +// +// Every key-value pair must have at most one descriptor associated with it. +// Base NB value without descriptor is considered unimplemented and will never +// be created. +// On the other hand, derived value is allowed to have no descriptor associated +// with it. Typically, properties of base values are implemented as derived +// (often empty) values without attached SB operations, used as targets for +// dependencies. +// +// For descriptors the values are mutable objects - Create, Update and Delete +// methods should reflect the value content without changing it. +// To add and maintain extra (runtime) attributes alongside the value, scheduler +// allows descriptors to append metadata - of any type - to each created +// non-derived Object value. Descriptor can also use the metadata to define +// secondary lookups, exposed via MetadataMap. 
+// +// Advantages of the centralized scheduling are: +// - easy to add new descriptors and dependencies +// - decreases the likelihood of race conditions and deadlocks in systems with +// complex dependencies +// - allows to write loosely-coupled SB components (mediator pattern) +// - descriptor API will force new SB components to follow the same +// code structure which will make them easier to familiarize with +// - NB components should never worry about dependencies between requests - +// it is taken care of by the scheduler +// - single cache for all (not only pending) values (exposed via REST, +// easier to debug) +// +// Apart from scheduling and execution, KVScheduler also offers the following +// features: +// - collecting and counting present and past errors individually for every +// key +// - retry for previously failed actions +// - transaction reverting +// - exposing history of actions, errors and past value revisions over the REST +// interface +// - clearly describing the sequence of actions to be executed and postponed +// in the log file +// - allows to print verbose log messages describing graph traversal during +// transactions for debugging purposes +// - exposing graph snapshot, in the present state or after a given transaction, +// as a plotted graph (returned via REST) with values as nodes (colored to +// distinguish various value states) and dependencies/derivations as edges. +type KVScheduler interface { + // RegisterKVDescriptor registers descriptor for a set of selected + // keys. It should be called in the Init phase of agent plugins. + // Every key-value pair must have at most one descriptor associated with it + // (none for derived values expressing properties). + RegisterKVDescriptor(descriptor *KVDescriptor) error + + // GetRegisteredNBKeyPrefixes returns a list of key prefixes from NB with values + // described by registered descriptors and therefore managed by the scheduler. 
+ GetRegisteredNBKeyPrefixes() []string + + // StartNBTransaction starts a new transaction from NB to SB plane. + // The enqueued actions are scheduled for execution by Txn.Commit(). + StartNBTransaction() Txn + + // TransactionBarrier ensures that all notifications received prior to the call + // are associated with transactions that have already finalized. + TransactionBarrier() + + // PushSBNotification notifies about a spontaneous value change in the SB + // plane (i.e. not triggered by NB transaction). + // + // Pass value as nil if the value was removed, non-nil otherwise. + // + // Values pushed from SB do not trigger Create/Update/Delete operations + // on the descriptors - the change has already happened in SB - only + // dependencies and derived values are updated. + // + // Values pushed from SB are overwritten by those created via NB transactions, + // however. For example, notifications for values already created by NB + // are ignored. But otherwise, SB values (not managed by NB) are untouched + // by reconciliation or any other operation of the scheduler/descriptor. + PushSBNotification(key string, value proto.Message, metadata Metadata) error + + // GetMetadataMap returns (read-only) map associating value label with value + // metadata of a given descriptor. + // Returns nil if the descriptor does not expose metadata. + GetMetadataMap(descriptor string) idxmap.NamedMapping + + // GetValueStatus returns the status of a non-derived value with the given + // key. + GetValueStatus(key string) *BaseValueStatus + + // WatchValueStatus allows to watch for changes in the status of non-derived + // values with keys selected by the selector (all if keySelector==nil). + WatchValueStatus(channel chan<- *BaseValueStatus, keySelector KeySelector) + + // GetTransactionHistory returns history of transactions started within + // the specified time window, or the full recorded history if the timestamps + // are zero values. 
+ GetTransactionHistory(since, until time.Time) (history RecordedTxns) + + // GetRecordedTransaction returns record of a transaction referenced + // by the sequence number. + GetRecordedTransaction(SeqNum uint64) (txn *RecordedTxn) + + // DumpValuesByDescriptor dumps values associated with the given + // descriptor as viewed from either NB (what was requested to be applied), + // SB (what is actually applied) or from the inside (what kvscheduler's + // cached view of SB is). + DumpValuesByDescriptor(descriptor string, view View) (kvs []KVWithMetadata, err error) + + // DumpValuesByKeyPrefix like DumpValuesByDescriptor returns a dump of values, + // but the descriptor is selected based on the key prefix. + DumpValuesByKeyPrefix(keyPrefix string, view View) (kvs []KVWithMetadata, err error) +} + +// Txn represents a single transaction. +// Scheduler starts to plan and execute actions only after Commit is called. +type Txn interface { + // SetValue changes (non-derived) value. + // If value is nil, the value will get deleted. + SetValue(key string, value proto.Message) Txn + + // Commit orders scheduler to execute enqueued operations. + // Operations with unmet dependencies will get postponed and possibly + // executed later. + // The ctx allows to pass transaction options (see With* functions from + // txn_options.go) or to cancel waiting for the end of a blocking transaction. + // + // For blocking transactions, the method returns the sequence number + // of the (finalized) transaction or ^uint64(0) (max uint64) if the transaction + // failed to even get initialized. In case of failures during the initialization + // or transaction processing, the method will return non-nil error, which is + // always an instance of TransactionError (see errors.go), wrapping all errors + // encountered during the transaction processing. + // + // Non-blocking transactions return immediately and always without errors. 
+ // Subscribe with KVScheduler.WatchValueStatus() to get notified about all + // changes/errors, including those related to actions triggered later + // or asynchronously by a SB notification. + Commit(ctx context.Context) (txnSeqNum uint64, err error) +} diff --git a/plugins/kvscheduler/api/txn_options.go b/plugins/kvscheduler/api/txn_options.go new file mode 100644 index 0000000000..156836fb0a --- /dev/null +++ b/plugins/kvscheduler/api/txn_options.go @@ -0,0 +1,230 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "time" +) + +type schedulerCtxKey int + +const ( + // resyncCtxKey is a key under which *resync* txn option is stored + // into the context. + resyncCtxKey schedulerCtxKey = iota + + // nonBlockingTxnCtxKey is a key under which *non-blocking* txn option is + // stored into the context. + nonBlockingTxnCtxKey + + // retryCtxKey is a key under which *retry* txn option is stored into + // the context. + retryCtxKey + + // revertCtxKey is a key under which *revert* txn option is stored into + // the context. + revertCtxKey + + // txnDescriptionKey is a key under which transaction description is stored + // into the context. + txnDescriptionKey +) + +// modifiable default parameters for the *retry* txn option +var ( + // DefaultRetryPeriod delays first retry by one second. 
+ DefaultRetryPeriod = time.Second + + // DefaultRetryMaxCount limits the number of retries to 3 attempts at maximum. + DefaultRetryMaxCount = 3 + + // DefaultRetryBackoff enables exponential back-off for retry delay. + DefaultRetryBackoff = true +) + +/* Full-Resync */ + +// resyncOpt represents the *resync* transaction option. +type resyncOpt struct { + resyncType ResyncType + verboseSBRefresh bool +} + +// ResyncType is one of: Upstream, Downstream, Full. +type ResyncType int + +const ( + // NotResync is the default value for ResyncType, used when resync is actually + // not enabled. + NotResync ResyncType = iota + + // FullResync resynchronizes the agent with both SB and NB. + FullResync + + // UpstreamResync resynchronizes the agent with NB. + // It can be used by NB in situations when fully re-calculating the desired + // state is far easier or more efficient than to determine the minimal difference + // that needs to be applied to reach that state. + // The agent's view of SB is not refreshed, instead it is expected to be up-to-date. + UpstreamResync + + // DownstreamResync resynchronizes the agent with SB. + // In this case it is assumed that the state required by NB is up-to-date + // (transaction should be empty) and only the agent's view of SB is refreshed + // and any discrepancies are acted upon. + DownstreamResync +) + +// WithResync prepares context for transaction that, based on the resync type, +// will trigger resync between the configuration states of NB, the agent and SB. +// For DownstreamResync the transaction should be empty, otherwise it should +// carry non-NIL values - existing NB values not included in the transaction +// are automatically removed. +// When verboseSBRefresh is enabled, the refreshed state of SB will be printed +// into stdout. The argument is irrelevant for UpstreamResync, where SB state is +// not refreshed. 
+func WithResync(ctx context.Context, resyncType ResyncType, verboseSBRefresh bool) context.Context { + return context.WithValue(ctx, resyncCtxKey, &resyncOpt{ + resyncType: resyncType, + verboseSBRefresh: verboseSBRefresh, + }) +} + +// IsResync returns true if the transaction context is configured to trigger resync. +func IsResync(ctx context.Context) (resyncType ResyncType, verboseSBRefresh bool) { + resyncArgs, isResync := ctx.Value(resyncCtxKey).(*resyncOpt) + if !isResync { + return NotResync, false + } + return resyncArgs.resyncType, resyncArgs.verboseSBRefresh +} + +/* Non-blocking Txn */ + +// nonBlockingTxnOpt represents the *non-blocking* transaction option. +type nonBlockingTxnOpt struct { + // no attributes +} + +// WithoutBlocking prepares context for transaction that should be scheduled +// for execution without blocking the caller of the Commit() method. +// By default, commit is blocking. +func WithoutBlocking(ctx context.Context) context.Context { + return context.WithValue(ctx, nonBlockingTxnCtxKey, &nonBlockingTxnOpt{}) +} + +// IsNonBlockingTxn returns true if transaction context is configured for +// non-blocking Commit. +func IsNonBlockingTxn(ctx context.Context) bool { + _, nonBlocking := ctx.Value(nonBlockingTxnCtxKey).(*nonBlockingTxnOpt) + return nonBlocking +} + +/* Retry */ + +// RetryOpt represents the *retry* transaction option. +type RetryOpt struct { + Period time.Duration + MaxCount int + ExpBackoff bool +} + +// WithRetry prepares context for transaction for which the scheduler will retry +// any (retriable) failed operations after given period. If expBackoff +// is enabled, every failed retry will double the next delay. Non-zero maxCount +// limits the maximum number of retries the scheduler will execute. +// Can be combined with revert - even failed revert operations will be re-tried. +// By default, the scheduler will not automatically retry failed operations. 
+func WithRetry(ctx context.Context, period time.Duration, maxCount int, expBackoff bool) context.Context { + return context.WithValue(ctx, retryCtxKey, &RetryOpt{ + Period: period, + MaxCount: maxCount, + ExpBackoff: expBackoff, + }) +} + +// WithRetryDefault is a specialization of WithRetry, where retry parameters +// are set to default values. +func WithRetryDefault(ctx context.Context) context.Context { + return context.WithValue(ctx, retryCtxKey, &RetryOpt{ + Period: DefaultRetryPeriod, + MaxCount: DefaultRetryMaxCount, + ExpBackoff: DefaultRetryBackoff, + }) +} + +// WithRetryMaxCount is a specialization of WithRetry, where period and +// expBackoff are set to default values and the maximum number of retries can be customized. +func WithRetryMaxCount(ctx context.Context, maxCount int) context.Context { + return context.WithValue(ctx, retryCtxKey, &RetryOpt{ + Period: DefaultRetryPeriod, + MaxCount: maxCount, + ExpBackoff: DefaultRetryBackoff, + }) +} + +// IsWithRetry returns true if transaction context is configured to allow retry, +// including the option parameters, or nil if retry is not enabled. +func IsWithRetry(ctx context.Context) (retryArgs *RetryOpt, withRetry bool) { + retryArgs, withRetry = ctx.Value(retryCtxKey).(*RetryOpt) + return +} + +/* Revert */ + +// revertOpt represents the *revert* transaction option. +type revertOpt struct { + // no attributes +} + +// WithRevert prepares context for transaction that will be reverted if any +// of its operations fails. +// By default, the scheduler executes transactions in a best-effort mode - even +// in the case of an error it will keep the effects of successful operations. +func WithRevert(ctx context.Context) context.Context { + return context.WithValue(ctx, revertCtxKey, &revertOpt{}) +} + +// IsWithRevert returns true if the transaction context is configured +// to revert transaction if any of its operations fails. 
+func IsWithRevert(ctx context.Context) bool { + _, isWithRevert := ctx.Value(revertCtxKey).(*revertOpt) + return isWithRevert +} + +/* Txn Description */ + +// txnDescriptionOpt represents the *txn-description* transaction option. +type txnDescriptionOpt struct { + description string +} + +// WithDescription prepares context for transaction that will have description +// provided. +// By default, transactions are without description. +func WithDescription(ctx context.Context, description string) context.Context { + return context.WithValue(ctx, txnDescriptionKey, &txnDescriptionOpt{description: description}) +} + +// IsWithDescription returns true if the transaction context is configured +// to include transaction description. +func IsWithDescription(ctx context.Context) (description string, withDescription bool) { + descriptionOpt, withDescription := ctx.Value(txnDescriptionKey).(*txnDescriptionOpt) + if !withDescription { + return "", false + } + return descriptionOpt.description, true +} diff --git a/plugins/kvscheduler/api/txn_record.go b/plugins/kvscheduler/api/txn_record.go new file mode 100644 index 0000000000..3acfaf7449 --- /dev/null +++ b/plugins/kvscheduler/api/txn_record.go @@ -0,0 +1,373 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "fmt" + "strings" + "time" + + "github.com/gogo/protobuf/proto" + + "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/utils" +) + +// TxnType differentiates between NB transaction, retry of failed operations and +// SB notification. Once queued, all three different operations are classified +// as transactions, only with different parameters. +type TxnType int + +const ( + // SBNotification is notification from southbound. + SBNotification TxnType = iota + + // NBTransaction is transaction from northbound. + NBTransaction + + // RetryFailedOps is a transaction re-trying failed operations from previous + // northbound transaction. + RetryFailedOps +) + +// String returns human-readable string representation of the transaction type. +func (t TxnType) String() string { + switch t { + case SBNotification: + return "SB Notification" + case NBTransaction: + return "NB Transaction" + case RetryFailedOps: + return "RETRY" + } + return "UNKNOWN" +} + +// RecordedTxn is used to record executed transaction. +type RecordedTxn struct { + PreRecord bool // not yet fully recorded, only args + plan + pre-processing errors + + // timestamps + Start time.Time + Stop time.Time + + // arguments + SeqNum uint64 + TxnType TxnType + ResyncType ResyncType + Description string + RetryForTxn uint64 + RetryAttempt int + Values []RecordedKVPair + + // operations + Planned RecordedTxnOps + Executed RecordedTxnOps +} + +// RecordedTxnOp is used to record executed/planned transaction operation. +type RecordedTxnOp struct { + // identification + Operation TxnOperation + Key string + + // changes + PrevValue proto.Message + NewValue proto.Message + PrevState ValueState + NewState ValueState + PrevErr error + NewErr error + NOOP bool + + // flags + IsDerived bool + IsProperty bool + IsRevert bool + IsRetry bool + IsRecreate bool +} + +// RecordedKVPair is used to record key-value pair. 
+type RecordedKVPair struct { + Key string + Value proto.Message + Origin ValueOrigin +} + +// RecordedTxnOps is a list of recorded executed/planned transaction operations. +type RecordedTxnOps []*RecordedTxnOp + +// RecordedTxns is a list of recorded transactions. +type RecordedTxns []*RecordedTxn + +// String returns a *multi-line* human-readable string representation of recorded transaction. +func (txn *RecordedTxn) String() string { + return txn.StringWithOpts(false, false, 0) +} + +// StringWithOpts allows to format string representation of recorded transaction. +func (txn *RecordedTxn) StringWithOpts(resultOnly, verbose bool, indent int) string { + var str string + indent1 := strings.Repeat(" ", indent) + indent2 := strings.Repeat(" ", indent+4) + indent3 := strings.Repeat(" ", indent+8) + + if !resultOnly { + // transaction arguments + str += indent1 + "* transaction arguments:\n" + str += indent2 + fmt.Sprintf("- seq-num: %d\n", txn.SeqNum) + if txn.TxnType == NBTransaction && txn.ResyncType != NotResync { + ResyncType := "Full Resync" + if txn.ResyncType == DownstreamResync { + ResyncType = "SB Sync" + } + if txn.ResyncType == UpstreamResync { + ResyncType = "NB Sync" + } + str += indent2 + fmt.Sprintf("- type: %s, %s\n", txn.TxnType.String(), ResyncType) + } else { + if txn.TxnType == RetryFailedOps { + str += indent2 + fmt.Sprintf("- type: %s (for txn %d, attempt #%d)\n", + txn.TxnType.String(), txn.RetryForTxn, txn.RetryAttempt) + } else { + str += indent2 + fmt.Sprintf("- type: %s\n", txn.TxnType.String()) + } + } + if txn.Description != "" { + descriptionLines := strings.Split(txn.Description, "\n") + for idx, line := range descriptionLines { + if idx == 0 { + str += indent2 + fmt.Sprintf("- Description: %s\n", line) + } else { + str += indent3 + fmt.Sprintf("%s\n", line) + } + } + } + if txn.ResyncType == DownstreamResync { + goto printOps + } + if len(txn.Values) == 0 { + str += indent2 + fmt.Sprintf("- values: NONE\n") + } else { + str += indent2 + 
fmt.Sprintf("- values:\n") + } + for _, kv := range txn.Values { + if txn.ResyncType != NotResync && kv.Origin == FromSB { + // do not print SB values updated during resync + continue + } + str += indent3 + fmt.Sprintf("- key: %s\n", kv.Key) + str += indent3 + fmt.Sprintf(" val: %s\n", utils.ProtoToString(kv.Value)) + } + + printOps: + // planned operations + str += indent1 + "* planned operations:\n" + str += txn.Planned.StringWithOpts(verbose, indent+4) + } + + if !txn.PreRecord { + if len(txn.Executed) == 0 { + str += indent1 + "* executed operations:\n" + } else { + str += indent1 + fmt.Sprintf("* executed operations (%s -> %s, dur: %s):\n", + txn.Start.Round(time.Millisecond), + txn.Stop.Round(time.Millisecond), + txn.Stop.Sub(txn.Start).Round(time.Millisecond)) + } + str += txn.Executed.StringWithOpts(verbose, indent+4) + } + + return str +} + +// String returns a *multi-line* human-readable string representation of a recorded +// transaction operation. +func (op *RecordedTxnOp) String() string { + return op.StringWithOpts(0, false, 0) +} + +// StringWithOpts allows to format string representation of a transaction operation. 
+func (op *RecordedTxnOp) StringWithOpts(index int, verbose bool, indent int) string { + var str string + indent1 := strings.Repeat(" ", indent) + indent2 := strings.Repeat(" ", indent+4) + + var flags []string + // operation flags + if op.IsDerived && !op.IsProperty { + flags = append(flags, "DERIVED") + } + if op.IsProperty { + flags = append(flags, "PROPERTY") + } + if op.NOOP { + flags = append(flags, "NOOP") + } + if op.IsRevert && !op.IsProperty { + flags = append(flags, "REVERT") + } + if op.IsRetry && !op.IsProperty { + flags = append(flags, "RETRY") + } + if op.IsRecreate { + flags = append(flags, "RECREATE") + } + // value state transition + // -> OBTAINED + if op.NewState == ValueState_OBTAINED { + flags = append(flags, "OBTAINED") + } + if op.PrevState == ValueState_OBTAINED && op.PrevState != op.NewState { + flags = append(flags, "WAS-OBTAINED") + } + // -> UNIMPLEMENTED + if op.NewState == ValueState_UNIMPLEMENTED { + flags = append(flags, "UNIMPLEMENTED") + } + if op.PrevState == ValueState_UNIMPLEMENTED && op.PrevState != op.NewState { + flags = append(flags, "WAS-UNIMPLEMENTED") + } + // -> REMOVED / MISSING + if op.PrevState == ValueState_REMOVED && !op.IsRecreate { + flags = append(flags, "ALREADY-REMOVED") + } + if op.PrevState == ValueState_MISSING { + if op.NewState == ValueState_REMOVED { + flags = append(flags, "ALREADY-MISSING") + } else { + flags = append(flags, "WAS-MISSING") + } + } + // -> DISCOVERED + if op.PrevState == ValueState_DISCOVERED { + flags = append(flags, "DISCOVERED") + } + // -> PENDING + if op.PrevState == ValueState_PENDING { + if op.NewState == ValueState_PENDING { + flags = append(flags, "STILL-PENDING") + } else { + flags = append(flags, "WAS-PENDING") + } + } else { + if op.NewState == ValueState_PENDING { + flags = append(flags, "IS-PENDING") + } + } + // -> FAILED / INVALID + if op.PrevState == ValueState_FAILED { + if op.NewState == ValueState_FAILED { + flags = append(flags, "STILL-FAILING") + } else if 
op.NewState == ValueState_CONFIGURED { + flags = append(flags, "FIXED") + } + } else { + if op.NewState == ValueState_FAILED { + flags = append(flags, "FAILED") + } + } + if op.PrevState == ValueState_INVALID { + if op.NewState == ValueState_INVALID { + flags = append(flags, "STILL-INVALID") + } else if op.NewState == ValueState_CONFIGURED { + flags = append(flags, "FIXED") + } + } else { + if op.NewState == ValueState_INVALID { + flags = append(flags, "INVALID") + } + } + + if index > 0 { + if len(flags) == 0 { + str += indent1 + fmt.Sprintf("%d. %s:\n", index, op.Operation.String()) + } else { + str += indent1 + fmt.Sprintf("%d. %s %v:\n", index, op.Operation.String(), flags) + } + } else { + if len(flags) == 0 { + str += indent1 + fmt.Sprintf("%s:\n", op.Operation.String()) + } else { + str += indent1 + fmt.Sprintf("%s %v:\n", op.Operation.String(), flags) + } + } + + str += indent2 + fmt.Sprintf("- key: %s\n", op.Key) + if op.Operation == TxnOperation_UPDATE { + str += indent2 + fmt.Sprintf("- prev-value: %s \n", utils.ProtoToString(op.PrevValue)) + str += indent2 + fmt.Sprintf("- new-value: %s \n", utils.ProtoToString(op.NewValue)) + } + if op.Operation == TxnOperation_DELETE { + str += indent2 + fmt.Sprintf("- value: %s \n", utils.ProtoToString(op.PrevValue)) + } + if op.Operation == TxnOperation_CREATE { + str += indent2 + fmt.Sprintf("- value: %s \n", utils.ProtoToString(op.NewValue)) + } + if op.PrevErr != nil { + str += indent2 + fmt.Sprintf("- prev-error: %s\n", utils.ErrorToString(op.PrevErr)) + } + if op.NewErr != nil { + str += indent2 + fmt.Sprintf("- error: %s\n", utils.ErrorToString(op.NewErr)) + } + if verbose { + str += indent2 + fmt.Sprintf("- prev-state: %s \n", op.PrevState.String()) + str += indent2 + fmt.Sprintf("- new-state: %s \n", op.NewState.String()) + } + + return str +} + +// String returns a *multi-line* human-readable string representation of transaction +// operations. 
+func (ops RecordedTxnOps) String() string { + return ops.StringWithOpts(false, 0) +} + +// StringWithOpts allows to format string representation of transaction operations. +func (ops RecordedTxnOps) StringWithOpts(verbose bool, indent int) string { + if len(ops) == 0 { + return strings.Repeat(" ", indent) + "\n" + } + + var str string + for idx, op := range ops { + str += op.StringWithOpts(idx+1, verbose, indent) + } + return str +} + +// String returns a *multi-line* human-readable string representation of a transaction +// list. +func (txns RecordedTxns) String() string { + return txns.StringWithOpts(false, false, 0) +} + +// StringWithOpts allows to format string representation of a transaction list. +func (txns RecordedTxns) StringWithOpts(resultOnly, verbose bool, indent int) string { + if len(txns) == 0 { + return strings.Repeat(" ", indent) + "\n" + } + + var str string + for idx, txn := range txns { + str += strings.Repeat(" ", indent) + fmt.Sprintf("Transaction #%d:\n", txn.SeqNum) + str += txn.StringWithOpts(resultOnly, verbose, indent+4) + if idx < len(txns)-1 { + str += "\n" + } + } + return str +} diff --git a/plugins/kvscheduler/api/value_status.go b/plugins/kvscheduler/api/value_status.go new file mode 100644 index 0000000000..d15efec5ee --- /dev/null +++ b/plugins/kvscheduler/api/value_status.go @@ -0,0 +1,36 @@ +// Copyright (c) 2019 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "bytes" + + "github.com/gogo/protobuf/jsonpb" +) + +// MarshalJSON ensures data is correctly marshaled +func (m *ValueStatus) MarshalJSON() ([]byte, error) { + marshaller := &jsonpb.Marshaler{} + var buf bytes.Buffer + if err := marshaller.Marshal(&buf, m); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// UnmarshalJSON ensures that data is correctly unmarshaled +func (m *ValueStatus) UnmarshalJSON(data []byte) error { + return jsonpb.Unmarshal(bytes.NewReader(data), m) +} diff --git a/plugins/kvscheduler/api/value_status.pb.go b/plugins/kvscheduler/api/value_status.pb.go new file mode 100644 index 0000000000..274b51c675 --- /dev/null +++ b/plugins/kvscheduler/api/value_status.pb.go @@ -0,0 +1,292 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: value_status.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type ValueState int32 + +const ( + // ValueState_NONEXISTENT is assigned to value that was deleted or has never + // existed. + ValueState_NONEXISTENT ValueState = 0 + // ValueState_MISSING is assigned to NB value that was configured but refresh + // found it to be missing. + ValueState_MISSING ValueState = 1 + // ValueState_UNIMPLEMENTED marks value received from NB that cannot + // be configured because there is no registered descriptor associated + // with it. 
+ ValueState_UNIMPLEMENTED ValueState = 2 + // ValueState_REMOVED is assigned to NB value after it was removed or when + // it is being re-created. The state is only temporary: for re-create, the + // value transits to whatever state the following Create operation produces, + // and delete values are removed from the graph (go to the NONEXISTENT state) + // immediately after the notification about the state change is sent. + ValueState_REMOVED ValueState = 3 + // ValueState_CONFIGURED marks value defined by NB and successfully configured. + ValueState_CONFIGURED ValueState = 4 + // ValueState_OBTAINED marks value not managed by NB, instead created + // automatically or externally in SB. The KVScheduler learns about the value + // either using Retrieve() or through a SB notification. + ValueState_OBTAINED ValueState = 5 + // ValueState_DISCOVERED marks NB value that was found (=retrieved) by refresh + // but not actually configured by the agent in this run. + ValueState_DISCOVERED ValueState = 6 + // ValueState_PENDING represents (NB) value that cannot be configured yet + // due to missing dependencies. + ValueState_PENDING ValueState = 7 + // ValueState_INVALID represents (NB) value that will not be configured + // because it has a logically invalid content as declared by the Validate + // method of the associated descriptor. + // The corresponding error and the list of affected fields are stored + // in the structure available via
for invalid + // value. + ValueState_INVALID ValueState = 8 + // ValueState_FAILED marks (NB) value for which the last executed operation + // returned an error. + // The error and the type of the operation which caused the error are stored + // in the structure available via
for failed + // value. + ValueState_FAILED ValueState = 9 + // ValueState_RETRYING marks unsucessfully applied (NB) value, for which, + // however, one or more attempts to fix the error by repeating the last + // operation are planned, and only if all the retries fail, the value will + // then transit to the FAILED state. + ValueState_RETRYING ValueState = 10 +) + +var ValueState_name = map[int32]string{ + 0: "NONEXISTENT", + 1: "MISSING", + 2: "UNIMPLEMENTED", + 3: "REMOVED", + 4: "CONFIGURED", + 5: "OBTAINED", + 6: "DISCOVERED", + 7: "PENDING", + 8: "INVALID", + 9: "FAILED", + 10: "RETRYING", +} +var ValueState_value = map[string]int32{ + "NONEXISTENT": 0, + "MISSING": 1, + "UNIMPLEMENTED": 2, + "REMOVED": 3, + "CONFIGURED": 4, + "OBTAINED": 5, + "DISCOVERED": 6, + "PENDING": 7, + "INVALID": 8, + "FAILED": 9, + "RETRYING": 10, +} + +func (x ValueState) String() string { + return proto.EnumName(ValueState_name, int32(x)) +} +func (ValueState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_value_status_cf49e3d7c6479977, []int{0} +} + +type TxnOperation int32 + +const ( + TxnOperation_UNDEFINED TxnOperation = 0 + TxnOperation_VALIDATE TxnOperation = 1 + TxnOperation_CREATE TxnOperation = 2 + TxnOperation_UPDATE TxnOperation = 3 + TxnOperation_DELETE TxnOperation = 4 +) + +var TxnOperation_name = map[int32]string{ + 0: "UNDEFINED", + 1: "VALIDATE", + 2: "CREATE", + 3: "UPDATE", + 4: "DELETE", +} +var TxnOperation_value = map[string]int32{ + "UNDEFINED": 0, + "VALIDATE": 1, + "CREATE": 2, + "UPDATE": 3, + "DELETE": 4, +} + +func (x TxnOperation) String() string { + return proto.EnumName(TxnOperation_name, int32(x)) +} +func (TxnOperation) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_value_status_cf49e3d7c6479977, []int{1} +} + +type ValueStatus struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + State ValueState `protobuf:"varint,2,opt,name=state,proto3,enum=api.ValueState" json:"state,omitempty"` + Error 
string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` + LastOperation TxnOperation `protobuf:"varint,4,opt,name=last_operation,json=lastOperation,proto3,enum=api.TxnOperation" json:"last_operation,omitempty"` + // - for invalid value, details is a list of invalid fields + // - for pending value, details is a list of missing dependencies (labels) + Details []string `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValueStatus) Reset() { *m = ValueStatus{} } +func (m *ValueStatus) String() string { return proto.CompactTextString(m) } +func (*ValueStatus) ProtoMessage() {} +func (*ValueStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_value_status_cf49e3d7c6479977, []int{0} +} +func (m *ValueStatus) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValueStatus.Unmarshal(m, b) +} +func (m *ValueStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValueStatus.Marshal(b, m, deterministic) +} +func (dst *ValueStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValueStatus.Merge(dst, src) +} +func (m *ValueStatus) XXX_Size() int { + return xxx_messageInfo_ValueStatus.Size(m) +} +func (m *ValueStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ValueStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ValueStatus proto.InternalMessageInfo + +func (m *ValueStatus) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *ValueStatus) GetState() ValueState { + if m != nil { + return m.State + } + return ValueState_NONEXISTENT +} + +func (m *ValueStatus) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *ValueStatus) GetLastOperation() TxnOperation { + if m != nil { + return m.LastOperation + } + return TxnOperation_UNDEFINED +} + +func (m *ValueStatus) GetDetails() []string { + if m != nil { + 
return m.Details + } + return nil +} + +type BaseValueStatus struct { + Value *ValueStatus `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + DerivedValues []*ValueStatus `protobuf:"bytes,2,rep,name=derived_values,json=derivedValues,proto3" json:"derived_values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BaseValueStatus) Reset() { *m = BaseValueStatus{} } +func (m *BaseValueStatus) String() string { return proto.CompactTextString(m) } +func (*BaseValueStatus) ProtoMessage() {} +func (*BaseValueStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_value_status_cf49e3d7c6479977, []int{1} +} +func (m *BaseValueStatus) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BaseValueStatus.Unmarshal(m, b) +} +func (m *BaseValueStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BaseValueStatus.Marshal(b, m, deterministic) +} +func (dst *BaseValueStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_BaseValueStatus.Merge(dst, src) +} +func (m *BaseValueStatus) XXX_Size() int { + return xxx_messageInfo_BaseValueStatus.Size(m) +} +func (m *BaseValueStatus) XXX_DiscardUnknown() { + xxx_messageInfo_BaseValueStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_BaseValueStatus proto.InternalMessageInfo + +func (m *BaseValueStatus) GetValue() *ValueStatus { + if m != nil { + return m.Value + } + return nil +} + +func (m *BaseValueStatus) GetDerivedValues() []*ValueStatus { + if m != nil { + return m.DerivedValues + } + return nil +} + +func init() { + proto.RegisterType((*ValueStatus)(nil), "api.ValueStatus") + proto.RegisterType((*BaseValueStatus)(nil), "api.BaseValueStatus") + proto.RegisterEnum("api.ValueState", ValueState_name, ValueState_value) + proto.RegisterEnum("api.TxnOperation", TxnOperation_name, TxnOperation_value) +} + +func init() { proto.RegisterFile("value_status.proto", 
fileDescriptor_value_status_cf49e3d7c6479977) } + +var fileDescriptor_value_status_cf49e3d7c6479977 = []byte{ + // 400 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0x41, 0x6f, 0xda, 0x40, + 0x10, 0x85, 0x63, 0x8c, 0x21, 0x0c, 0x01, 0x36, 0xab, 0x1e, 0x7c, 0x44, 0x91, 0x5a, 0xa1, 0x1c, + 0x38, 0xa4, 0x87, 0xf6, 0x4a, 0xd8, 0x21, 0x5a, 0x09, 0xd6, 0x68, 0xbd, 0x58, 0xed, 0x09, 0x6d, + 0xc5, 0x1e, 0xac, 0xa2, 0xd8, 0xf2, 0x9a, 0xa8, 0xfd, 0x4f, 0x3d, 0xf4, 0x27, 0x56, 0xb3, 0x34, + 0x69, 0x2a, 0xe5, 0x36, 0x33, 0xdf, 0x7b, 0xcf, 0x33, 0xd6, 0x02, 0x7f, 0xb2, 0xc7, 0x93, 0xdb, + 0xfb, 0xd6, 0xb6, 0x27, 0x3f, 0xaf, 0x9b, 0xaa, 0xad, 0x78, 0x6c, 0xeb, 0xf2, 0xe6, 0x77, 0x04, + 0xc3, 0x82, 0x58, 0x1e, 0x10, 0x67, 0x10, 0x7f, 0x77, 0x3f, 0xd3, 0x68, 0x1a, 0xcd, 0x06, 0x9a, + 0x4a, 0xfe, 0x1e, 0x12, 0xb2, 0xb9, 0xb4, 0x33, 0x8d, 0x66, 0xe3, 0xbb, 0xc9, 0xdc, 0xd6, 0xe5, + 0xfc, 0xc5, 0xe2, 0xf4, 0x99, 0xf2, 0x77, 0x90, 0xb8, 0xa6, 0xa9, 0x9a, 0x34, 0x0e, 0xd6, 0x73, + 0xc3, 0x3f, 0xc3, 0xf8, 0x68, 0x7d, 0xbb, 0xaf, 0x6a, 0xd7, 0xd8, 0xb6, 0xac, 0x1e, 0xd3, 0x6e, + 0x48, 0xb9, 0x0e, 0x29, 0xe6, 0xc7, 0x63, 0xf6, 0x0c, 0xf4, 0x88, 0x84, 0x2f, 0x2d, 0x4f, 0xa1, + 0x7f, 0x70, 0xad, 0x2d, 0x8f, 0x3e, 0x4d, 0xa6, 0xf1, 0x6c, 0xa0, 0x9f, 0xdb, 0x9b, 0x06, 0x26, + 0xf7, 0xd6, 0xbb, 0xd7, 0x5b, 0x7f, 0x80, 0x24, 0x1c, 0x18, 0xf6, 0x1e, 0xde, 0xb1, 0xff, 0x77, + 0x3c, 0x79, 0x7d, 0xc6, 0xfc, 0x13, 0x8c, 0x0f, 0xae, 0x29, 0x9f, 0xdc, 0x61, 0x1f, 0x06, 0x3e, + 0xed, 0x4c, 0xe3, 0x37, 0x0d, 0xa3, 0xbf, 0xba, 0x30, 0xf3, 0xb7, 0xbf, 0x22, 0x80, 0x7f, 0x37, + 0xf3, 0x09, 0x0c, 0x55, 0xa6, 0xf0, 0x8b, 0xcc, 0x0d, 0x2a, 0xc3, 0x2e, 0xf8, 0x10, 0xfa, 0x1b, + 0x99, 0xe7, 0x52, 0x3d, 0xb0, 0x88, 0x5f, 0xc3, 0x68, 0xa7, 0xe4, 0x66, 0xbb, 0xc6, 0x0d, 0x2a, + 0x83, 0x82, 0x75, 0x88, 0x6b, 0xdc, 0x64, 0x05, 0x0a, 0x16, 0xf3, 0x31, 0xc0, 0x32, 0x53, 0x2b, + 0xf9, 0xb0, 0xd3, 0x28, 0x58, 0x97, 0x5f, 0xc1, 0x65, 0x76, 0x6f, 0x16, 
0x52, 0xa1, 0x60, 0x09, + 0x51, 0x21, 0xf3, 0x65, 0x56, 0x20, 0xd1, 0x1e, 0x59, 0xb7, 0xa8, 0x04, 0x45, 0xf7, 0xa9, 0x91, + 0xaa, 0x58, 0xac, 0xa5, 0x60, 0x97, 0x1c, 0xa0, 0xb7, 0x5a, 0xc8, 0x35, 0x0a, 0x36, 0xa0, 0x0c, + 0x8d, 0x46, 0x7f, 0x25, 0x19, 0xdc, 0x66, 0x70, 0xf5, 0xfa, 0xdf, 0xf2, 0x11, 0x0c, 0x76, 0x4a, + 0xe0, 0x2a, 0x7c, 0xe2, 0x82, 0xc4, 0x21, 0x63, 0x61, 0x90, 0x45, 0x14, 0xb3, 0xd4, 0x48, 0x75, + 0x87, 0xea, 0xdd, 0x36, 0xcc, 0x63, 0xaa, 0x05, 0xae, 0xd1, 0x20, 0xeb, 0x7e, 0xeb, 0x85, 0x27, + 0xf3, 0xf1, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xea, 0x7a, 0xa8, 0x7c, 0x48, 0x02, 0x00, 0x00, +} diff --git a/plugins/kvscheduler/api/value_status.proto b/plugins/kvscheduler/api/value_status.proto new file mode 100644 index 0000000000..c7d9c5a33f --- /dev/null +++ b/plugins/kvscheduler/api/value_status.proto @@ -0,0 +1,86 @@ +syntax = "proto3"; + +package api; + +enum ValueState { + // ValueState_NONEXISTENT is assigned to value that was deleted or has never + // existed. + NONEXISTENT = 0; + + // ValueState_MISSING is assigned to NB value that was configured but refresh + // found it to be missing. + MISSING = 1; + + // ValueState_UNIMPLEMENTED marks value received from NB that cannot + // be configured because there is no registered descriptor associated + // with it. + UNIMPLEMENTED = 2; + + // ValueState_REMOVED is assigned to NB value after it was removed or when + // it is being re-created. The state is only temporary: for re-create, the + // value transits to whatever state the following Create operation produces, + // and delete values are removed from the graph (go to the NONEXISTENT state) + // immediately after the notification about the state change is sent. + REMOVED = 3; + + // ValueState_CONFIGURED marks value defined by NB and successfully configured. + CONFIGURED = 4; + + // ValueState_OBTAINED marks value not managed by NB, instead created + // automatically or externally in SB. 
The KVScheduler learns about the value + // either using Retrieve() or through a SB notification. + OBTAINED = 5; + + // ValueState_DISCOVERED marks NB value that was found (=retrieved) by refresh + // but not actually configured by the agent in this run. + DISCOVERED = 6; + + // ValueState_PENDING represents (NB) value that cannot be configured yet + // due to missing dependencies. + PENDING = 7; + + // ValueState_INVALID represents (NB) value that will not be configured + // because it has a logically invalid content as declared by the Validate + // method of the associated descriptor. + // The corresponding error and the list of affected fields are stored + // in the structure available via
the KVScheduler API for invalid + value. + INVALID = 8; + + // ValueState_FAILED marks (NB) value for which the last executed operation + // returned an error. + // The error and the type of the operation which caused the error are stored + // in the structure available via
the KVScheduler API for failed + value. + FAILED = 9; + + // ValueState_RETRYING marks unsuccessfully applied (NB) value, for which, + // however, one or more attempts to fix the error by repeating the last + // operation are planned, and only if all the retries fail, the value will + // then transit to the FAILED state. + RETRYING = 10; +} + +enum TxnOperation { + UNDEFINED = 0; + VALIDATE = 1; + CREATE = 2; + UPDATE = 3; + DELETE = 4; +} + +message ValueStatus { + string key = 1; + ValueState state = 2; + string error = 3; // error returned by the last operation (none if empty string) + TxnOperation last_operation = 4; + + // - for invalid value, details is a list of invalid fields + // - for pending value, details is a list of missing dependencies (labels) + repeated string details = 5; +} + +message BaseValueStatus { + ValueStatus value = 1; + repeated ValueStatus derived_values = 2; +} \ No newline at end of file diff --git a/plugins/kvscheduler/datachange_test.go b/plugins/kvscheduler/datachange_test.go new file mode 100644 index 0000000000..efa34d5ca2 --- /dev/null +++ b/plugins/kvscheduler/datachange_test.go @@ -0,0 +1,1700 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kvscheduler + +/* TODO: fix and re-enable UTs +import ( + "context" + "errors" + "testing" + "time" + + "github.com/gogo/protobuf/proto" + . "github.com/onsi/gomega" + + . 
"github.com/ligato/vpp-agent/plugins/kvscheduler/api" + "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/test" + "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/utils" +) + +func TestDataChangeTransactions(t *testing.T) { + RegisterTestingT(t) + + // prepare KV Scheduler + scheduler := NewPlugin(UseDeps(func(deps *Deps) { + deps.HTTPHandlers = nil + })) + err := scheduler.Init() + Expect(err).To(BeNil()) + + // prepare mocks + mockSB := test.NewMockSouthbound() + // -> descriptor1: + descriptor1 := test.NewMockDescriptor(&KVDescriptor{ + Name: descriptor1Name, + NBKeyPrefix: prefixA, + KeySelector: prefixSelector(prefixA), + ValueTypeName: proto.MessageName(test.NewArrayValue()), + DerivedValues: test.ArrayValueDerBuilder, + WithMetadata: true, + }, mockSB, 0) + // -> descriptor2: + descriptor2 := test.NewMockDescriptor(&KVDescriptor{ + Name: descriptor2Name, + NBKeyPrefix: prefixB, + KeySelector: prefixSelector(prefixB), + ValueTypeName: proto.MessageName(test.NewArrayValue()), + DerivedValues: test.ArrayValueDerBuilder, + Dependencies: func(key string, value proto.Message) []Dependency { + if key == prefixB+baseValue2+"/item1" { + depKey := prefixA + baseValue1 + return []Dependency{ + {Label: depKey, Key: depKey}, + } + } + if key == prefixB+baseValue2+"/item2" { + depKey := prefixA + baseValue1 + "/item1" + return []Dependency{ + {Label: depKey, Key: depKey}, + } + } + return nil + }, + WithMetadata: true, + DumpDependencies: []string{descriptor1Name}, + }, mockSB, 0) + // -> descriptor3: + descriptor3 := test.NewMockDescriptor(&KVDescriptor{ + Name: descriptor3Name, + NBKeyPrefix: prefixC, + KeySelector: prefixSelector(prefixC), + ValueTypeName: proto.MessageName(test.NewArrayValue()), + DerivedValues: test.ArrayValueDerBuilder, + ModifyWithRecreate: func(key string, oldValue, newValue proto.Message, metadata Metadata) bool { + return key == prefixC+baseValue3 + }, + WithMetadata: true, + DumpDependencies: []string{descriptor2Name}, + }, 
mockSB, 0) + + // register all 3 descriptors with the scheduler + scheduler.RegisterKVDescriptor(descriptor1) + scheduler.RegisterKVDescriptor(descriptor2) + scheduler.RegisterKVDescriptor(descriptor3) + + // get metadata map created for each descriptor + metadataMap := scheduler.GetMetadataMap(descriptor1.Name) + nameToInteger1, withMetadataMap := metadataMap.(test.NameToInteger) + Expect(withMetadataMap).To(BeTrue()) + metadataMap = scheduler.GetMetadataMap(descriptor2.Name) + nameToInteger2, withMetadataMap := metadataMap.(test.NameToInteger) + Expect(withMetadataMap).To(BeTrue()) + metadataMap = scheduler.GetMetadataMap(descriptor3.Name) + nameToInteger3, withMetadataMap := metadataMap.(test.NameToInteger) + Expect(withMetadataMap).To(BeTrue()) + + // run non-resync transaction against empty SB + startTime := time.Now() + schedulerTxn := scheduler.StartNBTransaction() + schedulerTxn.SetValue(prefixB+baseValue2, test.NewLazyArrayValue("item1", "item2")) + schedulerTxn.SetValue(prefixA+baseValue1, test.NewLazyArrayValue("item2")) + schedulerTxn.SetValue(prefixC+baseValue3, test.NewLazyArrayValue("item1", "item2")) + description := "testing data change" + seqNum, err := schedulerTxn.Commit(WithDescription(context.Background(), description)) + stopTime := time.Now() + Expect(seqNum).To(BeEquivalentTo(0)) + Expect(err).ShouldNot(HaveOccurred()) + + // check the state of SB + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + // -> base value 1 + value := mockSB.GetValue(prefixA + baseValue1) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item2"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(0)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value was not added + value = mockSB.GetValue(prefixA + baseValue1 + "/item1") + Expect(value).To(BeNil()) + // -> item2 derived from base value 1 + value = 
mockSB.GetValue(prefixA + baseValue1 + "/item2") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item2"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> base value 2 + value = mockSB.GetValue(prefixB + baseValue2) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item1", "item2"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(0)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value 2 + value = mockSB.GetValue(prefixB + baseValue2 + "/item1") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item1"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item2 derived from base value 2 is pending + value = mockSB.GetValue(prefixB + baseValue2 + "/item2") + Expect(value).To(BeNil()) + // -> base value 3 + value = mockSB.GetValue(prefixC + baseValue3) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item1", "item2"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(0)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value 3 + value = mockSB.GetValue(prefixC + baseValue3 + "/item1") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item1"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item2 derived from base value 3 + value = mockSB.GetValue(prefixC + baseValue3 + "/item2") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item2"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + 
Expect(mockSB.GetValues(nil)).To(HaveLen(7)) + + // check pending values + pendingValues := scheduler.GetPendingValues(nil) + checkValues(pendingValues, []KeyValuePair{ + {Key: prefixB + baseValue2 + "/item2", Value: test.NewStringValue("item2")}, + }) + + // check metadata + metadata, exists := nameToInteger1.LookupByName(baseValue1) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(0)) + metadata, exists = nameToInteger2.LookupByName(baseValue2) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(0)) + metadata, exists = nameToInteger3.LookupByName(baseValue3) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(0)) + + // check operations executed in SB + opHistory := mockSB.PopHistoryOfOps() + Expect(opHistory).To(HaveLen(7)) + operation := opHistory[0] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[1] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1 + "/item2")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[2] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor2Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixB + baseValue2)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[3] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor2Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixB + baseValue2 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[4] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + 
Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[5] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[6] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3 + "/item2")) + Expect(operation.Err).To(BeNil()) + + // check transaction operations + txnHistory := scheduler.GetTransactionHistory(time.Time{}, time.Now()) + Expect(txnHistory).To(HaveLen(1)) + txn := txnHistory[0] + Expect(txn.PreRecord).To(BeFalse()) + Expect(txn.Start.After(startTime)).To(BeTrue()) + Expect(txn.Start.Before(txn.Stop)).To(BeTrue()) + Expect(txn.Stop.Before(stopTime)).To(BeTrue()) + Expect(txn.SeqNum).To(BeEquivalentTo(0)) + Expect(txn.TxnType).To(BeEquivalentTo(NBTransaction)) + Expect(txn.ResyncType).To(BeEquivalentTo(NotResync)) + Expect(txn.Description).To(Equal(description)) + checkRecordedValues(txn.Values, []RecordedKVPair{ + {Key: prefixA + baseValue1, Value: utils.RecordProtoMessage(test.NewArrayValue("item2")), Origin: FromNB}, + {Key: prefixB + baseValue2, Value: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), Origin: FromNB}, + {Key: prefixC + baseValue3, Value: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), Origin: FromNB}, + }) + Expect(txn.PreErrors).To(BeEmpty()) + + txnOps := RecordedTxnOps{ + { + Operation: Add, + Key: prefixA + baseValue1, + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixA + baseValue1 + "/item2", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + 
}, + { + Operation: Add, + Key: prefixB + baseValue2, + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixB + baseValue2 + "/item1", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixB + baseValue2 + "/item2", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsPending: true, + }, + { + Operation: Add, + Key: prefixC + baseValue3, + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixC + baseValue3 + "/item1", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixC + baseValue3 + "/item2", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + } + checkTxnOperations(txn.Planned, txnOps) + checkTxnOperations(txn.Executed, txnOps) + + // check flag stats + graphR := scheduler.graph.Read() + errorStats := graphR.GetFlagStats(ErrorFlagName, nil) + Expect(errorStats.TotalCount).To(BeEquivalentTo(0)) + pendingStats := graphR.GetFlagStats(PendingFlagName, nil) + Expect(pendingStats.TotalCount).To(BeEquivalentTo(1)) + derivedStats := graphR.GetFlagStats(DerivedFlagName, nil) + Expect(derivedStats.TotalCount).To(BeEquivalentTo(5)) + lastUpdateStats := graphR.GetFlagStats(LastUpdateFlagName, nil) + Expect(lastUpdateStats.TotalCount).To(BeEquivalentTo(8)) + lastChangeStats := graphR.GetFlagStats(LastChangeFlagName, nil) + Expect(lastChangeStats.TotalCount).To(BeEquivalentTo(3)) + descriptorStats := graphR.GetFlagStats(DescriptorFlagName, nil) + 
Expect(descriptorStats.TotalCount).To(BeEquivalentTo(8)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor1Name)) + Expect(descriptorStats.PerValueCount[descriptor1Name]).To(BeEquivalentTo(2)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor2Name)) + Expect(descriptorStats.PerValueCount[descriptor2Name]).To(BeEquivalentTo(3)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor3Name)) + Expect(descriptorStats.PerValueCount[descriptor3Name]).To(BeEquivalentTo(3)) + originStats := graphR.GetFlagStats(OriginFlagName, nil) + Expect(originStats.TotalCount).To(BeEquivalentTo(8)) + Expect(originStats.PerValueCount).To(HaveKey(FromNB.String())) + Expect(originStats.PerValueCount[FromNB.String()]).To(BeEquivalentTo(8)) + graphR.Release() + + // run 2nd non-resync transaction against empty SB + startTime = time.Now() + schedulerTxn2 := scheduler.StartNBTransaction() + schedulerTxn2.SetValue(prefixC+baseValue3, test.NewLazyArrayValue("item1")) + schedulerTxn2.SetValue(prefixA+baseValue1, test.NewLazyArrayValue("item1")) + seqNum, err = schedulerTxn2.Commit(context.Background()) + stopTime = time.Now() + Expect(seqNum).To(BeEquivalentTo(1)) + Expect(err).ShouldNot(HaveOccurred()) + + // check the state of SB + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + // -> base value 1 + value = mockSB.GetValue(prefixA + baseValue1) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item1"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(0)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value was added + value = mockSB.GetValue(prefixA + baseValue1 + "/item1") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item1"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item2 derived from base value 1 
was deleted + value = mockSB.GetValue(prefixA + baseValue1 + "/item2") + Expect(value).To(BeNil()) + // -> base value 2 + value = mockSB.GetValue(prefixB + baseValue2) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item1", "item2"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(0)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value 2 + value = mockSB.GetValue(prefixB + baseValue2 + "/item1") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item1"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item2 derived from base value 2 is no longer pending + value = mockSB.GetValue(prefixB + baseValue2 + "/item2") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item2"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> base value 3 + value = mockSB.GetValue(prefixC + baseValue3) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item1"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(1)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value 3 + value = mockSB.GetValue(prefixC + baseValue3 + "/item1") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item1"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item2 derived from base value 3 was deleted + value = mockSB.GetValue(prefixC + baseValue3 + "/item2") + Expect(value).To(BeNil()) + + // check pending values + Expect(scheduler.GetPendingValues(nil)).To(BeEmpty()) + + // check metadata + metadata, exists = 
nameToInteger1.LookupByName(baseValue1) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(0)) + metadata, exists = nameToInteger2.LookupByName(baseValue2) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(0)) + metadata, exists = nameToInteger3.LookupByName(baseValue3) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(1)) // re-created + + // check operations executed in SB + opHistory = mockSB.PopHistoryOfOps() + Expect(opHistory).To(HaveLen(10)) + operation = opHistory[0] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[1] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3 + "/item2")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[2] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[3] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[4] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[5] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + 
Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1 + "/item2")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[6] + Expect(operation.OpType).To(Equal(test.MockModify)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[7] + Expect(operation.OpType).To(Equal(test.MockUpdate)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor2Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixB + baseValue2 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[8] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[9] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor2Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixB + baseValue2 + "/item2")) + Expect(operation.Err).To(BeNil()) + + // check transaction operations + txnHistory = scheduler.GetTransactionHistory(startTime, stopTime) // first txn not included + Expect(txnHistory).To(HaveLen(1)) + txn = txnHistory[0] + Expect(txn.PreRecord).To(BeFalse()) + Expect(txn.Start.After(startTime)).To(BeTrue()) + Expect(txn.Start.Before(txn.Stop)).To(BeTrue()) + Expect(txn.Stop.Before(stopTime)).To(BeTrue()) + Expect(txn.SeqNum).To(BeEquivalentTo(1)) + Expect(txn.TxnType).To(BeEquivalentTo(NBTransaction)) + Expect(txn.ResyncType).To(BeEquivalentTo(NotResync)) + Expect(txn.Description).To(BeEmpty()) + checkRecordedValues(txn.Values, []RecordedKVPair{ + {Key: prefixA + baseValue1, Value: utils.RecordProtoMessage(test.NewArrayValue("item1")), Origin: FromNB}, + {Key: prefixC + baseValue3, Value: utils.RecordProtoMessage(test.NewArrayValue("item1")), Origin: FromNB}, + }) + 
Expect(txn.PreErrors).To(BeEmpty()) + + txnOps = RecordedTxnOps{ + { + Operation: Delete, + Key: prefixC + baseValue3 + "/item1", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Delete, + Key: prefixC + baseValue3 + "/item2", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Delete, + Key: prefixC + baseValue3, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsPending: true, + }, + { + Operation: Add, + Key: prefixC + baseValue3, + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + WasPending: true, + }, + { + Operation: Add, + Key: prefixC + baseValue3 + "/item1", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Delete, + Key: prefixA + baseValue1 + "/item2", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Modify, + Key: prefixA + baseValue1, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item2")), + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Update, + Key: prefixB + baseValue2 + "/item1", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixA + baseValue1 + "/item1", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixB + baseValue2 + 
"/item2", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + WasPending: true, + }, + } + checkTxnOperations(txn.Planned, txnOps) + checkTxnOperations(txn.Executed, txnOps) + + // check flag stats + graphR = scheduler.graph.Read() + errorStats = graphR.GetFlagStats(ErrorFlagName, nil) + Expect(errorStats.TotalCount).To(BeEquivalentTo(0)) + pendingStats = graphR.GetFlagStats(PendingFlagName, nil) + Expect(pendingStats.TotalCount).To(BeEquivalentTo(1)) + derivedStats = graphR.GetFlagStats(DerivedFlagName, nil) + Expect(derivedStats.TotalCount).To(BeEquivalentTo(9)) + lastUpdateStats = graphR.GetFlagStats(LastUpdateFlagName, nil) + Expect(lastUpdateStats.TotalCount).To(BeEquivalentTo(14)) + lastChangeStats = graphR.GetFlagStats(LastChangeFlagName, nil) + Expect(lastChangeStats.TotalCount).To(BeEquivalentTo(5)) + descriptorStats = graphR.GetFlagStats(DescriptorFlagName, nil) + Expect(descriptorStats.TotalCount).To(BeEquivalentTo(14)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor1Name)) + Expect(descriptorStats.PerValueCount[descriptor1Name]).To(BeEquivalentTo(4)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor2Name)) + Expect(descriptorStats.PerValueCount[descriptor2Name]).To(BeEquivalentTo(5)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor3Name)) + Expect(descriptorStats.PerValueCount[descriptor3Name]).To(BeEquivalentTo(5)) + originStats = graphR.GetFlagStats(OriginFlagName, nil) + Expect(originStats.TotalCount).To(BeEquivalentTo(14)) + Expect(originStats.PerValueCount).To(HaveKey(FromNB.String())) + Expect(originStats.PerValueCount[FromNB.String()]).To(BeEquivalentTo(14)) + graphR.Release() + + // close scheduler + err = scheduler.Close() + Expect(err).To(BeNil()) +} + +func TestDataChangeTransactionWithRevert(t *testing.T) { + RegisterTestingT(t) + + // prepare KV Scheduler + 
scheduler := NewPlugin(UseDeps(func(deps *Deps) { + deps.HTTPHandlers = nil + })) + err := scheduler.Init() + Expect(err).To(BeNil()) + + // prepare mocks + mockSB := test.NewMockSouthbound() + // -> descriptor1: + descriptor1 := test.NewMockDescriptor(&KVDescriptor{ + Name: descriptor1Name, + NBKeyPrefix: prefixA, + KeySelector: prefixSelector(prefixA), + ValueTypeName: proto.MessageName(test.NewArrayValue()), + DerivedValues: test.ArrayValueDerBuilder, + WithMetadata: true, + }, mockSB, 0) + // -> descriptor2: + descriptor2 := test.NewMockDescriptor(&KVDescriptor{ + Name: descriptor2Name, + NBKeyPrefix: prefixB, + KeySelector: prefixSelector(prefixB), + ValueTypeName: proto.MessageName(test.NewArrayValue()), + DerivedValues: test.ArrayValueDerBuilder, + Dependencies: func(key string, value proto.Message) []Dependency { + if key == prefixB+baseValue2+"/item1" { + depKey := prefixA + baseValue1 + return []Dependency{ + {Label: depKey, Key: depKey}, + } + } + if key == prefixB+baseValue2+"/item2" { + depKey := prefixA + baseValue1 + "/item1" + return []Dependency{ + {Label: depKey, Key: depKey}, + } + } + return nil + }, + WithMetadata: true, + DumpDependencies: []string{descriptor1Name}, + }, mockSB, 0) + // -> descriptor3: + descriptor3 := test.NewMockDescriptor(&KVDescriptor{ + Name: descriptor3Name, + NBKeyPrefix: prefixC, + KeySelector: prefixSelector(prefixC), + ValueTypeName: proto.MessageName(test.NewArrayValue()), + DerivedValues: test.ArrayValueDerBuilder, + ModifyWithRecreate: func(key string, oldValue, newValue proto.Message, metadata Metadata) bool { + return key == prefixC+baseValue3 + }, + WithMetadata: true, + DumpDependencies: []string{descriptor2Name}, + }, mockSB, 0) + + // register all 3 descriptors with the scheduler + scheduler.RegisterKVDescriptor(descriptor1) + scheduler.RegisterKVDescriptor(descriptor2) + scheduler.RegisterKVDescriptor(descriptor3) + + // get metadata map created for each descriptor + metadataMap := 
scheduler.GetMetadataMap(descriptor1.Name) + nameToInteger1, withMetadataMap := metadataMap.(test.NameToInteger) + Expect(withMetadataMap).To(BeTrue()) + metadataMap = scheduler.GetMetadataMap(descriptor2.Name) + nameToInteger2, withMetadataMap := metadataMap.(test.NameToInteger) + Expect(withMetadataMap).To(BeTrue()) + metadataMap = scheduler.GetMetadataMap(descriptor3.Name) + nameToInteger3, withMetadataMap := metadataMap.(test.NameToInteger) + Expect(withMetadataMap).To(BeTrue()) + + // run 1st non-resync transaction against empty SB + schedulerTxn := scheduler.StartNBTransaction() + schedulerTxn.SetValue(prefixB+baseValue2, test.NewLazyArrayValue("item1", "item2")) + schedulerTxn.SetValue(prefixA+baseValue1, test.NewLazyArrayValue("item2")) + schedulerTxn.SetValue(prefixC+baseValue3, test.NewLazyArrayValue("item1", "item2")) + seqNum, err := schedulerTxn.Commit(context.Background()) + Expect(seqNum).To(BeEquivalentTo(0)) + Expect(err).ShouldNot(HaveOccurred()) + mockSB.PopHistoryOfOps() + + // plan error before 2nd txn + failedModifyClb := func() { + mockSB.SetValue(prefixA+baseValue1, test.NewArrayValue(), + &test.OnlyInteger{Integer: 0}, FromNB, false) + } + mockSB.PlanError(prefixA+baseValue1, errors.New("failed to modify value"), failedModifyClb) + + // subscribe to receive notifications about errors + errorChan := make(chan KeyWithError, 5) + scheduler.SubscribeForErrors(errorChan, prefixSelector(prefixA)) + + // run 2nd non-resync transaction (over the SB state left by the 1st txn) that will fail and will be reverted + startTime := time.Now() + schedulerTxn2 := scheduler.StartNBTransaction() + schedulerTxn2.SetValue(prefixC+baseValue3, test.NewLazyArrayValue("item1")) + schedulerTxn2.SetValue(prefixA+baseValue1, test.NewLazyArrayValue("item1")) + seqNum, err = schedulerTxn2.Commit(WithRevert(context.Background())) + stopTime := time.Now() + Expect(seqNum).To(BeEquivalentTo(1)) + Expect(err).ToNot(BeNil()) + txnErr := err.(*TransactionError) + 
Expect(txnErr.GetTxnInitError()).ShouldNot(HaveOccurred()) + kvErrors := txnErr.GetKVErrors() + Expect(kvErrors).To(HaveLen(1)) + Expect(kvErrors[0].Key).To(BeEquivalentTo(prefixA + baseValue1)) + Expect(kvErrors[0].TxnOperation).To(BeEquivalentTo(Modify)) + Expect(kvErrors[0].Error.Error()).To(BeEquivalentTo("failed to modify value")) + + // receive the error notification + var errorNotif KeyWithError + Eventually(errorChan, time.Second).Should(Receive(&errorNotif)) + Expect(errorNotif.Key).To(Equal(prefixA + baseValue1)) + Expect(errorNotif.TxnOperation).To(BeEquivalentTo(Modify)) + Expect(errorNotif.Error).ToNot(BeNil()) + Expect(errorNotif.Error.Error()).To(BeEquivalentTo("failed to modify value")) + + // check the state of SB + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + // -> base value 1 + value := mockSB.GetValue(prefixA + baseValue1) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item2"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(0)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value 1 was NOT added + value = mockSB.GetValue(prefixA + baseValue1 + "/item1") + Expect(value).To(BeNil()) + // -> item2 derived from base value 1 was first deleted but then added back + value = mockSB.GetValue(prefixA + baseValue1 + "/item2") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item2"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> base value 2 + value = mockSB.GetValue(prefixB + baseValue2) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item1", "item2"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(0)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived 
from base value 2 + value = mockSB.GetValue(prefixB + baseValue2 + "/item1") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item1"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item2 derived from base value 2 is still pending + value = mockSB.GetValue(prefixB + baseValue2 + "/item2") + Expect(value).To(BeNil()) + // -> base value 3 was reverted back to state after 1st txn + value = mockSB.GetValue(prefixC + baseValue3) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item1", "item2"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(2)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value 3 + value = mockSB.GetValue(prefixC + baseValue3 + "/item1") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item1"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item2 derived from base value 3 + value = mockSB.GetValue(prefixC + baseValue3 + "/item2") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item2"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + + // check metadata + metadata, exists := nameToInteger1.LookupByName(baseValue1) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(0)) + metadata, exists = nameToInteger2.LookupByName(baseValue2) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(0)) + metadata, exists = nameToInteger3.LookupByName(baseValue3) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(2)) // re-created twice + + // check operations executed in SB during 2nd txn + opHistory := mockSB.PopHistoryOfOps() + 
Expect(opHistory).To(HaveLen(16)) + operation := opHistory[0] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[1] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3 + "/item2")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[2] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[3] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[4] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[5] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1 + "/item2")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[6] + Expect(operation.OpType).To(Equal(test.MockModify)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1)) + Expect(operation.Err).ToNot(BeNil()) + Expect(operation.Err.Error()).To(BeEquivalentTo("failed to modify value")) + // reverting: + operation = opHistory[7] // refresh failed value + 
Expect(operation.OpType).To(Equal(test.MockDump)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + checkValuesForCorrelation(operation.CorrelateDump, []KVWithMetadata{ + { + Key: prefixA + baseValue1, + Value: test.NewArrayValue("item1"), + Metadata: &test.OnlyInteger{Integer: 0}, + Origin: FromNB, + }, + }) + operation = opHistory[8] + Expect(operation.OpType).To(Equal(test.MockModify)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[9] + Expect(operation.OpType).To(Equal(test.MockUpdate)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor2Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixB + baseValue2 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[10] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1 + "/item2")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[11] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[12] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[13] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[14] + Expect(operation.OpType).To(Equal(test.MockAdd)) + 
Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[15] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3 + "/item2")) + Expect(operation.Err).To(BeNil()) + + // check transaction operations + txnHistory := scheduler.GetTransactionHistory(startTime, time.Now()) + Expect(txnHistory).To(HaveLen(1)) + txn := txnHistory[0] + Expect(txn.PreRecord).To(BeFalse()) + Expect(txn.Start.After(startTime)).To(BeTrue()) + Expect(txn.Start.Before(txn.Stop)).To(BeTrue()) + Expect(txn.Stop.Before(stopTime)).To(BeTrue()) + Expect(txn.SeqNum).To(BeEquivalentTo(1)) + Expect(txn.TxnType).To(BeEquivalentTo(NBTransaction)) + Expect(txn.ResyncType).To(BeEquivalentTo(NotResync)) + Expect(txn.Description).To(BeEmpty()) + checkRecordedValues(txn.Values, []RecordedKVPair{ + {Key: prefixA + baseValue1, Value: utils.RecordProtoMessage(test.NewArrayValue("item1")), Origin: FromNB}, + {Key: prefixC + baseValue3, Value: utils.RecordProtoMessage(test.NewArrayValue("item1")), Origin: FromNB}, + }) + Expect(txn.PreErrors).To(BeEmpty()) + + // planned operations + txnOps := RecordedTxnOps{ + { + Operation: Delete, + Key: prefixC + baseValue3 + "/item1", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Delete, + Key: prefixC + baseValue3 + "/item2", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Delete, + Key: prefixC + baseValue3, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsPending: true, + }, + { + Operation: Add, + Key: prefixC + baseValue3, + 
NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + WasPending: true, + }, + { + Operation: Add, + Key: prefixC + baseValue3 + "/item1", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Delete, + Key: prefixA + baseValue1 + "/item2", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Modify, + Key: prefixA + baseValue1, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item2")), + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Update, + Key: prefixB + baseValue2 + "/item1", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixA + baseValue1 + "/item1", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixB + baseValue2 + "/item2", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + WasPending: true, + }, + } + checkTxnOperations(txn.Planned, txnOps) + + // executed operations + txnOps = RecordedTxnOps{ + { + Operation: Delete, + Key: prefixC + baseValue3 + "/item1", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Delete, + Key: prefixC + baseValue3 + "/item2", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: 
FromNB, + }, + { + Operation: Delete, + Key: prefixC + baseValue3, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsPending: true, + }, + { + Operation: Add, + Key: prefixC + baseValue3, + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + WasPending: true, + }, + { + Operation: Add, + Key: prefixC + baseValue3 + "/item1", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Delete, + Key: prefixA + baseValue1 + "/item2", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Modify, + Key: prefixA + baseValue1, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item2")), + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + NewErr: errors.New("failed to modify value"), + }, + // reverting: + { + Operation: Modify, + Key: prefixA + baseValue1, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue()), + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + PrevErr: errors.New("failed to modify value"), + IsRevert: true, + }, + { + Operation: Update, + Key: prefixB + baseValue2 + "/item1", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsRevert: true, + }, + { + Operation: Add, + Key: prefixA + baseValue1 + "/item2", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsRevert: true, + }, + { + Operation: Delete, + Key: prefixC + baseValue3 + "/item1", + Derived: true, + PrevValue: 
utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsRevert: true, + }, + { + Operation: Delete, + Key: prefixC + baseValue3, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsPending: true, + IsRevert: true, + }, + { + Operation: Add, + Key: prefixC + baseValue3, + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsRevert: true, + WasPending: true, + }, + { + Operation: Add, + Key: prefixC + baseValue3 + "/item1", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsRevert: true, + }, + { + Operation: Add, + Key: prefixC + baseValue3 + "/item2", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsRevert: true, + }, + } + checkTxnOperations(txn.Executed, txnOps) + + // check flag stats + graphR := scheduler.graph.Read() + errorStats := graphR.GetFlagStats(ErrorFlagName, nil) + Expect(errorStats.TotalCount).To(BeEquivalentTo(1)) + pendingStats := graphR.GetFlagStats(PendingFlagName, nil) + Expect(pendingStats.TotalCount).To(BeEquivalentTo(1)) + derivedStats := graphR.GetFlagStats(DerivedFlagName, nil) + Expect(derivedStats.TotalCount).To(BeEquivalentTo(10)) + lastUpdateStats := graphR.GetFlagStats(LastUpdateFlagName, nil) + Expect(lastUpdateStats.TotalCount).To(BeEquivalentTo(17)) + lastChangeStats := graphR.GetFlagStats(LastChangeFlagName, nil) + Expect(lastChangeStats.TotalCount).To(BeEquivalentTo(7)) + descriptorStats := graphR.GetFlagStats(DescriptorFlagName, nil) + Expect(descriptorStats.TotalCount).To(BeEquivalentTo(17)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor1Name)) + Expect(descriptorStats.PerValueCount[descriptor1Name]).To(BeEquivalentTo(5)) + 
Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor2Name)) + Expect(descriptorStats.PerValueCount[descriptor2Name]).To(BeEquivalentTo(4)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor3Name)) + Expect(descriptorStats.PerValueCount[descriptor3Name]).To(BeEquivalentTo(8)) + originStats := graphR.GetFlagStats(OriginFlagName, nil) + Expect(originStats.TotalCount).To(BeEquivalentTo(17)) + Expect(originStats.PerValueCount).To(HaveKey(FromNB.String())) + Expect(originStats.PerValueCount[FromNB.String()]).To(BeEquivalentTo(17)) + graphR.Release() + + // close scheduler + err = scheduler.Close() + Expect(err).To(BeNil()) +} + +func TestDependencyCycles(t *testing.T) { + RegisterTestingT(t) + + // prepare KV Scheduler + scheduler := NewPlugin(UseDeps(func(deps *Deps) { + deps.HTTPHandlers = nil + })) + err := scheduler.Init() + Expect(err).To(BeNil()) + + // prepare mocks + mockSB := test.NewMockSouthbound() + // -> descriptor: + descriptor := test.NewMockDescriptor(&KVDescriptor{ + Name: descriptor1Name, + KeySelector: prefixSelector(prefixA), + NBKeyPrefix: prefixA, + ValueTypeName: proto.MessageName(test.NewStringValue("")), + ValueComparator: test.StringValueComparator, + Dependencies: func(key string, value proto.Message) []Dependency { + if key == prefixA+baseValue1 { + depKey := prefixA + baseValue2 + return []Dependency{ + {Label: depKey, Key: depKey}, + } + } + if key == prefixA+baseValue2 { + depKey := prefixA + baseValue3 + return []Dependency{ + {Label: depKey, Key: depKey}, + } + } + if key == prefixA+baseValue3 { + depKey1 := prefixA + baseValue1 + depKey2 := prefixA + baseValue4 + return []Dependency{ + {Label: depKey1, Key: depKey1}, + {Label: depKey2, Key: depKey2}, + } + } + return nil + }, + WithMetadata: false, + }, mockSB, 0, test.WithoutDump) + + // register the descriptor + scheduler.RegisterKVDescriptor(descriptor) + + // run non-resync transaction against empty SB + startTime := time.Now() + schedulerTxn := 
scheduler.StartNBTransaction() + schedulerTxn.SetValue(prefixA+baseValue1, test.NewLazyStringValue("base-value1-data")) + schedulerTxn.SetValue(prefixA+baseValue2, test.NewLazyStringValue("base-value2-data")) + schedulerTxn.SetValue(prefixA+baseValue3, test.NewLazyStringValue("base-value3-data")) + description := "testing dependency cycles" + seqNum, err := schedulerTxn.Commit(WithDescription(context.Background(), description)) + stopTime := time.Now() + Expect(seqNum).To(BeEquivalentTo(0)) + Expect(err).ShouldNot(HaveOccurred()) + + // check the state of SB + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + Expect(mockSB.GetValues(nil)).To(HaveLen(0)) + + // check pending values + pendingValues := scheduler.GetPendingValues(nil) + checkValues(pendingValues, []KeyValuePair{ + {Key: prefixA + baseValue1, Value: test.NewStringValue("base-value1-data")}, + {Key: prefixA + baseValue2, Value: test.NewStringValue("base-value2-data")}, + {Key: prefixA + baseValue3, Value: test.NewStringValue("base-value3-data")}, + }) + + // check operations executed in SB + opHistory := mockSB.PopHistoryOfOps() + Expect(opHistory).To(HaveLen(0)) + + // check transaction operations + txnHistory := scheduler.GetTransactionHistory(time.Time{}, time.Now()) + Expect(txnHistory).To(HaveLen(1)) + txn := txnHistory[0] + Expect(txn.PreRecord).To(BeFalse()) + Expect(txn.Start.After(startTime)).To(BeTrue()) + Expect(txn.Start.Before(txn.Stop)).To(BeTrue()) + Expect(txn.Stop.Before(stopTime)).To(BeTrue()) + Expect(txn.SeqNum).To(BeEquivalentTo(0)) + Expect(txn.TxnType).To(BeEquivalentTo(NBTransaction)) + Expect(txn.ResyncType).To(BeEquivalentTo(NotResync)) + Expect(txn.Description).To(Equal(description)) + checkRecordedValues(txn.Values, []RecordedKVPair{ + {Key: prefixA + baseValue1, Value: utils.RecordProtoMessage(test.NewStringValue("base-value1-data")), Origin: FromNB}, + {Key: prefixA + baseValue2, Value: utils.RecordProtoMessage(test.NewStringValue("base-value2-data")), Origin: FromNB}, 
+ {Key: prefixA + baseValue3, Value: utils.RecordProtoMessage(test.NewStringValue("base-value3-data")), Origin: FromNB}, + }) + Expect(txn.PreErrors).To(BeEmpty()) + + txnOps := RecordedTxnOps{ + { + Operation: Add, + Key: prefixA + baseValue1, + NewValue: utils.RecordProtoMessage(test.NewStringValue("base-value1-data")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsPending: true, + }, + { + Operation: Add, + Key: prefixA + baseValue2, + NewValue: utils.RecordProtoMessage(test.NewStringValue("base-value2-data")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsPending: true, + }, + { + Operation: Add, + Key: prefixA + baseValue3, + NewValue: utils.RecordProtoMessage(test.NewStringValue("base-value3-data")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsPending: true, + }, + } + checkTxnOperations(txn.Planned, txnOps) + checkTxnOperations(txn.Executed, txnOps) + + // check flag stats + graphR := scheduler.graph.Read() + errorStats := graphR.GetFlagStats(ErrorFlagName, nil) + Expect(errorStats.TotalCount).To(BeEquivalentTo(0)) + pendingStats := graphR.GetFlagStats(PendingFlagName, nil) + Expect(pendingStats.TotalCount).To(BeEquivalentTo(3)) + derivedStats := graphR.GetFlagStats(DerivedFlagName, nil) + Expect(derivedStats.TotalCount).To(BeEquivalentTo(0)) + lastUpdateStats := graphR.GetFlagStats(LastUpdateFlagName, nil) + Expect(lastUpdateStats.TotalCount).To(BeEquivalentTo(3)) + lastChangeStats := graphR.GetFlagStats(LastChangeFlagName, nil) + Expect(lastChangeStats.TotalCount).To(BeEquivalentTo(3)) + descriptorStats := graphR.GetFlagStats(DescriptorFlagName, nil) + Expect(descriptorStats.TotalCount).To(BeEquivalentTo(3)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor1Name)) + Expect(descriptorStats.PerValueCount[descriptor1Name]).To(BeEquivalentTo(3)) + originStats := graphR.GetFlagStats(OriginFlagName, nil) + Expect(originStats.TotalCount).To(BeEquivalentTo(3)) + Expect(originStats.PerValueCount).To(HaveKey(FromNB.String())) + 
Expect(originStats.PerValueCount[FromNB.String()]).To(BeEquivalentTo(3)) + graphR.Release() + + // run second transaction that will make the cycle of values ready to be added + startTime = time.Now() + schedulerTxn = scheduler.StartNBTransaction() + schedulerTxn.SetValue(prefixA+baseValue4, test.NewLazyStringValue("base-value4-data")) + seqNum, err = schedulerTxn.Commit(context.Background()) + stopTime = time.Now() + Expect(seqNum).To(BeEquivalentTo(1)) + Expect(err).ShouldNot(HaveOccurred()) + + // check the state of SB + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + Expect(mockSB.GetValues(nil)).To(HaveLen(4)) + // -> base value 1 + value := mockSB.GetValue(prefixA + baseValue1) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("base-value1-data"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> base value 2 + value = mockSB.GetValue(prefixA + baseValue2) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("base-value2-data"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> base value 3 + value = mockSB.GetValue(prefixA + baseValue3) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("base-value3-data"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> base value 4 + value = mockSB.GetValue(prefixA + baseValue4) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("base-value4-data"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + + // check pending values + pendingValues = scheduler.GetPendingValues(nil) + Expect(pendingValues).To(BeEmpty()) + + // check operations executed in SB + opHistory = mockSB.PopHistoryOfOps() + Expect(opHistory).To(HaveLen(4)) + operation := opHistory[0] + 
Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue4)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[1] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue3)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[2] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue2)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[3] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1)) + Expect(operation.Err).To(BeNil()) + + // check transaction operations + txnHistory = scheduler.GetTransactionHistory(time.Time{}, time.Now()) + Expect(txnHistory).To(HaveLen(2)) + txn = txnHistory[1] + Expect(txn.PreRecord).To(BeFalse()) + Expect(txn.Start.After(startTime)).To(BeTrue()) + Expect(txn.Start.Before(txn.Stop)).To(BeTrue()) + Expect(txn.Stop.Before(stopTime)).To(BeTrue()) + Expect(txn.SeqNum).To(BeEquivalentTo(1)) + Expect(txn.TxnType).To(BeEquivalentTo(NBTransaction)) + Expect(txn.ResyncType).To(BeEquivalentTo(NotResync)) + Expect(txn.Description).To(BeEmpty()) + checkRecordedValues(txn.Values, []RecordedKVPair{ + {Key: prefixA + baseValue4, Value: utils.RecordProtoMessage(test.NewStringValue("base-value4-data")), Origin: FromNB}, + }) + Expect(txn.PreErrors).To(BeEmpty()) + + txnOps = RecordedTxnOps{ + { + Operation: Add, + Key: prefixA + baseValue4, + NewValue: utils.RecordProtoMessage(test.NewStringValue("base-value4-data")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixA + baseValue3, + PrevValue: 
utils.RecordProtoMessage(test.NewStringValue("base-value3-data")), + NewValue: utils.RecordProtoMessage(test.NewStringValue("base-value3-data")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + WasPending: true, + }, + { + Operation: Add, + Key: prefixA + baseValue2, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("base-value2-data")), + NewValue: utils.RecordProtoMessage(test.NewStringValue("base-value2-data")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + WasPending: true, + }, + { + Operation: Add, + Key: prefixA + baseValue1, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("base-value1-data")), + NewValue: utils.RecordProtoMessage(test.NewStringValue("base-value1-data")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + WasPending: true, + }, + } + checkTxnOperations(txn.Planned, txnOps) + checkTxnOperations(txn.Executed, txnOps) + + // check flag stats + graphR = scheduler.graph.Read() + errorStats = graphR.GetFlagStats(ErrorFlagName, nil) + Expect(errorStats.TotalCount).To(BeEquivalentTo(0)) + pendingStats = graphR.GetFlagStats(PendingFlagName, nil) + Expect(pendingStats.TotalCount).To(BeEquivalentTo(3)) + derivedStats = graphR.GetFlagStats(DerivedFlagName, nil) + Expect(derivedStats.TotalCount).To(BeEquivalentTo(0)) + lastUpdateStats = graphR.GetFlagStats(LastUpdateFlagName, nil) + Expect(lastUpdateStats.TotalCount).To(BeEquivalentTo(7)) + lastChangeStats = graphR.GetFlagStats(LastChangeFlagName, nil) + Expect(lastChangeStats.TotalCount).To(BeEquivalentTo(7)) + descriptorStats = graphR.GetFlagStats(DescriptorFlagName, nil) + Expect(descriptorStats.TotalCount).To(BeEquivalentTo(7)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor1Name)) + Expect(descriptorStats.PerValueCount[descriptor1Name]).To(BeEquivalentTo(7)) + originStats = graphR.GetFlagStats(OriginFlagName, nil) + Expect(originStats.TotalCount).To(BeEquivalentTo(7)) + Expect(originStats.PerValueCount).To(HaveKey(FromNB.String())) + 
Expect(originStats.PerValueCount[FromNB.String()]).To(BeEquivalentTo(7)) + graphR.Release() + + // plan error before 3rd txn + mockSB.PlanError(prefixA+baseValue2, errors.New("failed to remove the value"), nil) + + // run third transaction that will break the cycle even though the delete operation will fail + startTime = time.Now() + schedulerTxn = scheduler.StartNBTransaction() + schedulerTxn.SetValue(prefixA+baseValue2, nil) + seqNum, err = schedulerTxn.Commit(context.Background()) + stopTime = time.Now() + Expect(seqNum).To(BeEquivalentTo(2)) + Expect(err).ToNot(BeNil()) + txnErr := err.(*TransactionError) + Expect(txnErr.GetTxnInitError()).ShouldNot(HaveOccurred()) + kvErrors := txnErr.GetKVErrors() + Expect(kvErrors).To(HaveLen(1)) + Expect(kvErrors[0].Key).To(BeEquivalentTo(prefixA + baseValue2)) + Expect(kvErrors[0].TxnOperation).To(BeEquivalentTo(Delete)) + Expect(kvErrors[0].Error.Error()).To(BeEquivalentTo("failed to remove the value")) + + // check the state of SB + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + Expect(mockSB.GetValues(nil)).To(HaveLen(2)) + // -> base value 1 - pending + value = mockSB.GetValue(prefixA + baseValue1) + Expect(value).To(BeNil()) + // -> base value 2 - failed to remove + value = mockSB.GetValue(prefixA + baseValue2) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("base-value2-data"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> base value 3 - pending + value = mockSB.GetValue(prefixA + baseValue3) + Expect(value).To(BeNil()) + // -> base value 4 + value = mockSB.GetValue(prefixA + baseValue4) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("base-value4-data"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + + // check pending values + pendingValues = scheduler.GetPendingValues(nil) + checkValues(pendingValues, 
[]KeyValuePair{ + {Key: prefixA + baseValue1, Value: test.NewStringValue("base-value1-data")}, + {Key: prefixA + baseValue3, Value: test.NewStringValue("base-value3-data")}, + }) + + // check operations executed in SB + opHistory = mockSB.PopHistoryOfOps() + Expect(opHistory).To(HaveLen(3)) + operation = opHistory[0] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue3)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[1] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[2] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue2)) + Expect(operation.Err.Error()).To(BeEquivalentTo("failed to remove the value")) + + // check transaction operations + txnHistory = scheduler.GetTransactionHistory(time.Time{}, time.Now()) + Expect(txnHistory).To(HaveLen(3)) + txn = txnHistory[2] + Expect(txn.PreRecord).To(BeFalse()) + Expect(txn.Start.After(startTime)).To(BeTrue()) + Expect(txn.Start.Before(txn.Stop)).To(BeTrue()) + Expect(txn.Stop.Before(stopTime)).To(BeTrue()) + Expect(txn.SeqNum).To(BeEquivalentTo(2)) + Expect(txn.TxnType).To(BeEquivalentTo(NBTransaction)) + Expect(txn.ResyncType).To(BeEquivalentTo(NotResync)) + Expect(txn.Description).To(BeEmpty()) + checkRecordedValues(txn.Values, []RecordedKVPair{ + {Key: prefixA + baseValue2, Value: utils.RecordProtoMessage(nil), Origin: FromNB}, + }) + Expect(txn.PreErrors).To(BeEmpty()) + + txnOps = RecordedTxnOps{ + { + Operation: Delete, + Key: prefixA + baseValue3, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("base-value3-data")), + PrevOrigin: FromNB, + 
NewOrigin: FromNB, + IsPending: true, + }, + { + Operation: Delete, + Key: prefixA + baseValue1, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("base-value1-data")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsPending: true, + }, + { + Operation: Delete, + Key: prefixA + baseValue2, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("base-value2-data")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + } + checkTxnOperations(txn.Planned, txnOps) + txnOps[2].NewErr = errors.New("failed to remove the value") + checkTxnOperations(txn.Executed, txnOps) + + // check flag stats + graphR = scheduler.graph.Read() + errorStats = graphR.GetFlagStats(ErrorFlagName, nil) + Expect(errorStats.TotalCount).To(BeEquivalentTo(1)) + pendingStats = graphR.GetFlagStats(PendingFlagName, nil) + Expect(pendingStats.TotalCount).To(BeEquivalentTo(5)) + derivedStats = graphR.GetFlagStats(DerivedFlagName, nil) + Expect(derivedStats.TotalCount).To(BeEquivalentTo(0)) + lastUpdateStats = graphR.GetFlagStats(LastUpdateFlagName, nil) + Expect(lastUpdateStats.TotalCount).To(BeEquivalentTo(10)) + lastChangeStats = graphR.GetFlagStats(LastChangeFlagName, nil) + Expect(lastChangeStats.TotalCount).To(BeEquivalentTo(10)) + descriptorStats = graphR.GetFlagStats(DescriptorFlagName, nil) + Expect(descriptorStats.TotalCount).To(BeEquivalentTo(10)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor1Name)) + Expect(descriptorStats.PerValueCount[descriptor1Name]).To(BeEquivalentTo(10)) + originStats = graphR.GetFlagStats(OriginFlagName, nil) + Expect(originStats.TotalCount).To(BeEquivalentTo(10)) + Expect(originStats.PerValueCount).To(HaveKey(FromNB.String())) + Expect(originStats.PerValueCount[FromNB.String()]).To(BeEquivalentTo(10)) + graphR.Release() + + // finally, run 4th txn to get back the removed value + schedulerTxn = scheduler.StartNBTransaction() + schedulerTxn.SetValue(prefixA+baseValue2, test.NewLazyStringValue("base-value2-data-new")) + seqNum, err = 
schedulerTxn.Commit(context.Background()) + Expect(seqNum).To(BeEquivalentTo(3)) + Expect(err).ShouldNot(HaveOccurred()) + + // check the state of SB + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + Expect(mockSB.GetValues(nil)).To(HaveLen(4)) + // -> base value 1 + value = mockSB.GetValue(prefixA + baseValue1) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("base-value1-data"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> base value 2 + value = mockSB.GetValue(prefixA + baseValue2) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("base-value2-data-new"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> base value 3 + value = mockSB.GetValue(prefixA + baseValue3) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("base-value3-data"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> base value 4 + value = mockSB.GetValue(prefixA + baseValue4) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("base-value4-data"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + + // check pending values + pendingValues = scheduler.GetPendingValues(nil) + Expect(pendingValues).To(BeEmpty()) +} + +func TestSpecialCase(t *testing.T) { + RegisterTestingT(t) + + // prepare KV Scheduler + scheduler := NewPlugin(UseDeps(func(deps *Deps) { + deps.HTTPHandlers = nil + })) + err := scheduler.Init() + Expect(err).To(BeNil()) + + // prepare mocks + mockSB := test.NewMockSouthbound() + // descriptor: + descriptor := test.NewMockDescriptor(&KVDescriptor{ + Name: descriptor1Name, + NBKeyPrefix: prefixA, + KeySelector: prefixSelector(prefixA), + ValueTypeName: proto.MessageName(test.NewArrayValue()), + DerivedValues: 
test.ArrayValueDerBuilder, + WithMetadata: true, + }, mockSB, 0) + scheduler.RegisterKVDescriptor(descriptor) + + // run non-resync transaction against empty SB + schedulerTxn := scheduler.StartNBTransaction() + schedulerTxn.SetValue(prefixA+baseValue1, test.NewLazyArrayValue("item1")) + seqNum, err := schedulerTxn.Commit(context.Background()) + Expect(seqNum).To(BeEquivalentTo(0)) + Expect(err).ShouldNot(HaveOccurred()) + + // check the state of SB + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + // -> base value 1 + value := mockSB.GetValue(prefixA + baseValue1) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item1"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(0)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value 1 + value = mockSB.GetValue(prefixA + baseValue1 + "/item1") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item1"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + + // plan error before 2nd txn + failedDeleteClb := func() { + mockSB.SetValue(prefixA+baseValue1, test.NewArrayValue(), + &test.OnlyInteger{Integer: 0}, FromNB, false) + } + mockSB.PlanError(prefixA+baseValue1+"/item1", errors.New("failed to delete value"), failedDeleteClb) + + // run 2nd non-resync transaction that will have errors + schedulerTxn2 := scheduler.StartNBTransaction() + schedulerTxn2.SetValue(prefixA+baseValue1, nil) + seqNum, err = schedulerTxn2.Commit(WithRevert(context.Background())) + Expect(seqNum).To(BeEquivalentTo(1)) + Expect(err).ToNot(BeNil()) + txnErr := err.(*TransactionError) + Expect(txnErr.GetTxnInitError()).ShouldNot(HaveOccurred()) + kvErrors := txnErr.GetKVErrors() + Expect(kvErrors).To(HaveLen(1)) + Expect(kvErrors[0].Key).To(BeEquivalentTo(prefixA + baseValue1 + "/item1")) + 
Expect(kvErrors[0].TxnOperation).To(BeEquivalentTo(Delete)) + Expect(kvErrors[0].Error.Error()).To(BeEquivalentTo("failed to delete value")) + + // close scheduler + err = scheduler.Close() + Expect(err).To(BeNil()) +} +*/ \ No newline at end of file diff --git a/plugins/kvscheduler/descriptor-adapter/generator.go b/plugins/kvscheduler/descriptor-adapter/generator.go new file mode 100644 index 0000000000..8a3a06961b --- /dev/null +++ b/plugins/kvscheduler/descriptor-adapter/generator.go @@ -0,0 +1,143 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// descriptor-generator generates all boiler-plate code needed to adapt type-safe +// KV descriptor for the KVDescriptor structure definition. +// +// To use the generator, add go generate command into your descriptor as a comment: +// //go:generate descriptor-adapter --descriptor-name --value-type [--meta-type ] [--output-dir ] [--import ]... +// +// Note: import paths can be relative to the file with the go:generate comment. + +package main + +import ( + "bytes" + "flag" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "text/template" +) + +// ArrayFlag implements repeated flag. +type ArrayFlag struct { + values []string +} + +// String return human-readable string representation of the array of flags. 
+func (af *ArrayFlag) String() string { + str := "[" + for idx, value := range af.values { + str += value + if idx < len(af.values)-1 { + str += ", " + } + } + str += "]" + return str +} + +// Set adds a value into the array. +func (af *ArrayFlag) Set(value string) error { + af.values = append(af.values, value) + return nil +} + +var ( + imports ArrayFlag + + outputDirFlag = flag.String("output-dir", ".", "Output directory where adapter package will be generated.") + descriptorNameFlag = flag.String("descriptor-name", "", "Name of the descriptor.") + valueTypeFlag = flag.String("value-type", "", "Type of the described values.") + metaTypeFlag = flag.String("meta-type", "interface{}", "Type of the metadata used by the descriptor.") +) + +// TemplateData encapsulates input arguments for the template. +type TemplateData struct { + DescriptorName string + ValueT string + MetadataT string + Imports []string +} + +// PathExists returns true if the given path already exists in the file system. +func PathExists(path string) bool { + _, err := os.Stat(path) + return !os.IsNotExist(err) +} + +func main() { + flag.Var(&imports, "import", "Package to be imported in the generated adapter (can be relative path).") + flag.Parse() + + // prepare input data for the template + inputData := TemplateData{ + DescriptorName: *descriptorNameFlag, + ValueT: *valueTypeFlag, + MetadataT: *metaTypeFlag, + } + + // expand relative import paths + gopath := os.Getenv("GOPATH") + cwd, err := os.Getwd() + if err != nil { + fmt.Fprintln(os.Stderr, "ERROR: ", err) + os.Exit(2) + } + + for _, importPath := range imports.values { + if !PathExists(filepath.Join(gopath, "src", importPath)) { + asRelative := filepath.Join(cwd, importPath) + if PathExists(asRelative) { + importPath = filepath.Clean(asRelative) + importPath = strings.TrimPrefix(importPath, gopath+"/src") + importPath = strings.TrimLeft(importPath, "/") + } + } + inputData.Imports = append(inputData.Imports, importPath) + } + + if 
inputData.ValueT == "" || inputData.DescriptorName == "" { + fmt.Fprintln(os.Stderr, "ERROR: value-type and descriptor-name must be specified") + os.Exit(1) + } + + // generate adapter source code from the template + var buf bytes.Buffer + t := template.Must(template.New("").Parse(adapterTemplate)) + err = t.Execute(&buf, inputData) + if err != nil { + fmt.Fprintln(os.Stderr, "ERROR: ", err) + os.Exit(2) + } + + // prepare directory for the generated adapter + directory := *outputDirFlag + "/adapter/" + err = os.MkdirAll(directory, 0777) + if err != nil { + fmt.Fprintln(os.Stderr, "ERROR: ", err) + os.Exit(3) + } + + // output the generated adapter into the file + filename := directory + "/" + strings.ToLower(*descriptorNameFlag) + ".go" + err = ioutil.WriteFile(filename, buf.Bytes(), 0644) + if err != nil { + fmt.Fprintln(os.Stderr, "ERROR: ", err) + os.Exit(4) + } +} diff --git a/plugins/kvscheduler/descriptor-adapter/template.go b/plugins/kvscheduler/descriptor-adapter/template.go new file mode 100644 index 0000000000..3b0a7b965b --- /dev/null +++ b/plugins/kvscheduler/descriptor-adapter/template.go @@ -0,0 +1,253 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +const adapterTemplate = `// Code generated by adapter-generator. DO NOT EDIT. + +package adapter + +import ( + "github.com/gogo/protobuf/proto" + . 
"github.com/ligato/vpp-agent/plugins/kvscheduler/api" + + {{- range $i, $path := .Imports }} + "{{ $path }}" + {{- end }} +) + +////////// type-safe key-value pair with metadata ////////// + +type {{ .DescriptorName }}KVWithMetadata struct { + Key string + Value {{ .ValueT }} + Metadata {{ .MetadataT }} + Origin ValueOrigin +} + +////////// type-safe Descriptor structure ////////// + +type {{ .DescriptorName }}Descriptor struct { + Name string + KeySelector KeySelector + ValueTypeName string + KeyLabel func(key string) string + ValueComparator func(key string, oldValue, newValue {{ .ValueT }}) bool + NBKeyPrefix string + WithMetadata bool + MetadataMapFactory MetadataMapFactory + Validate func(key string, value {{ .ValueT }}) error + Create func(key string, value {{ .ValueT }}) (metadata {{ .MetadataT }}, err error) + Delete func(key string, value {{ .ValueT }}, metadata {{ .MetadataT }}) error + Update func(key string, oldValue, newValue {{ .ValueT }}, oldMetadata {{ .MetadataT }}) (newMetadata {{ .MetadataT }}, err error) + UpdateWithRecreate func(key string, oldValue, newValue {{ .ValueT }}, metadata {{ .MetadataT }}) bool + Retrieve func(correlate []{{ .DescriptorName }}KVWithMetadata) ([]{{ .DescriptorName }}KVWithMetadata, error) + IsRetriableFailure func(err error) bool + DerivedValues func(key string, value {{ .ValueT }}) []KeyValuePair + Dependencies func(key string, value {{ .ValueT }}) []Dependency + RetrieveDependencies []string /* descriptor name */ +} + +////////// Descriptor adapter ////////// + +type {{ .DescriptorName }}DescriptorAdapter struct { + descriptor *{{ .DescriptorName }}Descriptor +} + +func New{{ .DescriptorName }}Descriptor(typedDescriptor *{{ .DescriptorName }}Descriptor) *KVDescriptor { + adapter := &{{ .DescriptorName }}DescriptorAdapter{descriptor: typedDescriptor} + descriptor := &KVDescriptor{ + Name: typedDescriptor.Name, + KeySelector: typedDescriptor.KeySelector, + ValueTypeName: typedDescriptor.ValueTypeName, + KeyLabel: 
typedDescriptor.KeyLabel, + NBKeyPrefix: typedDescriptor.NBKeyPrefix, + WithMetadata: typedDescriptor.WithMetadata, + MetadataMapFactory: typedDescriptor.MetadataMapFactory, + IsRetriableFailure: typedDescriptor.IsRetriableFailure, + RetrieveDependencies: typedDescriptor.RetrieveDependencies, + } + if typedDescriptor.ValueComparator != nil { + descriptor.ValueComparator = adapter.ValueComparator + } + if typedDescriptor.Validate != nil { + descriptor.Validate = adapter.Validate + } + if typedDescriptor.Create != nil { + descriptor.Create = adapter.Create + } + if typedDescriptor.Delete != nil { + descriptor.Delete = adapter.Delete + } + if typedDescriptor.Update != nil { + descriptor.Update = adapter.Update + } + if typedDescriptor.UpdateWithRecreate != nil { + descriptor.UpdateWithRecreate = adapter.UpdateWithRecreate + } + if typedDescriptor.Retrieve != nil { + descriptor.Retrieve = adapter.Retrieve + } + if typedDescriptor.Dependencies != nil { + descriptor.Dependencies = adapter.Dependencies + } + if typedDescriptor.DerivedValues != nil { + descriptor.DerivedValues = adapter.DerivedValues + } + return descriptor +} + +func (da *{{ .DescriptorName }}DescriptorAdapter) ValueComparator(key string, oldValue, newValue proto.Message) bool { + typedOldValue, err1 := cast{{ .DescriptorName }}Value(key, oldValue) + typedNewValue, err2 := cast{{ .DescriptorName }}Value(key, newValue) + if err1 != nil || err2 != nil { + return false + } + return da.descriptor.ValueComparator(key, typedOldValue, typedNewValue) +} + +func (da *{{ .DescriptorName }}DescriptorAdapter) Validate(key string, value proto.Message) (err error) { + typedValue, err := cast{{ .DescriptorName }}Value(key, value) + if err != nil { + return err + } + return da.descriptor.Validate(key, typedValue) +} + +func (da *{{ .DescriptorName }}DescriptorAdapter) Create(key string, value proto.Message) (metadata Metadata, err error) { + typedValue, err := cast{{ .DescriptorName }}Value(key, value) + if err != nil { 
+ return nil, err + } + return da.descriptor.Create(key, typedValue) +} + +func (da *{{ .DescriptorName }}DescriptorAdapter) Update(key string, oldValue, newValue proto.Message, oldMetadata Metadata) (newMetadata Metadata, err error) { + oldTypedValue, err := cast{{ .DescriptorName }}Value(key, oldValue) + if err != nil { + return nil, err + } + newTypedValue, err := cast{{ .DescriptorName }}Value(key, newValue) + if err != nil { + return nil, err + } + typedOldMetadata, err := cast{{ .DescriptorName }}Metadata(key, oldMetadata) + if err != nil { + return nil, err + } + return da.descriptor.Update(key, oldTypedValue, newTypedValue, typedOldMetadata) +} + +func (da *{{ .DescriptorName }}DescriptorAdapter) Delete(key string, value proto.Message, metadata Metadata) error { + typedValue, err := cast{{ .DescriptorName }}Value(key, value) + if err != nil { + return err + } + typedMetadata, err := cast{{ .DescriptorName }}Metadata(key, metadata) + if err != nil { + return err + } + return da.descriptor.Delete(key, typedValue, typedMetadata) +} + +func (da *{{ .DescriptorName }}DescriptorAdapter) UpdateWithRecreate(key string, oldValue, newValue proto.Message, metadata Metadata) bool { + oldTypedValue, err := cast{{ .DescriptorName }}Value(key, oldValue) + if err != nil { + return true + } + newTypedValue, err := cast{{ .DescriptorName }}Value(key, newValue) + if err != nil { + return true + } + typedMetadata, err := cast{{ .DescriptorName }}Metadata(key, metadata) + if err != nil { + return true + } + return da.descriptor.UpdateWithRecreate(key, oldTypedValue, newTypedValue, typedMetadata) +} + +func (da *{{ .DescriptorName }}DescriptorAdapter) Retrieve(correlate []KVWithMetadata) ([]KVWithMetadata, error) { + var correlateWithType []{{ .DescriptorName }}KVWithMetadata + for _, kvpair := range correlate { + typedValue, err := cast{{ .DescriptorName }}Value(kvpair.Key, kvpair.Value) + if err != nil { + continue + } + typedMetadata, err := cast{{ .DescriptorName 
}}Metadata(kvpair.Key, kvpair.Metadata) + if err != nil { + continue + } + correlateWithType = append(correlateWithType, + {{ .DescriptorName }}KVWithMetadata{ + Key: kvpair.Key, + Value: typedValue, + Metadata: typedMetadata, + Origin: kvpair.Origin, + }) + } + + typedValues, err := da.descriptor.Retrieve(correlateWithType) + if err != nil { + return nil, err + } + var values []KVWithMetadata + for _, typedKVWithMetadata := range typedValues { + kvWithMetadata := KVWithMetadata{ + Key: typedKVWithMetadata.Key, + Metadata: typedKVWithMetadata.Metadata, + Origin: typedKVWithMetadata.Origin, + } + kvWithMetadata.Value = typedKVWithMetadata.Value + values = append(values, kvWithMetadata) + } + return values, err +} + +func (da *{{ .DescriptorName }}DescriptorAdapter) DerivedValues(key string, value proto.Message) []KeyValuePair { + typedValue, err := cast{{ .DescriptorName }}Value(key, value) + if err != nil { + return nil + } + return da.descriptor.DerivedValues(key, typedValue) +} + +func (da *{{ .DescriptorName }}DescriptorAdapter) Dependencies(key string, value proto.Message) []Dependency { + typedValue, err := cast{{ .DescriptorName }}Value(key, value) + if err != nil { + return nil + } + return da.descriptor.Dependencies(key, typedValue) +} + +////////// Helper methods ////////// + +func cast{{ .DescriptorName }}Value(key string, value proto.Message) ({{ .ValueT }}, error) { + typedValue, ok := value.({{ .ValueT }}) + if !ok { + return nil, ErrInvalidValueType(key, value) + } + return typedValue, nil +} + +func cast{{ .DescriptorName }}Metadata(key string, metadata Metadata) ({{ .MetadataT }}, error) { + if metadata == nil { + return nil, nil + } + typedMetadata, ok := metadata.({{ .MetadataT }}) + if !ok { + return nil, ErrInvalidMetadataType(key) + } + return typedMetadata, nil +} +` diff --git a/plugins/kvscheduler/descriptor_handler.go b/plugins/kvscheduler/descriptor_handler.go new file mode 100644 index 0000000000..8a9f60a09b --- /dev/null +++ 
b/plugins/kvscheduler/descriptor_handler.go @@ -0,0 +1,126 @@ +package kvscheduler + +import ( + "github.com/gogo/protobuf/proto" + kvs "github.com/ligato/vpp-agent/plugins/kvscheduler/api" +) + +// descriptorHandler handles access to descriptor methods (callbacks). +// For callbacks not provided, a default return value is used instead. +type descriptorHandler struct { + descriptor *kvs.KVDescriptor +} + +// keyLabel by default returns the key itself. +func (h *descriptorHandler) keyLabel(key string) string { + if h.descriptor == nil || h.descriptor.KeyLabel == nil { + return key + } + return h.descriptor.KeyLabel(key) +} + +// equivalentValues by default uses proto.Equal(). +func (h *descriptorHandler) equivalentValues(key string, oldValue, newValue proto.Message) bool { + if h.descriptor == nil || h.descriptor.ValueComparator == nil { + return proto.Equal(oldValue, newValue) + } + return h.descriptor.ValueComparator(key, oldValue, newValue) +} + +// validate returns nil if Validate is not provided (optional method). +func (h *descriptorHandler) validate(key string, value proto.Message) error { + if h.descriptor == nil || h.descriptor.Validate == nil { + return nil + } + return h.descriptor.Validate(key, value) +} + +// create returns ErrUnimplementedCreate if Create is not provided. +func (h *descriptorHandler) create(key string, value proto.Message) (metadata kvs.Metadata, err error) { + if h.descriptor == nil { + return + } + if h.descriptor.Create == nil { + return nil, kvs.ErrUnimplementedCreate + } + return h.descriptor.Create(key, value) +} + +// update is not called if Update is not provided (updateWithRecreate() returns true). 
+func (h *descriptorHandler) update(key string, oldValue, newValue proto.Message, oldMetadata kvs.Metadata) (newMetadata kvs.Metadata, err error) { + if h.descriptor == nil { + return oldMetadata, nil + } + return h.descriptor.Update(key, oldValue, newValue, oldMetadata) +} + +// updateWithRecreate either forwards the call to UpdateWithRecreate if defined +// by the descriptor, or decides based on the availability of the Update operation. +func (h *descriptorHandler) updateWithRecreate(key string, oldValue, newValue proto.Message, metadata kvs.Metadata) bool { + if h.descriptor == nil { + return false + } + if h.descriptor.Update == nil { + // without Update, re-creation is the only way + return true + } + if h.descriptor.UpdateWithRecreate == nil { + // by default it is assumed that any change can be applied using Update without + // re-creation + return false + } + return h.descriptor.UpdateWithRecreate(key, oldValue, newValue, metadata) +} + +// delete returns ErrUnimplementedDelete if Delete is not provided. +func (h *descriptorHandler) delete(key string, value proto.Message, metadata kvs.Metadata) error { + if h.descriptor == nil { + return nil + } + if h.descriptor.Delete == nil { + return kvs.ErrUnimplementedDelete + } + return h.descriptor.Delete(key, value, metadata) +} + +// isRetriableFailure first checks for errors returned by the handler itself. +// If the descriptor does not define IsRetriableFailure, it is assumed any failure +// can potentially be fixed by retry. 
+func (h *descriptorHandler) isRetriableFailure(err error) bool { + // first check for errors returned by the handler itself + handlerErrs := []error{kvs.ErrUnimplementedCreate, kvs.ErrUnimplementedDelete} + for _, handlerError := range handlerErrs { + if err == handlerError { + return false + } + } + if h.descriptor == nil || h.descriptor.IsRetriableFailure == nil { + return true + } + return h.descriptor.IsRetriableFailure(err) +} + +// dependencies returns an empty list if the descriptor does not define any. +func (h *descriptorHandler) dependencies(key string, value proto.Message) (deps []kvs.Dependency) { + if h.descriptor == nil || h.descriptor.Dependencies == nil { + return + } + return h.descriptor.Dependencies(key, value) +} + +// derivedValues returns an empty list if the descriptor does not define any. +func (h *descriptorHandler) derivedValues(key string, value proto.Message) (derives []kvs.KeyValuePair) { + if h.descriptor == nil || h.descriptor.DerivedValues == nil { + return + } + return h.descriptor.DerivedValues(key, value) +} + +// retrieve returns ableToRetrieve as false if the descriptor does not implement Retrieve. 
+func (h *descriptorHandler) retrieve(correlate []kvs.KVWithMetadata) (values []kvs.KVWithMetadata, ableToRetrieve bool, err error) { + if h.descriptor == nil || h.descriptor.Retrieve == nil { + return values, false, nil + } + values, err = h.descriptor.Retrieve(correlate) + return values, true, err +} diff --git a/plugins/kvscheduler/docs/kvscheduler.png b/plugins/kvscheduler/docs/kvscheduler.png new file mode 100755 index 0000000000..a83f80a985 Binary files /dev/null and b/plugins/kvscheduler/docs/kvscheduler.png differ diff --git a/plugins/kvscheduler/docs/migration.png b/plugins/kvscheduler/docs/migration.png new file mode 100755 index 0000000000..b093a6f446 Binary files /dev/null and b/plugins/kvscheduler/docs/migration.png differ diff --git a/plugins/kvscheduler/graphviz.go b/plugins/kvscheduler/graphviz.go new file mode 100644 index 0000000000..909590dc33 --- /dev/null +++ b/plugins/kvscheduler/graphviz.go @@ -0,0 +1,486 @@ +package kvscheduler + +import ( + "bytes" + "errors" + "fmt" + "io" + "net/http" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "text/template" + "time" + + "github.com/gogo/protobuf/proto" + kvs "github.com/ligato/vpp-agent/plugins/kvscheduler/api" + "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/graph" + "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/utils" + "github.com/unrolled/render" +) + +const ( + // txnArg allows to display graph at the time when the referenced transaction + // has just finalized + txnArg = "txn" // value = txn sequence number +) + +type depNode struct { + node *dotNode + label string + satisfied bool +} + +func (s *Scheduler) dotGraphHandler(formatter *render.Render) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + args := req.URL.Query() + graphRead := s.graph.Read() + defer graphRead.Release() + + var txn *kvs.RecordedTxn + timestamp := time.Now() + + // parse optional *txn* argument + if txnStr, withTxn := args[txnArg]; withTxn && 
len(txnStr) == 1 { + txnSeqNum, err := strconv.ParseUint(txnStr[0], 10, 64) + if err != nil { + s.logError(formatter.JSON(w, http.StatusInternalServerError, errorString{err.Error()})) + return + } + + txn = s.GetRecordedTransaction(txnSeqNum) + if txn == nil { + err := errors.New("transaction with such sequence number is not recorded") + s.logError(formatter.JSON(w, http.StatusNotFound, errorString{err.Error()})) + return + } + timestamp = txn.Stop + } + + graphSnapshot := graphRead.GetSnapshot(timestamp) + output, err := s.renderDotOutput(graphSnapshot, txn) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + if format := req.FormValue("format"); format == "dot" { + w.Write(output) + return + } + + img, err := dotToImage("", "svg", output) + if err != nil { + http.Error(w, fmt.Sprintf("%v\n%v", err, img), http.StatusInternalServerError) + return + } + + s.Log.Debug("serving graph image from:", img) + http.ServeFile(w, req, img) + } +} + +func (s *Scheduler) renderDotOutput(graphNodes []*graph.RecordedNode, txn *kvs.RecordedTxn) ([]byte, error) { + title := fmt.Sprintf("%d keys", len(graphNodes)) + updatedKeys := utils.NewMapBasedKeySet() + graphTimestamp := time.Now() + if txn != nil { + graphTimestamp = txn.Stop + title += fmt.Sprintf(" - SeqNum: %d (%s)", txn.SeqNum, graphTimestamp.Format(time.RFC822)) + for _, op := range txn.Executed { + updatedKeys.Add(op.Key) + } + } else { + title += " - current" + } + + cluster := NewDotCluster("nodes") + cluster.Attrs = dotAttrs{ + "bgcolor": "white", + "label": title, + "labelloc": "t", + "labeljust": "c", + "fontsize": "15", + "tooltip": "", + } + + // TODO: how to link transaction recording inside of the main cluster title (SeqNum: %d)? 
+ //if txn != nil { + // cluster.Attrs["href"] = fmt.Sprintf(txnHistoryURL + "?seq-num=%d", txn.SeqNum) + //} + + var ( + nodes []*dotNode + edges []*dotEdge + ) + + nodeMap := make(map[string]*dotNode) + edgeMap := make(map[string]*dotEdge) + + var getGraphNode = func(key string) *graph.RecordedNode { + for _, graphNode := range graphNodes { + if graphNode.Key == key { + return graphNode + } + } + return nil + } + + var processGraphNode = func(graphNode *graph.RecordedNode) *dotNode { + key := graphNode.Key + if n, ok := nodeMap[key]; ok { + return n + } + + attrs := make(dotAttrs) + attrs["pad"] = "0.01" + attrs["margin"] = "0.01" + attrs["href"] = fmt.Sprintf(keyTimelineURL+"?key=%s&time=%d", key, graphTimestamp.UnixNano()) + + if updatedKeys.Has(key) { + attrs["penwidth"] = "2" + attrs["color"] = "Gold" + } + + c := cluster + + label := graphNode.Label + var descriptorName string + if descriptorFlag := graphNode.GetFlag(DescriptorFlagName); descriptorFlag != nil { + descriptorName = descriptorFlag.GetValue() + } else { + // for missing dependencies + if descriptor := s.registry.GetDescriptorForKey(key); descriptor != nil { + descriptorName = descriptor.Name + if descriptor.KeyLabel != nil { + label = descriptor.KeyLabel(key) + } + } + } + + if label != "" { + attrs["label"] = label + } + + if descriptorName != "" { + attrs["fillcolor"] = "PaleGreen" + + if _, ok := c.Clusters[descriptorName]; !ok { + c.Clusters[descriptorName] = &dotCluster{ + ID: key, + Clusters: make(map[string]*dotCluster), + Attrs: dotAttrs{ + "penwidth": "0.8", + "fontsize": "16", + "label": fmt.Sprintf("< %s >", descriptorName), + "style": "filled", + "fillcolor": "#e6ecfa", + }, + } + } + c = c.Clusters[descriptorName] + } + + valueState := kvs.ValueState_MISSING // missing dependencies + stateFlag := graphNode.GetFlag(ValueStateFlagName) + if stateFlag != nil { + valueState = stateFlag.(*ValueStateFlag).valueState + } + switch valueState { + case kvs.ValueState_MISSING: + 
attrs["fillcolor"] = "Dimgray" + attrs["style"] = "dashed,filled" + case kvs.ValueState_UNIMPLEMENTED: + attrs["fillcolor"] = "Darkkhaki" + attrs["style"] = "dashed,filled" + case kvs.ValueState_REMOVED: + attrs["fontcolor"] = "White" + attrs["fillcolor"] = "Black" + attrs["style"] = "dashed,filled" + // case kvs.ValueState_CONFIGURED // leave default + case kvs.ValueState_OBTAINED: + attrs["fillcolor"] = "LightCyan" + case kvs.ValueState_DISCOVERED: + attrs["fillcolor"] = "Lime" + case kvs.ValueState_PENDING: + attrs["style"] = "dashed,filled" + attrs["fillcolor"] = "Pink" + case kvs.ValueState_INVALID: + attrs["fontcolor"] = "White" + attrs["fillcolor"] = "Maroon" + case kvs.ValueState_FAILED: + attrs["fillcolor"] = "Orangered" + case kvs.ValueState_RETRYING: + attrs["fillcolor"] = "Deeppink" + } + + value := graphNode.Value + if rec, ok := value.(*utils.RecordedProtoMessage); ok { + value = rec.Message + } + attrs["tooltip"] = fmt.Sprintf("[%s] %s\n-----\n%s", valueState, key, proto.MarshalTextString(value)) + + n := &dotNode{ + ID: key, + Attrs: attrs, + } + c.Nodes = append(c.Nodes, n) + nodeMap[key] = n + return n + } + + var addEdge = func(e *dotEdge) { + edgeKey := fmt.Sprintf("%s->%s", e.From.ID, e.To.ID) + if _, ok := edgeMap[edgeKey]; !ok { + edges = append(edges, e) + edgeMap[edgeKey] = e + } + } + + for _, graphNode := range graphNodes { + n := processGraphNode(graphNode) + + derived := graphNode.Targets.GetTargetsForRelation(DerivesRelation) + if derived != nil { + for _, target := range derived.Targets { + for _, dKey := range target.MatchingKeys.Iterate() { + dn := processGraphNode(getGraphNode(dKey)) + dn.Attrs["fillcolor"] = "LightYellow" + dn.Attrs["color"] = "bisque4" + dn.Attrs["style"] = "rounded,filled" + attrs := make(dotAttrs) + attrs["color"] = "bisque4" + attrs["arrowhead"] = "invempty" + e := &dotEdge{ + From: n, + To: dn, + Attrs: attrs, + } + addEdge(e) + } + } + } + + dependencies := 
graphNode.Targets.GetTargetsForRelation(DependencyRelation) + if dependencies != nil { + var deps []depNode + for _, target := range dependencies.Targets { + if target.MatchingKeys.Length() == 0 { + var dn *dotNode + if target.ExpectedKey != "" { + dn = processGraphNode(&graph.RecordedNode{ + Key: target.ExpectedKey, + }) + } else { + dn = processGraphNode(&graph.RecordedNode{ + Key: "? " + target.Label + " ?", + }) + } + deps = append(deps, depNode{node: dn, label: target.Label}) + } + for _, dKey := range target.MatchingKeys.Iterate() { + dn := processGraphNode(getGraphNode(dKey)) + deps = append(deps, depNode{node: dn, label: target.Label, satisfied: true}) + } + } + for _, d := range deps { + attrs := make(dotAttrs) + attrs["tooltip"] = d.label + if !d.satisfied { + attrs["color"] = "Red" + } + e := &dotEdge{ + From: n, + To: d.node, + Attrs: attrs, + } + addEdge(e) + } + } + } + + hostname, _ := os.Hostname() + footer := fmt.Sprintf("KVScheduler Graph - generated at %s on %s (PID: %d)", + time.Now().Format(time.RFC1123), hostname, os.Getpid(), + ) + + dot := &dotGraph{ + Title: footer, + Minlen: minlen, + Cluster: cluster, + Nodes: nodes, + Edges: edges, + Options: map[string]string{ + "minlen": fmt.Sprint(minlen), + }, + } + + var buf bytes.Buffer + if err := WriteDot(&buf, dot); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +var ( + minlen uint = 1 +) + +// location of dot executable for converting from .dot to .svg +// it's usually at: /usr/bin/dot +var dotExe string + +// dotToImage generates a SVG using the 'dot' utility, returning the filepath +func dotToImage(outfname string, format string, dot []byte) (string, error) { + if dotExe == "" { + dot, err := exec.LookPath("dot") + if err != nil { + return "", fmt.Errorf("unable to find program 'dot', please install it or check your PATH") + } + dotExe = dot + } + + var img string + if outfname == "" { + img = filepath.Join(os.TempDir(), fmt.Sprintf("kvscheduler-graph.%s", format)) + } 
else { + img = fmt.Sprintf("%s.%s", outfname, format) + } + + cmd := exec.Command(dotExe, fmt.Sprintf("-T%s", format), "-o", img) + cmd.Stdin = bytes.NewReader(dot) + if out, err := cmd.CombinedOutput(); err != nil { + return string(out), err + } + + return img, nil +} + +const tmplGraph = `digraph kvscheduler { + ranksep=.5 + //nodesep=.1 + label="{{.Title}}"; + labelloc="b"; + labeljust="c"; + fontsize="12"; + fontname="Ubuntu"; + rankdir="LR"; + bgcolor="lightgray"; + style="solid"; + penwidth="1"; + pad="0.04"; + nodesep="{{.Options.nodesep}}"; + ordering="out"; + + node [shape="box" style="filled" fontname="Ubuntu" fillcolor="honeydew" penwidth="1.0" margin="0.03,0.0"]; + edge [minlen="{{.Options.minlen}}"] + + {{template "cluster" .Cluster}} + + {{- range .Edges}} + {{template "edge" .}} + {{- end}} + + {{range .Nodes}} + {{template "node" .}} + {{- end}} +} +` +const tmplNode = `{{define "edge" -}} + {{printf "%q -> %q [ %s ]" .From .To .Attrs}} +{{- end}}` + +const tmplEdge = `{{define "node" -}} + {{printf "%q [ %s ]" .ID .Attrs}} +{{- end}}` + +const tmplCluster = `{{define "cluster" -}} + {{printf "subgraph %q {" .}} + {{printf "%s" .Attrs.Lines}} + {{range .Nodes}} + {{template "node" .}} + {{- end}} + {{range .Clusters}} + {{template "cluster" .}} + {{- end}} + {{println "}" }} +{{- end}}` + +type dotGraph struct { + Title string + Minlen uint + Attrs dotAttrs + Cluster *dotCluster + Nodes []*dotNode + Edges []*dotEdge + Options map[string]string +} + +type dotCluster struct { + ID string + Clusters map[string]*dotCluster + Nodes []*dotNode + Attrs dotAttrs +} + +type dotNode struct { + ID string + Attrs dotAttrs +} + +type dotEdge struct { + From *dotNode + To *dotNode + Attrs dotAttrs +} + +type dotAttrs map[string]string + +func NewDotCluster(id string) *dotCluster { + return &dotCluster{ + ID: id, + Clusters: make(map[string]*dotCluster), + Attrs: make(dotAttrs), + } +} + +func (c *dotCluster) String() string { + return fmt.Sprintf("cluster_%s", 
c.ID) +} +func (n *dotNode) String() string { + return n.ID +} + +func (p dotAttrs) List() []string { + l := []string{} + for k, v := range p { + l = append(l, fmt.Sprintf("%s=%q", k, v)) + } + return l +} + +func (p dotAttrs) String() string { + return strings.Join(p.List(), " ") +} + +func (p dotAttrs) Lines() string { + return fmt.Sprintf("%s;", strings.Join(p.List(), ";\n")) +} + +func WriteDot(w io.Writer, g *dotGraph) error { + t := template.New("dot") + for _, s := range []string{tmplCluster, tmplNode, tmplEdge, tmplGraph} { + if _, err := t.Parse(s); err != nil { + return err + } + } + var buf bytes.Buffer + if err := t.Execute(&buf, g); err != nil { + return err + } + _, err := buf.WriteTo(w) + return err +} diff --git a/plugins/kvscheduler/internal/graph/graph_api.go b/plugins/kvscheduler/internal/graph/graph_api.go new file mode 100644 index 0000000000..9c019e7408 --- /dev/null +++ b/plugins/kvscheduler/internal/graph/graph_api.go @@ -0,0 +1,366 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package graph + +import ( + "bytes" + "fmt" + "time" + + "github.com/gogo/protobuf/proto" + + "github.com/ligato/cn-infra/idxmap" + . 
"github.com/ligato/vpp-agent/plugins/kvscheduler/api" + "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/utils" +) + +// Graph is an in-memory graph representation of key-value pairs and their +// relations, where nodes are kv-pairs and each relation is a separate set of direct +// labeled edges. +// +// The graph furthermore allows to associate metadata and flags (name:value pairs) +// with every node. It is possible to register instances of NamedMapping, each +// for a different set of selected nodes, and the graph will keep them up-to-date +// with the latest value-label->metadata associations. +// +// The graph provides various getter method, for example it is possible to select +// a set of nodes using a key selector and/or a flag selector. +// As for editing, Graph allows to prepare new changes and then save them or let +// them get discarded by GC. +// +// The graph supports multiple-readers single-writer access, i.e. it is assumed +// there is no write-concurrency. +// +// Last but not least, the graph maintains a history of revisions for all nodes +// that have ever existed. The history of changes and a graph snapshot from +// a selected moment in time are exposed via ReadAccess interface. +type Graph interface { + // Read returns a graph handle for read-only access. + // The graph supports multiple concurrent readers. + // Release eventually using Release() method. + Read() ReadAccess // acquires R-lock + + // Write returns a graph handle for read-write access. + // The graph supports at most one writer at a time - i.e. it is assumed + // there is no write-concurrency. + // The changes are propagated to the graph using Save(). + // If is true, the changes will be recorded once the handle is + // released. + // Release eventually using Release() method. + Write(record bool) RWAccess +} + +// ReadAccess lists operations provided by the read-only graph handle. +type ReadAccess interface { + // GetMetadataMap returns registered metadata map. 
+ GetMetadataMap(mapName string) idxmap.NamedMapping + + // GetKeys returns sorted keys. + GetKeys() []string + + // GetNode returns node with the given key or nil if the key is unused. + GetNode(key string) Node + + // GetNodes returns a set of nodes matching the key selector (can be nil) + // and every provided flag selector. + GetNodes(keySelector KeySelector, flagSelectors ...FlagSelector) []Node + + // GetFlagStats returns stats for a given flag. + GetFlagStats(flagName string, filter KeySelector) FlagStats + + // GetNodeTimeline returns timeline of all node revisions, ordered from + // the oldest to the newest. + GetNodeTimeline(key string) []*RecordedNode + + // GetSnapshot returns the snapshot of the graph at a given time. + GetSnapshot(time time.Time) []*RecordedNode + + // Dump returns a human-readable string representation of the current graph + // content for debugging purposes. + Dump() string + + // Release releases the graph handle (both Read() & Write() should end with + // release). + Release() // for reader release R-lock +} + +// RWAccess lists operations provided by the read-write graph handle. +type RWAccess interface { + ReadAccess + + // RegisterMetadataMap registers new metadata map for value-label->metadata + // associations of selected node. + RegisterMetadataMap(mapName string, mapping idxmap.NamedMappingRW) + + // SetNode creates new node or returns read-write handle to an existing node. + // The changes are propagated to the graph only after Save() is called. + SetNode(key string) NodeRW + + // DeleteNode deletes node with the given key. + // Returns true if the node really existed before the operation. + DeleteNode(key string) bool + + // Save propagates all changes to the graph. + Save() // noop if no changes performed, acquires RW-lock for the time of the operation +} + +// Node is a read-only handle to a single graph node. +type Node interface { + // GetKey returns the key associated with the node. 
+ GetKey() string + + // GetLabel returns the label associated with this node. + GetLabel() string + + // GetKey returns the value associated with the node. + GetValue() proto.Message + + // GetFlag returns reference to the given flag or nil if the node doesn't have + // this flag associated. + GetFlag(name string) Flag + + // GetMetadata returns the value metadata associated with the node. + GetMetadata() interface{} + + // GetTargets returns a set of nodes, indexed by relation labels, that the + // edges of the given relation points to. + GetTargets(relation string) RuntimeTargetsByLabel + + // GetSources returns a set of nodes with edges of the given relation + // pointing to this node. + GetSources(relation string) []Node +} + +// NodeRW is a read-write handle to a single graph node. +type NodeRW interface { + Node + + // SetLabel associates given label with this node. + SetLabel(label string) + + // SetValue associates given value with this node. + SetValue(value proto.Message) + + // SetFlags associates given flag with this node. + SetFlags(flags ...Flag) + + // DelFlags removes given flag from this node. + DelFlags(names ...string) + + // SetMetadataMap chooses metadata map to be used to store the association + // between this node's value label and metadata. + SetMetadataMap(mapName string) + + // SetMetadata associates given value metadata with this node. + SetMetadata(metadata interface{}) + + // SetTargets provides definition of all edges pointing from this node. + SetTargets(targets []RelationTargetDef) +} + +// Flag is a name:value pair. +type Flag interface { + // GetName should return name of the flag. + GetName() string + + // GetValue return the associated value. Can be empty. + GetValue() string +} + +// FlagSelector is used to select node with(out) given flags assigned. 
+// +// Flag value=="" => any value +type FlagSelector struct { + with bool + flags []Flag +} + +// WithFlags creates flag selector selecting nodes that have all the listed flags +// assigned. +func WithFlags(flags ...Flag) FlagSelector { + return FlagSelector{with: true, flags: flags} +} + +// WithoutFlags creates flag selector selecting nodes that do not have +// any of the listed flags assigned. +func WithoutFlags(flags ...Flag) FlagSelector { + return FlagSelector{flags: flags} +} + +// RelationTargetDef is a definition of a relation between a source node and a set +// of target nodes. +type RelationTargetDef struct { + // Relation name. + Relation string + + // Label for the edge. + Label string // mandatory, unique for a given (source, relation) + + // Either Key or Selector are defined: + + // Key of the target node. + Key string + + // Selector selecting a set of target nodes. + Selector KeySelector +} + +// Targets groups relation targets with the same label. +// Target nodes are not referenced directly, instead via their keys (suitable +// for recording). +type Targets struct { + Label string + ExpectedKey string // empty if AnyOf predicate is used instead + MatchingKeys utils.KeySet +} + +// TargetsByLabel is a slice of single-relation targets, grouped by labels. +type TargetsByLabel []*Targets + +// String returns human-readable string representation of TargetsByLabel. +func (t TargetsByLabel) String() string { + str := "{" + for idx, targets := range t { + if idx > 0 { + str += ", " + } + str += fmt.Sprintf("%s->%s", targets.Label, targets.MatchingKeys.String()) + } + str += "}" + return str +} + +// RelationTargets groups targets of the same relation. +type RelationTargets struct { + Relation string + Targets TargetsByLabel +} + +// GetTargetsForLabel returns targets (keys) for the given label. 
+func (t *RelationTargets) GetTargetsForLabel(label string) *Targets { + for _, targets := range t.Targets { + if targets.Label == label { + return targets + } + } + return nil +} + +// TargetsByRelation is a slice of all targets, grouped by relations. +type TargetsByRelation []*RelationTargets + +// GetTargetsForRelation returns targets (keys by label) for the given relation. +func (t TargetsByRelation) GetTargetsForRelation(relation string) *RelationTargets { + for _, relTargets := range t { + if relTargets.Relation == relation { + return relTargets + } + } + return nil +} + +// String returns human-readable string representation of TargetsByRelation. +func (t TargetsByRelation) String() string { + str := "{" + for idx, relTargets := range t { + if idx > 0 { + str += ", " + } + str += fmt.Sprintf("%s->%s", relTargets.Relation, relTargets.Targets.String()) + } + str += "}" + return str +} + +// RuntimeTargets groups relation targets with the same label. +// Targets are stored as direct runtime references pointing to instances of target +// nodes. +type RuntimeTargets struct { + Label string + Nodes []Node +} + +// RuntimeTargetsByLabel is a slice of single-relation (runtime reference-based) +// targets, grouped by labels. +type RuntimeTargetsByLabel []*RuntimeTargets + +// GetTargetsForLabel returns targets (nodes) for the given label. +func (rt RuntimeTargetsByLabel) GetTargetsForLabel(label string) *RuntimeTargets { + for _, targets := range rt { + if targets.Label == label { + return targets + } + } + return nil +} + +// RecordedNode saves all attributes of a single node revision. 
+type RecordedNode struct { + Since time.Time + Until time.Time + Key string + Label string + Value proto.Message + Flags RecordedFlags + MetadataFields map[string][]string // field name -> values + Targets TargetsByRelation + TargetUpdateOnly bool // true if only runtime Targets have changed since the last rev +} + +// GetFlag returns reference to the given flag or nil if the node didn't have +// this flag associated at the time when it was recorded. +func (node *RecordedNode) GetFlag(name string) Flag { + for _, flag := range node.Flags.Flags { + if flag.GetName() == name { + return flag + } + } + return nil +} + +// RecordedFlags is a record of assigned flags at a given time. +type RecordedFlags struct { + Flags []Flag +} + +// MarshalJSON marshalls recorded flags into JSON. +func (rf RecordedFlags) MarshalJSON() ([]byte, error) { + buffer := bytes.NewBufferString("{") + for idx, flag := range rf.Flags { + if idx > 0 { + buffer.WriteString(",") + } + buffer.WriteString(fmt.Sprintf("\"%s\":\"%s\"", flag.GetName(), flag.GetValue())) + } + buffer.WriteString("}") + return buffer.Bytes(), nil +} + +// GetFlag returns reference to the given flag or nil if the node hasn't had +// this flag associated at the given time. +func (rf RecordedFlags) GetFlag(name string) Flag { + for _, flag := range rf.Flags { + if flag.GetName() == name { + return flag + } + } + return nil +} + +// FlagStats is a summary of the usage for a given flag. +type FlagStats struct { + TotalCount uint // number of revisions with the given flag assigned + PerValueCount map[string]uint // number of revisions with the given flag having the given value +} diff --git a/plugins/kvscheduler/internal/graph/graph_impl.go b/plugins/kvscheduler/internal/graph/graph_impl.go new file mode 100644 index 0000000000..d2b5481f44 --- /dev/null +++ b/plugins/kvscheduler/internal/graph/graph_impl.go @@ -0,0 +1,75 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package graph + +import ( + "sync" + "time" +) + +const ( + // how often (at most) the log of previous revisions gets trimmed to remove + // records too old to keep + oldRevsTrimmingPeriod = 1 * time.Minute +) + +// kvgraph implements Graph interface. +type kvgraph struct { + rwLock sync.RWMutex + graph *graphR + + startTime time.Time + lastRevTrimming time.Time // last time the history of revisions was trimmed + recordOldRevs bool + recordAgeLimit time.Duration + permanentInitPeriod time.Duration +} + +// NewGraph creates and new instance of key-value graph. +// if enabled, will cause the graph to record the previous +// revisions of every node that have ever existed. is in minutes +// and allows to limit the maximum age of a record to keep, avoiding infinite +// memory usage growth. The initial phase of the execution is, however, of greater +// significance and allows to keep records from that period +// permanently in memory. +func NewGraph(recordOldRevs bool, recordAgeLimit, permanentInitPeriod uint32) Graph { + kvgraph := &kvgraph{ + startTime: time.Now(), + lastRevTrimming: time.Now(), + recordOldRevs: recordOldRevs, + recordAgeLimit: time.Duration(recordAgeLimit) * time.Minute, + permanentInitPeriod: time.Duration(permanentInitPeriod) * time.Minute, + } + kvgraph.graph = newGraphR() + kvgraph.graph.parent = kvgraph + return kvgraph +} + +// Read returns a graph handle for read-only access. 
+// The graph supports multiple concurrent readers. +// Release eventually using Release() method. +func (kvgraph *kvgraph) Read() ReadAccess { + kvgraph.rwLock.RLock() + return kvgraph.graph +} + +// Write returns a graph handle for read-write access. +// The graph supports at most one writer at a time - i.e. it is assumed +// there is no write-concurrency. +// The changes are propagated to the graph using Save(). +// Release eventually using Release() method. +func (kvgraph *kvgraph) Write(record bool) RWAccess { + return newGraphRW(kvgraph.graph, record) +} diff --git a/plugins/kvscheduler/internal/graph/graph_read.go b/plugins/kvscheduler/internal/graph/graph_read.go new file mode 100644 index 0000000000..733a12baa6 --- /dev/null +++ b/plugins/kvscheduler/internal/graph/graph_read.go @@ -0,0 +1,322 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package graph + +import ( + "fmt" + "sort" + "strings" + "time" + + "github.com/ligato/cn-infra/idxmap" + + . "github.com/ligato/vpp-agent/plugins/kvscheduler/api" + "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/utils" +) + +// printDelimiter is used in pretty-printing of the graph. +const printDelimiter = ", " + +// graphR implements ReadAccess. 
+type graphR struct { + parent *kvgraph + nodes map[string]*node + mappings map[string]idxmap.NamedMappingRW + timeline map[string][]*RecordedNode // key -> node records (from the oldest to the newest) +} + +// newGraphR creates and initializes a new instance of graphR. +func newGraphR() *graphR { + return &graphR{ + nodes: make(map[string]*node), + mappings: make(map[string]idxmap.NamedMappingRW), + timeline: make(map[string][]*RecordedNode), + } +} + +// GetMetadataMap returns registered metadata map. +func (graph *graphR) GetMetadataMap(mapName string) idxmap.NamedMapping { + metadataMap, has := graph.mappings[mapName] + if !has { + return nil + } + return metadataMap +} + +// GetNode returns node with the given key or nil if the key is unused. +func (graph *graphR) GetNode(key string) Node { + node, has := graph.nodes[key] + if !has { + return nil + } + return node.nodeR +} + +// GetNodes returns a set of nodes matching the key selector (can be nil) +// and every provided flag selector. +func (graph *graphR) GetNodes(keySelector KeySelector, flagSelectors ...FlagSelector) (nodes []Node) { + for key, node := range graph.nodes { + if keySelector != nil && !keySelector(key) { + continue + } + selected := true + for _, flagSelector := range flagSelectors { + for _, flag := range flagSelector.flags { + hasFlag := false + for _, nodeFlag := range node.flags { + if nodeFlag.GetName() == flag.GetName() && + (flag.GetValue() == "" || (nodeFlag.GetValue() == flag.GetValue())) { + hasFlag = true + break + } + } + if hasFlag != flagSelector.with { + selected = false + break + } + } + if !selected { + break + } + } + if !selected { + continue + } + nodes = append(nodes, node.nodeR) + } + return nodes +} + +// GetNodeTimeline returns timeline of all node revisions, ordered from +// the oldest to the newest. 
+func (graph *graphR) GetNodeTimeline(key string) []*RecordedNode { + timeline, has := graph.timeline[key] + if !has { + return nil + } + return timeline +} + +// GetFlagStats returns stats for a given flag. +func (graph *graphR) GetFlagStats(flagName string, selector KeySelector) FlagStats { + stats := FlagStats{PerValueCount: make(map[string]uint)} + + for key, timeline := range graph.timeline { + if selector != nil && !selector(key) { + continue + } + for _, record := range timeline { + if record.TargetUpdateOnly { + continue + } + if flag := record.Flags.GetFlag(flagName); flag != nil { + flagValue := flag.GetValue() + stats.TotalCount++ + if _, hasValue := stats.PerValueCount[flagValue]; !hasValue { + stats.PerValueCount[flagValue] = 0 + } + stats.PerValueCount[flagValue]++ + } + } + } + + return stats +} + +// GetSnapshot returns the snapshot of the graph at a given time. +func (graph *graphR) GetSnapshot(time time.Time) (nodes []*RecordedNode) { + for _, timeline := range graph.timeline { + for _, record := range timeline { + if record.Since.Before(time) && + (record.Until.IsZero() || record.Until.After(time)) { + nodes = append(nodes, record) + break + } + } + } + return nodes +} + +// GetKeys returns sorted keys. +func (graph *graphR) GetKeys() []string { + var keys []string + for key := range graph.nodes { + keys = append(keys, key) + } + sort.Slice(keys, func(i, j int) bool { + return keys[i] < keys[j] + }) + return keys +} + +// Dump returns a human-readable string representation of the current graph +// content for debugging purposes. 
+func (graph *graphR) Dump() string { + // order nodes by keys + var keys []string + for key := range graph.nodes { + keys = append(keys, key) + } + sort.Slice(keys, func(i, j int) bool { + return keys[i] < keys[j] + }) + + var buf strings.Builder + graphInfo := fmt.Sprintf("%d nodes", len(keys)) + buf.WriteString("+======================================================================================================================+\n") + buf.WriteString(fmt.Sprintf("| GRAPH DUMP %105s |\n", graphInfo)) + buf.WriteString("+======================================================================================================================+\n") + + for i, key := range keys { + node := graph.nodes[key] + + buf.WriteString(fmt.Sprintf("| Key: %111q |\n", key)) + if label := node.GetLabel(); label != key { + buf.WriteString(fmt.Sprintf("| Label: %109s |\n", label)) + } + buf.WriteString(fmt.Sprintf("| Value: %109s |\n", utils.ProtoToString(node.GetValue()))) + buf.WriteString(fmt.Sprintf("| Flags: %109v |\n", prettyPrintFlags(node.flags))) + if len(node.targets) > 0 { + buf.WriteString(fmt.Sprintf("| Targets: %107v |\n", prettyPrintTargets(node.targets))) + } + if len(node.sources) > 0 { + buf.WriteString(fmt.Sprintf("| Sources: %107v |\n", prettyPrintSources(node.sources))) + } + if metadata := graph.getMetadataFields(node); len(metadata) > 0 { + buf.WriteString(fmt.Sprintf("| Metadata: %106v |\n", metadata)) + } + if i+1 != len(keys) { + buf.WriteString("+----------------------------------------------------------------------------------------------------------------------+\n") + } + } + buf.WriteString("+----------------------------------------------------------------------------------------------------------------------+\n") + + return buf.String() +} + +// Release releases the graph handle (both Read() & Write() should end with +// release). 
+func (graph *graphR) Release() { + graph.parent.rwLock.RUnlock() +} + +// copyNodesOnly returns a deep-copy of the graph, excluding the timelines +// and the map with mappings. +func (graph *graphR) copyNodesOnly() *graphR { + graphCopy := &graphR{ + parent: graph.parent, + nodes: make(map[string]*node), + } + for key, node := range graph.nodes { + nodeCopy := node.copy() + nodeCopy.graph = graphCopy + graphCopy.nodes[key] = newNode(nodeCopy) + } + return graphCopy +} + +// recordNode builds a record for the node to be added into the timeline. +func (graph *graphR) recordNode(node *node, targetUpdateOnly bool) *RecordedNode { + record := &RecordedNode{ + Since: time.Now(), + Key: node.key, + Label: node.label, + Value: utils.RecordProtoMessage(node.value), + Flags: RecordedFlags{Flags: node.flags}, + MetadataFields: graph.getMetadataFields(node), // returned already copied + Targets: node.targets, // no need to copy, never changed in graphR + TargetUpdateOnly: targetUpdateOnly, + } + return record +} + +// getMetadataFields returns secondary fields from metadata attached to the given node. +func (graph *graphR) getMetadataFields(node *node) map[string][]string { + writeCopy := graph.parent.graph != graph + if !writeCopy && node.metadataAdded { + mapping := graph.mappings[node.metadataMap] + return mapping.ListFields(node.label) + } + return nil +} + +// prettyPrintFlags returns nicely formatted string representation of the given list of flags. +func prettyPrintFlags(flags []Flag) string { + var str string + for idx, flag := range flags { + if flag.GetValue() == "" { + str += flag.GetName() + } else { + str += fmt.Sprintf("%s:<%s>", flag.GetName(), flag.GetValue()) + } + if idx < len(flags)-1 { + str += printDelimiter + } + } + return str +} + +// prettyPrintTargets returns nicely formatted relation targets. 
+func prettyPrintTargets(targets TargetsByRelation) string { + if len(targets) == 0 { + return "" + } + var str string + idx := 0 + for _, relation := range targets { + str += fmt.Sprintf("[%s]{%s}", relation.Relation, prettyPrintEdges(relation.Targets)) + if idx < len(targets)-1 { + str += printDelimiter + } + idx++ + } + return str +} + +// prettyPrintSources returns nicely formatted relation sources. +func prettyPrintSources(sources []*relationSources) string { + if len(sources) == 0 { + return "" + } + var str string + idx := 0 + for _, relSources := range sources { + str += fmt.Sprintf("[%s]%s", relSources.relation, relSources.sources.String()) + if idx < len(sources)-1 { + str += printDelimiter + } + idx++ + } + return str +} + +// prettyPrintEdges returns nicely formatted node edges. +func prettyPrintEdges(edges TargetsByLabel) string { + var str string + idx := 0 + for _, edge := range edges { + if edge.MatchingKeys.Length() == 1 && edge.MatchingKeys.Has(edge.Label) { + // special case: there 1:1 between label and the key + str += edge.Label + } else { + str += edge.Label + " -> " + edge.MatchingKeys.String() + } + if idx < len(edges)-1 { + str += printDelimiter + } + idx++ + } + return str +} diff --git a/plugins/kvscheduler/internal/graph/graph_test.go b/plugins/kvscheduler/internal/graph/graph_test.go new file mode 100644 index 0000000000..a9bb867881 --- /dev/null +++ b/plugins/kvscheduler/internal/graph/graph_test.go @@ -0,0 +1,836 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package graph + +import ( + "testing" + "time" + + "github.com/gogo/protobuf/proto" + . "github.com/onsi/gomega" + + . "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/test" + . "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/utils" +) + +const ( + minutesInOneDay = uint32(1440) + minutesInOneHour = uint32(60) +) + +func TestEmptyGraph(t *testing.T) { + RegisterTestingT(t) + + graph := NewGraph(true, minutesInOneDay, minutesInOneHour) + Expect(graph).ToNot(BeNil()) + + graphR := graph.Read() + Expect(graphR).ToNot(BeNil()) + + Expect(graphR.GetNode(keyA1)).To(BeNil()) + Expect(graphR.GetNodeTimeline(keyA1)).To(BeEmpty()) + Expect(graphR.GetNodes(prefixASelector)).To(BeEmpty()) + Expect(graphR.GetMetadataMap(metadataMapA)).To(BeNil()) + Expect(graphR.GetSnapshot(time.Now())).To(BeEmpty()) + flagStats := graphR.GetFlagStats(ColorFlagName, nil) + Expect(flagStats.TotalCount).To(BeEquivalentTo(0)) + Expect(flagStats.PerValueCount).To(BeEmpty()) + graphR.Release() +} + +func TestSingleNode(t *testing.T) { + RegisterTestingT(t) + + startTime := time.Now() + + graph := NewGraph(true, minutesInOneDay, minutesInOneHour) + graphW := graph.Write(true) + + graphW.RegisterMetadataMap(metadataMapA, NewNameToInteger(metadataMapA)) + + nodeW := graphW.SetNode(keyA1) + // new node, everything except the key is unset: + Expect(nodeW.GetKey()).To(BeEquivalentTo(keyA1)) + Expect(nodeW.GetValue()).To(BeNil()) + Expect(nodeW.GetTargets(relation1)).To(BeEmpty()) + Expect(nodeW.GetSources(relation1)).To(BeEmpty()) + Expect(nodeW.GetMetadata()).To(BeNil()) + Expect(nodeW.GetFlag(ColorFlagName)).To(BeNil()) + + // set attributes: + nodeW.SetLabel(value1Label) + nodeW.SetValue(value1) + nodeW.SetMetadata(&OnlyInteger{Integer: 1}) + nodeW.SetMetadataMap(metadataMapA) + nodeW.SetFlags(ColorFlag(Red), AbstractFlag()) + + // check attributes: + 
Expect(nodeW.GetLabel()).To(Equal(value1Label)) + Expect(nodeW.GetValue()).To(Equal(value1)) + Expect(nodeW.GetMetadata().(MetaWithInteger).GetInteger()).To(Equal(1)) + flag := nodeW.GetFlag(ColorFlagName) + Expect(flag).ToNot(BeNil()) + colorFlag := flag.(*ColorFlagImpl) + Expect(colorFlag.Color).To(Equal(Red)) + Expect(nodeW.GetFlag(AbstractFlagName)).ToNot(BeNil()) + Expect(nodeW.GetFlag(TemporaryFlagName)).To(BeNil()) + Expect(nodeW.GetTargets(relation1)).To(BeEmpty()) + Expect(nodeW.GetSources(relation1)).To(BeEmpty()) + + // not applied into the graph until saved + graphR := graph.Read() + Expect(graphR.GetNode(keyA1)).To(BeNil()) + Expect(graphR.GetMetadataMap(metadataMapA)).To(BeNil()) + graphR.Release() + + // save new node + graphW.Save() + graphW.Release() + + // check that the new node was saved correctly + graphR = graph.Read() + nodeR := graphR.GetNode(keyA1) + Expect(nodeR).ToNot(BeNil()) + Expect(nodeR.GetLabel()).To(Equal(value1Label)) + Expect(nodeR.GetValue()).To(Equal(value1)) + Expect(nodeR.GetMetadata().(MetaWithInteger).GetInteger()).To(Equal(1)) + flag = nodeR.GetFlag(ColorFlagName) + Expect(flag).ToNot(BeNil()) + colorFlag = flag.(*ColorFlagImpl) + Expect(colorFlag.Color).To(Equal(Red)) + Expect(nodeR.GetFlag(AbstractFlagName)).ToNot(BeNil()) + Expect(nodeR.GetFlag(TemporaryFlagName)).To(BeNil()) + Expect(nodeR.GetTargets(relation1)).To(BeEmpty()) + Expect(nodeR.GetSources(relation1)).To(BeEmpty()) + + // check metadata + metaMap := graphR.GetMetadataMap(metadataMapA) + Expect(metaMap).ToNot(BeNil()) + Expect(metaMap.ListAllNames()).To(Equal([]string{value1Label})) + intMap := metaMap.(NameToInteger) + metadata, exists := intMap.LookupByName(value1Label) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(Equal(1)) + label, metadata, exists := intMap.LookupByIndex(1) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(Equal(1)) + Expect(label).To(Equal(value1Label)) + + // check history + flagStats := 
graphR.GetFlagStats(ColorFlagName, prefixASelector) + Expect(flagStats.TotalCount).To(BeEquivalentTo(1)) + Expect(flagStats.PerValueCount).To(BeEquivalentTo(map[string]uint{Red.String(): 1})) + timeline := graphR.GetNodeTimeline(keyA1) + Expect(timeline).To(HaveLen(1)) + record := timeline[0] + Expect(record.Key).To(Equal(keyA1)) + Expect(record.Since.After(startTime)).To(BeTrue()) + Expect(record.Since.Before(time.Now())).To(BeTrue()) + Expect(record.Until.IsZero()).To(BeTrue()) + Expect(record.Label).To(Equal(value1Label)) + Expect(proto.Equal(record.Value, RecordProtoMessage(value1))).To(BeTrue()) + Expect(record.Targets).To(BeEmpty()) + Expect(record.TargetUpdateOnly).To(BeFalse()) + Expect(record.MetadataFields).To(BeEquivalentTo(map[string][]string{IntegerKey: {"1"}})) + Expect(record.Flags).To(BeEquivalentTo(RecordedFlags{[]Flag{ColorFlag(Red), AbstractFlag()}})) +} + +func TestMultipleNodes(t *testing.T) { + RegisterTestingT(t) + + startTime := time.Now() + graph := buildGraph(nil, true, true, selectNodesToBuild(1, 2, 3, 4)) + + // check graph content + graphR := graph.Read() + + // -> node1: + node1 := graphR.GetNode(keyA1) + Expect(node1).ToNot(BeNil()) + Expect(node1.GetLabel()).To(Equal(value1Label)) + Expect(node1.GetValue()).To(Equal(value1)) + Expect(node1.GetMetadata().(MetaWithInteger).GetInteger()).To(Equal(1)) + flag := node1.GetFlag(ColorFlagName) + Expect(flag).ToNot(BeNil()) + colorFlag := flag.(*ColorFlagImpl) + Expect(colorFlag.Color).To(Equal(Red)) + Expect(node1.GetFlag(AbstractFlagName)).ToNot(BeNil()) + Expect(node1.GetFlag(TemporaryFlagName)).To(BeNil()) + Expect(node1.GetTargets(relation1)).To(HaveLen(1)) + checkTargets(node1, relation1, "node2", keyA2) + checkSources(node1, relation1, keyB1) + Expect(node1.GetTargets(relation2)).To(HaveLen(1)) + checkTargets(node1, relation2, "prefixB", keyB1) + checkSources(node1, relation2, keyA3) + + // -> node2: + node2 := graphR.GetNode(keyA2) + Expect(node2).ToNot(BeNil()) + 
Expect(node2.GetLabel()).To(Equal(value2Label)) + Expect(node2.GetValue()).To(Equal(value2)) + Expect(node2.GetMetadata().(MetaWithInteger).GetInteger()).To(Equal(2)) + flag = node2.GetFlag(ColorFlagName) + Expect(flag).ToNot(BeNil()) + colorFlag = flag.(*ColorFlagImpl) + Expect(colorFlag.Color).To(Equal(Blue)) + Expect(node2.GetFlag(AbstractFlagName)).To(BeNil()) + Expect(node2.GetFlag(TemporaryFlagName)).To(BeNil()) + Expect(node2.GetTargets(relation1)).To(HaveLen(1)) + checkTargets(node2, relation1, "node3", keyA3) + checkSources(node2, relation1, keyA1, keyB1) + Expect(node2.GetTargets(relation2)).To(HaveLen(0)) + checkSources(node2, relation2, keyA3) + + // -> node3: + node3 := graphR.GetNode(keyA3) + Expect(node3).ToNot(BeNil()) + Expect(node3.GetLabel()).To(Equal(value3Label)) + Expect(node3.GetValue()).To(Equal(value3)) + Expect(node3.GetMetadata().(MetaWithInteger).GetInteger()).To(Equal(3)) + flag = node3.GetFlag(ColorFlagName) + Expect(flag).ToNot(BeNil()) + colorFlag = flag.(*ColorFlagImpl) + Expect(colorFlag.Color).To(Equal(Green)) + Expect(node3.GetFlag(AbstractFlagName)).ToNot(BeNil()) + Expect(node3.GetFlag(TemporaryFlagName)).ToNot(BeNil()) + Expect(node3.GetTargets(relation1)).To(BeEmpty()) + checkSources(node3, relation1, keyA2, keyB1) + Expect(node3.GetTargets(relation2)).To(HaveLen(2)) + checkTargets(node3, relation2, "node1+node2", keyA1, keyA2) + checkTargets(node3, relation2, "prefixB", keyB1) + checkSources(node3, relation2) + + // -> node4: + node4 := graphR.GetNode(keyB1) + Expect(node4).ToNot(BeNil()) + Expect(node4.GetLabel()).To(Equal(value4Label)) + Expect(node4.GetValue()).To(Equal(value4)) + Expect(node4.GetMetadata().(MetaWithInteger).GetInteger()).To(Equal(1)) + Expect(node4.GetFlag(ColorFlagName)).To(BeNil()) + Expect(node4.GetFlag(AbstractFlagName)).To(BeNil()) + Expect(node4.GetFlag(TemporaryFlagName)).ToNot(BeNil()) + Expect(node4.GetTargets(relation1)).To(HaveLen(1)) + checkTargets(node4, relation1, "prefixA", keyA1, keyA2, 
keyA3) + checkSources(node4, relation1) + Expect(node4.GetTargets(relation2)).To(HaveLen(1)) + checkTargets(node4, relation2, "non-existing-key") + checkSources(node4, relation2, keyA1, keyA3) + + // check metadata + + // -> metadata for prefixA: + metaMap := graphR.GetMetadataMap(metadataMapA) + Expect(metaMap).ToNot(BeNil()) + checkMetadataValues(metaMap, value1Label, value2Label, value3Label) + intMap := metaMap.(NameToInteger) + label, metadata, exists := intMap.LookupByIndex(1) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(Equal(1)) + Expect(label).To(Equal(value1Label)) + label, metadata, exists = intMap.LookupByIndex(2) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(Equal(2)) + Expect(label).To(Equal(value2Label)) + label, metadata, exists = intMap.LookupByIndex(3) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(Equal(3)) + Expect(label).To(Equal(value3Label)) + + // -> metadata for prefixB: + metaMap = graphR.GetMetadataMap(metadataMapB) + Expect(metaMap).ToNot(BeNil()) + checkMetadataValues(metaMap, value4Label) + intMap = metaMap.(NameToInteger) + label, metadata, exists = intMap.LookupByIndex(1) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(Equal(1)) + Expect(label).To(Equal(value4Label)) + + // check history + + // -> flags: + flagStats := graphR.GetFlagStats(ColorFlagName, prefixASelector) + Expect(flagStats.TotalCount).To(BeEquivalentTo(3)) + Expect(flagStats.PerValueCount).To(BeEquivalentTo(map[string]uint{ + Red.String(): 1, + Blue.String(): 1, + Green.String(): 1, + })) + flagStats = graphR.GetFlagStats(ColorFlagName, prefixBSelector) + Expect(flagStats.TotalCount).To(BeEquivalentTo(0)) + Expect(flagStats.PerValueCount).To(BeEmpty()) + flagStats = graphR.GetFlagStats(AbstractFlagName, nil) + Expect(flagStats.TotalCount).To(BeEquivalentTo(2)) + Expect(flagStats.PerValueCount).To(BeEquivalentTo(map[string]uint{"": 2})) + flagStats = graphR.GetFlagStats(TemporaryFlagName, nil) + 
Expect(flagStats.TotalCount).To(BeEquivalentTo(2)) + Expect(flagStats.PerValueCount).To(BeEquivalentTo(map[string]uint{"": 2})) + + // -> timeline node1: + timeline := graphR.GetNodeTimeline(keyA1) + Expect(timeline).To(HaveLen(1)) + record := timeline[0] + Expect(record.Key).To(Equal(keyA1)) + Expect(record.Since.After(startTime)).To(BeTrue()) + Expect(record.Since.Before(time.Now())).To(BeTrue()) + Expect(record.Until.IsZero()).To(BeTrue()) + Expect(record.Label).To(Equal(value1Label)) + Expect(proto.Equal(record.Value, RecordProtoMessage(value1))).To(BeTrue()) + Expect(record.Targets).To(HaveLen(2)) + checkRecordedTargets(record.Targets, relation1, 1, "node2", keyA2) + checkRecordedTargets(record.Targets, relation2, 1, "prefixB", keyB1) + Expect(record.TargetUpdateOnly).To(BeFalse()) + Expect(record.MetadataFields).To(BeEquivalentTo(map[string][]string{IntegerKey: {"1"}})) + Expect(record.Flags).To(BeEquivalentTo(RecordedFlags{[]Flag{ColorFlag(Red), AbstractFlag()}})) + + // -> timeline node2: + timeline = graphR.GetNodeTimeline(keyA2) + Expect(timeline).To(HaveLen(1)) + record = timeline[0] + Expect(record.Key).To(Equal(keyA2)) + Expect(record.Since.After(startTime)).To(BeTrue()) + Expect(record.Since.Before(time.Now())).To(BeTrue()) + Expect(record.Until.IsZero()).To(BeTrue()) + Expect(record.Label).To(Equal(value2Label)) + Expect(proto.Equal(record.Value, RecordProtoMessage(value2))).To(BeTrue()) + Expect(record.Targets).To(HaveLen(1)) + checkRecordedTargets(record.Targets, relation1, 1, "node3", keyA3) + Expect(record.TargetUpdateOnly).To(BeFalse()) + Expect(record.MetadataFields).To(BeEquivalentTo(map[string][]string{IntegerKey: {"2"}})) + Expect(record.Flags).To(BeEquivalentTo(RecordedFlags{[]Flag{ColorFlag(Blue)}})) + + // -> timeline node3: + timeline = graphR.GetNodeTimeline(keyA3) + Expect(timeline).To(HaveLen(1)) + record = timeline[0] + Expect(record.Key).To(Equal(keyA3)) + Expect(record.Since.After(startTime)).To(BeTrue()) + 
Expect(record.Since.Before(time.Now())).To(BeTrue()) + Expect(record.Until.IsZero()).To(BeTrue()) + Expect(record.Label).To(Equal(value3Label)) + Expect(proto.Equal(record.Value, RecordProtoMessage(value3))).To(BeTrue()) + Expect(record.Targets).To(HaveLen(1)) + checkRecordedTargets(record.Targets, relation2, 2, "node1+node2", keyA1, keyA2) + checkRecordedTargets(record.Targets, relation2, 2, "prefixB", keyB1) + Expect(record.TargetUpdateOnly).To(BeFalse()) + Expect(record.MetadataFields).To(BeEquivalentTo(map[string][]string{IntegerKey: {"3"}})) + Expect(record.Flags).To(BeEquivalentTo(RecordedFlags{[]Flag{ColorFlag(Green), AbstractFlag(), TemporaryFlag()}})) + + // -> timeline node4: + timeline = graphR.GetNodeTimeline(keyB1) + Expect(timeline).To(HaveLen(1)) + record = timeline[0] + Expect(record.Key).To(Equal(keyB1)) + Expect(record.Since.After(startTime)).To(BeTrue()) + Expect(record.Since.Before(time.Now())).To(BeTrue()) + Expect(record.Until.IsZero()).To(BeTrue()) + Expect(record.Label).To(Equal(value4Label)) + Expect(proto.Equal(record.Value, RecordProtoMessage(value4))).To(BeTrue()) + Expect(record.Targets).To(HaveLen(2)) + checkRecordedTargets(record.Targets, relation1, 1, "prefixA", keyA1, keyA2, keyA3) + checkRecordedTargets(record.Targets, relation2, 1, "non-existing-key") + Expect(record.TargetUpdateOnly).To(BeFalse()) + Expect(record.MetadataFields).To(BeEquivalentTo(map[string][]string{IntegerKey: {"1"}})) + Expect(record.Flags).To(BeEquivalentTo(RecordedFlags{[]Flag{TemporaryFlag()}})) + + // check snapshot: + // -> before the changes + records := graphR.GetSnapshot(startTime) + checkRecordedNodes(records) + // -> after the changes + records = graphR.GetSnapshot(time.Now()) + checkRecordedNodes(records, keyA1, keyA2, keyA3, keyB1) + + graphR.Release() +} + +func TestSelectors(t *testing.T) { + RegisterTestingT(t) + + graph := buildGraph(nil, true, true, selectNodesToBuild(1, 2, 3, 4)) + graphR := graph.Read() + + // test key selector + 
checkNodes(graphR.GetNodes(prefixASelector), keyA1, keyA2, keyA3) + checkNodes(graphR.GetNodes(prefixBSelector), keyB1) + checkNodes(graphR.GetNodes(keySelector(keyA1, keyB1)), keyA1, keyB1) + checkNodes(graphR.GetNodes(func(key string) bool { return false })) + + // test flag selectors + checkNodes(graphR.GetNodes(nil, WithFlags(AnyColorFlag())), keyA1, keyA2, keyA3) + checkNodes(graphR.GetNodes(nil, WithFlags(ColorFlag(Red))), keyA1) + checkNodes(graphR.GetNodes(nil, WithFlags(ColorFlag(Blue))), keyA2) + checkNodes(graphR.GetNodes(nil, WithFlags(ColorFlag(Green))), keyA3) + checkNodes(graphR.GetNodes(nil, WithFlags(AnyColorFlag()), WithoutFlags(TemporaryFlag())), keyA1, keyA2) + checkNodes(graphR.GetNodes(nil, WithoutFlags(TemporaryFlag())), keyA1, keyA2) + checkNodes(graphR.GetNodes(nil, WithoutFlags(AbstractFlag())), keyA2, keyB1) + + // test combination of key selector and flag selector + checkNodes(graphR.GetNodes(prefixASelector, WithoutFlags(AbstractFlag())), keyA2) + checkNodes(graphR.GetNodes(prefixBSelector, WithoutFlags(TemporaryFlag()))) + checkNodes(graphR.GetNodes(keySelector(keyA1, keyB1), WithFlags(AnyColorFlag())), keyA1) + + // change flags and re-test flag selectors + graphR.Release() + graphW := graph.Write(false) + graphW.SetNode(keyA1).SetFlags(ColorFlag(Green), TemporaryFlag()) + graphW.SetNode(keyA1).DelFlags(AbstractFlagName) + graphW.SetNode(keyA3).DelFlags(ColorFlagName) + graphW.Save() + graphW.Release() + + graphR = graph.Read() + checkNodes(graphR.GetNodes(nil, WithFlags(AnyColorFlag())), keyA1, keyA2) + checkNodes(graphR.GetNodes(nil, WithFlags(ColorFlag(Red)))) + checkNodes(graphR.GetNodes(nil, WithFlags(ColorFlag(Blue))), keyA2) + checkNodes(graphR.GetNodes(nil, WithFlags(ColorFlag(Green))), keyA1) + checkNodes(graphR.GetNodes(nil, WithFlags(AnyColorFlag()), WithoutFlags(TemporaryFlag())), keyA2) + checkNodes(graphR.GetNodes(nil, WithoutFlags(TemporaryFlag())), keyA2) + checkNodes(graphR.GetNodes(nil, WithoutFlags(AbstractFlag())), 
keyA1, keyA2, keyB1) + graphR.Release() +} + +func TestNodeRemoval(t *testing.T) { + RegisterTestingT(t) + + startTime := time.Now() + graph := buildGraph(nil, true, true, selectNodesToBuild(1, 2, 3, 4)) + + // delete node2 & node 4 + delTime := time.Now() + graphW := graph.Write(true) + graphW.DeleteNode(keyA2) + graphW.DeleteNode(keyB1) + graphW.Save() + graphW.Release() + + // check graph content + graphR := graph.Read() + + // -> node1: + node1 := graphR.GetNode(keyA1) + Expect(node1).ToNot(BeNil()) + Expect(node1.GetLabel()).To(Equal(value1Label)) + Expect(node1.GetValue()).To(Equal(value1)) + Expect(node1.GetMetadata().(MetaWithInteger).GetInteger()).To(Equal(1)) + flag := node1.GetFlag(ColorFlagName) + Expect(flag).ToNot(BeNil()) + colorFlag := flag.(*ColorFlagImpl) + Expect(colorFlag.Color).To(Equal(Red)) + Expect(node1.GetFlag(AbstractFlagName)).ToNot(BeNil()) + Expect(node1.GetFlag(TemporaryFlagName)).To(BeNil()) + Expect(node1.GetTargets(relation1)).To(HaveLen(1)) + checkTargets(node1, relation1, "node2") + checkSources(node1, relation1) + Expect(node1.GetTargets(relation2)).To(HaveLen(1)) + checkTargets(node1, relation2, "prefixB") + checkSources(node1, relation2, keyA3) + + // -> node2: + node2 := graphR.GetNode(keyA2) + Expect(node2).To(BeNil()) + + // -> node3: + node3 := graphR.GetNode(keyA3) + Expect(node3).ToNot(BeNil()) + Expect(node3.GetLabel()).To(Equal(value3Label)) + Expect(node3.GetValue()).To(Equal(value3)) + Expect(node3.GetMetadata().(MetaWithInteger).GetInteger()).To(Equal(3)) + flag = node3.GetFlag(ColorFlagName) + Expect(flag).ToNot(BeNil()) + colorFlag = flag.(*ColorFlagImpl) + Expect(colorFlag.Color).To(Equal(Green)) + Expect(node3.GetFlag(AbstractFlagName)).ToNot(BeNil()) + Expect(node3.GetFlag(TemporaryFlagName)).ToNot(BeNil()) + Expect(node3.GetTargets(relation1)).To(BeEmpty()) + checkSources(node3, relation1) + Expect(node3.GetTargets(relation2)).To(HaveLen(2)) + checkTargets(node3, relation2, "node1+node2", keyA1) + 
checkTargets(node3, relation2, "prefixB") + checkSources(node3, relation2) + + // -> node4: + node4 := graphR.GetNode(keyB1) + Expect(node4).To(BeNil()) + + // check metadata + + // -> metadata for prefixA: + metaMap := graphR.GetMetadataMap(metadataMapA) + Expect(metaMap).ToNot(BeNil()) + checkMetadataValues(metaMap, value1Label, value3Label) + intMap := metaMap.(NameToInteger) + label, metadata, exists := intMap.LookupByIndex(1) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(Equal(1)) + Expect(label).To(Equal(value1Label)) + label, metadata, exists = intMap.LookupByIndex(2) + Expect(exists).To(BeFalse()) + label, metadata, exists = intMap.LookupByIndex(3) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(Equal(3)) + Expect(label).To(Equal(value3Label)) + + // -> metadata for prefixB: + metaMap = graphR.GetMetadataMap(metadataMapB) + Expect(metaMap).ToNot(BeNil()) + checkMetadataValues(metaMap) + intMap = metaMap.(NameToInteger) + label, metadata, exists = intMap.LookupByIndex(1) + Expect(exists).To(BeFalse()) + + // check history + + // -> flags: + flagStats := graphR.GetFlagStats(ColorFlagName, prefixASelector) + Expect(flagStats.TotalCount).To(BeEquivalentTo(3)) + Expect(flagStats.PerValueCount).To(BeEquivalentTo(map[string]uint{ + Red.String(): 1, + Blue.String(): 1, + Green.String(): 1, + })) + flagStats = graphR.GetFlagStats(ColorFlagName, prefixBSelector) + Expect(flagStats.TotalCount).To(BeEquivalentTo(0)) + Expect(flagStats.PerValueCount).To(BeEmpty()) + flagStats = graphR.GetFlagStats(AbstractFlagName, nil) + Expect(flagStats.TotalCount).To(BeEquivalentTo(2)) + Expect(flagStats.PerValueCount).To(BeEquivalentTo(map[string]uint{"": 2})) + flagStats = graphR.GetFlagStats(TemporaryFlagName, nil) + Expect(flagStats.TotalCount).To(BeEquivalentTo(2)) + Expect(flagStats.PerValueCount).To(BeEquivalentTo(map[string]uint{"": 2})) + + // -> timeline node1: + timeline := graphR.GetNodeTimeline(keyA1) + Expect(timeline).To(HaveLen(2)) 
+ // -> prev record + record := timeline[0] + Expect(record.Key).To(Equal(keyA1)) + Expect(record.Since.After(startTime)).To(BeTrue()) + Expect(record.Since.Before(delTime)).To(BeTrue()) + Expect(record.Until.After(delTime)).To(BeTrue()) + Expect(record.Until.Before(time.Now())).To(BeTrue()) + Expect(record.Label).To(Equal(value1Label)) + Expect(proto.Equal(record.Value, RecordProtoMessage(value1))).To(BeTrue()) + Expect(record.Targets).To(HaveLen(2)) + checkRecordedTargets(record.Targets, relation1, 1, "node2", keyA2) + checkRecordedTargets(record.Targets, relation2, 1, "prefixB", keyB1) + Expect(record.TargetUpdateOnly).To(BeFalse()) + Expect(record.MetadataFields).To(BeEquivalentTo(map[string][]string{IntegerKey: {"1"}})) + Expect(record.Flags).To(BeEquivalentTo(RecordedFlags{[]Flag{ColorFlag(Red), AbstractFlag()}})) + // -> new record + record = timeline[1] + Expect(record.Key).To(Equal(keyA1)) + Expect(record.Since.After(delTime)).To(BeTrue()) + Expect(record.Since.Before(time.Now())).To(BeTrue()) + Expect(record.Until.IsZero()).To(BeTrue()) + Expect(record.Label).To(Equal(value1Label)) + Expect(proto.Equal(record.Value, RecordProtoMessage(value1))).To(BeTrue()) + Expect(record.Targets).To(HaveLen(2)) + checkRecordedTargets(record.Targets, relation1, 1, "node2") + checkRecordedTargets(record.Targets, relation2, 1, "prefixB") + Expect(record.TargetUpdateOnly).To(BeTrue()) + Expect(record.MetadataFields).To(BeEquivalentTo(map[string][]string{IntegerKey: {"1"}})) + Expect(record.Flags).To(BeEquivalentTo(RecordedFlags{[]Flag{ColorFlag(Red), AbstractFlag()}})) + + // -> timeline node2: + timeline = graphR.GetNodeTimeline(keyA2) + Expect(timeline).To(HaveLen(1)) + // -> old record + record = timeline[0] + Expect(record.Key).To(Equal(keyA2)) + Expect(record.Since.After(startTime)).To(BeTrue()) + Expect(record.Since.Before(delTime)).To(BeTrue()) + Expect(record.Until.After(delTime)).To(BeTrue()) + Expect(record.Until.Before(time.Now())).To(BeTrue()) + 
Expect(record.Label).To(Equal(value2Label)) + Expect(proto.Equal(record.Value, RecordProtoMessage(value2))).To(BeTrue()) + Expect(record.Targets).To(HaveLen(1)) + checkRecordedTargets(record.Targets, relation1, 1, "node3", keyA3) + Expect(record.TargetUpdateOnly).To(BeFalse()) + Expect(record.MetadataFields).To(BeEquivalentTo(map[string][]string{IntegerKey: {"2"}})) + Expect(record.Flags).To(BeEquivalentTo(RecordedFlags{[]Flag{ColorFlag(Blue)}})) + + // -> timeline node3: + timeline = graphR.GetNodeTimeline(keyA3) + Expect(timeline).To(HaveLen(2)) + // -> old record + record = timeline[0] + Expect(record.Key).To(Equal(keyA3)) + Expect(record.Since.After(startTime)).To(BeTrue()) + Expect(record.Since.Before(delTime)).To(BeTrue()) + Expect(record.Until.After(delTime)).To(BeTrue()) + Expect(record.Until.Before(time.Now())).To(BeTrue()) + Expect(record.Label).To(Equal(value3Label)) + Expect(proto.Equal(record.Value, RecordProtoMessage(value3))).To(BeTrue()) + Expect(record.Targets).To(HaveLen(1)) + checkRecordedTargets(record.Targets, relation2, 2, "node1+node2", keyA1, keyA2) + checkRecordedTargets(record.Targets, relation2, 2, "prefixB", keyB1) + Expect(record.TargetUpdateOnly).To(BeFalse()) + Expect(record.MetadataFields).To(BeEquivalentTo(map[string][]string{IntegerKey: {"3"}})) + Expect(record.Flags).To(BeEquivalentTo(RecordedFlags{[]Flag{ColorFlag(Green), AbstractFlag(), TemporaryFlag()}})) + // -> new record + record = timeline[1] + Expect(record.Key).To(Equal(keyA3)) + Expect(record.Since.After(delTime)).To(BeTrue()) + Expect(record.Since.Before(time.Now())).To(BeTrue()) + Expect(record.Until.IsZero()).To(BeTrue()) + Expect(record.Label).To(Equal(value3Label)) + Expect(proto.Equal(record.Value, RecordProtoMessage(value3))).To(BeTrue()) + Expect(record.Targets).To(HaveLen(1)) + checkRecordedTargets(record.Targets, relation2, 2, "node1+node2", keyA1) + checkRecordedTargets(record.Targets, relation2, 2, "prefixB") + Expect(record.TargetUpdateOnly).To(BeTrue()) + 
Expect(record.MetadataFields).To(BeEquivalentTo(map[string][]string{IntegerKey: {"3"}})) + Expect(record.Flags).To(BeEquivalentTo(RecordedFlags{[]Flag{ColorFlag(Green), AbstractFlag(), TemporaryFlag()}})) + + // -> timeline node4: + // -> old record + timeline = graphR.GetNodeTimeline(keyB1) + Expect(timeline).To(HaveLen(1)) + record = timeline[0] + Expect(record.Key).To(Equal(keyB1)) + Expect(record.Since.After(startTime)).To(BeTrue()) + Expect(record.Since.Before(delTime)).To(BeTrue()) + Expect(record.Until.After(delTime)).To(BeTrue()) + Expect(record.Until.Before(time.Now())).To(BeTrue()) + Expect(record.Label).To(Equal(value4Label)) + Expect(proto.Equal(record.Value, RecordProtoMessage(value4))).To(BeTrue()) + Expect(record.Targets).To(HaveLen(2)) + checkRecordedTargets(record.Targets, relation1, 1, "prefixA", keyA1, keyA2, keyA3) + checkRecordedTargets(record.Targets, relation2, 1, "non-existing-key") + Expect(record.TargetUpdateOnly).To(BeFalse()) + Expect(record.MetadataFields).To(BeEquivalentTo(map[string][]string{IntegerKey: {"1"}})) + Expect(record.Flags).To(BeEquivalentTo(RecordedFlags{[]Flag{TemporaryFlag()}})) + + // check snapshot: + records := graphR.GetSnapshot(time.Now()) + checkRecordedNodes(records, keyA1, keyA3) + + graphR.Release() +} + +func TestNodeTimeline(t *testing.T) { + RegisterTestingT(t) + + // add node1 + startTime := time.Now() + graph := buildGraph(nil, true, true, selectNodesToBuild(1)) + + // delete node1 + delTime := time.Now() + graphW := graph.Write(true) + graphW.DeleteNode(keyA1) + graphW.Save() + graphW.Release() + + // re-create node1, but without recording + buildGraph(graph, false, false, selectNodesToBuild(1)) + + // change flags + changeTime1 := time.Now() + graphW = graph.Write(true) + node := graphW.SetNode(keyA1) + node.SetFlags(ColorFlag(Blue)) + graphW.Save() + graphW.Release() + + // change metadata + flags + changeTime2 := time.Now() + graphW = graph.Write(true) + node = graphW.SetNode(keyA1) + 
node.SetFlags(TemporaryFlag()) + node.DelFlags(AbstractFlagName) + node.SetMetadata(&OnlyInteger{Integer: 2}) + graphW.Save() + graphW.Release() + + // check history + graphR := graph.Read() + + // -> flags: + flagStats := graphR.GetFlagStats(ColorFlagName, nil) + Expect(flagStats.TotalCount).To(BeEquivalentTo(3)) + Expect(flagStats.PerValueCount).To(BeEquivalentTo(map[string]uint{ + Red.String(): 1, + Blue.String(): 2, + })) + flagStats = graphR.GetFlagStats(AbstractFlagName, nil) + Expect(flagStats.TotalCount).To(BeEquivalentTo(2)) + Expect(flagStats.PerValueCount).To(BeEquivalentTo(map[string]uint{"": 2})) + flagStats = graphR.GetFlagStats(TemporaryFlagName, nil) + Expect(flagStats.TotalCount).To(BeEquivalentTo(1)) + Expect(flagStats.PerValueCount).To(BeEquivalentTo(map[string]uint{"": 1})) + + // -> timeline node1: + timeline := graphR.GetNodeTimeline(keyA1) + Expect(timeline).To(HaveLen(3)) + // -> first record + record := timeline[0] + Expect(record.Key).To(Equal(keyA1)) + Expect(record.Since.After(startTime)).To(BeTrue()) + Expect(record.Since.Before(delTime)).To(BeTrue()) + Expect(record.Until.After(delTime)).To(BeTrue()) + Expect(record.Until.Before(changeTime1)).To(BeTrue()) + Expect(record.Label).To(Equal(value1Label)) + Expect(proto.Equal(record.Value, RecordProtoMessage(value1))).To(BeTrue()) + Expect(record.Targets).To(HaveLen(2)) + checkRecordedTargets(record.Targets, relation1, 1, "node2") + checkRecordedTargets(record.Targets, relation2, 1, "prefixB") + Expect(record.TargetUpdateOnly).To(BeFalse()) + Expect(record.MetadataFields).To(BeEquivalentTo(map[string][]string{IntegerKey: {"1"}})) + Expect(record.Flags).To(BeEquivalentTo(RecordedFlags{[]Flag{ColorFlag(Red), AbstractFlag()}})) + // -> second record + record = timeline[1] + Expect(record.Key).To(Equal(keyA1)) + Expect(record.Since.After(changeTime1)).To(BeTrue()) + Expect(record.Since.Before(changeTime2)).To(BeTrue()) + Expect(record.Until.After(changeTime2)).To(BeTrue()) + 
Expect(record.Until.Before(time.Now())).To(BeTrue()) + Expect(record.Label).To(Equal(value1Label)) + Expect(proto.Equal(record.Value, RecordProtoMessage(value1))).To(BeTrue()) + Expect(record.Targets).To(HaveLen(2)) + checkRecordedTargets(record.Targets, relation1, 1, "node2") + checkRecordedTargets(record.Targets, relation2, 1, "prefixB") + Expect(record.TargetUpdateOnly).To(BeFalse()) + Expect(record.MetadataFields).To(BeEquivalentTo(map[string][]string{IntegerKey: {"1"}})) + Expect(record.Flags).To(BeEquivalentTo(RecordedFlags{[]Flag{AbstractFlag(), ColorFlag(Blue)}})) + // -> third record + record = timeline[2] + Expect(record.Key).To(Equal(keyA1)) + Expect(record.Since.After(changeTime2)).To(BeTrue()) + Expect(record.Since.Before(time.Now())).To(BeTrue()) + Expect(record.Until.IsZero()).To(BeTrue()) + Expect(record.Label).To(Equal(value1Label)) + Expect(proto.Equal(record.Value, RecordProtoMessage(value1))).To(BeTrue()) + Expect(record.Targets).To(HaveLen(2)) + checkRecordedTargets(record.Targets, relation1, 1, "node2") + checkRecordedTargets(record.Targets, relation2, 1, "prefixB") + Expect(record.TargetUpdateOnly).To(BeFalse()) + Expect(record.MetadataFields).To(BeEquivalentTo(map[string][]string{IntegerKey: {"2"}})) + Expect(record.Flags).To(BeEquivalentTo(RecordedFlags{[]Flag{ColorFlag(Blue), TemporaryFlag()}})) + + graphR.Release() +} + +func TestNodeMetadata(t *testing.T) { + RegisterTestingT(t) + + // add node1-node3 + graph := buildGraph(nil, true, true, selectNodesToBuild(1, 2, 3)) + + // check metadata + graphR := graph.Read() + + // -> metadata for prefixA: + metaMap := graphR.GetMetadataMap(metadataMapA) + Expect(metaMap).ToNot(BeNil()) + checkMetadataValues(metaMap, value1Label, value2Label, value3Label) + intMap := metaMap.(NameToInteger) + label, metadata, exists := intMap.LookupByIndex(1) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(Equal(1)) + Expect(label).To(Equal(value1Label)) + label, metadata, exists = 
intMap.LookupByIndex(2) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(Equal(2)) + Expect(label).To(Equal(value2Label)) + label, metadata, exists = intMap.LookupByIndex(3) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(Equal(3)) + Expect(label).To(Equal(value3Label)) + + // -> metadata for prefixB: + metaMap = graphR.GetMetadataMap(metadataMapB) + Expect(metaMap).ToNot(BeNil()) + checkMetadataValues(metaMap) + intMap = metaMap.(NameToInteger) + label, metadata, exists = intMap.LookupByIndex(1) + Expect(exists).To(BeFalse()) + graphR.Release() + + // add node4, remove node1 & change metadata for node2 + buildGraph(graph, true, false, selectNodesToBuild(4)) + graphW := graph.Write(true) + graphW.DeleteNode(keyA1) + graphW.SetNode(keyA2).SetMetadata(&OnlyInteger{Integer: 4}) + graphW.Save() + graphW.Release() + + // check metadata after the changes + graphR = graph.Read() + + // -> metadata for prefixA: + metaMap = graphR.GetMetadataMap(metadataMapA) + Expect(metaMap).ToNot(BeNil()) + checkMetadataValues(metaMap, value2Label, value3Label) + intMap = metaMap.(NameToInteger) + label, metadata, exists = intMap.LookupByIndex(1) + Expect(exists).To(BeFalse()) + label, metadata, exists = intMap.LookupByIndex(2) + Expect(exists).To(BeFalse()) + label, metadata, exists = intMap.LookupByIndex(4) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(Equal(4)) + Expect(label).To(Equal(value2Label)) + label, metadata, exists = intMap.LookupByIndex(3) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(Equal(3)) + Expect(label).To(Equal(value3Label)) + + // -> metadata for prefixB: + metaMap = graphR.GetMetadataMap(metadataMapB) + Expect(metaMap).ToNot(BeNil()) + checkMetadataValues(metaMap, value4Label) + intMap = metaMap.(NameToInteger) + label, metadata, exists = intMap.LookupByIndex(1) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(Equal(1)) + Expect(label).To(Equal(value4Label)) + graphR.Release() 
+} + +func TestReuseNodeAfterSave(t *testing.T) { + RegisterTestingT(t) + + graph := NewGraph(true, minutesInOneDay, minutesInOneHour) + graphW := graph.Write(true) + + // add new node + nodeW := graphW.SetNode(keyA1) + nodeW.SetValue(value1) + nodeW.SetFlags(ColorFlag(Red)) + + // save new node + graphW.Save() + + // keep using the same node handle + nodeW.SetFlags(AbstractFlag()) + + // save changes + graphW.Save() + + // get new handle + nodeW = graphW.SetNode(keyA1) + nodeW.SetFlags(TemporaryFlag()) + + // save changes + graphW.Save() + graphW.Release() + + // check that all 3 flags are applied + graphR := graph.Read() + checkNodes(graphR.GetNodes(nil, WithFlags(ColorFlag(Red), AbstractFlag(), TemporaryFlag())), keyA1) + graphR.Release() +} diff --git a/plugins/kvscheduler/internal/graph/graph_write.go b/plugins/kvscheduler/internal/graph/graph_write.go new file mode 100644 index 0000000000..5ade445380 --- /dev/null +++ b/plugins/kvscheduler/internal/graph/graph_write.go @@ -0,0 +1,246 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package graph + +import ( + "reflect" + "time" + + "github.com/ligato/cn-infra/idxmap" +) + +// graphRW implements RWAccess. +type graphRW struct { + *graphR + record bool + deleted []string + newRevs map[string]bool // key -> data-updated? +} + +// newGraphRW creates a new instance of grapRW, which extends an existing +// graph with write-operations. 
+func newGraphRW(graph *graphR, recordChanges bool) *graphRW { + graphRCopy := graph.copyNodesOnly() + return &graphRW{ + graphR: graphRCopy, + record: recordChanges, + newRevs: make(map[string]bool), + } +} + +// RegisterMetadataMap registers new metadata map for value-label->metadata +// associations of selected node. +func (graph *graphRW) RegisterMetadataMap(mapName string, mapping idxmap.NamedMappingRW) { + if graph.mappings == nil { + graph.mappings = make(map[string]idxmap.NamedMappingRW) + } + graph.mappings[mapName] = mapping +} + +// SetNode creates new node or returns read-write handle to an existing node. +// The changes are propagated to the graph only after Save() is called. +// If is true, the changes will recorded as a new revision of the +// node for the history. +func (graph *graphRW) SetNode(key string) NodeRW { + node, has := graph.nodes[key] + if has { + return node + } + node = newNode(nil) + node.graph = graph.graphR + node.key = key + for _, otherNode := range graph.nodes { + otherNode.checkPotentialTarget(node) + } + graph.nodes[key] = node + + return node +} + +// DeleteNode deletes node with the given key. +// Returns true if the node really existed before the operation. +func (graph *graphRW) DeleteNode(key string) bool { + node, has := graph.nodes[key] + if !has { + return false + } + + // remove from sources of current targets + node.removeThisFromSources() + + // delete from graph + delete(graph.nodes, key) + + // remove from targets of other nodes + for _, otherNode := range graph.nodes { + otherNode.removeFromTargets(key) + } + graph.deleted = append(graph.deleted, key) + return true +} + +// Save propagates all changes to the graph. 
// Save propagates all changes made through this write-handle into the parent
// graph. The parent's RW-lock is held for the duration of the merge.
// Node targets/sources are handed over using a copy-on-write swap so that the
// write-handle can keep mutating them without touching the saved graph.
func (graph *graphRW) Save() {
	graph.parent.rwLock.Lock()
	defer graph.parent.rwLock.Unlock()

	destGraph := graph.parent.graph

	// propagate newly registered mappings
	for mapName, mapping := range graph.mappings {
		if _, alreadyReg := destGraph.mappings[mapName]; !alreadyReg {
			destGraph.mappings[mapName] = mapping
		}
	}

	// apply deleted nodes
	for _, key := range graph.deleted {
		if node, has := destGraph.nodes[key]; has {
			// remove metadata
			if node.metadataAdded {
				if mapping, hasMapping := destGraph.mappings[node.metadataMap]; hasMapping {
					mapping.Delete(node.label)
				}
			}
			// remove node from graph
			delete(destGraph.nodes, key)
		}
		// newRevs[key] == true marks a data change (here: deletion) for recording
		graph.newRevs[key] = true
	}
	graph.deleted = []string{}

	// apply new/changes nodes
	for key, node := range graph.nodes {
		if !node.dataUpdated && !node.targetsUpdated && !node.sourcesUpdated {
			continue
		}

		// update metadata
		if !node.metaInSync {
			// update metadata map
			if mapping, hasMapping := destGraph.mappings[node.metadataMap]; hasMapping {
				if node.metadataAdded {
					if node.metadata == nil {
						mapping.Delete(node.label)
						node.metadataAdded = false
					} else {
						// avoid a no-op mapping notification when metadata is unchanged
						prevMeta, _ := mapping.GetValue(node.label)
						if !reflect.DeepEqual(prevMeta, node.metadata) {
							mapping.Update(node.label, node.metadata)
						}
					}
				} else if node.metadata != nil {
					mapping.Put(node.label, node.metadata)
					node.metadataAdded = true
				}
			}
		}

		// mark node for recording during RW-handle release
		// (ignore if only sources have been updated)
		if node.dataUpdated || node.targetsUpdated {
			// the boolean value remembers whether the node's *data* changed
			// (vs. only its targets) across repeated Save() calls
			if _, newRev := graph.newRevs[key]; !newRev {
				graph.newRevs[key] = false
			}
			graph.newRevs[key] = graph.newRevs[key] || node.dataUpdated
		}

		// copy changed node to the actual graph
		nodeCopy := node.copy()
		nodeCopy.graph = destGraph
		destGraph.nodes[key] = newNode(nodeCopy)

		// use copy-on-write targets+sources for the write-handle:
		// the freshly made COW views go to the working copy, the originals
		// stay with the saved node
		cowTargets := nodeCopy.targets
		nodeCopy.targets = node.targets
		node.targets = cowTargets
		cowSources := nodeCopy.sources
		nodeCopy.sources = node.sources
		node.sources = cowSources

		// working copy is now in-sync
		node.dataUpdated = false
		node.targetsUpdated = false
		node.sourcesUpdated = false
		node.metaInSync = true
	}
}

// Release records changes if requested (i.e. the handle was opened with
// record=true and the graph keeps old revisions), and periodically trims
// revisions that have aged beyond the configured limit.
func (graph *graphRW) Release() {
	if graph.record && graph.parent.recordOldRevs {
		graph.parent.rwLock.Lock()
		defer graph.parent.rwLock.Unlock()

		destGraph := graph.parent.graph
		for key, dataUpdated := range graph.newRevs {
			node, exists := destGraph.nodes[key]
			if _, hasTimeline := destGraph.timeline[key]; !hasTimeline {
				if !exists {
					// deleted, but never recorded => skip
					continue
				}
				destGraph.timeline[key] = []*RecordedNode{}
			}
			records := destGraph.timeline[key]
			if len(records) > 0 {
				// close the validity interval of the previous revision
				lastRecord := records[len(records)-1]
				if lastRecord.Until.IsZero() {
					lastRecord.Until = time.Now()
				}
			}
			if exists {
				destGraph.timeline[key] = append(records,
					destGraph.recordNode(node, !dataUpdated))
			}
		}

		// remove past revisions from the log which are too old to keep
		now := time.Now()
		sinceLastTrimming := now.Sub(graph.parent.lastRevTrimming)
		if sinceLastTrimming >= oldRevsTrimmingPeriod {
			for key, records := range destGraph.timeline {
				var i, j int // i = first after init period, j = first after init period to keep
				for i = 0; i < len(records); i++ {
					// records created within permanentInitPeriod are never trimmed
					sinceStart := records[i].Since.Sub(graph.parent.startTime)
					if sinceStart > graph.parent.permanentInitPeriod {
						break
					}
				}
				for j = i; j < len(records); j++ {
					if records[j].Until.IsZero() {
						// still-open (current) revision is always kept
						break
					}
					elapsed := now.Sub(records[j].Until)
					if elapsed <= graph.parent.recordAgeLimit {
						break
					}
				}
				if j > i {
					// shift the kept tail left and nil the freed slots so the
					// trimmed records can be garbage-collected
					copy(records[i:], records[j:])
					newLen := len(records) - (j - i)
					for k := newLen; k < len(records); k++ {
						records[k] = nil
					}
					destGraph.timeline[key] = records[:newLen]
				}
				if len(destGraph.timeline[key]) == 0 {
					delete(destGraph.timeline, key)
				}
			}
			graph.parent.lastRevTrimming = now
		}
	}
}
diff --git a/plugins/kvscheduler/internal/graph/node_read.go b/plugins/kvscheduler/internal/graph/node_read.go
new file mode 100644
index 0000000000..3337352f2c
--- /dev/null
+++ b/plugins/kvscheduler/internal/graph/node_read.go
@@ -0,0 +1,186 @@
// Copyright (c) 2018 Cisco and/or its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package graph

import (
	"fmt"

	"github.com/gogo/protobuf/proto"

	"github.com/ligato/vpp-agent/plugins/kvscheduler/internal/utils"
)

// nodeR implements Node (the read-only view of a graph node).
type nodeR struct {
	graph *graphR

	key           string
	label         string
	value         proto.Message
	flags         []Flag
	metadata      interface{}
	metadataAdded bool   // true if the label->metadata pair was put into metadataMap
	metadataMap   string // name of the metadata map; empty if none was selected
	targetsDef    []RelationTargetDef
	targets       TargetsByRelation
	sources       sourcesByRelation
}

// relationSources groups all sources for a single relation.
type relationSources struct {
	relation string
	sources  utils.KeySet
}

// sourcesByRelation is a slice of all sources, grouped by relations.
type sourcesByRelation []*relationSources

// String returns human-readable string representation of sourcesByRelation.
+func (s sourcesByRelation) String() string { + str := "{" + for idx, sources := range s { + if idx > 0 { + str += ", " + } + str += fmt.Sprintf("%s->%s", sources.relation, sources.sources.String()) + } + str += "}" + return str +} + +// getSourcesForRelation returns sources (keys) for the given relation. +func (s sourcesByRelation) getSourcesForRelation(relation string) *relationSources { + for _, relSources := range s { + if relSources.relation == relation { + return relSources + } + } + return nil +} + +// newNodeR creates a new instance of nodeR. +func newNodeR() *nodeR { + return &nodeR{} +} + +// GetKey returns the key associated with the node. +func (node *nodeR) GetKey() string { + return node.key +} + +// GetLabel returns the label associated with this node. +func (node *nodeR) GetLabel() string { + return node.label +} + +// GetKey returns the value associated with the node. +func (node *nodeR) GetValue() proto.Message { + return node.value +} + +// GetFlag returns reference to the given flag or nil if the node doesn't have +// this flag associated. +func (node *nodeR) GetFlag(name string) Flag { + for _, flag := range node.flags { + if flag.GetName() == name { + return flag + } + } + return nil +} + +// GetMetadata returns the value metadata associated with the node. +func (node *nodeR) GetMetadata() interface{} { + return node.metadata +} + +// GetTargets returns a set of nodes, indexed by relation labels, that the +// edges of the given relation points to. 
func (node *nodeR) GetTargets(relation string) (runtimeTargets RuntimeTargetsByLabel) {
	relTargets := node.targets.GetTargetsForRelation(relation)
	if relTargets == nil {
		return nil
	}
	for _, targets := range relTargets.Targets {
		// resolve matching keys into actual node references
		var nodes []Node
		for _, key := range targets.MatchingKeys.Iterate() {
			nodes = append(nodes, node.graph.nodes[key])
		}
		runtimeTargets = append(runtimeTargets, &RuntimeTargets{
			Label: targets.Label,
			Nodes: nodes,
		})
	}
	return runtimeTargets
}

// GetSources returns a set of nodes with edges of the given relation
// pointing to this node.
func (node *nodeR) GetSources(relation string) (nodes []Node) {
	relSources := node.sources.getSourcesForRelation(relation)
	if relSources == nil {
		return nil
	}

	for _, key := range relSources.sources.Iterate() {
		nodes = append(nodes, node.graph.nodes[key])
	}
	return nodes
}

// copy returns a deep copy of the node.
// Flags and target definitions are shared (treated as immutable); the key sets
// inside targets/sources are wrapped as copy-on-write views, so the copy is
// cheap until either side mutates them.
func (node *nodeR) copy() *nodeR {
	nodeCopy := newNodeR()
	nodeCopy.key = node.key
	nodeCopy.label = node.label
	nodeCopy.value = node.value
	nodeCopy.metadata = node.metadata
	nodeCopy.metadataAdded = node.metadataAdded
	nodeCopy.metadataMap = node.metadataMap

	// shallow-copy flags (immutable)
	nodeCopy.flags = node.flags

	// shallow-copy target definitions (immutable)
	nodeCopy.targetsDef = node.targetsDef

	// copy targets
	nodeCopy.targets = make(TargetsByRelation, 0, len(node.targets))
	for _, relTargets := range node.targets {
		targets := make(TargetsByLabel, 0, len(relTargets.Targets))
		for _, target := range relTargets.Targets {
			targets = append(targets, &Targets{
				Label:        target.Label,
				ExpectedKey:  target.ExpectedKey,
				MatchingKeys: target.MatchingKeys.CopyOnWrite(),
			})
		}
		nodeCopy.targets = append(nodeCopy.targets, &RelationTargets{
			Relation: relTargets.Relation,
			Targets:  targets,
		})
	}

	// copy sources
	nodeCopy.sources = make(sourcesByRelation, 0, len(node.sources))
	for _, relSources := range node.sources {
		nodeCopy.sources = append(nodeCopy.sources, &relationSources{
			relation: relSources.relation,
			sources:  relSources.sources.CopyOnWrite(),
		})
	}
	return nodeCopy
}
diff --git a/plugins/kvscheduler/internal/graph/node_write.go b/plugins/kvscheduler/internal/graph/node_write.go
new file mode 100644
index 0000000000..1d8639b202
--- /dev/null
+++ b/plugins/kvscheduler/internal/graph/node_write.go
@@ -0,0 +1,284 @@
// Copyright (c) 2018 Cisco and/or its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package graph

import (
	"github.com/gogo/protobuf/proto"
	"github.com/ligato/vpp-agent/plugins/kvscheduler/internal/utils"
)

// node is the writable view of a graph node; it extends nodeR with dirty-flags
// that graphRW.Save() uses to decide what needs to be merged.
type node struct {
	*nodeR

	metaInSync     bool // false if metadata changed since the last Save()
	dataUpdated    bool // value/label/flags/metadata changed
	targetsUpdated bool // target key sets changed
	sourcesUpdated bool // source key sets changed
}

// newNode creates a new instance of node, either built from the scratch or
// extending existing nodeR.
func newNode(nodeR *nodeR) *node {
	if nodeR == nil {
		return &node{
			nodeR:       newNodeR(),
			metaInSync:  true,
			dataUpdated: true, /* completely new node */
		}
	}
	return &node{
		nodeR:      nodeR,
		metaInSync: true,
	}
}

// SetLabel associates given label with this node.
func (node *node) SetLabel(label string) {
	node.label = label
	node.dataUpdated = true
}

// SetValue associates given value with this node.
+func (node *node) SetValue(value proto.Message) { + node.value = value + node.dataUpdated = true +} + +// SetFlags associates given flag with this node. +func (node *node) SetFlags(flags ...Flag) { + toBeSet := make(map[string]struct{}) + for _, flag := range flags { + toBeSet[flag.GetName()] = struct{}{} + } + + var otherFlags []Flag + for _, flag := range node.flags { + if _, set := toBeSet[flag.GetName()]; !set { + otherFlags = append(otherFlags, flag) + } + } + + node.flags = append(otherFlags, flags...) + node.dataUpdated = true +} + +// DelFlags removes given flag from this node. +func (node *node) DelFlags(names ...string) { + var otherFlags []Flag + for _, flag := range node.flags { + delete := false + for _, flagName := range names { + if flag.GetName() == flagName { + delete = true + break + } + } + if !delete { + otherFlags = append(otherFlags, flag) + } + } + + node.flags = otherFlags + node.dataUpdated = true +} + +// SetMetadataMap chooses metadata map to be used to store the association +// between this node's value label and metadata. +func (node *node) SetMetadataMap(mapName string) { + if node.metadataMap == "" { // cannot be changed + node.metadataMap = mapName + node.dataUpdated = true + node.metaInSync = false + } +} + +// SetMetadata associates given value metadata with this node. +func (node *node) SetMetadata(metadata interface{}) { + node.metadata = metadata + node.dataUpdated = true + node.metaInSync = false +} + +// SetTargets provides definition of all edges pointing from this node. 
func (node *node) SetTargets(targetsDef []RelationTargetDef) {
	node.targetsDef = targetsDef
	node.dataUpdated = true

	// remove obsolete targets
	for _, relTargets := range node.targets {
		// labelIdx is advanced manually: when a label entry is removed,
		// the next entry shifts into the current index
		for labelIdx := 0; labelIdx < len(relTargets.Targets); {
			targets := relTargets.Targets[labelIdx]

			// collect keys to remove for this relation+label
			var toRemove []string
			for _, target := range targets.MatchingKeys.Iterate() {
				obsolete := true
				targetDefs := node.getTargetDefsForKey(target, relTargets.Relation)
				for _, targetDef := range targetDefs {
					if targetDef.Label == targets.Label {
						obsolete = false
						break
					}
				}
				if len(targetDefs) == 0 {
					// this is no longer target for any label of this relation
					targetNode := node.graph.nodes[target]
					targetNode.removeFromSources(relTargets.Relation, node.GetKey())
				}
				if obsolete {
					toRemove = append(toRemove, target)
				}
			}

			// remove the entire label if it is no longer defined
			obsoleteLabel := true
			for _, targetDef := range node.targetsDef {
				if targetDef.Relation == relTargets.Relation &&
					targetDef.Label == targets.Label {
					obsoleteLabel = false
					break
				}
			}
			if obsoleteLabel {
				// shift-delete the label entry; do NOT advance labelIdx
				newLen := len(relTargets.Targets) - 1
				copy(relTargets.Targets[labelIdx:], relTargets.Targets[labelIdx+1:])
				relTargets.Targets = relTargets.Targets[:newLen]
			} else {
				// remove just obsolete targets, not the entire label
				for _, target := range toRemove {
					targets.MatchingKeys.Del(target)
				}
				labelIdx++
			}
		}
	}

	// build new targets
	var usesSelector bool
	for _, targetDef := range node.targetsDef {
		node.createEntryForTarget(targetDef)
		if targetDef.Key != "" {
			// without selectors, the lookup procedure has complexity O(m*log(n))
			// where n = number of nodes; m = number of edges defined for this node
			if node2, hasTarget := node.graph.nodes[targetDef.Key]; hasTarget {
				node.addToTargets(node2, targetDef)
			}
		} else {
			usesSelector = true // have to use the less efficient O(mn) lookup
		}
	}
	if usesSelector {
		// full scan: test every other node against the selectors
		for _, otherNode := range node.graph.nodes {
			if otherNode.key == node.key {
				continue
			}
			node.checkPotentialTarget(otherNode)
		}
	}
}

// checkPotentialTarget checks if node2 is target of node in any of the relations.
func (node *node) checkPotentialTarget(node2 *node) {
	targetDefs := node.getTargetDefsForKey(node2.key, "") // for any relation
	for _, targetDef := range targetDefs {
		node.addToTargets(node2, targetDef)
	}
}

// getTargetDefsForKey returns all target definitions that select the given key.
// Target definitions can be further filtered by the relation.
// NOTE(review): assumes Selector is non-nil whenever Key is empty — confirm
// at the RelationTargetDef definition site.
func (node *node) getTargetDefsForKey(key, relation string) (defs []RelationTargetDef) {
	for _, targetDef := range node.targetsDef {
		if relation != "" && targetDef.Relation != relation {
			continue
		}
		if targetDef.Key == key ||
			(targetDef.Key == "" && targetDef.Selector(key)) {
			defs = append(defs, targetDef)
		}
	}
	return defs
}

// createEntryForTarget creates entry for target(s) with the given definition
// if it does not exist yet.
func (node *node) createEntryForTarget(targetDef RelationTargetDef) {
	relTargets := node.targets.GetTargetsForRelation(targetDef.Relation)
	if relTargets == nil {
		// new relation
		relTargets = &RelationTargets{Relation: targetDef.Relation}
		node.targets = append(node.targets, relTargets)
	}
	targets := relTargets.GetTargetsForLabel(targetDef.Label)
	if targets == nil {
		// new relation label
		targets = &Targets{Label: targetDef.Label, ExpectedKey: targetDef.Key}
		if targetDef.Key != "" {
			// a concrete key can match at most one node
			targets.MatchingKeys = utils.NewSingletonKeySet("")
		} else {
			// selector
			targets.MatchingKeys = utils.NewSliceBasedKeySet()
		}
		relTargets.Targets = append(relTargets.Targets, targets)
	}
	targets.ExpectedKey = targetDef.Key
}

// addToTargets adds node2 into the set of targets for this node. Sources of node2
// are also updated accordingly.
+func (node *node) addToTargets(node2 *node, targetDef RelationTargetDef) { + // update targets of node + relTargets := node.targets.GetTargetsForRelation(targetDef.Relation) + targets := relTargets.GetTargetsForLabel(targetDef.Label) + node.targetsUpdated = targets.MatchingKeys.Add(node2.key) || node.targetsUpdated + + // update sources of node2 + relSources := node2.sources.getSourcesForRelation(targetDef.Relation) + if relSources == nil { + relSources = &relationSources{ + relation: targetDef.Relation, + sources: utils.NewSliceBasedKeySet(), + } + node2.sources = append(node2.sources, relSources) + } + node2.sourcesUpdated = relSources.sources.Add(node.key) || node2.sourcesUpdated +} + +// removeFromTargets removes given key from the set of targets. +func (node *node) removeFromTargets(key string) { + for _, relTargets := range node.targets { + for _, targets := range relTargets.Targets { + node.targetsUpdated = targets.MatchingKeys.Del(key) || node.targetsUpdated + } + } +} + +// removeFromTargets removes this node from the set of sources of all the other nodes. +func (node *node) removeThisFromSources() { + for _, relTargets := range node.targets { + for _, targets := range relTargets.Targets { + for _, key := range targets.MatchingKeys.Iterate() { + targetNode := node.graph.nodes[key] + targetNode.removeFromSources(relTargets.Relation, node.GetKey()) + } + } + } +} + +// removeFromSources removes given key from the sources for the given relation. +func (node *node) removeFromSources(relation string, key string) { + updated := node.sources.getSourcesForRelation(relation).sources.Del(key) + node.sourcesUpdated = updated || node.sourcesUpdated +} diff --git a/plugins/kvscheduler/internal/graph/utils_for_test.go b/plugins/kvscheduler/internal/graph/utils_for_test.go new file mode 100644 index 0000000000..2d5e480884 --- /dev/null +++ b/plugins/kvscheduler/internal/graph/utils_for_test.go @@ -0,0 +1,258 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. 
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package graph

import (
	"strings"

	. "github.com/onsi/gomega"

	"github.com/ligato/cn-infra/idxmap"
	. "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/test"
)

// Shared fixture constants used by the graph unit tests.
const (
	value1Label = "value1"
	value2Label = "value2"
	value3Label = "value3"
	value4Label = "value4"

	prefixA = "/prefixA/"
	prefixB = "/prefixB/"

	keyA1 = prefixA + "key1"
	keyA2 = prefixA + "key2"
	keyA3 = prefixA + "key3"
	keyB1 = prefixB + "key4"

	metadataMapA = "mapA"
	metadataMapB = "mapB"

	relation1 = "relation1"
	relation2 = "relation2"
)

var (
	value1 = NewStringValue("this is value1")
	value2 = NewStringValue("this is value2")
	value3 = NewStringValue("this is value3")
	value4 = NewStringValue("this is value4")
)

// prefixASelector selects all keys under prefixA.
func prefixASelector(key string) bool {
	return strings.HasPrefix(key, prefixA)
}

// prefixBSelector selects all keys under prefixB.
func prefixBSelector(key string) bool {
	return strings.HasPrefix(key, prefixB)
}

// keySelector builds a selector matching exactly the listed keys.
func keySelector(keys ...string) func(key string) bool {
	return func(key string) bool {
		for _, k := range keys {
			if key == k {
				return true
			}
		}
		return false
	}
}

// selectNodesToBuild converts a list of node IDs into the set form expected
// by buildGraph.
func selectNodesToBuild(ids ...int) map[int]struct{} {
	nodeIDs := make(map[int]struct{})
	for _, id := range ids {
		nodeIDs[id] = struct{}{}
	}
	return nodeIDs
}

// buildGraph populates the given graph (or a new one when nil) with the
// selected subset of the four test nodes, saves, then makes extra unsaved
// changes to verify that Save() is the only point where changes propagate.
func buildGraph(graph Graph, record, regMaps bool, nodes map[int]struct{}) Graph {
	if graph == nil {
		graph = NewGraph(true, minutesInOneDay, minutesInOneHour)
	}
	graphW := graph.Write(record)

	if regMaps {
		graphW.RegisterMetadataMap(metadataMapA, NewNameToInteger(metadataMapA))
		graphW.RegisterMetadataMap(metadataMapB, NewNameToInteger(metadataMapB))
	}

	var (
		node1, node2, node3, node4 NodeRW
	)

	if _, addNode1 := nodes[1]; addNode1 {
		node1 = graphW.SetNode(keyA1)
		node1.SetLabel(value1Label)
		node1.SetValue(value1)
		node1.SetMetadata(&OnlyInteger{Integer: 1})
		node1.SetMetadataMap(metadataMapA)
		node1.SetFlags(ColorFlag(Red), AbstractFlag())
		node1.SetTargets([]RelationTargetDef{
			{relation1, "node3", keyA3, nil},
			{relation2, "node2", keyA2, nil},
		})
		// targets changed
		node1.SetTargets([]RelationTargetDef{
			{relation1, "node2", keyA2, nil},
			{relation2, "prefixB", "", prefixBSelector},
		})
	}

	if _, addNode2 := nodes[2]; addNode2 {
		node2 = graphW.SetNode(keyA2)
		node2.SetLabel(value2Label)
		node2.SetValue(value2)
		node2.SetMetadata(&OnlyInteger{Integer: 2})
		node2.SetMetadataMap(metadataMapA)
		node2.SetFlags(ColorFlag(Blue))
		node2.SetTargets([]RelationTargetDef{
			{relation1, "node3", keyA1, nil},
		})
		// targets changed
		node2.SetTargets([]RelationTargetDef{
			{relation1, "node3", keyA3, nil},
		})
	}

	if _, addNode3 := nodes[3]; addNode3 {
		node3 = graphW.SetNode(keyA3)
		node3.SetLabel(value3Label)
		node3.SetValue(value3)
		node3.SetMetadata(&OnlyInteger{Integer: 3})
		node3.SetMetadataMap(metadataMapA)
		node3.SetFlags(ColorFlag(Green), AbstractFlag(), TemporaryFlag())
		node3.SetTargets([]RelationTargetDef{
			{relation2, "node1+node2", "", keySelector(keyA1, keyA2)},
			{relation2, "prefixB", keyB1, nil},
		})
		// targets changed
		node3.SetTargets([]RelationTargetDef{
			{relation2, "node1+node2", "", keySelector(keyA1, keyA2)},
			{relation2, "prefixB", "", prefixBSelector},
		})
	}

	if _, addNode4 := nodes[4]; addNode4 {
		node4 = graphW.SetNode(keyB1)
		node4.SetLabel(value4Label)
		node4.SetValue(value4)
		node4.SetMetadata(&OnlyInteger{Integer: 1})
		node4.SetMetadataMap(metadataMapB)
		node4.SetFlags(TemporaryFlag())
		node4.SetTargets([]RelationTargetDef{
			{relation1, "prefixA", "", prefixASelector},
			{relation2, "non-existing-key", "non-existing-key", nil},
			{relation2, "non-existing-key2", "non-existing-key2", nil},
		})
		// targets changed
		node4.SetTargets([]RelationTargetDef{
			{relation1, "prefixA", "", prefixASelector},
			{relation2, "non-existing-key", "non-existing-key", nil},
		})
	}

	graphW.Save()

	// make changes that will not be saved and thus should have no effect
	if node1 != nil {
		node1.SetTargets([]RelationTargetDef{
			{relation1, "node3", keyA3, nil},
			{relation2, "node2", keyA2, nil},
		})
	}
	if node3 != nil {
		node3.SetTargets([]RelationTargetDef{})
	}
	if node4 != nil {
		node4.SetTargets([]RelationTargetDef{
			{relation1, "prefixA", "use-key-instead-of-selector", nil},
			{relation2, "non-existing-key", keyA3, nil},
		})
	}

	graphW.Release()
	return graph
}

// checkTargets asserts that the node's targets for relation+label are exactly
// the given keys.
func checkTargets(node Node, relation string, label string, targetKeys ...string) {
	targets := node.GetTargets(relation)
	forLabel := targets.GetTargetsForLabel(label)
	targetNodes := make(map[string]struct{})
	for _, targetNode := range forLabel.Nodes {
		targetNodes[targetNode.GetKey()] = struct{}{}
	}
	for _, targetKey := range targetKeys {
		Expect(targetNodes).To(HaveKey(targetKey))
	}
	Expect(targetNodes).To(HaveLen(len(targetKeys)))
}

// checkRecordedTargets asserts that a recorded target set contains exactly the
// given keys under relation+label, with labelCnt labels in total.
func checkRecordedTargets(recordedTargets TargetsByRelation, relation string, labelCnt int, label string, targetKeys ...string) {
	relTargets := recordedTargets.GetTargetsForRelation(relation)
	Expect(relTargets).ToNot(BeNil())
	Expect(relTargets.Targets).To(HaveLen(labelCnt))
	targets := relTargets.GetTargetsForLabel(label)
	Expect(targets).ToNot(BeNil())
	Expect(targets.Label).To(Equal(label))
	for _, targetKey := range targetKeys {
		Expect(targets.MatchingKeys.Has(targetKey)).To(BeTrue())
	}
Expect(targets.MatchingKeys.Length()).To(Equal(len(targetKeys))) +} + +func checkNodes(nodes []Node, keys ...string) { + for _, key := range keys { + found := false + for _, node := range nodes { + if node.GetKey() == key { + found = true + break + } + } + Expect(found).To(BeTrue()) + } + Expect(nodes).To(HaveLen(len(keys))) +} + +func checkRecordedNodes(nodes []*RecordedNode, keys ...string) { + recordedNodes := make(map[string]struct{}) + for _, node := range nodes { + recordedNodes[node.Key] = struct{}{} + } + Expect(nodes).To(HaveLen(len(keys))) +} + +func checkSources(node Node, relation string, sourceKeys ...string) { + sourceNodes := make(map[string]struct{}) + for _, sourceNode := range node.GetSources(relation) { + sourceNodes[sourceNode.GetKey()] = struct{}{} + } + for _, sourceKey := range sourceKeys { + Expect(sourceNodes).To(HaveKey(sourceKey)) + } + Expect(node.GetSources(relation)).To(HaveLen(len(sourceKeys))) +} + +func checkMetadataValues(mapping idxmap.NamedMapping, labels ...string) { + allLabels := make(map[string]struct{}) + for _, label := range mapping.ListAllNames() { + allLabels[label] = struct{}{} + } + for _, label := range labels { + Expect(allLabels).To(HaveKey(label)) + } + Expect(mapping.ListAllNames()).To(HaveLen(len(labels))) +} diff --git a/plugins/kvscheduler/internal/registry/registry_api.go b/plugins/kvscheduler/internal/registry/registry_api.go new file mode 100644 index 0000000000..4762af975e --- /dev/null +++ b/plugins/kvscheduler/internal/registry/registry_api.go @@ -0,0 +1,35 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package registry + +import ( + . "github.com/ligato/vpp-agent/plugins/kvscheduler/api" +) + +// Registry can be used to register all descriptors and get quick (cached, O(log)) +// lookups by keys. +type Registry interface { + // RegisterDescriptor add new descriptor into the registry. + RegisterDescriptor(descriptor *KVDescriptor) + + // GetAllDescriptors returns all registered descriptors ordered by retrieve-dependencies. + GetAllDescriptors() []*KVDescriptor + + // GetDescriptor returns descriptor with the given name. + GetDescriptor(name string) *KVDescriptor + + // GetDescriptorForKey returns descriptor handling the given key. + GetDescriptorForKey(key string) *KVDescriptor +} diff --git a/plugins/kvscheduler/internal/registry/registry_impl.go b/plugins/kvscheduler/internal/registry/registry_impl.go new file mode 100644 index 0000000000..d403e6f986 --- /dev/null +++ b/plugins/kvscheduler/internal/registry/registry_impl.go @@ -0,0 +1,123 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and
// limitations under the License.

package registry

import (
	"container/list"

	. "github.com/ligato/vpp-agent/plugins/kvscheduler/api"
	"github.com/ligato/vpp-agent/plugins/kvscheduler/internal/utils"
)

const (
	// maxKeyCacheSize is the maximum number of key->descriptor entries the registry
	// will cache.
	maxKeyCacheSize = 500
)

// registry is an implementation of Registry for descriptors.
// Key lookups are memoized in an LRU cache (keyCache front = most recently used).
type registry struct {
	descriptors      map[string]*KVDescriptor // descriptor name -> descriptor
	descriptorList   []*KVDescriptor          // ordered by retrieve dependencies
	upToDateDescList bool                     // true if descriptorList is in sync with descriptors
	keyToCacheEntry  map[string]*list.Element // key -> cache entry
	keyCache         *list.List               // doubly linked list of cached entries key->descriptor
}

// cacheEntry encapsulates data for one entry in registry.keyCache
type cacheEntry struct {
	key        string
	descriptor *KVDescriptor
}

// NewRegistry creates a new instance of registry.
func NewRegistry() Registry {
	return &registry{
		descriptors:     make(map[string]*KVDescriptor),
		keyToCacheEntry: make(map[string]*list.Element),
		keyCache:        list.New(),
	}
}

// RegisterDescriptor add new descriptor into the registry.
// The ordered descriptor list is rebuilt lazily on the next GetAllDescriptors().
func (reg *registry) RegisterDescriptor(descriptor *KVDescriptor) {
	reg.descriptors[descriptor.Name] = descriptor
	reg.upToDateDescList = false
}

// GetAllDescriptors returns all registered descriptors, topologically ordered
// by their retrieve-dependencies; the order is cached until the next
// registration.
func (reg *registry) GetAllDescriptors() (descriptors []*KVDescriptor) {
	if reg.upToDateDescList {
		return reg.descriptorList
	}

	// collect descriptor retrieve dependencies
	deps := make(map[string]utils.KeySet)
	descNames := utils.NewMapBasedKeySet()
	for _, descriptor := range reg.descriptors {
		descNames.Add(descriptor.Name)
		deps[descriptor.Name] = utils.NewMapBasedKeySet(descriptor.RetrieveDependencies...)
	}

	// order topologically respecting dependencies.
	orderedNames := utils.TopologicalOrder(descNames, deps, true, false)
	reg.descriptorList = []*KVDescriptor{}
	for _, descName := range orderedNames {
		reg.descriptorList = append(reg.descriptorList, reg.descriptors[descName])
	}

	reg.upToDateDescList = true
	return reg.descriptorList
}

// GetDescriptor returns descriptor with the given name.
func (reg *registry) GetDescriptor(name string) *KVDescriptor {
	descriptor, has := reg.descriptors[name]
	if !has {
		return nil
	}
	return descriptor
}

// GetDescriptorForKey returns descriptor handling the given key.
// The result (including a nil "no descriptor" answer, which is cached as well)
// is memoized in an LRU cache bounded by maxKeyCacheSize.
func (reg *registry) GetDescriptorForKey(key string) *KVDescriptor {
	elem, cached := reg.keyToCacheEntry[key]
	if cached {
		// get descriptor from the cache
		entry := elem.Value.(*cacheEntry)
		reg.keyCache.MoveToFront(elem)
		return entry.descriptor
	}
	if reg.keyCache.Len() == maxKeyCacheSize {
		// the cache is full => remove the last used key
		toRemove := reg.keyCache.Back()
		toRemoveKey := toRemove.Value.(*cacheEntry).key
		delete(reg.keyToCacheEntry, toRemoveKey)
		reg.keyCache.Remove(toRemove)
	}
	// find the descriptor
	// NOTE(review): iteration order over the map is random, so behavior with
	// overlapping KeySelectors is non-deterministic — presumably selectors are
	// expected to be disjoint; confirm.
	var keyDescriptor *KVDescriptor
	for _, descriptor := range reg.descriptors {
		if descriptor.KeySelector(key) {
			keyDescriptor = descriptor
			break
		}
	}
	// add entry to cache
	entry := &cacheEntry{key: key, descriptor: keyDescriptor}
	elem = reg.keyCache.PushFront(entry)
	reg.keyToCacheEntry[key] = elem
	return keyDescriptor
}
diff --git a/plugins/kvscheduler/internal/registry/registry_test.go b/plugins/kvscheduler/internal/registry/registry_test.go
new file mode 100644
index 0000000000..c55d59c099
--- /dev/null
+++ b/plugins/kvscheduler/internal/registry/registry_test.go
@@ -0,0 +1,180 @@
// Copyright (c) 2018 Cisco and/or its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package registry

import (
	"fmt"
	"strings"
	"testing"

	. "github.com/onsi/gomega"

	. "github.com/ligato/vpp-agent/plugins/kvscheduler/api"
	. "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/test"
)

const (
	descriptor1Name = "descriptor1"
	descriptor2Name = "descriptor2"
	descriptor3Name = "descriptor3"
	descriptor4Name = "descriptor4"

	prefixA = "/prefixA/"
	prefixB = "/prefixB/"
	prefixC = "/prefixC/"

	randomKey    = "randomKey"
	randomSuffix = "randomSuffix"
)

// keySelector builds a selector matching exactly the listed keys.
func keySelector(keys ...string) func(key string) bool {
	return func(key string) bool {
		for _, k := range keys {
			if key == k {
				return true
			}
		}
		return false
	}
}

// prefixSelector builds a selector matching all keys under the given prefix.
func prefixSelector(prefix string) func(key string) bool {
	return func(key string) bool {
		return strings.HasPrefix(key, prefix)
	}
}

// TestRegistry covers descriptor registration, dependency ordering, name and
// key lookups, and eviction behavior of the bounded key->descriptor LRU cache.
func TestRegistry(t *testing.T) {
	RegisterTestingT(t)

	// dependency chain: descriptor1 -> descriptor2 -> descriptor3 -> descriptor4
	descriptor1 := NewMockDescriptor(
		&KVDescriptor{
			Name:                 descriptor1Name,
			KeySelector:          prefixSelector(prefixA),
			RetrieveDependencies: []string{descriptor2Name},
		}, nil, 0)

	descriptor2 := NewMockDescriptor(
		&KVDescriptor{
			Name:                 descriptor2Name,
			KeySelector:          prefixSelector(prefixB),
			RetrieveDependencies: []string{descriptor3Name},
		}, nil, 0)

	descriptor3 := NewMockDescriptor(
		&KVDescriptor{
			Name:                 descriptor3Name,
			KeySelector:          prefixSelector(prefixC),
			RetrieveDependencies: []string{descriptor4Name},
		}, nil, 0)

	descriptor4 := NewMockDescriptor(
		&KVDescriptor{
			Name:        descriptor4Name,
			KeySelector: keySelector(randomKey),
		}, nil, 0)

	registry := NewRegistry()

	registry.RegisterDescriptor(descriptor3)
	registry.RegisterDescriptor(descriptor2)
	registry.RegisterDescriptor(descriptor1)
	registry.RegisterDescriptor(descriptor4)

	// test that descriptors are ordered by dependencies
	allDescriptors := registry.GetAllDescriptors()
	Expect(allDescriptors).To(HaveLen(4))
	Expect(allDescriptors[0].Name).To(BeEquivalentTo(descriptor4Name))
	Expect(allDescriptors[1].Name).To(BeEquivalentTo(descriptor3Name))
	Expect(allDescriptors[2].Name).To(BeEquivalentTo(descriptor2Name))
	Expect(allDescriptors[3].Name).To(BeEquivalentTo(descriptor1Name))

	// test GetDescriptor() method
	descriptor := registry.GetDescriptor(descriptor1Name)
	Expect(descriptor).ToNot(BeNil())
	Expect(descriptor.Name).To(BeEquivalentTo(descriptor1Name))
	Expect(descriptor.KeySelector(prefixA + randomSuffix)).To(BeTrue())
	Expect(descriptor.KeySelector(prefixB + randomSuffix)).To(BeFalse())
	descriptor = registry.GetDescriptor(descriptor2Name)
	Expect(descriptor).ToNot(BeNil())
	Expect(descriptor.Name).To(BeEquivalentTo(descriptor2Name))
	Expect(descriptor.KeySelector(prefixA + randomSuffix)).To(BeFalse())
	Expect(descriptor.KeySelector(prefixB + randomSuffix)).To(BeTrue())
	descriptor = registry.GetDescriptor(descriptor3Name)
	Expect(descriptor).ToNot(BeNil())
	Expect(descriptor.Name).To(BeEquivalentTo(descriptor3Name))
	Expect(descriptor.KeySelector(prefixA + randomSuffix)).To(BeFalse())
	Expect(descriptor.KeySelector(prefixC + randomSuffix)).To(BeTrue())
	descriptor = registry.GetDescriptor(descriptor4Name)
	Expect(descriptor).ToNot(BeNil())
	Expect(descriptor.Name).To(BeEquivalentTo(descriptor4Name))
	Expect(descriptor.KeySelector(prefixA + randomSuffix)).To(BeFalse())
	Expect(descriptor.KeySelector(randomKey)).To(BeTrue())

	// basic GetDescriptorForKey tests
	descriptor = registry.GetDescriptorForKey(prefixA + randomSuffix)
	Expect(descriptor).ToNot(BeNil())
	Expect(descriptor.Name).To(BeEquivalentTo(descriptor1Name))
	descriptor = registry.GetDescriptorForKey(prefixB + randomSuffix)
	Expect(descriptor).ToNot(BeNil())
	Expect(descriptor.Name).To(BeEquivalentTo(descriptor2Name))
	descriptor = registry.GetDescriptorForKey(prefixC + randomSuffix)
	Expect(descriptor).ToNot(BeNil())
	Expect(descriptor.Name).To(BeEquivalentTo(descriptor3Name))
	descriptor = registry.GetDescriptorForKey(randomKey)
	Expect(descriptor).ToNot(BeNil())
	Expect(descriptor.Name).To(BeEquivalentTo(descriptor4Name))

	// repeated lookups will take result from the cache
	descriptor = registry.GetDescriptorForKey(prefixA + randomSuffix)
	Expect(descriptor).ToNot(BeNil())
	Expect(descriptor.Name).To(BeEquivalentTo(descriptor1Name))
	descriptor = registry.GetDescriptorForKey(prefixB + randomSuffix)
	Expect(descriptor).ToNot(BeNil())
	Expect(descriptor.Name).To(BeEquivalentTo(descriptor2Name))
	descriptor = registry.GetDescriptorForKey(prefixC + randomSuffix)
	Expect(descriptor).ToNot(BeNil())
	Expect(descriptor.Name).To(BeEquivalentTo(descriptor3Name))
	descriptor = registry.GetDescriptorForKey(randomKey)
	Expect(descriptor).ToNot(BeNil())
	Expect(descriptor.Name).To(BeEquivalentTo(descriptor4Name))

	// fill up the cache (maxKeyCacheSize fresh keys evict the entries above)
	for i := 0; i < maxKeyCacheSize; i++ {
		if i%2 == 0 {
			descriptor = registry.GetDescriptorForKey(fmt.Sprintf("%s%d", prefixA, i))
			Expect(descriptor).ToNot(BeNil())
			Expect(descriptor.Name).To(BeEquivalentTo(descriptor1Name))
		} else {
			descriptor = registry.GetDescriptorForKey(fmt.Sprintf("%s%d", prefixB, i))
			Expect(descriptor).ToNot(BeNil())
			Expect(descriptor.Name).To(BeEquivalentTo(descriptor2Name))
		}
	}

	// results for these lookups were already removed from the cache and thus will have to be repeated
	descriptor = registry.GetDescriptorForKey(prefixA + randomSuffix)
	Expect(descriptor).ToNot(BeNil())
	Expect(descriptor.Name).To(BeEquivalentTo(descriptor1Name))
	descriptor = registry.GetDescriptorForKey(prefixB + randomSuffix)
	Expect(descriptor).ToNot(BeNil())
	Expect(descriptor.Name).To(BeEquivalentTo(descriptor2Name))
	descriptor = registry.GetDescriptorForKey(prefixC + randomSuffix)
	Expect(descriptor).ToNot(BeNil())
	Expect(descriptor.Name).To(BeEquivalentTo(descriptor3Name))
	descriptor = registry.GetDescriptorForKey(randomKey)
	Expect(descriptor).ToNot(BeNil())
	Expect(descriptor.Name).To(BeEquivalentTo(descriptor4Name))
}
diff --git a/plugins/kvscheduler/internal/test/descriptor.go b/plugins/kvscheduler/internal/test/descriptor.go
new file mode 100644
index 0000000000..398b1e2c5d
--- /dev/null
+++ b/plugins/kvscheduler/internal/test/descriptor.go
@@ -0,0 +1,189 @@
// Copyright (c) 2018 Cisco and/or its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package test

import (
	"strings"

	"github.com/gogo/protobuf/proto"
	"github.com/ligato/cn-infra/idxmap"
	. "github.com/ligato/vpp-agent/plugins/kvscheduler/api"
)

// WithoutOp references operation to leave undefined in the MockDescriptor.
type WithoutOp int

const (
	// WithoutCreate tells MockDescriptor to leave Create as nil.
	WithoutCreate WithoutOp = iota
	// WithoutUpdate tells MockDescriptor to leave Update as nil.
	WithoutUpdate
	// WithoutDelete tells MockDescriptor to leave Delete as nil.
	WithoutDelete
	// WithoutRetrieve tells MockDescriptor to leave Retrieve as nil.
+ WithoutRetrieve +) + +// mockDescriptor implements KVDescriptor for UTs. +type mockDescriptor struct { + nextIndex int + args *KVDescriptor + sb *MockSouthbound +} + +// NewMockDescriptor creates a new instance of Mock Descriptor. +func NewMockDescriptor(args *KVDescriptor, sb *MockSouthbound, firstFreeIndex int, withoutOps ...WithoutOp) *KVDescriptor { + mock := &mockDescriptor{ + nextIndex: firstFreeIndex, + args: args, + sb: sb, + } + descriptor := &KVDescriptor{ + Name: args.Name, + KeySelector: args.KeySelector, + ValueTypeName: args.ValueTypeName, + ValueComparator: args.ValueComparator, + KeyLabel: args.KeyLabel, + NBKeyPrefix: args.NBKeyPrefix, + WithMetadata: args.WithMetadata, + Validate: args.Validate, + IsRetriableFailure: args.IsRetriableFailure, + UpdateWithRecreate: args.UpdateWithRecreate, + Dependencies: args.Dependencies, + RetrieveDependencies: args.RetrieveDependencies, + } + if args.WithMetadata { + descriptor.MetadataMapFactory = func() idxmap.NamedMappingRW { + return NewNameToInteger(args.Name) + } + descriptor.KeyLabel = func(key string) string { + return strings.TrimPrefix(key, args.NBKeyPrefix) + } + } + if args.DerivedValues != nil { + descriptor.DerivedValues = mock.DerivedValues + } + + // operations that can be left undefined: + withoutMap := make(map[WithoutOp]struct{}) + for _, withoutOp := range withoutOps { + withoutMap[withoutOp] = struct{}{} + } + if _, withoutCreate := withoutMap[WithoutCreate]; !withoutCreate { + descriptor.Create = mock.Create + } + if _, withoutDelete := withoutMap[WithoutDelete]; !withoutDelete { + descriptor.Delete = mock.Delete + } + if _, withoutUpdate := withoutMap[WithoutUpdate]; !withoutUpdate { + descriptor.Update = mock.Update + } + if _, withoutRetrieve := withoutMap[WithoutRetrieve]; !withoutRetrieve { + descriptor.Retrieve = mock.Retrieve + } + return descriptor +} + +// validateKey tests predicate for a key that should hold. 
+func (md *mockDescriptor) validateKey(key string, predicate bool) { + if !predicate && md.sb != nil { + md.sb.registerKeyWithInvalidData(key) + } +} + +// equalValues compares two values for equality +func (md *mockDescriptor) equalValues(key string, v1, v2 proto.Message) bool { + if md.args.ValueComparator != nil { + return md.args.ValueComparator(key, v1, v2) + } + return proto.Equal(v1, v2) +} + +// Create executes create operation in the mock SB. +func (md *mockDescriptor) Create(key string, value proto.Message) (metadata Metadata, err error) { + md.validateKey(key, md.args.KeySelector(key)) + withMeta := md.sb != nil && md.args.WithMetadata && !md.sb.isKeyDerived(key) + if withMeta { + metadata = &OnlyInteger{md.nextIndex} + } + if md.sb != nil { + md.validateKey(key, md.sb.GetValue(key) == nil) + err = md.sb.executeChange(md.args.Name, MockCreate, key, value, metadata) + } + if err == nil && withMeta { + md.nextIndex++ + } + return metadata, err +} + +// Delete executes del operation in the mock SB. +func (md *mockDescriptor) Delete(key string, value proto.Message, metadata Metadata) (err error) { + md.validateKey(key, md.args.KeySelector(key)) + if md.sb != nil { + kv := md.sb.GetValue(key) + md.validateKey(key, kv != nil) + if md.sb.isKeyDerived(key) { + // re-generated on refresh + md.validateKey(key, md.equalValues(key, kv.Value, value)) + } else { + md.validateKey(key, kv.Value == value) + } + md.validateKey(key, kv.Metadata == metadata) + err = md.sb.executeChange(md.args.Name, MockDelete, key, nil, metadata) + } + return err +} + +// Update executes update operation in the mock SB. 
+func (md *mockDescriptor) Update(key string, oldValue, newValue proto.Message, oldMetadata Metadata) (newMetadata Metadata, err error) { + md.validateKey(key, md.args.KeySelector(key)) + newMetadata = oldMetadata + if md.sb != nil { + kv := md.sb.GetValue(key) + md.validateKey(key, kv != nil) + if md.sb.isKeyDerived(key) { + // re-generated on refresh + md.validateKey(key, md.equalValues(key, kv.Value, oldValue)) + } else { + md.validateKey(key, kv.Value == oldValue) + } + md.validateKey(key, kv.Metadata == oldMetadata) + err = md.sb.executeChange(md.args.Name, MockUpdate, key, newValue, newMetadata) + } + return newMetadata, err +} + +// Dependencies uses provided DerValuesBuilder. +func (md *mockDescriptor) DerivedValues(key string, value proto.Message) []KeyValuePair { + md.validateKey(key, md.args.KeySelector(key)) + if md.args.DerivedValues != nil { + derivedKVs := md.args.DerivedValues(key, value) + if md.sb != nil { + for _, kv := range derivedKVs { + md.sb.registerDerivedKey(kv.Key) + } + } + return derivedKVs + } + return nil +} + +// Retrieve returns non-derived values currently set in the mock SB. +func (md *mockDescriptor) Retrieve(correlate []KVWithMetadata) ([]KVWithMetadata, error) { + if md.sb == nil { + return nil, nil + } + return md.sb.retrieve(md.args.Name, correlate, md.args.KeySelector) +} diff --git a/plugins/kvscheduler/internal/test/flag.go b/plugins/kvscheduler/internal/test/flag.go new file mode 100644 index 0000000000..561d8564e9 --- /dev/null +++ b/plugins/kvscheduler/internal/test/flag.go @@ -0,0 +1,123 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package test + +// A set of node flags used for testing of the Graph. + +// TestingFlag as a base for flags in the UTs. +type TestingFlag struct { + Name, Value string +} + +// GetName returns the assigned name. +func (flag *TestingFlag) GetName() string { + return flag.Name +} + +// GetValue returns the assigned value. +func (flag *TestingFlag) GetValue() string { + return flag.Value +} + +// Color is a property to be assigned to nodes for testing purposes. +type Color int + +const ( + // Red color. + Red Color = iota + // Blue color. + Blue + // Green color. + Green +) + +// String converts color to string. +func (color Color) String() string { + switch color { + case Red: + return "red" + case Blue: + return "blue" + case Green: + return "green" + } + return "unknown" +} + +// ColorFlagName is the name of the color flag. +const ColorFlagName = "color" + +// ColorFlagImpl implements flag used in UTs to associate "color" with nodes. +type ColorFlagImpl struct { + TestingFlag + Color Color +} + +// ColorFlag returns a new instance of color flag for testing. +func ColorFlag(color Color) *ColorFlagImpl { + return &ColorFlagImpl{ + TestingFlag: TestingFlag{ + Name: ColorFlagName, + Value: color.String(), + }, + Color: color, + } +} + +// AnyColorFlag can be used to match nodes with any color assigned. +func AnyColorFlag() *ColorFlagImpl { + return &ColorFlagImpl{ + TestingFlag: TestingFlag{ + Name: ColorFlagName, + Value: "", + }, + } +} + +// AbstractFlagName is the name of the abstract flag. 
+const AbstractFlagName = "is-abstract" + +// AbstractFlagImpl is used in UTs to mark "abstract" key-value pairs. +type AbstractFlagImpl struct { + TestingFlag +} + +// AbstractFlag returns a new instance of AbstractFlag for testing. +func AbstractFlag() *AbstractFlagImpl { + return &AbstractFlagImpl{ + TestingFlag: TestingFlag{ + Name: AbstractFlagName, + // empty value -> it is a boolean flag + }, + } +} + +// TemporaryFlagName is the name of the temporary flag. +const TemporaryFlagName = "is-temporary" + +// TemporaryFlagImpl is used in UTs to mark "temporary" key-value pairs. +type TemporaryFlagImpl struct { + TestingFlag +} + +// TemporaryFlag returns a new instance of TemporaryFlag for testing. +func TemporaryFlag() *TemporaryFlagImpl { + return &TemporaryFlagImpl{ + TestingFlag: TestingFlag{ + Name: TemporaryFlagName, + // empty value -> it is a boolean flag + }, + } +} diff --git a/plugins/kvscheduler/internal/test/intmeta.go b/plugins/kvscheduler/internal/test/intmeta.go new file mode 100644 index 0000000000..1ab2dacb71 --- /dev/null +++ b/plugins/kvscheduler/internal/test/intmeta.go @@ -0,0 +1,112 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package test + +import ( + "strconv" + + "github.com/ligato/cn-infra/idxmap" + "github.com/ligato/cn-infra/idxmap/mem" + "github.com/ligato/cn-infra/logging" +) + +// NameToInteger is a idxmap specialization used in the UTs for scheduler. 
+// It extends plain metadata with integer exposed as a secondary index. +type NameToInteger interface { + // LookupByName retrieves a previously stored metadata identified by . + LookupByName(valName string) (metadata MetaWithInteger, exists bool) + + // LookupByIndex retrieves a previously stored metadata identified by the given + // integer index . + LookupByIndex(metaInt int) (valName string, metadata MetaWithInteger, exists bool) +} + +// NameToIntegerRW extends NameToInteger with write access. +type NameToIntegerRW interface { + NameToInteger + idxmap.NamedMappingRW +} + +// MetaWithInteger is interface that metadata for NameToIntMap must implement. +type MetaWithInteger interface { + // GetInteger returns the integer stored in the metadata. + GetInteger() int +} + +// OnlyInteger is a minimal implementation of MetaWithInteger. +type OnlyInteger struct { + Integer int +} + +// GetInteger returns the integer stored in the metadata. +func (idx *OnlyInteger) GetInteger() int { + return idx.Integer +} + +// nameToInteger implements NameToInteger. +type nameToInteger struct { + idxmap.NamedMappingRW + log logging.Logger +} + +const ( + // IntegerKey is a secondary index for the integer value. + IntegerKey = "integer" +) + +// NewNameToInteger creates a new instance implementing NameToIntegerRW. +func NewNameToInteger(title string) NameToIntegerRW { + return &nameToInteger{ + NamedMappingRW: mem.NewNamedMapping(logging.DefaultLogger, title, internalIndexFunction), + } +} + +// LookupByName retrieves a previously stored metadata identified by . +func (metaMap *nameToInteger) LookupByName(valName string) (metadata MetaWithInteger, exists bool) { + untypedMeta, found := metaMap.GetValue(valName) + if found { + if metadata, ok := untypedMeta.(MetaWithInteger); ok { + return metadata, found + } + } + return nil, false +} + +// LookupByIndex retrieves a previously stored metadata identified by the given +// integer index . 
+func (metaMap *nameToInteger) LookupByIndex(metaInt int) (valName string, metadata MetaWithInteger, exists bool) { + res := metaMap.ListNames(IntegerKey, strconv.FormatUint(uint64(metaInt), 10)) + if len(res) != 1 { + return + } + untypedMeta, found := metaMap.GetValue(res[0]) + if found { + if metadata, ok := untypedMeta.(MetaWithInteger); ok { + return res[0], metadata, found + } + } + return +} + +func internalIndexFunction(untypedMeta interface{}) map[string][]string { + indexes := map[string][]string{} + metadata, ok := untypedMeta.(MetaWithInteger) + if !ok || metadata == nil { + return indexes + } + + indexes[IntegerKey] = []string{strconv.FormatUint(uint64(metadata.GetInteger()), 10)} + return indexes +} diff --git a/plugins/kvscheduler/internal/test/model/values.pb.go b/plugins/kvscheduler/internal/test/model/values.pb.go new file mode 100644 index 0000000000..0204db053c --- /dev/null +++ b/plugins/kvscheduler/internal/test/model/values.pb.go @@ -0,0 +1,113 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: values.proto + +package model + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type ArrayValue struct { + Items []string `protobuf:"bytes,1,rep,name=items" json:"items,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ArrayValue) Reset() { *m = ArrayValue{} } +func (m *ArrayValue) String() string { return proto.CompactTextString(m) } +func (*ArrayValue) ProtoMessage() {} +func (*ArrayValue) Descriptor() ([]byte, []int) { + return fileDescriptor_values_f4aee9239d9ccefd, []int{0} +} +func (m *ArrayValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ArrayValue.Unmarshal(m, b) +} +func (m *ArrayValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ArrayValue.Marshal(b, m, deterministic) +} +func (dst *ArrayValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ArrayValue.Merge(dst, src) +} +func (m *ArrayValue) XXX_Size() int { + return xxx_messageInfo_ArrayValue.Size(m) +} +func (m *ArrayValue) XXX_DiscardUnknown() { + xxx_messageInfo_ArrayValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ArrayValue proto.InternalMessageInfo + +func (m *ArrayValue) GetItems() []string { + if m != nil { + return m.Items + } + return nil +} + +type StringValue struct { + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (m *StringValue) String() string { return proto.CompactTextString(m) } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { + return fileDescriptor_values_f4aee9239d9ccefd, []int{1} +} +func (m *StringValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StringValue.Unmarshal(m, b) +} +func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_StringValue.Marshal(b, m, deterministic) +} +func (dst *StringValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringValue.Merge(dst, src) +} +func (m *StringValue) XXX_Size() int { + return xxx_messageInfo_StringValue.Size(m) +} +func (m *StringValue) XXX_DiscardUnknown() { + xxx_messageInfo_StringValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StringValue proto.InternalMessageInfo + +func (m *StringValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func init() { + proto.RegisterType((*ArrayValue)(nil), "model.ArrayValue") + proto.RegisterType((*StringValue)(nil), "model.StringValue") +} + +func init() { proto.RegisterFile("values.proto", fileDescriptor_values_f4aee9239d9ccefd) } + +var fileDescriptor_values_f4aee9239d9ccefd = []byte{ + // 102 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4b, 0xcc, 0x29, + 0x4d, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0xcd, 0xcd, 0x4f, 0x49, 0xcd, 0x51, + 0x52, 0xe2, 0xe2, 0x72, 0x2c, 0x2a, 0x4a, 0xac, 0x0c, 0x03, 0xc9, 0x09, 0x89, 0x70, 0xb1, 0x66, + 0x96, 0xa4, 0xe6, 0x16, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4a, 0xca, 0x5c, + 0xdc, 0xc1, 0x25, 0x45, 0x99, 0x79, 0xe9, 0x70, 0x45, 0x60, 0x93, 0x24, 0x18, 0x15, 0x18, 0x41, + 0x8a, 0xc0, 0x9c, 0x24, 0x36, 0xb0, 0xb1, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf9, 0xec, + 0xab, 0xd4, 0x66, 0x00, 0x00, 0x00, +} diff --git a/plugins/kvscheduler/internal/test/model/values.proto b/plugins/kvscheduler/internal/test/model/values.proto new file mode 100644 index 0000000000..eea93e3090 --- /dev/null +++ b/plugins/kvscheduler/internal/test/model/values.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package model; + +message ArrayValue { + repeated string items = 1; +} + +message StringValue { + string value = 1; +} diff --git a/plugins/kvscheduler/internal/test/southbound.go b/plugins/kvscheduler/internal/test/southbound.go new 
file mode 100644 index 0000000000..6d86725b2a --- /dev/null +++ b/plugins/kvscheduler/internal/test/southbound.go @@ -0,0 +1,229 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package test + +import ( + "sync" + + "github.com/gogo/protobuf/proto" + + . "github.com/ligato/vpp-agent/plugins/kvscheduler/api" +) + +// MockSouthbound is used in UTs to simulate the state of the southbound for the scheduler. +type MockSouthbound struct { + sync.Mutex + + values map[string]*KVWithMetadata // key -> value + plannedErrors map[string][]plannedError // key -> planned error + derivedKeys map[string]struct{} + opHistory []MockOperation // from the oldest to the latest + invalidKeyData map[string]struct{} +} + +// MockOpType is used to remember the type of a simulated operation. +type MockOpType int + +const ( + // MockCreate is a mock Create operation. + MockCreate MockOpType = iota + // MockUpdate is a mock Update operation. + MockUpdate + // MockDelete is a mock Delete operation. + MockDelete + // MockRetrieve is a mock Retrieve operation. + MockRetrieve +) + +// MockOperation is used in UTs to remember executed descriptor operations. +type MockOperation struct { + OpType MockOpType + Descriptor string + Key string + Value proto.Message + Err error + CorrelateRetrieve []KVWithMetadata +} + +// plannedError is used to simulate error situation. 
+type plannedError struct { + err error + afterErrClb func() // update values after error via SetValue() +} + +// NewMockSouthbound creates a new instance of SB mock. +func NewMockSouthbound() *MockSouthbound { + return &MockSouthbound{ + values: make(map[string]*KVWithMetadata), + plannedErrors: make(map[string][]plannedError), + derivedKeys: make(map[string]struct{}), + invalidKeyData: make(map[string]struct{}), + } +} + +// GetKeysWithInvalidData returns a set of keys for which invalid data were provided +// in one of the descriptor's operations. +func (ms *MockSouthbound) GetKeysWithInvalidData() map[string]struct{} { + return ms.invalidKeyData +} + +// PlanError is used to simulate error situation for the next operation over the given key. +func (ms *MockSouthbound) PlanError(key string, err error, afterErrClb func()) { + ms.Lock() + defer ms.Unlock() + + if _, has := ms.plannedErrors[key]; !has { + ms.plannedErrors[key] = []plannedError{} + } + ms.plannedErrors[key] = append(ms.plannedErrors[key], plannedError{err: err, afterErrClb: afterErrClb}) +} + +// SetValue is used in UTs to prepare the state of SB for the next Retrieve. +func (ms *MockSouthbound) SetValue(key string, value proto.Message, metadata Metadata, origin ValueOrigin, isDerived bool) { + ms.Lock() + defer ms.Unlock() + + ms.setValueUnsafe(key, value, metadata, origin, isDerived) +} + +// GetValue can be used in UTs to query the state of simulated SB. +func (ms *MockSouthbound) GetValue(key string) *KVWithMetadata { + ms.Lock() + defer ms.Unlock() + + if _, hasValue := ms.values[key]; !hasValue { + return nil + } + return ms.values[key] +} + +// GetValues can be used in UTs to query the state of simulated SB. 
+func (ms *MockSouthbound) GetValues(selector KeySelector) []*KVWithMetadata { + ms.Lock() + defer ms.Unlock() + + var values []*KVWithMetadata + for _, kv := range ms.values { + if selector != nil && !selector(kv.Key) { + continue + } + values = append(values, kv) + } + + return values +} + +// PopHistoryOfOps returns and simultaneously clears the history of executed descriptor operations. +func (ms *MockSouthbound) PopHistoryOfOps() []MockOperation { + ms.Lock() + defer ms.Unlock() + + history := ms.opHistory + ms.opHistory = []MockOperation{} + return history +} + +// setValueUnsafe changes the value under given key without acquiring the lock. +func (ms *MockSouthbound) setValueUnsafe(key string, value proto.Message, metadata Metadata, origin ValueOrigin, isDerived bool) { + if value == nil { + delete(ms.values, key) + } else { + ms.values[key] = &KVWithMetadata{Key: key, Value: value, Metadata: metadata, Origin: origin} + } + if isDerived { + ms.derivedKeys[key] = struct{}{} + } +} + +// registerDerivedKey is used to remember that the given key points to a derived value. +// Used by MockDescriptor. +func (ms *MockSouthbound) registerDerivedKey(key string) { + ms.Lock() + defer ms.Unlock() + ms.derivedKeys[key] = struct{}{} +} + +// isKeyDerived returns true if the given key belongs to a derived value. +func (ms *MockSouthbound) isKeyDerived(key string) bool { + _, isDerived := ms.derivedKeys[key] + return isDerived +} + +// registerKeyWithInvalidData is used to remember that for the given key invalid input +// data were provided. +func (ms *MockSouthbound) registerKeyWithInvalidData(key string) { + //panic(key) + ms.invalidKeyData[key] = struct{}{} +} + +// retrieve returns non-derived values under the given selector. +// Used by MockDescriptor. 
+func (ms *MockSouthbound) retrieve(descriptor string, correlate []KVWithMetadata, selector KeySelector) ([]KVWithMetadata, error) { + ms.Lock() + defer ms.Unlock() + + var values []KVWithMetadata + for _, kv := range ms.values { + if ms.isKeyDerived(kv.Key) || !selector(kv.Key) { + continue + } + values = append(values, KVWithMetadata{ + Key: kv.Key, + Value: kv.Value, + Metadata: kv.Metadata, + Origin: kv.Origin, + }) + } + + ms.opHistory = append(ms.opHistory, MockOperation{ + OpType: MockRetrieve, + Descriptor: descriptor, + CorrelateRetrieve: correlate, + }) + return values, nil +} + +// executeChange is used by MockDescriptor to simulate execution of a operation in SB. +func (ms *MockSouthbound) executeChange(descriptor string, opType MockOpType, key string, value proto.Message, metadata Metadata) error { + ms.Lock() + + operation := MockOperation{OpType: opType, Descriptor: descriptor, Key: key, Value: value} + + plannedErrors, hasErrors := ms.plannedErrors[key] + if hasErrors { + // simulate error situation + ms.plannedErrors[key] = plannedErrors[1:] + if len(ms.plannedErrors[key]) == 0 { + delete(ms.plannedErrors, key) + } + err := plannedErrors[0].err + clb := plannedErrors[0].afterErrClb + operation.Err = err + ms.opHistory = append(ms.opHistory, operation) + ms.Unlock() + + if clb != nil { + clb() + } + + return err + } + + // the simulated operation has succeeded + ms.setValueUnsafe(key, value, metadata, FromNB, ms.isKeyDerived(key)) + ms.opHistory = append(ms.opHistory, operation) + ms.Unlock() + return nil +} diff --git a/plugins/kvscheduler/internal/test/values.go b/plugins/kvscheduler/internal/test/values.go new file mode 100644 index 0000000000..2b22fd5633 --- /dev/null +++ b/plugins/kvscheduler/internal/test/values.go @@ -0,0 +1,103 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate protoc --proto_path=./model --gogo_out=./model values.proto + +package test + +import ( + "github.com/gogo/protobuf/proto" + + "github.com/ligato/cn-infra/datasync" + . "github.com/ligato/vpp-agent/plugins/kvscheduler/api" + . "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/test/model" +) + +// LazyArrayValue implements datasync.LazyValue for ArrayValue. +type LazyArrayValue struct { + items []string +} + +// LazyStringValue implements datasync.LazyValue for StringValue. +type LazyStringValue struct { + value string +} + +// GetValue unmarshalls ArrayValue into the given proto.Message. +func (lav *LazyArrayValue) GetValue(value proto.Message) error { + av := NewArrayValue(lav.items...) + tmp, err := proto.Marshal(av) + if err != nil { + return err + } + return proto.Unmarshal(tmp, value) +} + +// GetValue unmarshalls StringValue into the given proto.Message. +func (lsv *LazyStringValue) GetValue(value proto.Message) error { + sv := NewStringValue(lsv.value) + tmp, err := proto.Marshal(sv) + if err != nil { + return err + } + return proto.Unmarshal(tmp, value) +} + +// NewStringValue creates a new instance of StringValue. +func NewStringValue(str string) proto.Message { + return &StringValue{Value: str} +} + +// NewArrayValue creates a new instance of ArrayValue. +func NewArrayValue(items ...string) proto.Message { + return &ArrayValue{Items: items} +} + +// NewLazyStringValue creates a new instance of lazy (marshalled) StringValue. 
+func NewLazyStringValue(str string) datasync.LazyValue { + return &LazyStringValue{value: str} +} + +// NewLazyArrayValue creates a new instance of lazy (marshalled) ArrayValue. +func NewLazyArrayValue(items ...string) datasync.LazyValue { + return &LazyArrayValue{ + items: items, + } +} + +// StringValueComparator is (a custom) KVDescriptor.ValueComparator for string values. +func StringValueComparator(key string, v1, v2 proto.Message) bool { + sv1, isStringVal1 := v1.(*StringValue) + sv2, isStringVal2 := v2.(*StringValue) + if !isStringVal1 || !isStringVal2 { + return false + } + return sv1.Value == sv2.Value +} + +// ArrayValueDerBuilder can be used to derive one StringValue for every item +// in the array. +func ArrayValueDerBuilder(key string, value proto.Message) []KeyValuePair { + var derivedVals []KeyValuePair + arrayVal, isArrayVal := value.(*ArrayValue) + if isArrayVal { + for _, item := range arrayVal.Items { + derivedVals = append(derivedVals, KeyValuePair{ + Key: key + "/" + item, + Value: NewStringValue(item), + }) + } + } + return derivedVals +} diff --git a/plugins/kvscheduler/internal/utils/conversions.go b/plugins/kvscheduler/internal/utils/conversions.go new file mode 100644 index 0000000000..7fadea00c5 --- /dev/null +++ b/plugins/kvscheduler/internal/utils/conversions.go @@ -0,0 +1,43 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package utils + +import ( + "github.com/gogo/protobuf/proto" + prototypes "github.com/gogo/protobuf/types" +) + +// ProtoToString converts proto message to string. +func ProtoToString(msg proto.Message) string { + if msg == nil { + return "" + } + if recProto, wrapped := msg.(*RecordedProtoMessage); wrapped { + msg = recProto.Message + } + if _, isEmpty := msg.(*prototypes.Empty); isEmpty { + return "" + } + // wrap with curly braces, it is easier to read + return "{ " + msg.String() + " }" +} + +// ErrorToString converts error to string. +func ErrorToString(err error) string { + if err == nil { + return "" + } + return err.Error() +} diff --git a/plugins/kvscheduler/internal/utils/dependencies.go b/plugins/kvscheduler/internal/utils/dependencies.go new file mode 100644 index 0000000000..9dc1a6c733 --- /dev/null +++ b/plugins/kvscheduler/internal/utils/dependencies.go @@ -0,0 +1,118 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "sort" +) + +// DependsOn returns true if k1 depends on k2 based on dependencies from . 
+func DependsOn(k1, k2 string, deps map[string]KeySet, visited KeySet) bool { + // check direct dependencies + k1Deps := deps[k1] + if depends := k1Deps.Has(k2); depends { + return true + } + + // continue transitively + visited.Add(k1) + for _, dep := range k1Deps.Iterate() { + if wasVisited := visited.Has(dep); wasVisited { + continue + } + if DependsOn(dep, k2, deps, visited) { + return true + } + } + return false +} + +// TopologicalOrder orders keys topologically by Kahn's algorithm to respect +// the given dependencies. +// deps = map{ key -> } +func TopologicalOrder(keys KeySet, deps map[string]KeySet, depFirst bool, handleCycle bool) (sorted []string) { + // copy input arguments so that they are not returned to the caller changed + remains := keys.CopyOnWrite() + remainsDeps := make(map[string]KeySet) + for key, keyDeps := range deps { + if !keys.Has(key) { + continue + } + remainsDeps[key] = keyDeps.CopyOnWrite() + remainsDeps[key].Intersect(keys) + } + + // Kahn's algorithm (except for the cycle handling part): + for remains.Length() > 0 { + // find candidate keys - keys that could follow in the order + var candidates []string + for _, key := range remains.Iterate() { + // if depFirst, select keys that do not depend on anything in the remaining set + candidate := depFirst && remainsDeps[key].Length() == 0 + if !depFirst { + candidate = true + // is there any other key depending on this one? 
+ for _, key2Deps := range remainsDeps { + if key2Deps.Has(key) { + candidate = false + break + } + } + } + if candidate { + candidates = append(candidates, key) + } + } + + // handle cycles + var cycle bool + if len(candidates) == 0 { + cycle = true + if !handleCycle { + panic("Dependency cycle!") + } + // select keys that depend on themselves + for _, key := range remains.Iterate() { + if DependsOn(key, key, deps, NewMapBasedKeySet()) { + candidates = append(candidates, key) + } + } + } + + // to make the algorithm deterministic (for simplified testing), + // order the candidates + sort.Strings(candidates) + + // in case of cycle output all the keys from the cycle, otherwise just the first candidate + var selected []string + if cycle { + selected = candidates + } else { + selected = append(selected, candidates[0]) + } + sorted = append(sorted, selected...) + + // remove selected key(s) from the set of remaining keys + for _, key := range selected { + remains.Del(key) + delete(remainsDeps, key) + // remove dependency edges going to this key + for _, key2Deps := range remainsDeps { + key2Deps.Del(key) + } + } + } + return sorted +} diff --git a/plugins/kvscheduler/internal/utils/keyset.go b/plugins/kvscheduler/internal/utils/keyset.go new file mode 100644 index 0000000000..913fe4c441 --- /dev/null +++ b/plugins/kvscheduler/internal/utils/keyset.go @@ -0,0 +1,525 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "sort" + "strings" + "encoding/json" +) + +// KeySet defines API for a set of keys. +type KeySet interface { + // String return human-readable string representation of the key-set. + String() string + + // Iterate exposes the set of keys as slice which can be iterated through. + // The returned slice should not be modified. + Iterate() []string + + // Length returns the number of keys in the set. + Length() int + + // Has returns true if the given key is in the set. + Has(key string) bool + + // Add adds key into the set. + Add(key string) (changed bool) + + // Del removes key from the set. + Del(key string) (changed bool) + + // Subtract removes keys from this set which are also in . + Subtract(ks2 KeySet) (changed bool) + + // Intersect removes keys from this set which are not in . + Intersect(ks2 KeySet) (changed bool) + + // CopyOnWrite returns first a shallow copy of the key set, which gets + // deep-copied when it is about to get modified. + CopyOnWrite() KeySet +} + +/****************************** Singleton KeySet ******************************/ + +// singletonKeySet is the KeySet implementation for set which is guaranteed to +// contain at most one key. +type singletonKeySet struct { + set [1]string // empty string = empty set +} + +// NewSingletonKeySet returns KeySet implementation for at most one key. +func NewSingletonKeySet(key string) KeySet { + s := &singletonKeySet{} + s.set[0] = key + return s +} + +// String return human-readable string representation of the key-set. +func (s *singletonKeySet) String() string { + if s == nil { + return "{}" + } + return "{" + s.set[0] + "}" +} + +// Iterate exposes the set of keys as slice which can be iterated through. +// The returned slice should not be modified. 
+func (s *singletonKeySet) Iterate() []string { + if s == nil { + return nil + } + return s.set[:s.Length()] +} + +// Length returns the number of keys in the set. +func (s *singletonKeySet) Length() int { + if s == nil { + return 0 + } + if s.set[0] == "" { + return 0 + } + return 1 +} + +// Has returns true if the given key is in the set. +func (s *singletonKeySet) Has(key string) bool { + if s == nil { + return false + } + if s.set[0] == key { + return true + } + return false +} + +// Add adds key into the set. +func (s *singletonKeySet) Add(key string) (changed bool) { + if s.set[0] == key { + return false + } + s.set[0] = key + return true +} + +// Del removes key from the set. +func (s *singletonKeySet) Del(key string) (changed bool) { + if s.set[0] == key { + s.set[0] = "" + return true + } + return false +} + +// Subtract removes keys from this set which are also in . +func (s *singletonKeySet) Subtract(ks2 KeySet) (changed bool) { + if s.set[0] == "" { + return false + } + if ks2.Has(s.set[0]) { + s.set[0] = "" + return true + } + return false +} + +// Intersect removes keys from this set which are not in . +func (s *singletonKeySet) Intersect(ks2 KeySet) (changed bool) { + if s.set[0] == "" { + return false + } + if !ks2.Has(s.set[0]) { + s.set[0] = "" + return true + } + return false +} + +// CopyOnWrite actually returns a deep copy, but that is super cheap for singleton. +func (s *singletonKeySet) CopyOnWrite() KeySet { + return &singletonKeySet{set: s.set} +} + +// MarshalJSON marshalls the set into JSON. +func (s *singletonKeySet) MarshalJSON() ([]byte, error) { + if s.set[0] == "" { + return []byte("[]"), nil + } + return []byte("[\"" + s.set[0] + "\"]"), nil +} + +/***************************** KeySet based on map *****************************/ + +// mapKeySet implements KeySet using a map. +// Quicker lookups in average than the slice-based implementation, but bigger +// memory footprint and much slower copying. 
+type mapKeySet struct { + shallowCopy bool + set mapWithKeys + iter []string + iterInSync bool +} + +// mapWithKeys is used to represent a set of keys using a map with empty values. +type mapWithKeys map[string]struct{} + +// NewMapBasedKeySet returns KeySet implemented using map. +func NewMapBasedKeySet(keys ...string) KeySet { + s := &mapKeySet{set: make(mapWithKeys), iter: []string{}, iterInSync: true} + for _, key := range keys { + s.Add(key) + } + return s +} + +// String return human-readable string representation of the key-set. +func (s *mapKeySet) String() string { + return s.string(false) +} + +// string return human-readable string representation of the key-set. +func (s *mapKeySet) string(json bool) string { + if s == nil { + if json { + return "[]" + } + return "{}" + } + str := "{" + if json { + str = "[" + } + idx := 0 + for key := range s.set { + if json { + str += "\"" + key + "\"" + } else { + str += key + } + if idx < len(s.set)-1 { + str += ", " + } + idx++ + } + if json { + str += "]" + } else { + str += "}" + } + return str +} + +// Iterate exposes the set of keys as slice which can be iterated through. +// The returned slice should not be modified. +func (s *mapKeySet) Iterate() (keys []string) { + if s == nil { + return keys + } + if s.iterInSync { + return s.iter + } + s.iter = make([]string, len(s.set)) + i := 0 + for key := range s.set { + s.iter[i] = key + i++ + } + s.iterInSync = true + return s.iter +} + +// Length returns the number of keys in the set. +func (s *mapKeySet) Length() int { + if s == nil { + return 0 + } + return len(s.set) +} + +// Has returns true if the given key is in the set. +func (s *mapKeySet) Has(key string) bool { + if s == nil { + return false + } + _, has := s.set[key] + return has +} + +// Add adds key into the set. 
+func (s *mapKeySet) Add(key string) (changed bool) { + if !s.Has(key) { + if s.shallowCopy { + s.set = s.deepCopyMap() + s.shallowCopy = false + } + s.set[key] = struct{}{} + if s.iterInSync { + s.iter = append(s.iter, key) + } + changed = true + } + return +} + +// Del removes key from the set. +func (s *mapKeySet) Del(key string) (changed bool) { + if s.Has(key) { + if s.shallowCopy { + s.set = s.deepCopyMap() + s.shallowCopy = false + } + delete(s.set, key) + s.iterInSync = false + changed = true + } + return +} + +// Subtract removes keys from this set which are also in . +func (s *mapKeySet) Subtract(ks2 KeySet) (changed bool) { + for _, key := range ks2.Iterate() { + if s.Del(key) { + changed = true + } + } + return +} + +// Intersect removes keys from this set which are not in . +func (s *mapKeySet) Intersect(ks2 KeySet) (changed bool) { + for key := range s.set { + if !ks2.Has(key) { + s.Del(key) + changed = true + } + } + return +} + +// CopyOnWrite returns first a shallow copy of this key set, which gets deep-copied +// when it is about to get modified. +func (s *mapKeySet) CopyOnWrite() KeySet { + return &mapKeySet{ + shallowCopy: true, + set: s.set, + } +} + +// deepCopyMap returns a deep-copy of the internal map representing the key set. +func (s *mapKeySet) deepCopyMap() mapWithKeys { + copy := make(mapWithKeys) + for key := range s.set { + copy[key] = struct{}{} + } + return copy +} + +// MarshalJSON marshalls the set into JSON. +func (s *mapKeySet) MarshalJSON() ([]byte, error) { + return []byte(s.string(true)), nil +} + +/**************************** KeySet based on slice ****************************/ + +// sliceKeySet implements KeySet using a slice with ordered keys. +// The main advantage over the map-based implementation, is much smaller +// memory footprint and quick (deep-)copying. 
+type sliceKeySet struct { + shallowCopy bool + set []string + length int // len(set) can be > than length - the rest are empty strings +} + +// NewSliceBasedKeySet returns KeySet implemented using a slice with ordered keys. +func NewSliceBasedKeySet(keys ...string) KeySet { + s := &sliceKeySet{set: []string{}} + for _, key := range keys { + s.Add(key) + } + return s +} + +// String return human-readable string representation of the key-set. +func (s *sliceKeySet) String() string { + if s == nil { + return "{}" + } + return "{" + strings.Join(s.set[:s.length], ", ") + "}" +} + +// Iterate exposes the set of keys as slice which can be iterated through. +// The returned slice should not be modified. +func (s *sliceKeySet) Iterate() (keys []string) { + if s == nil { + return keys + } + return s.set[:s.length] +} + +// Length returns the number of keys in the set. +func (s *sliceKeySet) Length() int { + if s == nil { + return 0 + } + return s.length +} + +// Has returns true if the given key is in the set. +func (s *sliceKeySet) Has(key string) bool { + if s == nil { + return false + } + _, exists := s.getKeyIndex(key) + return exists +} + +// Add adds key into the set. +func (s *sliceKeySet) Add(key string) (changed bool) { + idx, exists := s.getKeyIndex(key) + if !exists { + if s.shallowCopy { + s.set = s.deepCopySlice() + s.shallowCopy = false + } + if s.length == len(s.set) { + // increase capacity + s.set = append(s.set, "") + } + if idx < s.length { + copy(s.set[idx+1:], s.set[idx:]) + } + s.set[idx] = key + s.length++ + changed = true + } + return +} + +// Del removes key from the set. +func (s *sliceKeySet) Del(key string) (changed bool) { + idx, exists := s.getKeyIndex(key) + if exists { + if s.shallowCopy { + s.set = s.deepCopySlice() + s.shallowCopy = false + } + if idx < s.length-1 { + copy(s.set[idx:], s.set[idx+1:]) + } + s.length-- + s.set[s.length] = "" + changed = true + } + return +} + +// Subtract removes keys from this set which are also in . 
+func (s *sliceKeySet) Subtract(ks2 KeySet) (changed bool) { + s2, isSliceKeySet := ks2.(*sliceKeySet) + if isSliceKeySet { + // optimized case when both are slice-based + var i, j, newLen int + for ; i < s.length; i++ { + subtract := false + for ; j < s2.length; j++ { + if s.set[i] > s2.set[j] { + continue + } + if s.set[i] == s2.set[j] { + subtract = true + } else { + break + } + } + if subtract { + if s.shallowCopy { + s.set = s.deepCopySlice() + s.shallowCopy = false + } + changed = true + } + if !subtract { + if newLen != i { + s.set[newLen] = s.set[i] + } + newLen++ + } + } + if newLen != s.length { + s.length = newLen + } + return + } + for _, key := range ks2.Iterate() { + if s.Del(key) { + changed = true + } + } + return +} + +// Intersect removes keys from this set which are not in . +func (s *sliceKeySet) Intersect(ks2 KeySet) (changed bool) { + for i := 0; i < s.length; { + key := s.set[i] + if !ks2.Has(key) { + s.Del(key) + changed = true + } else { + i++ + } + } + return +} + +// CopyOnWrite returns first a shallow copy of this key set, which gets deep-copied +// when it is about to get modified. +func (s *sliceKeySet) CopyOnWrite() KeySet { + return &sliceKeySet{ + shallowCopy: true, + set: s.set, + length: s.length, + } +} + +// getKeyIndex returns index at which the given key would be stored. +func (s *sliceKeySet) getKeyIndex(key string) (idx int, exists bool) { + if s.length <= 5 { + for idx = 0; idx < s.length; idx++ { + if key <= s.set[idx] { + break + } + } + } else { + idx = sort.Search(s.length, + func(i int) bool { + return key <= s.set[i] + }) + } + return idx, idx < s.length && key == s.set[idx] +} + +// deepCopyMap returns a deep-copy of the internal slice representing the key set. +func (s *sliceKeySet) deepCopySlice() []string { + c := make([]string, s.length) + copy(c, s.set) + return c +} + +// MarshalJSON marshalls the set into JSON. 
+func (s *sliceKeySet) MarshalJSON() ([]byte, error) { + return json.Marshal(s.set[:s.length]) +} diff --git a/plugins/kvscheduler/internal/utils/keyset_test.go b/plugins/kvscheduler/internal/utils/keyset_test.go new file mode 100644 index 0000000000..c06a16f7b6 --- /dev/null +++ b/plugins/kvscheduler/internal/utils/keyset_test.go @@ -0,0 +1,245 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "reflect" + "strings" + "testing" + + . 
"github.com/onsi/gomega" +) + +func TestSingleton(t *testing.T) { + RegisterTestingT(t) + + // constructor + s := NewSingletonKeySet("key1") + Expect(s.String()).To(BeEquivalentTo("{key1}")) + Expect(s.Has("key1")).To(BeTrue()) + Expect(s.Has("key2")).To(BeFalse()) + Expect(s.String()).To(BeEquivalentTo("{key1}")) + Expect(s.Length()).To(BeEquivalentTo(1)) + Expect(s.Iterate()).To(BeEquivalentTo([]string{"key1"})) + + // delete + Expect(s.Del("key2")).To(BeFalse()) + Expect(s.Del("key1")).To(BeTrue()) + Expect(s.Has("key1")).To(BeFalse()) + Expect(s.String()).To(BeEquivalentTo("{}")) + Expect(s.Length()).To(BeEquivalentTo(0)) + Expect(s.Iterate()).To(BeEquivalentTo([]string{})) + + // add + Expect(s.Add("key1")).To(BeTrue()) + Expect(s.Add("key1")).To(BeFalse()) + Expect(s.Add("key2")).To(BeTrue()) + Expect(s.Has("key1")).To(BeFalse()) + Expect(s.Has("key2")).To(BeTrue()) + Expect(s.String()).To(BeEquivalentTo("{key2}")) + Expect(s.Length()).To(BeEquivalentTo(1)) + Expect(s.Iterate()).To(BeEquivalentTo([]string{"key2"})) + + // copy-on-write + s2 := s.CopyOnWrite() + Expect(s2.Has("key1")).To(BeFalse()) + Expect(s2.Has("key2")).To(BeTrue()) + Expect(s2.String()).To(BeEquivalentTo("{key2}")) + Expect(s2.Length()).To(BeEquivalentTo(1)) + Expect(s2.Iterate()).To(BeEquivalentTo([]string{"key2"})) + Expect(s2.Del("key2")).To(BeTrue()) + Expect(s2.String()).To(BeEquivalentTo("{}")) + Expect(s.String()).To(BeEquivalentTo("{key2}")) + Expect(s.Length()).To(BeEquivalentTo(1)) + Expect(s2.Length()).To(BeEquivalentTo(0)) + + // subtract + Expect(s.Subtract(s2)).To(BeFalse()) + s2.Add("key2") + Expect(s.Subtract(s2)).To(BeTrue()) + Expect(s.String()).To(BeEquivalentTo("{}")) + s.Add("key1") + Expect(s.Subtract(s2)).To(BeFalse()) + + // intersect + Expect(s.Intersect(s2)).To(BeTrue()) + Expect(s.String()).To(BeEquivalentTo("{}")) + s.Add("key2") + Expect(s.Intersect(s2)).To(BeFalse()) + Expect(s.String()).To(BeEquivalentTo("{key2}")) +} + +func permutations(arr []string) 
[][]string { + var helper func([]string, int) + res := [][]string{} + + helper = func(arr []string, n int) { + if n == 1 { + tmp := make([]string, len(arr)) + copy(tmp, arr) + res = append(res, tmp) + } else { + for i := 0; i < n; i++ { + helper(arr, n-1) + if n%2 == 1 { + tmp := arr[i] + arr[i] = arr[n-1] + arr[n-1] = tmp + } else { + tmp := arr[0] + arr[0] = arr[n-1] + arr[n-1] = tmp + } + } + } + } + helper(arr, len(arr)) + return res +} + +func testKeySetToString(s KeySet, keys ...string) { + str := s.String() + validStr := false + for _, permutation := range permutations(keys) { + permStr := "{" + strings.Join(permutation, ", ") + "}" + if permStr == str { + validStr = true + break + } + } + Expect(validStr).To(BeTrue()) +} + +func testKeySetIterator(s KeySet, keys ...string) { + iter := s.Iterate() + validIter := false + for _, permutation := range permutations(keys) { + if reflect.DeepEqual(permutation, iter) { + validIter = true + break + } + } + Expect(validIter).To(BeTrue()) +} + +func testKeySet(factory1, factory2 func(keys ...string) KeySet) { + // constructor + s1 := factory1() + Expect(s1.Has("key1")).To(BeFalse()) + Expect(s1.Has("key2")).To(BeFalse()) + Expect(s1.String()).To(BeEquivalentTo("{}")) + Expect(s1.Length()).To(BeEquivalentTo(0)) + Expect(s1.Iterate()).To(BeEquivalentTo([]string{})) + s1 = factory1("key1", "key2", "key3") + Expect(s1.Has("key1")).To(BeTrue()) + Expect(s1.Has("key2")).To(BeTrue()) + testKeySetToString(s1, "key1", "key2", "key3") + Expect(s1.Length()).To(BeEquivalentTo(3)) + testKeySetIterator(s1, "key1", "key2", "key3") + + // delete + Expect(s1.Del("key4")).To(BeFalse()) + Expect(s1.Del("key2")).To(BeTrue()) + Expect(s1.Has("key2")).To(BeFalse()) + Expect(s1.Has("key1")).To(BeTrue()) + Expect(s1.Has("key3")).To(BeTrue()) + Expect(s1.Length()).To(BeEquivalentTo(2)) + testKeySetToString(s1, "key1", "key3") + testKeySetIterator(s1, "key1", "key3") + Expect(s1.Del("key1")).To(BeTrue()) + Expect(s1.Del("key3")).To(BeTrue()) + 
Expect(s1.Del("key3")).To(BeFalse()) + Expect(s1.String()).To(BeEquivalentTo("{}")) + Expect(s1.Length()).To(BeEquivalentTo(0)) + Expect(s1.Iterate()).To(BeEquivalentTo([]string{})) + + // add + Expect(s1.Add("key2")).To(BeTrue()) + Expect(s1.Add("key2")).To(BeFalse()) + Expect(s1.Add("key1")).To(BeTrue()) + Expect(s1.Add("key3")).To(BeTrue()) + Expect(s1.Has("key1")).To(BeTrue()) + Expect(s1.Has("key2")).To(BeTrue()) + Expect(s1.Has("key3")).To(BeTrue()) + Expect(s1.Has("key4")).To(BeFalse()) + Expect(s1.Length()).To(BeEquivalentTo(3)) + testKeySetToString(s1, "key1", "key2", "key3") + testKeySetIterator(s1, "key1", "key2", "key3") + + // copy-on-write + s2 := s1.CopyOnWrite() + Expect(s2.Has("key1")).To(BeTrue()) + Expect(s2.Has("key2")).To(BeTrue()) + Expect(s2.Has("key3")).To(BeTrue()) + Expect(s2.Has("key4")).To(BeFalse()) + Expect(s2.Length()).To(BeEquivalentTo(3)) + testKeySetToString(s2, "key1", "key2", "key3") + testKeySetIterator(s2, "key1", "key2", "key3") + Expect(s2.Add("key4")).To(BeTrue()) + Expect(s2.Has("key4")).To(BeTrue()) + Expect(s1.Has("key4")).To(BeFalse()) + Expect(s2.Del("key1")).To(BeTrue()) + Expect(s2.Has("key1")).To(BeFalse()) + Expect(s1.Has("key1")).To(BeTrue()) + Expect(s2.Length()).To(BeEquivalentTo(3)) + testKeySetToString(s2, "key2", "key3", "key4") + testKeySetIterator(s2, "key2", "key3", "key4") + Expect(s1.Length()).To(BeEquivalentTo(3)) + testKeySetToString(s1, "key1", "key2", "key3") + testKeySetIterator(s1, "key1", "key2", "key3") + + // subtract + s3 := factory2("key1", "key3") + Expect(s1.Subtract(s3)).To(BeTrue()) + Expect(s1.Length()).To(BeEquivalentTo(1)) + testKeySetToString(s1, "key2") + testKeySetIterator(s1, "key2") + Expect(s1.Subtract(s3)).To(BeFalse()) + Expect(s1.Length()).To(BeEquivalentTo(1)) + testKeySetToString(s1, "key2") + testKeySetIterator(s1, "key2") + Expect(s2.Subtract(s3)).To(BeTrue()) + Expect(s2.Length()).To(BeEquivalentTo(2)) + testKeySetToString(s2, "key2", "key4") + testKeySetIterator(s2, 
"key2", "key4") + Expect(s2.Subtract(s3)).To(BeFalse()) + Expect(s2.Length()).To(BeEquivalentTo(2)) + testKeySetToString(s2, "key2", "key4") + testKeySetIterator(s2, "key2", "key4") + + // intersect + Expect(s1.Intersect(s2)).To(BeFalse()) + Expect(s1.Length()).To(BeEquivalentTo(1)) + testKeySetToString(s1, "key2") + testKeySetIterator(s1, "key2") + Expect(s2.Intersect(s1)).To(BeTrue()) + Expect(s2.Length()).To(BeEquivalentTo(1)) + testKeySetToString(s2, "key2") + testKeySetIterator(s2, "key2") +} + +func TestMapBasedKeySet(t *testing.T) { + RegisterTestingT(t) + + testKeySet(NewMapBasedKeySet, NewMapBasedKeySet) + testKeySet(NewMapBasedKeySet, NewSliceBasedKeySet) +} + +func TestSliceBasedKeySet(t *testing.T) { + RegisterTestingT(t) + + testKeySet(NewSliceBasedKeySet, NewSliceBasedKeySet) + testKeySet(NewSliceBasedKeySet, NewMapBasedKeySet) +} diff --git a/plugins/kvscheduler/internal/utils/record.go b/plugins/kvscheduler/internal/utils/record.go new file mode 100644 index 0000000000..1df18b5277 --- /dev/null +++ b/plugins/kvscheduler/internal/utils/record.go @@ -0,0 +1,49 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "github.com/gogo/protobuf/jsonpb" + "github.com/gogo/protobuf/proto" +) + +// RecordedProtoMessage is a proto.Message suitable for recording and access via +// REST API. 
+type RecordedProtoMessage struct { + proto.Message +} + +// MarshalJSON marshalls proto message using the marshaller from jsonpb. +// The jsonpb package produces a different output than the standard "encoding/json" +// package, which does not operate correctly on protocol buffers. +func (p *RecordedProtoMessage) MarshalJSON() ([]byte, error) { + marshaller := &jsonpb.Marshaler{} + str, err := marshaller.MarshalToString(p.Message) + if err != nil { + return nil, err + } + return []byte(str), nil +} + +// RecordProtoMessage prepares proto message for recording and potential +// access via REST API. +// Note: no need to clone the message - once un-marshalled, the content is never +// changed (otherwise it would break prev-new value comparisons). +func RecordProtoMessage(msg proto.Message) proto.Message { + if msg == nil { + return nil + } + return &RecordedProtoMessage{Message: msg} +} diff --git a/plugins/kvscheduler/node_utils.go b/plugins/kvscheduler/node_utils.go new file mode 100644 index 0000000000..8f86daa71a --- /dev/null +++ b/plugins/kvscheduler/node_utils.go @@ -0,0 +1,345 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kvscheduler + +import ( + "github.com/gogo/protobuf/proto" + kvs "github.com/ligato/vpp-agent/plugins/kvscheduler/api" + "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/graph" + "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/utils" +) + +func nodesToKVPairsWithMetadata(nodes []graph.Node) (kvPairs []kvs.KVWithMetadata) { + for _, node := range nodes { + kvPairs = append(kvPairs, kvs.KVWithMetadata{ + Key: node.GetKey(), + Value: node.GetValue(), + Metadata: node.GetMetadata(), + Origin: getNodeOrigin(node), + }) + } + return kvPairs +} + +// constructTargets builds targets for the graph based on derived values and dependencies. +func constructTargets(deps []kvs.Dependency, derives []kvs.KeyValuePair) (targets []graph.RelationTargetDef) { + for _, dep := range deps { + target := graph.RelationTargetDef{ + Relation: DependencyRelation, + Label: dep.Label, + Key: dep.Key, + Selector: dep.AnyOf, + } + targets = append(targets, target) + } + + for _, derived := range derives { + target := graph.RelationTargetDef{ + Relation: DerivesRelation, + Label: derived.Key, + Key: derived.Key, + Selector: nil, + } + targets = append(targets, target) + } + + return targets +} + +// equalValueDetails compares value state details for equality. +func equalValueDetails(details1, details2 []string) bool { + if len(details1) != len(details2) { + return false + } + for _, d1 := range details1 { + found := false + for _, d2 := range details2 { + if d1 == d2 { + found = true + break + } + } + if !found { + return false + } + } + return true +} + +// getValueDetails returns further details about the value state. 
+func getValueDetails(node graph.Node) (details []string) { + state := getNodeState(node) + _, err := getNodeError(node) + if state == kvs.ValueState_INVALID { + if ivErr, isIVErr := err.(*kvs.InvalidValueError); isIVErr { + details = ivErr.GetInvalidFields() + return + } + } + if state == kvs.ValueState_PENDING { + for _, targets := range node.GetTargets(DependencyRelation) { + satisfied := false + for _, target := range targets.Nodes { + if isNodeAvailable(target) { + satisfied = true + } + } + if !satisfied { + details = append(details, targets.Label) + } + } + } + return details +} + +// getValueStatus reads the value status from the corresponding node. +func getValueStatus(node graph.Node, key string) *kvs.BaseValueStatus { + status := &kvs.BaseValueStatus{ + Value: &kvs.ValueStatus{ + Key: key, + }, + } + + status.Value.State = getNodeState(node) + if status.Value.State == kvs.ValueState_NONEXISTENT { + // nothing else to get for non-existent value + return status + } + _, err := getNodeError(node) + if err != nil { + status.Value.Error = err.Error() + } + status.Value.LastOperation = getNodeLastOperation(node) + status.Value.State = getNodeState(node) + status.Value.Details = getValueDetails(node) + + // derived nodes + if !isNodeDerived(node) { + for _, derivedNode := range getDerivedNodes(node) { + derValStatus := getValueStatus(derivedNode, derivedNode.GetKey()) + status.DerivedValues = append(status.DerivedValues, derValStatus.Value) + } + } + + return status +} + +// functions returns selectors selecting non-derived NB values. +func nbBaseValsSelectors() []graph.FlagSelector { + return []graph.FlagSelector{ + graph.WithoutFlags(&DerivedFlag{}), + graph.WithoutFlags(&ValueStateFlag{kvs.ValueState_OBTAINED}), + } +} + +// functions returns selectors selecting non-derived SB values. 
+func sbBaseValsSelectors() []graph.FlagSelector { + return []graph.FlagSelector{ + graph.WithoutFlags(&DerivedFlag{}), + graph.WithFlags(&ValueStateFlag{kvs.ValueState_OBTAINED}), + } +} + +// function returns selectors selecting values to be used for correlation for +// the Dump operation of the given descriptor. +func correlateValsSelectors(descriptor string) []graph.FlagSelector { + return []graph.FlagSelector{ + graph.WithFlags(&DescriptorFlag{descriptor}), + graph.WithoutFlags(&UnavailValueFlag{}, &DerivedFlag{}), + } +} + +// getNodeState returns state stored in the ValueState flag. +func getNodeState(node graph.Node) kvs.ValueState { + if node != nil { + flag := node.GetFlag(ValueStateFlagName) + if flag != nil { + return flag.(*ValueStateFlag).valueState + } + } + return kvs.ValueState_NONEXISTENT +} + +func valueStateToOrigin(state kvs.ValueState) kvs.ValueOrigin { + switch state { + case kvs.ValueState_NONEXISTENT: + return kvs.UnknownOrigin + case kvs.ValueState_OBTAINED: + return kvs.FromSB + } + return kvs.FromNB +} + +// getNodeOrigin returns node origin based on the value state. +func getNodeOrigin(node graph.Node) kvs.ValueOrigin { + state := getNodeState(node) + return valueStateToOrigin(state) +} + +// getNodeError returns node error stored in Error flag. +func getNodeError(node graph.Node) (retriable bool, err error) { + if node != nil { + errorFlag := node.GetFlag(ErrorFlagName) + if errorFlag != nil { + flag := errorFlag.(*ErrorFlag) + return flag.retriable, flag.err + } + } + return false, nil +} + +// getNodeErrorString returns node error stored in Error flag as string. +func getNodeErrorString(node graph.Node) string { + _, err := getNodeError(node) + if err == nil { + return "" + } + return err.Error() +} + +// getNodeLastUpdate returns info about the last update for a given node, stored in LastUpdate flag. 
+func getNodeLastUpdate(node graph.Node) *LastUpdateFlag { + if node == nil { + return nil + } + flag := node.GetFlag(LastUpdateFlagName) + if flag == nil { + return nil + } + return flag.(*LastUpdateFlag) +} + +// getNodeLastAppliedValue return the last applied value for the given node +func getNodeLastAppliedValue(node graph.Node) proto.Message { + lastUpdate := getNodeLastUpdate(node) + if lastUpdate == nil { + return nil + } + return lastUpdate.value +} + +// getNodeLastOperation returns last operation executed over the given node. +func getNodeLastOperation(node graph.Node) kvs.TxnOperation { + if node != nil && getNodeState(node) != kvs.ValueState_OBTAINED { + lastUpdate := getNodeLastUpdate(node) + if lastUpdate != nil { + return lastUpdate.txnOp + } + } + return kvs.TxnOperation_UNDEFINED +} + +// getNodeDescriptor returns name of the descriptor associated with the given node. +// Empty for properties and unimplemented values. +func getNodeDescriptor(node graph.Node) string { + if node == nil { + return "" + } + flag := node.GetFlag(DescriptorFlagName) + if flag == nil { + return "" + } + return flag.(*DescriptorFlag).descriptorName +} + +func isNodeDerived(node graph.Node) bool { + return node.GetFlag(DerivedFlagName) != nil +} + +func getNodeBaseKey(node graph.Node) string { + flag := node.GetFlag(DerivedFlagName) + if flag == nil { + return node.GetKey() + } + return flag.(*DerivedFlag).baseKey +} + +// isNodePending checks whether the node is available for dependency resolution. +func isNodeAvailable(node graph.Node) bool { + if node == nil { + return false + } + return node.GetFlag(UnavailValueFlagName) == nil +} + +// isNodeReady return true if the given node has all dependencies satisfied. +// Recursive calls are needed to handle circular dependencies - nodes of a strongly +// connected component are treated as if they were squashed into one. 
+func isNodeReady(node graph.Node) bool { + if getNodeOrigin(node) == kvs.FromSB { + // for SB values dependencies are not checked + return true + } + ready, _ := isNodeReadyRec(node, 0, make(map[string]int)) + return ready +} + +// isNodeReadyRec is a recursive call from within isNodeReady. +// visited = map{ key -> depth } +func isNodeReadyRec(node graph.Node, depth int, visited map[string]int) (ready bool, cycleDepth int) { + if targetDepth, wasVisited := visited[node.GetKey()]; wasVisited { + return true, targetDepth + } + cycleDepth = depth + visited[node.GetKey()] = depth + defer delete(visited, node.GetKey()) + + for _, targets := range node.GetTargets(DependencyRelation) { + satisfied := false + for _, target := range targets.Nodes { + if getNodeState(target) == kvs.ValueState_REMOVED { + // do not consider values that are (being) removed + continue + } + if isNodeAvailable(target) { + satisfied = true + } + + // test if node is inside a strongly-connected component (treated as one node) + targetReady, targetCycleDepth := isNodeReadyRec(target, depth+1, visited) + if targetReady && targetCycleDepth <= depth { + // this node is reachable from the target + satisfied = true + if targetCycleDepth < cycleDepth { + // update how far back in the branch this node can reach following dependencies + cycleDepth = targetCycleDepth + } + } + } + if !satisfied { + return false, cycleDepth + } + } + return true, cycleDepth +} + +func canNodeHaveMetadata(node graph.Node) bool { + return !isNodeDerived(node) +} + +func getDerivedNodes(node graph.Node) (derived []graph.Node) { + for _, derivedNodes := range node.GetTargets(DerivesRelation) { + derived = append(derived, derivedNodes.Nodes...) 
+ } + return derived +} + +func getDerivedKeys(node graph.Node) utils.KeySet { + set := utils.NewSliceBasedKeySet() + for _, derived := range getDerivedNodes(node) { + set.Add(derived.GetKey()) + } + return set +} diff --git a/plugins/kvscheduler/notification_test.go b/plugins/kvscheduler/notification_test.go new file mode 100644 index 0000000000..9fbd40c790 --- /dev/null +++ b/plugins/kvscheduler/notification_test.go @@ -0,0 +1,1158 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kvscheduler + +/* TODO: fix and re-enable UTs +import ( + "context" + "errors" + "strings" + "testing" + "time" + + "github.com/gogo/protobuf/proto" + . "github.com/onsi/gomega" + + . 
"github.com/ligato/vpp-agent/plugins/kvscheduler/api" + "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/test" + "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/utils" +) + +func TestNotifications(t *testing.T) { + RegisterTestingT(t) + + // prepare KV Scheduler + scheduler := NewPlugin(UseDeps(func(deps *Deps) { + deps.HTTPHandlers = nil + })) + err := scheduler.Init() + Expect(err).To(BeNil()) + + // prepare mocks + mockSB := test.NewMockSouthbound() + // -> descriptor1 (notifications): + descriptor1 := test.NewMockDescriptor(&KVDescriptor{ + Name: descriptor1Name, + NBKeyPrefix: prefixA, + KeySelector: func(key string) bool { + if !strings.HasPrefix(key, prefixA) { + return false + } + if strings.Contains(strings.TrimPrefix(key, prefixA), "/") { + return false // exclude derived values + } + return true + }, + ValueTypeName: proto.MessageName(test.NewArrayValue()), + DerivedValues: test.ArrayValueDerBuilder, + WithMetadata: true, + }, mockSB, 0, test.WithoutDump) + // -> descriptor2: + descriptor2 := test.NewMockDescriptor(&KVDescriptor{ + Name: descriptor2Name, + NBKeyPrefix: prefixB, + KeySelector: prefixSelector(prefixB), + ValueTypeName: proto.MessageName(test.NewArrayValue()), + DerivedValues: test.ArrayValueDerBuilder, + Dependencies: func(key string, value proto.Message) []Dependency { + if key == prefixB+baseValue2 { + depKey := prefixA + baseValue1 + return []Dependency{ + {Label: depKey, Key: depKey}, + } + } + if key == prefixB+baseValue2+"/item2" { + depKey := prefixA + baseValue1 + "/item2" + return []Dependency{ + {Label: depKey, Key: depKey}, + } + } + return nil + }, + WithMetadata: true, + DumpDependencies: []string{descriptor1Name}, + }, mockSB, 0) + + // register both descriptors with the scheduler + scheduler.RegisterKVDescriptor(descriptor1) + scheduler.RegisterKVDescriptor(descriptor2) + + // get metadata map created for each descriptor + metadataMap := scheduler.GetMetadataMap(descriptor1.Name) + nameToInteger1, 
withMetadataMap := metadataMap.(test.NameToInteger) + Expect(withMetadataMap).To(BeTrue()) + metadataMap = scheduler.GetMetadataMap(descriptor2.Name) + nameToInteger2, withMetadataMap := metadataMap.(test.NameToInteger) + Expect(withMetadataMap).To(BeTrue()) + + // run resync transaction against empty SB + startTime := time.Now() + schedulerTxn := scheduler.StartNBTransaction() + schedulerTxn.SetValue(prefixB+baseValue2, test.NewLazyArrayValue("item1", "item2")) + seqNum, err := schedulerTxn.Commit(WithResync(context.Background(), FullResync, true)) + stopTime := time.Now() + Expect(seqNum).To(BeEquivalentTo(0)) + Expect(err).ShouldNot(HaveOccurred()) + + // check the state of SB + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + Expect(mockSB.GetValues(nil)).To(BeEmpty()) + + // check metadata + Expect(metadataMap.ListAllNames()).To(BeEmpty()) + + // check operations executed in SB + opHistory := mockSB.PopHistoryOfOps() + Expect(opHistory).To(HaveLen(1)) + operation := opHistory[0] + Expect(operation.OpType).To(Equal(test.MockDump)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor2Name)) + checkValuesForCorrelation(operation.CorrelateDump, []KVWithMetadata{ + { + Key: prefixB + baseValue2, + Value: test.NewArrayValue("item1", "item2"), + Metadata: nil, + Origin: FromNB, + }, + }) + + // check transaction operations + txnHistory := scheduler.GetTransactionHistory(time.Time{}, time.Now()) + Expect(txnHistory).To(HaveLen(1)) + txn := txnHistory[0] + Expect(txn.PreRecord).To(BeFalse()) + Expect(txn.Start.After(startTime)).To(BeTrue()) + Expect(txn.Start.Before(txn.Stop)).To(BeTrue()) + Expect(txn.Stop.Before(stopTime)).To(BeTrue()) + Expect(txn.SeqNum).To(BeEquivalentTo(0)) + Expect(txn.TxnType).To(BeEquivalentTo(NBTransaction)) + Expect(txn.ResyncType).To(BeEquivalentTo(FullResync)) + Expect(txn.Description).To(BeEmpty()) + checkRecordedValues(txn.Values, []RecordedKVPair{ + {Key: prefixB + baseValue2, Value: 
utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), Origin: FromNB}, + }) + Expect(txn.PreErrors).To(BeEmpty()) + + txnOps := RecordedTxnOps{ + { + Operation: Add, + Key: prefixB + baseValue2, + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsPending: true, + }, + } + checkTxnOperations(txn.Planned, txnOps) + checkTxnOperations(txn.Executed, txnOps) + + // check flag stats + graphR := scheduler.graph.Read() + errorStats := graphR.GetFlagStats(ErrorFlagName, nil) + Expect(errorStats.TotalCount).To(BeEquivalentTo(0)) + pendingStats := graphR.GetFlagStats(PendingFlagName, nil) + Expect(pendingStats.TotalCount).To(BeEquivalentTo(1)) + derivedStats := graphR.GetFlagStats(DerivedFlagName, nil) + Expect(derivedStats.TotalCount).To(BeEquivalentTo(0)) + lastUpdateStats := graphR.GetFlagStats(LastUpdateFlagName, nil) + Expect(lastUpdateStats.TotalCount).To(BeEquivalentTo(1)) + lastChangeStats := graphR.GetFlagStats(LastChangeFlagName, nil) + Expect(lastChangeStats.TotalCount).To(BeEquivalentTo(1)) + descriptorStats := graphR.GetFlagStats(DescriptorFlagName, nil) + Expect(descriptorStats.TotalCount).To(BeEquivalentTo(1)) + Expect(descriptorStats.PerValueCount).ToNot(HaveKey(descriptor1Name)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor2Name)) + Expect(descriptorStats.PerValueCount[descriptor2Name]).To(BeEquivalentTo(1)) + originStats := graphR.GetFlagStats(OriginFlagName, nil) + Expect(originStats.TotalCount).To(BeEquivalentTo(1)) + Expect(originStats.PerValueCount).To(HaveKey(FromNB.String())) + Expect(originStats.PerValueCount[FromNB.String()]).To(BeEquivalentTo(1)) + graphR.Release() + + // send notification + startTime = time.Now() + mockSB.SetValue(prefixA+baseValue1, test.NewArrayValue("item1"), &test.OnlyInteger{Integer: 10}, FromSB, false) + notifError := scheduler.PushSBNotification(prefixA+baseValue1, test.NewArrayValue("item1"), + &test.OnlyInteger{Integer: 10}) + 
Expect(notifError).ShouldNot(HaveOccurred()) + + // wait until the notification is processed + Eventually(func() []*KVWithMetadata { + return mockSB.GetValues(nil) + }, 2*time.Second).Should(HaveLen(3)) + stopTime = time.Now() + + // check the state of SB + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + // -> base value 1 + value := mockSB.GetValue(prefixA + baseValue1) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item1"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(10)) + Expect(value.Origin).To(BeEquivalentTo(FromSB)) + // -> base value 2 + value = mockSB.GetValue(prefixB + baseValue2) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item1", "item2"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(0)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value 2 + value = mockSB.GetValue(prefixB + baseValue2 + "/item1") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item1"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item2 derived from base value 2 is pending + value = mockSB.GetValue(prefixB + baseValue2 + "/item2") + Expect(value).To(BeNil()) + + // check metadata + metadata, exists := nameToInteger1.LookupByName(baseValue1) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(10)) + metadata, exists = nameToInteger2.LookupByName(baseValue2) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(0)) + + // check operations executed in SB + opHistory = mockSB.PopHistoryOfOps() + Expect(opHistory).To(HaveLen(2)) + operation = opHistory[0] + Expect(operation.OpType).To(Equal(test.MockAdd)) + 
Expect(operation.Descriptor).To(BeEquivalentTo(descriptor2Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixB + baseValue2)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[1] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor2Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixB + baseValue2 + "/item1")) + Expect(operation.Err).To(BeNil()) + + // check transaction operations + txnHistory = scheduler.GetTransactionHistory(startTime, time.Now()) + Expect(txnHistory).To(HaveLen(1)) + txn = txnHistory[0] + Expect(txn.PreRecord).To(BeFalse()) + Expect(txn.Start.After(startTime)).To(BeTrue()) + Expect(txn.Start.Before(txn.Stop)).To(BeTrue()) + Expect(txn.Stop.Before(stopTime)).To(BeTrue()) + Expect(txn.SeqNum).To(BeEquivalentTo(1)) + Expect(txn.TxnType).To(BeEquivalentTo(SBNotification)) + Expect(txn.ResyncType).To(BeEquivalentTo(NotResync)) + Expect(txn.Description).To(BeEmpty()) + checkRecordedValues(txn.Values, []RecordedKVPair{ + {Key: prefixA + baseValue1, Value: utils.RecordProtoMessage(test.NewArrayValue("item1")), Origin: FromSB}, + }) + Expect(txn.PreErrors).To(BeEmpty()) + + txnOps = RecordedTxnOps{ + { + Operation: Add, + Key: prefixA + baseValue1, + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1")), + PrevOrigin: FromSB, + NewOrigin: FromSB, + }, + { + Operation: Add, + Key: prefixB + baseValue2, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + WasPending: true, + }, + { + Operation: Add, + Key: prefixB + baseValue2 + "/item1", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixB + baseValue2 + "/item2", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: 
FromNB, + NewOrigin: FromNB, + IsPending: true, + }, + { + Operation: Add, + Key: prefixA + baseValue1 + "/item1", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromSB, + NewOrigin: FromSB, + }, + } + checkTxnOperations(txn.Planned, txnOps) + checkTxnOperations(txn.Executed, txnOps) + + // check flag stats + graphR = scheduler.graph.Read() + errorStats = graphR.GetFlagStats(ErrorFlagName, nil) + Expect(errorStats.TotalCount).To(BeEquivalentTo(0)) + pendingStats = graphR.GetFlagStats(PendingFlagName, nil) + Expect(pendingStats.TotalCount).To(BeEquivalentTo(2)) + derivedStats = graphR.GetFlagStats(DerivedFlagName, nil) + Expect(derivedStats.TotalCount).To(BeEquivalentTo(3)) + lastUpdateStats = graphR.GetFlagStats(LastUpdateFlagName, nil) + Expect(lastUpdateStats.TotalCount).To(BeEquivalentTo(6)) + lastChangeStats = graphR.GetFlagStats(LastChangeFlagName, nil) + Expect(lastChangeStats.TotalCount).To(BeEquivalentTo(3)) + descriptorStats = graphR.GetFlagStats(DescriptorFlagName, nil) + Expect(descriptorStats.TotalCount).To(BeEquivalentTo(5)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor1Name)) + Expect(descriptorStats.PerValueCount[descriptor1Name]).To(BeEquivalentTo(1)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor2Name)) + Expect(descriptorStats.PerValueCount[descriptor2Name]).To(BeEquivalentTo(4)) + originStats = graphR.GetFlagStats(OriginFlagName, nil) + Expect(originStats.TotalCount).To(BeEquivalentTo(6)) + Expect(originStats.PerValueCount).To(HaveKey(FromNB.String())) + Expect(originStats.PerValueCount[FromNB.String()]).To(BeEquivalentTo(4)) + Expect(originStats.PerValueCount).To(HaveKey(FromSB.String())) + Expect(originStats.PerValueCount[FromSB.String()]).To(BeEquivalentTo(2)) + graphR.Release() + + // send 2nd notification + startTime = time.Now() + mockSB.SetValue(prefixA+baseValue1, test.NewArrayValue("item1", "item2"), &test.OnlyInteger{Integer: 11}, FromSB, false) + 
notifError = scheduler.PushSBNotification(prefixA+baseValue1, test.NewArrayValue("item1", "item2"), + &test.OnlyInteger{Integer: 11}) + Expect(notifError).ShouldNot(HaveOccurred()) + + // wait until the notification is processed + Eventually(func() []*KVWithMetadata { + return mockSB.GetValues(nil) + }, 2*time.Second).Should(HaveLen(4)) + stopTime = time.Now() + + // check the state of SB + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + // -> base value 1 + value = mockSB.GetValue(prefixA + baseValue1) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item1", "item2"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(11)) + Expect(value.Origin).To(BeEquivalentTo(FromSB)) + // -> base value 2 + value = mockSB.GetValue(prefixB + baseValue2) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item1", "item2"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(0)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value 2 + value = mockSB.GetValue(prefixB + baseValue2 + "/item1") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item1"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item2 derived from base value 2 + value = mockSB.GetValue(prefixB + baseValue2 + "/item2") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item2"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + + // check metadata + metadata, exists = nameToInteger1.LookupByName(baseValue1) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(11)) + metadata, exists = nameToInteger2.LookupByName(baseValue2) + 
Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(0)) + + // check operations executed in SB + opHistory = mockSB.PopHistoryOfOps() + Expect(opHistory).To(HaveLen(2)) + operation = opHistory[0] + Expect(operation.OpType).To(Equal(test.MockUpdate)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor2Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixB + baseValue2)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[1] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor2Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixB + baseValue2 + "/item2")) + Expect(operation.Err).To(BeNil()) + + // check transaction operations + txnHistory = scheduler.GetTransactionHistory(startTime, time.Now()) + Expect(txnHistory).To(HaveLen(1)) + txn = txnHistory[0] + Expect(txn.PreRecord).To(BeFalse()) + Expect(txn.Start.After(startTime)).To(BeTrue()) + Expect(txn.Start.Before(txn.Stop)).To(BeTrue()) + Expect(txn.Stop.Before(stopTime)).To(BeTrue()) + Expect(txn.SeqNum).To(BeEquivalentTo(2)) + Expect(txn.TxnType).To(BeEquivalentTo(SBNotification)) + Expect(txn.ResyncType).To(BeEquivalentTo(NotResync)) + Expect(txn.Description).To(BeEmpty()) + checkRecordedValues(txn.Values, []RecordedKVPair{ + {Key: prefixA + baseValue1, Value: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), Origin: FromSB}, + }) + Expect(txn.PreErrors).To(BeEmpty()) + + txnOps = RecordedTxnOps{ + { + Operation: Modify, + Key: prefixA + baseValue1, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item1")), + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromSB, + NewOrigin: FromSB, + }, + { + Operation: Update, + Key: prefixB + baseValue2, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + 
Operation: Add, + Key: prefixA + baseValue1 + "/item2", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromSB, + NewOrigin: FromSB, + }, + { + Operation: Add, + Key: prefixB + baseValue2 + "/item2", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + WasPending: true, + }, + } + checkTxnOperations(txn.Planned, txnOps) + checkTxnOperations(txn.Executed, txnOps) + + // check flag stats + graphR = scheduler.graph.Read() + errorStats = graphR.GetFlagStats(ErrorFlagName, nil) + Expect(errorStats.TotalCount).To(BeEquivalentTo(0)) + pendingStats = graphR.GetFlagStats(PendingFlagName, nil) + Expect(pendingStats.TotalCount).To(BeEquivalentTo(2)) + derivedStats = graphR.GetFlagStats(DerivedFlagName, nil) + Expect(derivedStats.TotalCount).To(BeEquivalentTo(6)) + lastUpdateStats = graphR.GetFlagStats(LastUpdateFlagName, nil) + Expect(lastUpdateStats.TotalCount).To(BeEquivalentTo(11)) + lastChangeStats = graphR.GetFlagStats(LastChangeFlagName, nil) + Expect(lastChangeStats.TotalCount).To(BeEquivalentTo(5)) + descriptorStats = graphR.GetFlagStats(DescriptorFlagName, nil) + Expect(descriptorStats.TotalCount).To(BeEquivalentTo(8)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor1Name)) + Expect(descriptorStats.PerValueCount[descriptor1Name]).To(BeEquivalentTo(2)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor2Name)) + Expect(descriptorStats.PerValueCount[descriptor2Name]).To(BeEquivalentTo(6)) + originStats = graphR.GetFlagStats(OriginFlagName, nil) + Expect(originStats.TotalCount).To(BeEquivalentTo(11)) + Expect(originStats.PerValueCount).To(HaveKey(FromNB.String())) + Expect(originStats.PerValueCount[FromNB.String()]).To(BeEquivalentTo(6)) + Expect(originStats.PerValueCount).To(HaveKey(FromSB.String())) + 
Expect(originStats.PerValueCount[FromSB.String()]).To(BeEquivalentTo(5)) + graphR.Release() + + // send 3rd notification + startTime = time.Now() + mockSB.SetValue(prefixA+baseValue1, nil, nil, FromSB, false) + notifError = scheduler.PushSBNotification(prefixA+baseValue1, nil, nil) + Expect(notifError).ShouldNot(HaveOccurred()) + + // wait until the notification is processed + Eventually(func() []*KVWithMetadata { + return mockSB.GetValues(nil) + }, 2*time.Second).Should(HaveLen(0)) + stopTime = time.Now() + + // check the state of SB + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + Expect(mockSB.GetValues(nil)).To(BeEmpty()) + + // check metadata + Expect(metadataMap.ListAllNames()).To(BeEmpty()) + + // check operations executed in SB + opHistory = mockSB.PopHistoryOfOps() + Expect(opHistory).To(HaveLen(3)) + operation = opHistory[0] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor2Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixB + baseValue2 + "/item2")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[1] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor2Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixB + baseValue2 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[2] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor2Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixB + baseValue2)) + Expect(operation.Err).To(BeNil()) + + // check transaction operations + txnHistory = scheduler.GetTransactionHistory(startTime, time.Now()) + Expect(txnHistory).To(HaveLen(1)) + txn = txnHistory[0] + Expect(txn.PreRecord).To(BeFalse()) + Expect(txn.Start.After(startTime)).To(BeTrue()) + Expect(txn.Start.Before(txn.Stop)).To(BeTrue()) + Expect(txn.Stop.Before(stopTime)).To(BeTrue()) + Expect(txn.SeqNum).To(BeEquivalentTo(3)) + 
Expect(txn.TxnType).To(BeEquivalentTo(SBNotification)) + Expect(txn.ResyncType).To(BeEquivalentTo(NotResync)) + Expect(txn.Description).To(BeEmpty()) + checkRecordedValues(txn.Values, []RecordedKVPair{ + {Key: prefixA + baseValue1, Value: utils.RecordProtoMessage(nil), Origin: FromSB}, + }) + Expect(txn.PreErrors).To(BeEmpty()) + + txnOps = RecordedTxnOps{ + { + Operation: Delete, + Key: prefixA + baseValue1 + "/item1", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromSB, + NewOrigin: FromSB, + }, + { + Operation: Delete, + Key: prefixB + baseValue2 + "/item2", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Delete, + Key: prefixA + baseValue1 + "/item2", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromSB, + NewOrigin: FromSB, + }, + { + Operation: Delete, + Key: prefixB + baseValue2 + "/item1", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Delete, + Key: prefixB + baseValue2, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsPending: true, + }, + { + Operation: Delete, + Key: prefixA + baseValue1, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromSB, + NewOrigin: FromSB, + }, + } + checkTxnOperations(txn.Planned, txnOps) + checkTxnOperations(txn.Executed, txnOps) + + // close scheduler + err = scheduler.Close() + Expect(err).To(BeNil()) +} + +func TestNotificationsWithRetry(t *testing.T) { + RegisterTestingT(t) + + // prepare KV Scheduler + scheduler := NewPlugin(UseDeps(func(deps *Deps) { + deps.HTTPHandlers = nil + })) + err := scheduler.Init() + Expect(err).To(BeNil()) + + // prepare mocks + mockSB := test.NewMockSouthbound() + // 
-> descriptor1 (notifications): + descriptor1 := test.NewMockDescriptor(&KVDescriptor{ + Name: descriptor1Name, + NBKeyPrefix: prefixA, + KeySelector: prefixSelector(prefixA), + ValueTypeName: proto.MessageName(test.NewArrayValue()), + DerivedValues: test.ArrayValueDerBuilder, + WithMetadata: true, + }, mockSB, 0, test.WithoutDump) + // -> descriptor2: + descriptor2 := test.NewMockDescriptor(&KVDescriptor{ + Name: descriptor2Name, + NBKeyPrefix: prefixB, + KeySelector: prefixSelector(prefixB), + ValueTypeName: proto.MessageName(test.NewArrayValue()), + Dependencies: func(key string, value proto.Message) []Dependency { + if key == prefixB+baseValue2 { + depKey := prefixA + baseValue1 + return []Dependency{ + {Label: depKey, Key: depKey}, + } + } + if key == prefixB+baseValue2+"/item2" { + depKey := prefixA + baseValue1 + "/item2" + return []Dependency{ + {Label: depKey, Key: depKey}, + } + } + return nil + }, + DerivedValues: test.ArrayValueDerBuilder, + WithMetadata: true, + }, mockSB, 0) + // -> descriptor3: + descriptor3 := test.NewMockDescriptor(&KVDescriptor{ + Name: descriptor3Name, + NBKeyPrefix: prefixC, + KeySelector: prefixSelector(prefixC), + ValueTypeName: proto.MessageName(test.NewStringValue("")), + ValueComparator: test.StringValueComparator, + Dependencies: func(key string, value proto.Message) []Dependency { + if key == prefixC+baseValue3 { + return []Dependency{ + {Label: prefixA, AnyOf: prefixSelector(prefixA)}, + } + } + return nil + }, + WithMetadata: true, + DumpDependencies: []string{descriptor2Name}, + }, mockSB, 0) + + // -> planned errors + mockSB.PlanError(prefixB+baseValue2+"/item2", errors.New("failed to add derived value"), + func() { + mockSB.SetValue(prefixB+baseValue2, test.NewArrayValue("item1"), + &test.OnlyInteger{Integer: 0}, FromNB, false) + }) + mockSB.PlanError(prefixC+baseValue3, errors.New("failed to add value"), + func() { + mockSB.SetValue(prefixC+baseValue3, nil, nil, FromNB, false) + }) + + // subscribe to receive 
notifications about errors + errorChan := make(chan KeyWithError, 5) + scheduler.SubscribeForErrors(errorChan, nil) + + // register all 3 descriptors with the scheduler + scheduler.RegisterKVDescriptor(descriptor1) + scheduler.RegisterKVDescriptor(descriptor2) + scheduler.RegisterKVDescriptor(descriptor3) + + // get metadata map created for each descriptor + metadataMap := scheduler.GetMetadataMap(descriptor1.Name) + nameToInteger1, withMetadataMap := metadataMap.(test.NameToInteger) + Expect(withMetadataMap).To(BeTrue()) + metadataMap = scheduler.GetMetadataMap(descriptor2.Name) + nameToInteger2, withMetadataMap := metadataMap.(test.NameToInteger) + Expect(withMetadataMap).To(BeTrue()) + metadataMap = scheduler.GetMetadataMap(descriptor3.Name) + nameToInteger3, withMetadataMap := metadataMap.(test.NameToInteger) + Expect(withMetadataMap).To(BeTrue()) + + // run 1st data-change transaction with retry against empty SB + schedulerTxn1 := scheduler.StartNBTransaction() + schedulerTxn1.SetValue(prefixB+baseValue2, test.NewLazyArrayValue("item1", "item2")) + seqNum, err := schedulerTxn1.Commit(WithRetry(context.Background(), 3*time.Second, true)) + Expect(seqNum).To(BeEquivalentTo(0)) + Expect(err).ShouldNot(HaveOccurred()) + + // run 2nd data-change transaction with retry + schedulerTxn2 := scheduler.StartNBTransaction() + schedulerTxn2.SetValue(prefixC+baseValue3, test.NewLazyStringValue("base-value3-data")) + seqNum, err = schedulerTxn2.Commit(WithRetry(context.Background(), 6*time.Second, true)) + Expect(seqNum).To(BeEquivalentTo(1)) + Expect(err).ShouldNot(HaveOccurred()) + + // check the state of SB - empty since dependencies are not met + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + Expect(mockSB.GetValues(nil)).To(BeEmpty()) + Expect(mockSB.PopHistoryOfOps()).To(HaveLen(0)) + + // check metadata + Expect(metadataMap.ListAllNames()).To(BeEmpty()) + + // send notification + startTime := time.Now() + notifError := 
scheduler.PushSBNotification(prefixA+baseValue1, test.NewArrayValue("item1", "item2"), + &test.OnlyInteger{Integer: 10}) + Expect(notifError).ShouldNot(HaveOccurred()) + + // wait until the notification is processed + Eventually(func() []*KVWithMetadata { + return mockSB.GetValues(nil) + }, 2*time.Second).Should(HaveLen(2)) + stopTime := time.Now() + + // receive the error notifications + var errorNotif KeyWithError + Eventually(errorChan, time.Second).Should(Receive(&errorNotif)) + Expect(errorNotif.Key).To(Equal(prefixC + baseValue3)) + Expect(errorNotif.TxnOperation).To(Equal(Add)) + Expect(errorNotif.Error).ToNot(BeNil()) + Expect(errorNotif.Error.Error()).To(BeEquivalentTo("failed to add value")) + Eventually(errorChan, time.Second).Should(Receive(&errorNotif)) + Expect(errorNotif.Key).To(Equal(prefixB + baseValue2 + "/item2")) + Expect(errorNotif.TxnOperation).To(Equal(Add)) + Expect(errorNotif.Error).ToNot(BeNil()) + Expect(errorNotif.Error.Error()).To(BeEquivalentTo("failed to add derived value")) + + // check the state of SB + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + // -> base value 2 + value := mockSB.GetValue(prefixB + baseValue2) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item1"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(0)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value 2 + value = mockSB.GetValue(prefixB + baseValue2 + "/item1") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item1"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item2 derived from base value 2 failed to get created + value = mockSB.GetValue(prefixB + baseValue2 + "/item2") + Expect(value).To(BeNil()) + // -> base value 3 failed to get created + value = mockSB.GetValue(prefixC + baseValue3) + 
Expect(value).To(BeNil()) + Expect(mockSB.GetValues(nil)).To(HaveLen(2)) + + // check failed (base) values + failedVals := scheduler.GetFailedValues(nil) + Expect(failedVals).To(HaveLen(2)) + Expect(failedVals).To(ContainElement(KeyWithError{Key: prefixC + baseValue3, TxnOperation: Add, Error: errors.New("failed to add value")})) + Expect(failedVals).To(ContainElement(KeyWithError{Key: prefixB + baseValue2, TxnOperation: Add, Error: errors.New("failed to add derived value")})) + + // check metadata + metadata, exists := nameToInteger1.LookupByName(baseValue1) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(10)) + metadata, exists = nameToInteger2.LookupByName(baseValue2) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(0)) + metadata, exists = nameToInteger3.LookupByName(baseValue3) + Expect(exists).To(BeFalse()) + Expect(metadata).To(BeNil()) + + // check operations executed in SB + opHistory := mockSB.PopHistoryOfOps() + Expect(opHistory).To(HaveLen(6)) + operation := opHistory[0] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor2Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixB + baseValue2)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[1] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor2Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixB + baseValue2 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[2] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3)) + Expect(operation.Err).ToNot(BeNil()) + Expect(operation.Err.Error()).To(BeEquivalentTo("failed to add value")) + operation = opHistory[3] + Expect(operation.OpType).To(Equal(test.MockAdd)) + 
Expect(operation.Descriptor).To(BeEquivalentTo(descriptor2Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixB + baseValue2 + "/item2")) + Expect(operation.Err).ToNot(BeNil()) + Expect(operation.Err.Error()).To(BeEquivalentTo("failed to add derived value")) + operation = opHistory[4] // refresh failed value + Expect(operation.OpType).To(Equal(test.MockDump)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor2Name)) + checkValuesForCorrelation(operation.CorrelateDump, []KVWithMetadata{ + { + Key: prefixB + baseValue2, + Value: test.NewArrayValue("item1", "item2"), + Metadata: &test.OnlyInteger{Integer: 0}, + Origin: FromNB, + }, + }) + operation = opHistory[5] // refresh failed value + Expect(operation.OpType).To(Equal(test.MockDump)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + checkValuesForCorrelation(operation.CorrelateDump, []KVWithMetadata{}) + + // check last transaction + txnHistory := scheduler.GetTransactionHistory(time.Time{}, time.Now()) + Expect(txnHistory).To(HaveLen(3)) + txn := txnHistory[2] + Expect(txn.PreRecord).To(BeFalse()) + Expect(txn.Start.After(startTime)).To(BeTrue()) + Expect(txn.Start.Before(txn.Stop)).To(BeTrue()) + Expect(txn.Stop.Before(stopTime)).To(BeTrue()) + Expect(txn.SeqNum).To(BeEquivalentTo(2)) + Expect(txn.TxnType).To(BeEquivalentTo(SBNotification)) + Expect(txn.ResyncType).To(BeEquivalentTo(NotResync)) + Expect(txn.Description).To(BeEmpty()) + checkRecordedValues(txn.Values, []RecordedKVPair{ + {Key: prefixA + baseValue1, Value: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), Origin: FromSB}, + }) + Expect(txn.PreErrors).To(BeEmpty()) + + // -> planned operations + txnOps := RecordedTxnOps{ + { + Operation: Add, + Key: prefixA + baseValue1, + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromSB, + NewOrigin: FromSB, + }, + { + Operation: Add, + Key: prefixB + baseValue2, + PrevValue: 
utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + WasPending: true, + }, + { + Operation: Add, + Key: prefixB + baseValue2 + "/item1", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixC + baseValue3, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("base-value3-data")), + NewValue: utils.RecordProtoMessage(test.NewStringValue("base-value3-data")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + WasPending: true, + }, + { + Operation: Add, + Key: prefixA + baseValue1 + "/item1", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromSB, + NewOrigin: FromSB, + }, + { + Operation: Update, + Key: prefixC + baseValue3, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("base-value3-data")), + NewValue: utils.RecordProtoMessage(test.NewStringValue("base-value3-data")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixA + baseValue1 + "/item2", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromSB, + NewOrigin: FromSB, + }, + { + Operation: Add, + Key: prefixB + baseValue2 + "/item2", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Update, + Key: prefixC + baseValue3, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("base-value3-data")), + NewValue: utils.RecordProtoMessage(test.NewStringValue("base-value3-data")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + } + checkTxnOperations(txn.Planned, txnOps) + + // -> executed operations + txnOps = RecordedTxnOps{ + { + Operation: Add, + Key: prefixA + baseValue1, + NewValue: 
utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromSB, + NewOrigin: FromSB, + }, + { + Operation: Add, + Key: prefixB + baseValue2, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + WasPending: true, + }, + { + Operation: Add, + Key: prefixB + baseValue2 + "/item1", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixC + baseValue3, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("base-value3-data")), + NewValue: utils.RecordProtoMessage(test.NewStringValue("base-value3-data")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + WasPending: true, + IsPending: true, + NewErr: errors.New("failed to add value"), + }, + { + Operation: Add, + Key: prefixA + baseValue1 + "/item1", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromSB, + NewOrigin: FromSB, + }, + { + Operation: Add, + Key: prefixA + baseValue1 + "/item2", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromSB, + NewOrigin: FromSB, + }, + { + Operation: Add, + Key: prefixB + baseValue2 + "/item2", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsPending: true, + NewErr: errors.New("failed to add derived value"), + }, + } + checkTxnOperations(txn.Executed, txnOps) + + // check flag stats + graphR := scheduler.graph.Read() + errorStats := graphR.GetFlagStats(ErrorFlagName, nil) + Expect(errorStats.TotalCount).To(BeEquivalentTo(2)) + pendingStats := graphR.GetFlagStats(PendingFlagName, nil) + Expect(pendingStats.TotalCount).To(BeEquivalentTo(4)) + derivedStats := graphR.GetFlagStats(DerivedFlagName, nil) + 
Expect(derivedStats.TotalCount).To(BeEquivalentTo(4)) + lastUpdateStats := graphR.GetFlagStats(LastUpdateFlagName, nil) + Expect(lastUpdateStats.TotalCount).To(BeEquivalentTo(9)) + lastChangeStats := graphR.GetFlagStats(LastChangeFlagName, nil) + Expect(lastChangeStats.TotalCount).To(BeEquivalentTo(5)) + descriptorStats := graphR.GetFlagStats(DescriptorFlagName, nil) + Expect(descriptorStats.TotalCount).To(BeEquivalentTo(9)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor1Name)) + Expect(descriptorStats.PerValueCount[descriptor1Name]).To(BeEquivalentTo(3)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor2Name)) + Expect(descriptorStats.PerValueCount[descriptor2Name]).To(BeEquivalentTo(4)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor3Name)) + Expect(descriptorStats.PerValueCount[descriptor3Name]).To(BeEquivalentTo(2)) + originStats := graphR.GetFlagStats(OriginFlagName, nil) + Expect(originStats.TotalCount).To(BeEquivalentTo(9)) + Expect(originStats.PerValueCount).To(HaveKey(FromNB.String())) + Expect(originStats.PerValueCount[FromNB.String()]).To(BeEquivalentTo(6)) + Expect(originStats.PerValueCount).To(HaveKey(FromSB.String())) + Expect(originStats.PerValueCount[FromSB.String()]).To(BeEquivalentTo(3)) + graphR.Release() + + // item2 derived from baseValue2 should get fixed first + startTime = time.Now() + Eventually(errorChan, 5*time.Second).Should(Receive(&errorNotif)) + Expect(errorNotif.Key).To(Equal(prefixB + baseValue2 + "/item2")) + Expect(errorNotif.Error).To(BeNil()) + stopTime = time.Now() + + // check the state of SB + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + // -> item2 derived from base value 2 is now created + value = mockSB.GetValue(prefixB + baseValue2 + "/item2") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item2"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + 
Expect(mockSB.GetValues(nil)).To(HaveLen(3)) + + // check failed values + failedVals = scheduler.GetFailedValues(nil) + Expect(failedVals).To(HaveLen(1)) + Expect(failedVals).To(ContainElement(KeyWithError{Key: prefixC + baseValue3, TxnOperation: Add, Error: errors.New("failed to add value")})) + + // check operations executed in SB + opHistory = mockSB.PopHistoryOfOps() + Expect(opHistory).To(HaveLen(2)) + operation = opHistory[0] + Expect(operation.OpType).To(Equal(test.MockModify)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor2Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixB + baseValue2)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[1] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor2Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixB + baseValue2 + "/item2")) + Expect(operation.Err).To(BeNil()) + + // check last transaction + txnHistory = scheduler.GetTransactionHistory(startTime, time.Now()) + Expect(txnHistory).To(HaveLen(1)) + txn = txnHistory[0] + Expect(txn.PreRecord).To(BeFalse()) + Expect(txn.Start.After(startTime)).To(BeTrue()) + Expect(txn.Start.Before(txn.Stop)).To(BeTrue()) + Expect(txn.Stop.Before(stopTime)).To(BeTrue()) + Expect(txn.SeqNum).To(BeEquivalentTo(3)) + Expect(txn.TxnType).To(BeEquivalentTo(RetryFailedOps)) + Expect(txn.ResyncType).To(BeEquivalentTo(NotResync)) + Expect(txn.Description).To(BeEmpty()) + checkRecordedValues(txn.Values, []RecordedKVPair{ + {Key: prefixB + baseValue2, Value: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), Origin: FromNB}, + }) + Expect(txn.PreErrors).To(BeEmpty()) + txnOps = RecordedTxnOps{ + { + Operation: Modify, + Key: prefixB + baseValue2, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item1")), + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsRetry: true, + }, + { + Operation: Add, + Key: prefixB + 
baseValue2 + "/item2", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + PrevErr: errors.New("failed to add derived value"), + IsRetry: true, + }, + } + checkTxnOperations(txn.Planned, txnOps) + checkTxnOperations(txn.Executed, txnOps) + + // base-value3 should get fixed eventually as well + startTime = time.Now() + Eventually(errorChan, 5*time.Second).Should(Receive(&errorNotif)) + Expect(errorNotif.Key).To(Equal(prefixC + baseValue3)) + Expect(errorNotif.Error).To(BeNil()) + stopTime = time.Now() + + // check the state of SB + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + // -> base value 3 is now created + value = mockSB.GetValue(prefixC + baseValue3) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("base-value3-data"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(0)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + Expect(mockSB.GetValues(nil)).To(HaveLen(4)) + + // check failed values + failedVals = scheduler.GetFailedValues(nil) + Expect(failedVals).To(HaveLen(0)) + + // check operations executed in SB + opHistory = mockSB.PopHistoryOfOps() + Expect(opHistory).To(HaveLen(1)) + operation = opHistory[0] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3)) + Expect(operation.Err).To(BeNil()) + + // check last transaction + txnHistory = scheduler.GetTransactionHistory(startTime, time.Time{}) + Expect(txnHistory).To(HaveLen(1)) + txn = txnHistory[0] + Expect(txn.PreRecord).To(BeFalse()) + Expect(txn.Start.After(startTime)).To(BeTrue()) + Expect(txn.Start.Before(txn.Stop)).To(BeTrue()) + Expect(txn.Stop.Before(stopTime)).To(BeTrue()) + Expect(txn.SeqNum).To(BeEquivalentTo(4)) + 
Expect(txn.TxnType).To(BeEquivalentTo(RetryFailedOps)) + Expect(txn.ResyncType).To(BeEquivalentTo(NotResync)) + Expect(txn.Description).To(BeEmpty()) + checkRecordedValues(txn.Values, []RecordedKVPair{ + {Key: prefixC + baseValue3, Value: utils.RecordProtoMessage(test.NewStringValue("base-value3-data")), Origin: FromNB}, + }) + Expect(txn.PreErrors).To(BeEmpty()) + txnOps = RecordedTxnOps{ + { + Operation: Add, + Key: prefixC + baseValue3, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("base-value3-data")), + NewValue: utils.RecordProtoMessage(test.NewStringValue("base-value3-data")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + WasPending: true, + PrevErr: errors.New("failed to add value"), + IsRetry: true, + }, + } + checkTxnOperations(txn.Planned, txnOps) + checkTxnOperations(txn.Executed, txnOps) + + // check metadata + metadata, exists = nameToInteger1.LookupByName(baseValue1) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(10)) + metadata, exists = nameToInteger2.LookupByName(baseValue2) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(0)) + metadata, exists = nameToInteger3.LookupByName(baseValue3) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(0)) + + // close scheduler + err = scheduler.Close() + Expect(err).To(BeNil()) +} +*/ \ No newline at end of file diff --git a/plugins/kvscheduler/options.go b/plugins/kvscheduler/options.go new file mode 100644 index 0000000000..6256e722ab --- /dev/null +++ b/plugins/kvscheduler/options.go @@ -0,0 +1,55 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kvscheduler + +import ( + "github.com/ligato/cn-infra/config" + "github.com/ligato/cn-infra/logging" + "github.com/ligato/cn-infra/rpc/rest" +) + +// DefaultPlugin is a default instance of Plugin. +var DefaultPlugin = *NewPlugin() + +// NewPlugin creates a new Plugin with the provided Options. +func NewPlugin(opts ...Option) *Scheduler { + p := &Scheduler{} + + p.PluginName = "kvscheduler" + p.HTTPHandlers = &rest.DefaultPlugin + + for _, o := range opts { + o(p) + } + + if p.Deps.Log == nil { + p.Deps.Log = logging.ForPlugin(p.String()) + } + if p.Deps.Cfg == nil { + p.Deps.Cfg = config.ForPlugin(p.String()) + } + + return p +} + +// Option is a function that can be used in NewPlugin to customize Plugin. +type Option func(*Scheduler) + +// UseDeps returns Option that can inject custom dependencies. +func UseDeps(cb func(*Deps)) Option { + return func(p *Scheduler) { + cb(&p.Deps) + } +} diff --git a/plugins/kvscheduler/plugin_scheduler.go b/plugins/kvscheduler/plugin_scheduler.go new file mode 100644 index 0000000000..00523ca6af --- /dev/null +++ b/plugins/kvscheduler/plugin_scheduler.go @@ -0,0 +1,448 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at:
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kvscheduler

import (
	"context"
	"errors"
	"os"
	"sync"
	"time"

	"github.com/gogo/protobuf/proto"

	"github.com/ligato/cn-infra/idxmap"
	"github.com/ligato/cn-infra/idxmap/mem"
	"github.com/ligato/cn-infra/infra"
	"github.com/ligato/cn-infra/rpc/rest"

	kvs "github.com/ligato/vpp-agent/plugins/kvscheduler/api"
	"github.com/ligato/vpp-agent/plugins/kvscheduler/internal/graph"
	"github.com/ligato/vpp-agent/plugins/kvscheduler/internal/registry"
	"github.com/ligato/vpp-agent/plugins/kvscheduler/internal/utils"
)

const (
	// DependencyRelation identifies dependency relation for the graph.
	DependencyRelation = "depends-on"

	// DerivesRelation identifies relation of value derivation for the graph.
	DerivesRelation = "derives"

	// how often the transaction history gets trimmed to remove records too old to keep
	txnHistoryTrimmingPeriod = 1 * time.Minute

	// by default, a history of processed transactions is recorded
	defaultRecordTransactionHistory = true

	// by default, only transactions processed in the last 24 hours are kept recorded
	// (with the exception of permanently recorded init period)
	defaultTransactionHistoryAgeLimit = 24 * 60 // in minutes

	// by default, transactions from the first hour of runtime stay permanently
	// recorded
	defaultPermanentlyRecordedInitPeriod = 60 // in minutes

	// name of the environment variable used to enable verification after every transaction
	verifyModeEnv = "KVSCHED_VERIFY_MODE"

	// name of the environment variable used to trigger log messages showing
	// graph traversal
	logGraphWalkEnv = "KVSCHED_LOG_GRAPH_WALK"
)

// Scheduler is a CN-infra plugin implementing KVScheduler.
// Detailed documentation can be found in the "api" and "docs" sub-folders.
type Scheduler struct {
	Deps

	// configuration (loaded in Init, defaults applied when no config file is found)
	config *Config

	// management of go routines
	ctx    context.Context
	cancel context.CancelFunc
	wg     sync.WaitGroup

	// in-memory representation of all created+pending kv-pairs and their dependencies
	graph graph.Graph

	// registry for descriptors
	registry registry.Registry

	// a list of key prefixes covered by registered descriptors
	keyPrefixes []string

	// TXN processing
	txnLock      sync.Mutex // can be used to pause transaction processing; always lock before the graph!
	txnQueue     chan *transaction
	txnSeqNumber uint64
	resyncCount  uint

	// value status
	updatedStates    utils.KeySet // base values with updated status
	valStateWatchers []valStateWatcher

	// TXN history
	historyLock sync.Mutex
	txnHistory  []*kvs.RecordedTxn // ordered from the oldest to the latest
	startTime   time.Time

	// debugging (driven by environment variables, see verifyModeEnv / logGraphWalkEnv)
	verifyMode   bool
	logGraphWalk bool
}

// Deps lists dependencies of the scheduler.
type Deps struct {
	infra.PluginDeps
	HTTPHandlers rest.HTTPHandlers
}

// Config holds the KVScheduler configuration.
type Config struct {
	RecordTransactionHistory      bool   `json:"record-transaction-history"`
	TransactionHistoryAgeLimit    uint32 `json:"transaction-history-age-limit"`    // in minutes
	PermanentlyRecordedInitPeriod uint32 `json:"permanently-recorded-init-period"` // in minutes
}

// SchedulerTxn implements transaction for the KV scheduler.
type SchedulerTxn struct {
	scheduler *Scheduler
	values    map[string]proto.Message // key -> value to apply; nil value means delete
}

// valStateWatcher represents one subscription for value state updates.
type valStateWatcher struct {
	channel  chan<- *kvs.BaseValueStatus
	selector kvs.KeySelector // nil selector matches all keys
}

// Init initializes the scheduler. Single go routine is started that will process
// all the transactions synchronously.
func (s *Scheduler) Init() error {
	// default configuration, used when the config file provides no overrides
	s.config = &Config{
		RecordTransactionHistory:      defaultRecordTransactionHistory,
		TransactionHistoryAgeLimit:    defaultTransactionHistoryAgeLimit,
		PermanentlyRecordedInitPeriod: defaultPermanentlyRecordedInitPeriod,
	}

	// load configuration (overrides the defaults in-place when found)
	err := s.loadConfig(s.config)
	if err != nil {
		s.Log.Error(err)
		return err
	}
	s.Log.Infof("KVScheduler configuration: %+v", *s.config)

	// prepare context for all go routines
	s.ctx, s.cancel = context.WithCancel(context.Background())
	// initialize graph for in-memory storage of key-value pairs
	s.graph = graph.NewGraph(s.config.RecordTransactionHistory, s.config.TransactionHistoryAgeLimit,
		s.config.PermanentlyRecordedInitPeriod)
	// initialize registry for key->descriptor lookups
	s.registry = registry.NewRegistry()
	// prepare channel for serializing transactions
	s.txnQueue = make(chan *transaction, 100)
	// register REST API handlers
	s.registerHandlers(s.HTTPHandlers)
	// initialize key-set used to mark values with updated status
	s.updatedStates = utils.NewSliceBasedKeySet()
	// record startup time
	s.startTime = time.Now()

	// enable or disable debugging mode (any non-empty value enables)
	s.verifyMode = os.Getenv(verifyModeEnv) != ""
	s.logGraphWalk = os.Getenv(logGraphWalkEnv) != ""

	// go routine processing serialized transactions; stopped via s.cancel in Close
	s.wg.Add(1)
	go s.consumeTransactions()

	// go routine periodically removing transaction records too old to keep
	if s.config.RecordTransactionHistory {
		s.wg.Add(1)
		go s.transactionHistoryTrimming()
	}
	return nil
}

// loadConfig loads configuration file into the given Config.
// A missing config file is not an error — the defaults already set in
// *config are kept in that case.
func (s *Scheduler) loadConfig(config *Config) error {
	found, err := s.Cfg.LoadValue(config)
	if err != nil {
		return err
	} else if !found {
		s.Log.Debugf("%v config not found", s.PluginName)
		return nil
	}
	s.Log.Debugf("%v config found: %+v", s.PluginName, config)
	// NOTE(review): err is always nil at this point; returned for symmetry
	return err
}

// Close stops all the go routines.
+func (s *Scheduler) Close() error { + s.cancel() + s.wg.Wait() + return nil +} + +// RegisterKVDescriptor registers descriptor for a set of selected +// keys. It should be called in the Init phase of agent plugins. +// Every key-value pair must have at most one descriptor associated with it +// (none for derived values expressing properties). +func (s *Scheduler) RegisterKVDescriptor(descriptor *kvs.KVDescriptor) error { + // TODO: validate descriptor + + s.registry.RegisterDescriptor(descriptor) + if descriptor.NBKeyPrefix != "" { + s.keyPrefixes = append(s.keyPrefixes, descriptor.NBKeyPrefix) + } + + if descriptor.WithMetadata { + var metadataMap idxmap.NamedMappingRW + if descriptor.MetadataMapFactory != nil { + metadataMap = descriptor.MetadataMapFactory() + } else { + metadataMap = mem.NewNamedMapping(s.Log, descriptor.Name, nil) + } + graphW := s.graph.Write(false) + graphW.RegisterMetadataMap(descriptor.Name, metadataMap) + graphW.Save() + graphW.Release() + } + return nil +} + +// GetRegisteredNBKeyPrefixes returns a list of key prefixes from NB with values +// described by registered descriptors and therefore managed by the scheduler. +func (s *Scheduler) GetRegisteredNBKeyPrefixes() []string { + return s.keyPrefixes +} + +// StartNBTransaction starts a new transaction from NB to SB plane. +// The enqueued actions are scheduled for execution by Txn.Commit(). +func (s *Scheduler) StartNBTransaction() kvs.Txn { + txn := &SchedulerTxn{ + scheduler: s, + values: make(map[string]proto.Message), + } + return txn +} + +// TransactionBarrier ensures that all notifications received prior to the call +// are associated with transactions that have already finalized. +func (s *Scheduler) TransactionBarrier() { + s.txnLock.Lock() + s.txnLock.Unlock() +} + +// PushSBNotification notifies about a spontaneous value change in the SB +// plane (i.e. not triggered by NB transaction). 
+func (s *Scheduler) PushSBNotification(key string, value proto.Message, metadata kvs.Metadata) error { + txn := &transaction{ + txnType: kvs.SBNotification, + values: []kvForTxn{ + { + key: key, + value: value, + metadata: metadata, + origin: kvs.FromSB, + }, + }, + } + return s.enqueueTxn(txn) +} + +// GetMetadataMap returns (read-only) map associating value label with value +// metadata of a given descriptor. +// Returns nil if the descriptor does not expose metadata. +func (s *Scheduler) GetMetadataMap(descriptor string) idxmap.NamedMapping { + graphR := s.graph.Read() + defer graphR.Release() + + return graphR.GetMetadataMap(descriptor) +} + +// GetValueStatus returns the status of a non-derived value with the given +// key. +func (s *Scheduler) GetValueStatus(key string) *kvs.BaseValueStatus { + graphR := s.graph.Read() + defer graphR.Release() + return getValueStatus(graphR.GetNode(key), key) +} + +// WatchValueStatus allows to watch for changes in the status of non-derived +// values with keys selected by the selector (all if keySelector==nil). +func (s *Scheduler) WatchValueStatus(channel chan<- *kvs.BaseValueStatus, keySelector kvs.KeySelector) { + s.txnLock.Lock() + defer s.txnLock.Unlock() + s.valStateWatchers = append(s.valStateWatchers, valStateWatcher{ + channel: channel, + selector: keySelector, + }) +} + +// DumpValuesByDescriptor dumps values associated with the given +// descriptor as viewed from either NB (what was requested to be applied), +// SB (what is actually applied) or from the inside (what kvscheduler's +// cached view of SB is). 
func (s *Scheduler) DumpValuesByDescriptor(descriptor string, view kvs.View) (values []kvs.KVWithMetadata, err error) {
	if view == kvs.SBView {
		// pause transaction processing so that SB is not mutated while dumping
		s.txnLock.Lock()
		defer s.txnLock.Unlock()
	}

	graphR := s.graph.Read()
	defer graphR.Release()

	if view == kvs.NBView {
		// return the intended state
		var kvPairs []kvs.KVWithMetadata
		nbNodes := graphR.GetNodes(nil,
			graph.WithFlags(&DescriptorFlag{descriptor}),
			graph.WithoutFlags(&DerivedFlag{}, &ValueStateFlag{kvs.ValueState_OBTAINED}))

		for _, node := range nbNodes {
			lastUpdate := getNodeLastUpdate(node)
			if lastUpdate == nil || lastUpdate.value == nil {
				// filter found NB values and values requested to be deleted
				continue
			}
			kvPairs = append(kvPairs, kvs.KVWithMetadata{
				Key:    node.GetKey(),
				Value:  lastUpdate.value,
				Origin: kvs.FromNB,
			})
		}
		return kvPairs, nil
	}

	/* Cached/SB: */

	// retrieve from the in-memory graph first (for Retrieve it is used for correlation)
	inMemNodes := nodesToKVPairsWithMetadata(
		graphR.GetNodes(nil, correlateValsSelectors(descriptor)...))

	if view == kvs.CachedView {
		// return the scheduler's view of SB for the given descriptor
		return inMemNodes, nil
	}

	// obtain Retrieve handler from the descriptor
	kvDescriptor := s.registry.GetDescriptor(descriptor)
	if kvDescriptor == nil {
		err = errors.New("descriptor is not registered")
		return
	}
	if kvDescriptor.Retrieve == nil {
		err = errors.New("descriptor does not support Retrieve operation")
		return
	}

	// retrieve the state directly from SB via descriptor
	values, err = kvDescriptor.Retrieve(inMemNodes)
	return
}

// getDescriptorForKeyPrefix returns the name of the registered descriptor
// whose NBKeyPrefix equals the given prefix, or "" when none matches.
// NOTE(review): the loop does not break on the first match — if multiple
// descriptors shared the same prefix, the last registered one would win;
// confirm this is intended.
func (s *Scheduler) getDescriptorForKeyPrefix(keyPrefix string) string {
	var descriptorName string
	s.txnLock.Lock()
	for _, descriptor := range s.registry.GetAllDescriptors() {
		if descriptor.NBKeyPrefix == keyPrefix {
			descriptorName = descriptor.Name
		}
	}
	s.txnLock.Unlock()
	return descriptorName
}
// DumpValuesByKeyPrefix like DumpValuesByDescriptor returns a dump of values,
// but the descriptor is selected based on the key prefix.
func (s *Scheduler) DumpValuesByKeyPrefix(keyPrefix string, view kvs.View) (values []kvs.KVWithMetadata, err error) {
	descriptorName := s.getDescriptorForKeyPrefix(keyPrefix)
	if descriptorName == "" {
		err = errors.New("unknown key prefix")
		return
	}
	return s.DumpValuesByDescriptor(descriptorName, view)
}

// SetValue changes (non-derived) value.
// If value is nil, the value will get deleted.
func (txn *SchedulerTxn) SetValue(key string, value proto.Message) kvs.Txn {
	txn.values[key] = value
	return txn
}

// Commit orders scheduler to execute enqueued operations.
// Operations with unmet dependencies will get postponed and possibly
// executed later.
func (txn *SchedulerTxn) Commit(ctx context.Context) (txnSeqNum uint64, err error) {
	// ^uint64(0) marks "no sequence number assigned" on error paths
	txnSeqNum = ^uint64(0)

	txnData := &transaction{
		txnType: kvs.NBTransaction,
		nb:      &nbTxn{},
		values:  make([]kvForTxn, 0, len(txn.values)),
	}

	// collect values
	for key, value := range txn.values {
		txnData.values = append(txnData.values, kvForTxn{
			key:    key,
			value:  value,
			origin: kvs.FromNB,
		})
	}

	// parse transaction options (carried via the context)
	txnData.nb.isBlocking = !kvs.IsNonBlockingTxn(ctx)
	txnData.nb.resyncType, txnData.nb.verboseRefresh = kvs.IsResync(ctx)
	txnData.nb.retryArgs, txnData.nb.retryEnabled = kvs.IsWithRetry(ctx)
	txnData.nb.revertOnFailure = kvs.IsWithRevert(ctx)
	txnData.nb.description, _ = kvs.IsWithDescription(ctx)

	// validate transaction options
	if txnData.nb.resyncType == kvs.DownstreamResync && len(txnData.values) > 0 {
		return txnSeqNum, kvs.NewTransactionError(kvs.ErrCombinedDownstreamResyncWithChange, nil)
	}
	if txnData.nb.revertOnFailure && txnData.nb.resyncType != kvs.NotResync {
		return txnSeqNum, kvs.NewTransactionError(kvs.ErrRevertNotSupportedWithResync, nil)
	}

	// enqueue txn and for blocking Commit wait for the errors
	// (buffered channel so that the transaction loop never blocks on delivery)
	if txnData.nb.isBlocking {
		txnData.nb.resultChan = make(chan txnResult, 1)
	}
	err = txn.scheduler.enqueueTxn(txnData)
	if err != nil {
		return txnSeqNum, kvs.NewTransactionError(err, nil)
	}
	if txnData.nb.isBlocking {
		select {
		case <-txn.scheduler.ctx.Done():
			return txnSeqNum, kvs.NewTransactionError(kvs.ErrClosedScheduler, nil)
		case <-ctx.Done():
			return txnSeqNum, kvs.NewTransactionError(kvs.ErrTxnWaitCanceled, nil)
		case txnResult := <-txnData.nb.resultChan:
			close(txnData.nb.resultChan)
			return txnResult.txnSeqNum, txnResult.err
		}
	}
	return txnSeqNum, nil
}

// Copyright (c) 2018 Cisco and/or its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kvscheduler

import (
	"fmt"
	"strings"

	"github.com/gogo/protobuf/proto"
	"github.com/ligato/cn-infra/logging"
	kvs "github.com/ligato/vpp-agent/plugins/kvscheduler/api"
	"github.com/ligato/vpp-agent/plugins/kvscheduler/internal/graph"
	"github.com/ligato/vpp-agent/plugins/kvscheduler/internal/utils"
)

const (
	// markers used when logging graph traversal (see logGraphWalkEnv)
	nodeVisitBeginMark = "[BEGIN]"
	nodeVisitEndMark   = "[END]"
)

// resyncData stores data to be used for resync after refresh.
+type resyncData struct { + first bool // true if startup-resync + values []kvForTxn +} + +// refreshGraph updates all/some values in the graph to their *real* state +// using the Retrieve methods from descriptors. +func (s *Scheduler) refreshGraph(graphW graph.RWAccess, keys utils.KeySet, resyncData *resyncData, verbose bool) { + if s.logGraphWalk { + keysToRefresh := "" + if keys != nil && keys.Length() > 0 { + keysToRefresh = keys.String() + } + msg := fmt.Sprintf("refreshGrap (keys=%s)", keysToRefresh) + fmt.Printf("%s %s\n", nodeVisitBeginMark, msg) + defer fmt.Printf("%s %s\n", nodeVisitEndMark, msg) + } + refreshedKeys := utils.NewMapBasedKeySet() + + // iterate over all descriptors, in order given by retrieve dependencies + for _, descriptor := range s.registry.GetAllDescriptors() { + handler := &descriptorHandler{descriptor} + + // check if this descriptor's key space should be refreshed as well + var skip bool + if keys != nil { + skip = keys.Length() > 0 + for _, key := range keys.Iterate() { + if descriptor.KeySelector(key) { + skip = false + break + } + } + } + if skip { + // nothing to refresh in the key space of this descriptor + s.skipRefresh(graphW, descriptor.Name, nil, refreshedKeys) + continue + } + + // get available base values for this descriptor from memory before + // refresh + prevAvailNodes := graphW.GetNodes(nil, correlateValsSelectors(descriptor.Name)...) 
+ + // get key-value pairs for correlation + var correlate []kvs.KVWithMetadata + if resyncData != nil && resyncData.first { + // for startup resync, use data received from NB + for _, kv := range resyncData.values { + if descriptor.KeySelector(kv.key) { + correlate = append(correlate, + kvs.KVWithMetadata{ + Key: kv.key, + Value: kv.value, + Origin: kv.origin, + }) + } + } + } else { + // for refresh of failed values or run-time resync, use in-memory + // kv-pairs for correlation + correlate = nodesToKVPairsWithMetadata(prevAvailNodes) + } + + // execute Retrieve operation + retrieved, ableToRetrieve, err := handler.retrieve(correlate) + + // mark un-retrievable as refreshed + if !ableToRetrieve || err != nil { + if err != nil { + s.Log.WithField("descriptor", descriptor.Name). + Error("failed to retrieve values, refresh for the descriptor will be skipped") + } + s.skipRefresh(graphW, descriptor.Name, nil, refreshedKeys) + continue + } else if verbose { + plural := "s" + if len(retrieved) == 1 { + plural = "" + } + + var list strings.Builder + for i, d := range retrieved { + num := fmt.Sprintf("%d.", i+1) + list.WriteString(fmt.Sprintf("\n - %3s %q -> %v [%s]", + num, d.Key, utils.ProtoToString(d.Value), d.Origin)) + if d.Metadata != nil { + list.WriteString(fmt.Sprintf(" Metadata: %+v", d.Metadata)) + } + } + + s.Log.Debugf("%s descriptor retrieved %d item%s: %v", + descriptor.Name, len(retrieved), plural, list.String()) + + } + + if keys != nil && keys.Length() > 0 { + // mark keys that should not be touched as refreshed + s.skipRefresh(graphW, descriptor.Name, keys, refreshedKeys) + } + + // process retrieved kv-pairs + for _, retrievedKV := range retrieved { + if keys != nil && keys.Length() > 0 { + // do no touch values that aren't meant to be refreshed + if toRefresh := keys.Has(retrievedKV.Key); !toRefresh { + continue + } + } + if !s.validRetrievedKV(retrievedKV, descriptor, refreshedKeys) { + continue + } + + // 1st attempt to determine value origin + if 
retrievedKV.Origin == kvs.UnknownOrigin { + // determine value origin based on the values for correlation + for _, kv := range correlate { + if kv.Key == retrievedKV.Key { + retrievedKV.Origin = kv.Origin + break + } + } + } + + // 2nd attempt to determine value origin + if retrievedKV.Origin == kvs.UnknownOrigin { + // determine value origin based on the last revision + timeline := graphW.GetNodeTimeline(retrievedKV.Key) + if len(timeline) > 0 { + lastRev := timeline[len(timeline)-1] + valueStateFlag := lastRev.Flags.GetFlag(ValueStateFlagName) + valueState := valueStateFlag.(*ValueStateFlag).valueState + retrievedKV.Origin = valueStateToOrigin(valueState) + } + } + + if retrievedKV.Origin == kvs.UnknownOrigin { + // will assume this is from SB + retrievedKV.Origin = kvs.FromSB + } + + // refresh node that represents this kv-pair + s.refreshValue(graphW, retrievedKV, handler, refreshedKeys, 2) + } + + // unset the metadata from base NB values that do not actually exists + for _, node := range prevAvailNodes { + if refreshed := refreshedKeys.Has(node.GetKey()); !refreshed { + if getNodeOrigin(node) == kvs.FromNB { + if s.logGraphWalk { + fmt.Printf(" -> unset metadata for key=%s\n", node.GetKey()) + } + missingNode := graphW.SetNode(node.GetKey()) + missingNode.SetMetadata(nil) + } + } + } + + // in-progress save to expose changes in the metadata for Retrieve-s + // of the following descriptors + graphW.Save() + } + + // update state of values that do not actually exist + for _, node := range graphW.GetNodes(nil) { + if refreshed := refreshedKeys.Has(node.GetKey()); refreshed { + continue + } + s.refreshUnavailNode(graphW, node, refreshedKeys, 2) + } + + if verbose { + fmt.Println(dumpGraph(graphW)) + } +} + +// refreshValue refreshes node that represents the given retrieved key-value pair. 
func (s *Scheduler) refreshValue(graphW graph.RWAccess, retrievedKV kvs.KVWithMetadata,
	handler *descriptorHandler, refreshed utils.KeySet, indent int) {
	if s.logGraphWalk {
		indentStr := strings.Repeat(" ", indent)
		msg := fmt.Sprintf("refreshValue (key=%s)", retrievedKV.Key)
		fmt.Printf("%s%s %s\n", indentStr, nodeVisitBeginMark, msg)
		defer fmt.Printf("%s%s %s\n", indentStr, nodeVisitEndMark, msg)
	}

	// refresh node that represents this kv-pair
	node := graphW.SetNode(retrievedKV.Key)
	node.SetLabel(handler.keyLabel(node.GetKey()))
	node.SetValue(retrievedKV.Value)
	if handler.descriptor.WithMetadata {
		node.SetMetadataMap(handler.descriptor.Name)
		node.SetMetadata(retrievedKV.Metadata)
	}
	s.refreshAvailNode(graphW, node, retrievedKV.Origin, false, node.GetKey(), refreshed, indent+2)

	// determine the set of unavailable derived values:
	// start from the previously recorded derived keys and drop those that are
	// re-derived from the freshly retrieved value
	obsolete := getDerivedKeys(node)
	derives := handler.derivedValues(node.GetKey(), node.GetValue())
	for _, newDerived := range derives {
		obsolete.Del(newDerived.Key)
	}

	// keep obsolete derived values still in the relation
	for _, key := range obsolete.Iterate() {
		derives = append(derives, kvs.KeyValuePair{Key: key}) // value unused
	}

	// refresh relations
	dependencies := handler.dependencies(node.GetKey(), node.GetValue())
	node.SetTargets(constructTargets(dependencies, derives))

	// refresh derived values
	for _, kv := range derives {
		isObsolete := obsolete.Has(kv.Key)
		derNode := graphW.SetNode(kv.Key)
		if !isObsolete {
			derDescr := s.registry.GetDescriptorForKey(kv.Key)
			derHandler := descriptorHandler{derDescr}
			derNode.SetValue(kv.Value)
			dependencies := derHandler.dependencies(derNode.GetKey(), derNode.GetValue())
			derNode.SetTargets(constructTargets(dependencies, nil))
			s.refreshAvailNode(graphW, derNode, retrievedKV.Origin, true, node.GetKey(), refreshed, indent+2)
		} else {
			// obsolete derived value: mark unavailable (or delete) instead
			s.refreshUnavailNode(graphW, derNode, refreshed, indent+2)
		}
	}
}

// refreshAvailNode refreshes state of a node whose value was returned by Retrieve.
func (s *Scheduler) refreshAvailNode(graphW graph.RWAccess, node graph.NodeRW,
	origin kvs.ValueOrigin, derived bool, baseKey string, refreshed utils.KeySet, indent int) {
	if s.logGraphWalk {
		indentStr := strings.Repeat(" ", indent)
		var derivedMark string
		if derived {
			derivedMark = ", is-derived"
		}
		msg := fmt.Sprintf("refreshAvailNode (key=%s%s)", node.GetKey(), derivedMark)
		fmt.Printf("%s%s %s\n", indentStr, nodeVisitBeginMark, msg)
		defer fmt.Printf("%s%s %s\n", indentStr, nodeVisitEndMark, msg)
	}

	// validate first
	descriptor := s.registry.GetDescriptorForKey(node.GetKey()) // nil for properties
	if derived && !s.validRetrievedDerivedKV(node, descriptor, refreshed) {
		graphW.DeleteNode(node.GetKey())
		return
	}

	// update availability
	if !isNodeAvailable(node) {
		s.updatedStates.Add(baseKey)
		node.DelFlags(UnavailValueFlagName)
	}
	refreshed.Add(node.GetKey())

	// refresh state
	if getNodeState(node) == kvs.ValueState_NONEXISTENT {
		// newly found node
		if origin == kvs.FromSB {
			s.refreshNodeState(node, kvs.ValueState_OBTAINED, indent)
		} else {
			s.refreshNodeState(node, kvs.ValueState_DISCOVERED, indent)
		}
	}
	if getNodeState(node) == kvs.ValueState_PENDING {
		// no longer pending apparently
		s.refreshNodeState(node, kvs.ValueState_CONFIGURED, indent)
	}

	// update descriptor flag
	if descriptor != nil {
		node.SetFlags(&DescriptorFlag{descriptor.Name})
	} else {
		node.DelFlags(DescriptorFlagName)
	}

	// updated flags for derived values
	if !derived {
		node.DelFlags(DerivedFlagName)
	} else {
		node.SetFlags(&DerivedFlag{baseKey})
	}
}

// refreshUnavailNode refreshes state of a node whose value is found to be unavailable.
+func (s *Scheduler) refreshUnavailNode(graphW graph.RWAccess, node graph.Node, refreshed utils.KeySet, indent int) { + if s.logGraphWalk { + indentStr := strings.Repeat(" ", indent) + msg := fmt.Sprintf("refreshUnavailNode (key=%s, isDerived=%t)", node.GetKey(), isNodeDerived(node)) + fmt.Printf("%s%s %s\n", indentStr, nodeVisitBeginMark, msg) + defer fmt.Printf("%s%s %s\n", indentStr, nodeVisitEndMark, msg) + } + + refreshed.Add(node.GetKey()) + if isNodeAvailable(node) { + s.updatedStates.Add(getNodeBaseKey(node)) + } + state := getNodeState(node) + if getNodeOrigin(node) == kvs.FromSB || state == kvs.ValueState_DISCOVERED { + // just remove from the graph + graphW.DeleteNode(node.GetKey()) + return + } + + // mark node as unavailable, but do not delete + nodeW := graphW.SetNode(node.GetKey()) + if isNodeAvailable(node) { + nodeW.SetFlags(&UnavailValueFlag{}) + } + + // update state + if state == kvs.ValueState_UNIMPLEMENTED { + // it is expected that unimplemented value is not retrieved + return + } + if state == kvs.ValueState_CONFIGURED { + if getNodeLastUpdate(node).value == nil { + s.refreshNodeState(nodeW, kvs.ValueState_REMOVED, indent) + } else { + s.refreshNodeState(nodeW, kvs.ValueState_MISSING, indent) + } + } +} + +func (s *Scheduler) refreshNodeState(node graph.NodeRW, newState kvs.ValueState, indent int) { + if getNodeState(node) != newState { + if s.logGraphWalk { + fmt.Printf("%s -> change value state from %v to %v\n", + strings.Repeat(" ", indent), getNodeState(node), newState) + } + node.SetFlags(&ValueStateFlag{valueState: newState}) + } +} + +// skipRefresh is used to mark nodes as refreshed without actual refreshing +// if they should not (or cannot) be refreshed. 
func (s *Scheduler) skipRefresh(graphR graph.ReadAccess, descriptor string, except utils.KeySet, refreshed utils.KeySet) {
	// base (non-derived) nodes owned by the descriptor
	skipped := graphR.GetNodes(nil,
		graph.WithFlags(&DescriptorFlag{descriptor}),
		graph.WithoutFlags(&DerivedFlag{}))
	for _, node := range skipped {
		if except != nil {
			if toRefresh := except.Has(node.GetKey()); toRefresh {
				continue
			}
		}
		refreshed.Add(node.GetKey())

		// skip refresh for derived nodes
		for _, derivedNode := range getDerivedNodes(node) {
			refreshed.Add(derivedNode.GetKey())
		}
	}
}

// dumpGraph renders a human-readable, table-like textual dump of all nodes
// in the graph, including values, relations, metadata and errors.
func dumpGraph(g graph.RWAccess) string {
	keys := g.GetKeys()

	var buf strings.Builder
	graphInfo := fmt.Sprintf("%d nodes", len(keys))
	buf.WriteString("+======================================================================================================================+\n")
	buf.WriteString(fmt.Sprintf("| GRAPH %105s |\n", graphInfo))
	buf.WriteString("+======================================================================================================================+\n")
	// writeLine prints one bordered row with <left> left-aligned and <right>
	// right-aligned to the fixed table width
	writeLine := func(left, right string) {
		n := 115 - len(left)
		buf.WriteString(fmt.Sprintf("| %s %"+fmt.Sprint(n)+"s |\n", left, right))

	}
	// writeLines prints a multi-line string as separate rows, each prefixed
	writeLines := func(linesStr string, prefix string) {
		lines := strings.Split(linesStr, "\n")
		for _, line := range lines {
			if line == "" {
				continue
			}
			writeLine(fmt.Sprintf("%s%s", prefix, line), "")
		}
	}
	for i, key := range keys {
		node := g.GetNode(key)
		keyLabel := key
		if label := node.GetLabel(); label != key && label != "" {
			keyLabel = fmt.Sprintf("%s (%s)", key, label)
		}
		descriptor := ""
		if f := node.GetFlag(DescriptorFlagName); f != nil {
			descriptor = fmt.Sprintf("[%s] ", f.GetValue())
		}
		lastUpdate := "-"
		if f := node.GetFlag(LastUpdateFlagName); f != nil {
			lastUpdate = f.GetValue()
		}
		unavailable := ""
		if f := node.GetFlag(UnavailValueFlagName); f != nil {
			unavailable = " "
		}
		writeLine(fmt.Sprintf("%s%s", descriptor, keyLabel), fmt.Sprintf("%s %s %s",
			unavailable,
			lastUpdate,
			getNodeState(node).String(),
		))
		writeLines(proto.MarshalTextString(node.GetValue()), "  ")

		if f := node.GetTargets(DependencyRelation); f != nil && len(f) > 0 {
			writeLine("Depends on:", "")
			for _, dep := range f {
				var nodeDeps []string
				for _, node := range dep.Nodes {
					nodeDeps = append(nodeDeps, node.GetKey())
				}
				if len(nodeDeps) > 1 {
					writeLine(fmt.Sprintf(" - %s", dep.Label), "")
					writeLines(strings.Join(nodeDeps, "\n"), "    -> ")
				} else if len(nodeDeps) == 1 {
					writeLine(fmt.Sprintf(" - %s -> %v", dep.Label, strings.Join(nodeDeps, " ")), "")
				} else {
					writeLine(fmt.Sprintf(" - %s -> ", dep.Label), "")
				}
			}
		}
		if f := node.GetTargets(DerivesRelation); f != nil && len(f) > 0 {
			writeLine("Derives:", "")
			var nodeDers []string
			for _, der := range f {
				if len(der.Nodes) == 0 {
					nodeDers = append(nodeDers, fmt.Sprintf("%s", der.Label))
				} else {
					for _, node := range der.Nodes {
						desc := ""
						if d := node.GetFlag(DescriptorFlagName); d != nil {
							desc = fmt.Sprintf("[%s] ", d.GetValue())
						}
						nodeDers = append(nodeDers, fmt.Sprintf("%s%s", desc, node.GetKey()))
					}
				}
			}
			writeLines(strings.Join(nodeDers, "\n"), " - ")
		}
		if f := node.GetSources(DependencyRelation); len(f) > 0 {
			writeLine("Dependency for:", "")
			var nodeDeps []string
			for _, node := range f {
				desc := ""
				if d := node.GetFlag(DescriptorFlagName); d != nil {
					desc = fmt.Sprintf("[%s] ", d.GetValue())
				}
				nodeDeps = append(nodeDeps, fmt.Sprintf("%s%s", desc, node.GetKey()))
			}
			writeLines(strings.Join(nodeDeps, "\n"), " - ")
		}
		if f := node.GetSources(DerivesRelation); len(f) > 0 {
			var nodeDers []string
			for _, der := range f {
				nodeDers = append(nodeDers, der.GetKey())
			}
			writeLine(fmt.Sprintf("Derived from: %s", strings.Join(nodeDers, " ")), "")
		}
		if f := node.GetMetadata(); f != nil {
			writeLine(fmt.Sprintf("Metadata: %+v", f), "")
		}
		if f := node.GetFlag(ErrorFlagName); f != nil {
			writeLine(fmt.Sprintf("Errors: %+v", f.GetValue()), "")
		}

		if i+1 != len(keys) {
			buf.WriteString("+----------------------------------------------------------------------------------------------------------------------+\n")
		}
	}
	buf.WriteString("+======================================================================================================================+\n")

	return buf.String()
}

// validRetrievedKV verifies validity of a retrieved KV-pair.
func (s *Scheduler) validRetrievedKV(kv kvs.KVWithMetadata, descriptor *kvs.KVDescriptor, refreshed utils.KeySet) bool {
	if kv.Key == "" {
		s.Log.WithFields(logging.Fields{
			"descriptor": descriptor.Name,
		}).Warn("Descriptor retrieved value with empty key")
		return false
	}
	if alreadyRetrieved := refreshed.Has(kv.Key); alreadyRetrieved {
		s.Log.WithFields(logging.Fields{
			"descriptor": descriptor.Name,
			"key":        kv.Key,
		}).Warn("The same value was retrieved more than once")
		return false
	}
	if kv.Value == nil {
		s.Log.WithFields(logging.Fields{
			"descriptor": descriptor.Name,
			"key":        kv.Key,
		}).Warn("Descriptor retrieved nil value")
		return false
	}
	if !descriptor.KeySelector(kv.Key) {
		s.Log.WithFields(logging.Fields{
			"descriptor": descriptor.Name,
			"key":        kv.Key,
			"value":      kv.Value,
		}).Warn("Descriptor retrieved value outside of its key space")
		return false
	}
	return true
}

// validRetrievedDerivedKV verifies validity of a KV-pair derived from a retrieved value.
func (s *Scheduler) validRetrievedDerivedKV(node graph.Node, descriptor *kvs.KVDescriptor, refreshed utils.KeySet) bool {
	descriptorName := ""
	if descriptor != nil {
		descriptorName = descriptor.Name
	}
	if node.GetValue() == nil {
		s.Log.WithFields(logging.Fields{
			"descriptor": descriptorName,
			"key":        node.GetKey(),
		}).Warn("Derived nil value")
		return false
	}
	if alreadyRetrieved := refreshed.Has(node.GetKey()); alreadyRetrieved {
		s.Log.WithFields(logging.Fields{
			"descriptor": descriptorName,
			"key":        node.GetKey(),
		}).Warn("The same value was retrieved more than once")
		// return true -> let's overwrite invalidly retrieved derived value
	}
	return true
}
diff --git a/plugins/kvscheduler/rest.go b/plugins/kvscheduler/rest.go
new file mode 100644
index 0000000000..7359d26488
--- /dev/null
+++ b/plugins/kvscheduler/rest.go
@@ -0,0 +1,505 @@
// Copyright (c) 2018 Cisco and/or its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kvscheduler

import (
	"context"
	"errors"
	"net/http"
	"net/url"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/unrolled/render"

	"github.com/ligato/cn-infra/rpc/rest"
	kvs "github.com/ligato/vpp-agent/plugins/kvscheduler/api"
	"github.com/ligato/vpp-agent/plugins/kvscheduler/internal/graph"
	"github.com/ligato/vpp-agent/plugins/kvscheduler/internal/utils"
)

const (
	// urlPrefix is the prefix used for REST urls of the KVScheduler.
	urlPrefix = "/scheduler/"

	// txnHistoryURL is URL used to obtain the transaction history.
	txnHistoryURL = urlPrefix + "txn-history"

	// sinceArg is the name of the argument used to define the start of the time
	// window for the transaction history to display.
	sinceArg = "since"

	// untilArg is the name of the argument used to define the end of the time
	// window for the transaction history to display.
	untilArg = "until"

	// seqNumArg is the name of the argument used to define the sequence number
	// of the transaction to display (txnHistoryURL).
	seqNumArg = "seq-num"

	// formatArg is the name of the argument used to set the output format
	// for the transaction history API.
	formatArg = "format"

	// recognized formats:
	formatJSON = "json"
	formatText = "text"

	// keyTimelineURL is URL used to obtain timeline of value changes for a given key.
	keyTimelineURL = urlPrefix + "key-timeline"

	// keyArg is the name of the argument used to define key for "key-timeline" API.
	keyArg = "key"

	// graphSnapshotURL is URL used to obtain graph snapshot from a given point in time.
	graphSnapshotURL = urlPrefix + "graph-snapshot"

	// flagStatsURL is URL used to obtain flag statistics.
	flagStatsURL = urlPrefix + "flag-stats"

	// flagArg is the name of the argument used to define flag for "flag-stats" API.
	flagArg = "flag"

	// prefixArg is the name of the argument used to define prefix to filter keys
	// for "flag-stats" API.
	prefixArg = "prefix"

	// timeArg is the name of the argument used to define point in time for a graph snapshot
	// to retrieve. Value = number of nanoseconds since the start of the epoch.
	timeArg = "time"

	// downstreamResyncURL is URL used to trigger downstream-resync.
	downstreamResyncURL = urlPrefix + "downstream-resync"

	// retryArg is the name of the argument used for "downstream-resync" API to tell whether
	// to retry failed operations or not.
	retryArg = "retry"

	// verboseArg is the name of the argument used for "downstream-resync" API
	// to tell whether the refreshed graph should be printed to stdout or not.
	verboseArg = "verbose"

	// dumpURL is URL used to dump either SB or scheduler's internal state of kv-pairs
	// under the given descriptor / key-prefix.
	dumpURL = urlPrefix + "dump"

	// descriptorArg is the name of the argument used to define descriptor for "dump" API.
	descriptorArg = "descriptor"

	// keyPrefixArg is the name of the argument used to define key prefix for "dump" API.
	keyPrefixArg = "key-prefix"

	// viewArg is the name of the argument used for "dump" API to choose from
	// which point of view to look at the key-value space when dumping values.
	// See type View from kvscheduler's API to learn the set of possible values.
	viewArg = "view"

	// statusURL is URL used to print the state of values under the given
	// descriptor / key-prefix or all of them.
	statusURL = urlPrefix + "status"
)

// errorString wraps string representation of an error that, unlike the original
// error, can be marshalled.
type errorString struct {
	Error string
}

// dumpIndex defines "index" page for the Dump REST API.
type dumpIndex struct {
	Descriptors []string
	KeyPrefixes []string
	Views       []string
}

// kvsWithMetaForREST converts a list of key-value pairs with metadata
// into an equivalent list with proto.Message recorded for proper marshalling.
func kvsWithMetaForREST(in []kvs.KVWithMetadata) (out []kvs.KVWithMetadata) {
	for _, kv := range in {
		out = append(out, kvs.KVWithMetadata{
			Key:      kv.Key,
			Value:    utils.RecordProtoMessage(kv.Value),
			Metadata: kv.Metadata,
			Origin:   kv.Origin,
		})
	}
	return out
}

// registerHandlers registers all supported REST APIs.
func (s *Scheduler) registerHandlers(http rest.HTTPHandlers) {
	if http == nil {
		s.Log.Warn("No http handler provided, skipping registration of KVScheduler REST handlers")
		return
	}
	http.RegisterHTTPHandler(txnHistoryURL, s.txnHistoryGetHandler, "GET")
	http.RegisterHTTPHandler(keyTimelineURL, s.keyTimelineGetHandler, "GET")
	http.RegisterHTTPHandler(graphSnapshotURL, s.graphSnapshotGetHandler, "GET")
	http.RegisterHTTPHandler(flagStatsURL, s.flagStatsGetHandler, "GET")
	http.RegisterHTTPHandler(downstreamResyncURL, s.downstreamResyncPostHandler, "POST")
	http.RegisterHTTPHandler(dumpURL, s.dumpGetHandler, "GET")
	http.RegisterHTTPHandler(statusURL, s.statusGetHandler, "GET")
	http.RegisterHTTPHandler(urlPrefix+"graph", s.dotGraphHandler, "GET")
}

// txnHistoryGetHandler is the GET handler for "txn-history" API.
// Arguments: seq-num (takes precedence), since/until (Unix-nanosecond window),
// format ("json" or "text", default json).
func (s *Scheduler) txnHistoryGetHandler(formatter *render.Render) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		var since, until time.Time
		var seqNum uint64
		args := req.URL.Query()

		// parse optional *format* argument (default = JSON)
		format := formatJSON
		if formatStr, withFormat := args[formatArg]; withFormat && len(formatStr) == 1 {
			format = formatStr[0]
			if format != formatJSON && format != formatText {
				err := errors.New("unrecognized output format")
				// NOTE(review): this response is the only one not wrapped in
				// s.logError — presumably an oversight; confirm before changing
				formatter.JSON(w, http.StatusInternalServerError, errorString{err.Error()})
				return
			}
		}

		// parse optional *seq-num* argument
		if seqNumStr, withSeqNum := args[seqNumArg]; withSeqNum && len(seqNumStr) == 1 {
			var err error
			seqNum, err = strconv.ParseUint(seqNumStr[0], 10, 64)
			if err != nil {
				s.logError(formatter.JSON(w, http.StatusInternalServerError, errorString{err.Error()}))
				return
			}

			// sequence number takes precedence over the since-until time window
			txn := s.GetRecordedTransaction(seqNum)
			if txn == nil {
				err := errors.New("transaction with such sequence number is not recorded")
				s.logError(formatter.JSON(w, http.StatusNotFound, errorString{err.Error()}))
				return
			}

			if format == formatJSON {
				s.logError(formatter.JSON(w, http.StatusOK, txn))
			} else {
				s.logError(formatter.Text(w, http.StatusOK, txn.StringWithOpts(false, true, 0)))
			}
			return
		}

		// parse optional *until* argument
		if untilStr, withUntil := args[untilArg]; withUntil && len(untilStr) == 1 {
			var err error
			until, err = stringToTime(untilStr[0])
			if err != nil {
				s.logError(formatter.JSON(w, http.StatusInternalServerError, errorString{err.Error()}))
				return
			}
		}

		// parse optional *since* argument
		if sinceStr, withSince := args[sinceArg]; withSince && len(sinceStr) == 1 {
			var err error
			since, err = stringToTime(sinceStr[0])
			if err != nil {
				s.logError(formatter.JSON(w, http.StatusInternalServerError, errorString{err.Error()}))
				return
			}
		}

		txnHistory := s.GetTransactionHistory(since, until)
		if format == formatJSON {
			s.logError(formatter.JSON(w, http.StatusOK, txnHistory))
		} else {
			s.logError(formatter.Text(w, http.StatusOK, txnHistory.StringWithOpts(false, false, 0)))
		}
	}
}

// keyTimelineGetHandler is the GET handler for "key-timeline" API.
+func (s *Scheduler) keyTimelineGetHandler(formatter *render.Render) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + args := req.URL.Query() + + // parse optional *time* argument + var timeVal time.Time + if timeStr, withTime := args[timeArg]; withTime && len(timeStr) == 1 { + var err error + timeVal, err = stringToTime(timeStr[0]) + if err != nil { + s.logError(formatter.JSON(w, http.StatusInternalServerError, errorString{err.Error()})) + return + } + } + + // parse mandatory *key* argument + if keys, withKey := args[keyArg]; withKey && len(keys) == 1 { + graphR := s.graph.Read() + defer graphR.Release() + + timeline := graphR.GetNodeTimeline(keys[0]) + if !timeVal.IsZero() { + var nodeRecord *graph.RecordedNode + for _, record := range timeline { + if record.Since.Before(timeVal) && + (record.Until.IsZero() || record.Until.After(timeVal)) { + nodeRecord = record + break + } + } + s.logError(formatter.JSON(w, http.StatusOK, nodeRecord)) + return + } + s.logError(formatter.JSON(w, http.StatusOK, timeline)) + return + } + + err := errors.New("missing key argument") + s.logError(formatter.JSON(w, http.StatusInternalServerError, errorString{err.Error()})) + } +} + +// graphSnapshotGetHandler is the GET handler for "graph-snapshot" API. 
+func (s *Scheduler) graphSnapshotGetHandler(formatter *render.Render) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + timeVal := time.Now() + args := req.URL.Query() + + // parse optional *time* argument + if timeStr, withTime := args[timeArg]; withTime && len(timeStr) == 1 { + var err error + timeVal, err = stringToTime(timeStr[0]) + if err != nil { + s.logError(formatter.JSON(w, http.StatusInternalServerError, errorString{err.Error()})) + return + } + } + + graphR := s.graph.Read() + defer graphR.Release() + + snapshot := graphR.GetSnapshot(timeVal) + s.logError(formatter.JSON(w, http.StatusOK, snapshot)) + } +} + +// flagStatsGetHandler is the GET handler for "flag-stats" API. +func (s *Scheduler) flagStatsGetHandler(formatter *render.Render) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + args := req.URL.Query() + + // parse repeated *prefix* argument + prefixes := args[prefixArg] + + if flags, withFlag := args[flagArg]; withFlag && len(flags) == 1 { + graphR := s.graph.Read() + defer graphR.Release() + + stats := graphR.GetFlagStats(flags[0], func(key string) bool { + if len(prefixes) == 0 { + return true + } + for _, prefix := range prefixes { + if strings.HasPrefix(key, prefix) { + return true + } + } + return false + }) + s.logError(formatter.JSON(w, http.StatusOK, stats)) + return + } + + err := errors.New("missing flag argument") + s.logError(formatter.JSON(w, http.StatusInternalServerError, errorString{err.Error()})) + } +} + +// downstreamResyncPostHandler is the POST handler for "downstream-resync" API. 
+func (s *Scheduler) downstreamResyncPostHandler(formatter *render.Render) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + // parse optional *retry* argument + args := req.URL.Query() + retry := false + if retryStr, withRetry := args[retryArg]; withRetry && len(retryStr) == 1 { + retryVal := retryStr[0] + if retryVal == "true" || retryVal == "1" { + retry = true + } + } + + // parse optional *verbose* argument + verbose := false + if verboseStr, withVerbose := args[verboseArg]; withVerbose && len(verboseStr) == 1 { + verboseVal := verboseStr[0] + if verboseVal == "true" || verboseVal == "1" { + verbose = true + } + } + + ctx := context.Background() + ctx = kvs.WithResync(ctx, kvs.DownstreamResync, verbose) + if retry { + ctx = kvs.WithRetryDefault(ctx) + } + _, err := s.StartNBTransaction().Commit(ctx) + if err != nil { + s.logError(formatter.JSON(w, http.StatusInternalServerError, errorString{err.Error()})) + return + } + s.logError(formatter.Text(w, http.StatusOK, "SB was successfully synchronized with KVScheduler\n")) + } +} + +func parseDumpAndStatusCommonArgs(args url.Values) (descriptor, keyPrefix string, err error) { + // parse optional *descriptor* argument + descriptors, withDescriptor := args[descriptorArg] + if withDescriptor && len(descriptors) != 1 { + err = errors.New("descriptor argument listed more than once") + return + } + if withDescriptor { + descriptor = descriptors[0] + } + + // parse optional *key-prefix* argument + keyPrefixes, withKeyPrefix := args[keyPrefixArg] + if withKeyPrefix && len(keyPrefixes) != 1 { + err = errors.New("key-prefix argument listed more than once") + return + } + if withKeyPrefix { + keyPrefix = keyPrefixes[0] + } + return +} + +// dumpGetHandler is the GET handler for "dump" API. 
+func (s *Scheduler) dumpGetHandler(formatter *render.Render) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + args := req.URL.Query() + + descriptor, keyPrefix, err := parseDumpAndStatusCommonArgs(args) + if err != nil { + s.logError(formatter.JSON(w, http.StatusInternalServerError, errorString{err.Error()})) + return + } + + // without descriptor and key prefix return "index" page + if descriptor == "" && keyPrefix == "" { + s.txnLock.Lock() + defer s.txnLock.Unlock() + index := dumpIndex{Views: []string{ + kvs.SBView.String(), kvs.NBView.String(), kvs.CachedView.String()}} + for _, descriptor := range s.registry.GetAllDescriptors() { + index.Descriptors = append(index.Descriptors, descriptor.Name) + index.KeyPrefixes = append(index.KeyPrefixes, descriptor.NBKeyPrefix) + } + s.logError(formatter.JSON(w, http.StatusOK, index)) + return + } + + // parse optional *view* argument (default = SBView) + var view kvs.View + if viewStr, withState := args[viewArg]; withState && len(viewStr) == 1 { + switch viewStr[0] { + case kvs.SBView.String(): + view = kvs.SBView + case kvs.NBView.String(): + view = kvs.NBView + case kvs.CachedView.String(): + view = kvs.CachedView + default: + err := errors.New("unrecognized system view") + s.logError(formatter.JSON(w, http.StatusInternalServerError, errorString{err.Error()})) + return + } + } + + var dump []kvs.KVWithMetadata + if descriptor != "" { + dump, err = s.DumpValuesByDescriptor(descriptor, view) + } else { + dump, err = s.DumpValuesByKeyPrefix(keyPrefix, view) + } + if err != nil { + s.logError(formatter.JSON(w, http.StatusInternalServerError, errorString{err.Error()})) + return + } + s.logError(formatter.JSON(w, http.StatusOK, kvsWithMetaForREST(dump))) + } +} + +// statusGetHandler is the GET handler for "status" API. 
+func (s *Scheduler) statusGetHandler(formatter *render.Render) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + args := req.URL.Query() + + descriptor, keyPrefix, err := parseDumpAndStatusCommonArgs(args) + if err != nil { + s.logError(formatter.JSON(w, http.StatusInternalServerError, errorString{err.Error()})) + return + } + + graphR := s.graph.Read() + defer graphR.Release() + + if descriptor == "" && keyPrefix != "" { + descriptor = s.getDescriptorForKeyPrefix(keyPrefix) + if descriptor == "" { + err = errors.New("unknown key prefix") + } + s.logError(formatter.JSON(w, http.StatusInternalServerError, errorString{err.Error()})) + return + } + + var nodes []graph.Node + if descriptor == "" { + // get all nodes with base values + nodes = graphR.GetNodes(nil, graph.WithoutFlags(&DerivedFlag{})) + } else { + // get nodes with base values under the given descriptor + nodes = graphR.GetNodes(nil, + graph.WithFlags(&DescriptorFlag{descriptor}), + graph.WithoutFlags(&DerivedFlag{})) + } + + var status []*kvs.BaseValueStatus + for _, node := range nodes { + status = append(status, getValueStatus(node, node.GetKey())) + } + // sort by keys + sort.Slice(status, func(i, j int) bool { + return status[i].Value.Key < status[j].Value.Key + }) + s.logError(formatter.JSON(w, http.StatusOK, status)) + } +} + +// logError logs non-nil errors from JSON formatter +func (s *Scheduler) logError(err error) { + if err != nil { + s.Log.Error(err) + } +} + +// stringToTime converts Unix timestamp from string to time.Time. +func stringToTime(s string) (time.Time, error) { + nsec, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return time.Time{}, err + } + return time.Unix(0, nsec), nil +} diff --git a/plugins/kvscheduler/resync_test.go b/plugins/kvscheduler/resync_test.go new file mode 100644 index 0000000000..894ba27147 --- /dev/null +++ b/plugins/kvscheduler/resync_test.go @@ -0,0 +1,1763 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kvscheduler + +/* TODO: fix and re-enable UTs +import ( + "context" + "errors" + "testing" + "time" + + . "github.com/onsi/gomega" + + "github.com/gogo/protobuf/proto" + . "github.com/ligato/vpp-agent/plugins/kvscheduler/api" + "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/test" + "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/utils" +) + +func TestEmptyResync(t *testing.T) { + RegisterTestingT(t) + + // prepare KV Scheduler + scheduler := NewPlugin(UseDeps(func(deps *Deps) { + deps.HTTPHandlers = nil + })) + err := scheduler.Init() + Expect(err).To(BeNil()) + + // prepare mocks + mockSB := test.NewMockSouthbound() + descriptor1 := test.NewMockDescriptor(&KVDescriptor{ + Name: descriptor1Name, + NBKeyPrefix: prefixA, + KeySelector: prefixSelector(prefixA), + WithMetadata: true, + }, mockSB, 0) + + // register descriptor with the scheduler + scheduler.RegisterKVDescriptor(descriptor1) + nbPrefixes := scheduler.GetRegisteredNBKeyPrefixes() + Expect(nbPrefixes).To(HaveLen(1)) + Expect(nbPrefixes).To(ContainElement(prefixA)) + + // get metadata map created for the descriptor + metadataMap := scheduler.GetMetadataMap(descriptor1.Name) + _, withMetadataMap := metadataMap.(test.NameToInteger) + Expect(withMetadataMap).To(BeTrue()) + + // transaction history should be initially empty + Expect(scheduler.GetTransactionHistory(time.Time{}, time.Time{})).To(BeEmpty()) + + // run 
transaction with empty resync + startTime := time.Now() + ctx := WithResync(context.Background(), FullResync, true) + description := "testing empty resync" + ctx = WithDescription(ctx, description) + seqNum, err := scheduler.StartNBTransaction().Commit(ctx) + stopTime := time.Now() + Expect(seqNum).To(BeEquivalentTo(0)) + Expect(err).ShouldNot(HaveOccurred()) + + // check the state of SB + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + Expect(mockSB.GetValues(nil)).To(BeEmpty()) + + // check metadata + Expect(metadataMap.ListAllNames()).To(BeEmpty()) + + // check executed operations + opHistory := mockSB.PopHistoryOfOps() + Expect(opHistory).To(HaveLen(1)) + Expect(opHistory[0].OpType).To(Equal(test.MockDump)) + Expect(opHistory[0].CorrelateDump).To(BeEmpty()) + Expect(opHistory[0].Descriptor).To(BeEquivalentTo(descriptor1Name)) + + // single transaction consisted of zero operations + txnHistory := scheduler.GetTransactionHistory(time.Time{}, time.Time{}) + Expect(txnHistory).To(HaveLen(1)) + txn := txnHistory[0] + Expect(txn.PreRecord).To(BeFalse()) + Expect(txn.Start.After(startTime)).To(BeTrue()) + Expect(txn.Start.Before(txn.Stop)).To(BeTrue()) + Expect(txn.Stop.Before(stopTime)).To(BeTrue()) + Expect(txn.SeqNum).To(BeEquivalentTo(0)) + Expect(txn.TxnType).To(BeEquivalentTo(NBTransaction)) + Expect(txn.ResyncType).To(BeEquivalentTo(FullResync)) + Expect(txn.Description).To(Equal(description)) + Expect(txn.Values).To(BeEmpty()) + Expect(txn.PreErrors).To(BeEmpty()) + Expect(txn.Planned).To(BeEmpty()) + Expect(txn.Executed).To(BeEmpty()) + + // check flag stats + graphR := scheduler.graph.Read() + errorStats := graphR.GetFlagStats(ErrorFlagName, nil) + Expect(errorStats.TotalCount).To(BeEquivalentTo(0)) + pendingStats := graphR.GetFlagStats(PendingFlagName, nil) + Expect(pendingStats.TotalCount).To(BeEquivalentTo(0)) + derivedStats := graphR.GetFlagStats(DerivedFlagName, nil) + Expect(derivedStats.TotalCount).To(BeEquivalentTo(0)) + lastUpdateStats := 
graphR.GetFlagStats(LastUpdateFlagName, nil) + Expect(lastUpdateStats.TotalCount).To(BeEquivalentTo(0)) + lastChangeStats := graphR.GetFlagStats(LastChangeFlagName, nil) + Expect(lastChangeStats.TotalCount).To(BeEquivalentTo(0)) + descriptorStats := graphR.GetFlagStats(DescriptorFlagName, nil) + Expect(descriptorStats.TotalCount).To(BeEquivalentTo(0)) + originStats := graphR.GetFlagStats(OriginFlagName, nil) + Expect(originStats.TotalCount).To(BeEquivalentTo(0)) + graphR.Release() + + // close scheduler + err = scheduler.Close() + Expect(err).To(BeNil()) +} + +func TestResyncWithEmptySB(t *testing.T) { + RegisterTestingT(t) + + // prepare KV Scheduler + scheduler := NewPlugin(UseDeps(func(deps *Deps) { + deps.HTTPHandlers = nil + })) + err := scheduler.Init() + Expect(err).To(BeNil()) + + // prepare mocks + mockSB := test.NewMockSouthbound() + descriptor1 := test.NewMockDescriptor(&KVDescriptor{ + Name: descriptor1Name, + NBKeyPrefix: prefixA, + KeySelector: prefixSelector(prefixA), + ValueTypeName: proto.MessageName(test.NewArrayValue()), + DerivedValues: test.ArrayValueDerBuilder, + Dependencies: func(key string, value proto.Message) []Dependency { + if key == prefixA+baseValue2 { + depKey := prefixA + baseValue1 + "/item1" // base value depends on a derived value + return []Dependency{ + {Label: depKey, Key: depKey}, + } + } + if key == prefixA+baseValue1+"/item2" { + depKey := prefixA + baseValue2 + "/item1" // derived value depends on another derived value + return []Dependency{ + {Label: depKey, Key: depKey}, + } + } + return nil + }, + WithMetadata: true, + }, mockSB, 0) + + // register descriptor with the scheduler + scheduler.RegisterKVDescriptor(descriptor1) + + // get metadata map created for the descriptor + metadataMap := scheduler.GetMetadataMap(descriptor1.Name) + nameToInteger, withMetadataMap := metadataMap.(test.NameToInteger) + Expect(withMetadataMap).To(BeTrue()) + + // run resync transaction with empty SB + startTime := time.Now() + 
schedulerTxn := scheduler.StartNBTransaction() + schedulerTxn.SetValue(prefixA+baseValue2, test.NewLazyArrayValue("item1")) + schedulerTxn.SetValue(prefixA+baseValue1, test.NewLazyArrayValue("item1", "item2")) + ctx := WithResync(context.Background(), FullResync, true) + description := "testing resync against empty SB" + ctx = WithDescription(ctx, description) + seqNum, err := schedulerTxn.Commit(ctx) + stopTime := time.Now() + Expect(seqNum).To(BeEquivalentTo(0)) + Expect(err).ShouldNot(HaveOccurred()) + + // check the state of SB + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + // -> base value 1 + value := mockSB.GetValue(prefixA + baseValue1) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item1", "item2"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(0)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value 1 + value = mockSB.GetValue(prefixA + baseValue1 + "/item1") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item1"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item2 derived from base value 1 + value = mockSB.GetValue(prefixA + baseValue1 + "/item2") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item2"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> base value 2 + value = mockSB.GetValue(prefixA + baseValue2) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item1"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(1)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value 2 + value = mockSB.GetValue(prefixA + baseValue2 + "/item1") + 
Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item1"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + Expect(mockSB.GetValues(nil)).To(HaveLen(5)) + + // check scheduler API + prefixAValues := scheduler.GetValues(prefixSelector(prefixA)) + checkValues(prefixAValues, []KeyValuePair{ + {Key: prefixA + baseValue1, Value: test.NewArrayValue("item1", "item2")}, + {Key: prefixA + baseValue1 + "/item1", Value: test.NewStringValue("item1")}, + {Key: prefixA + baseValue1 + "/item2", Value: test.NewStringValue("item2")}, + {Key: prefixA + baseValue2, Value: test.NewArrayValue("item1")}, + {Key: prefixA + baseValue2 + "/item1", Value: test.NewStringValue("item1")}, + }) + Expect(proto.Equal(scheduler.GetValue(prefixA+baseValue1), test.NewArrayValue("item1", "item2"))).To(BeTrue()) + Expect(proto.Equal(scheduler.GetValue(prefixA+baseValue1+"/item1"), test.NewStringValue("item1"))).To(BeTrue()) + Expect(scheduler.GetFailedValues(nil)).To(BeEmpty()) + Expect(scheduler.GetPendingValues(nil)).To(BeEmpty()) + + // check metadata + metadata, exists := nameToInteger.LookupByName(baseValue1) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(0)) + metadata, exists = nameToInteger.LookupByName(baseValue2) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(1)) + + // check executed operations + opHistory := mockSB.PopHistoryOfOps() + Expect(opHistory).To(HaveLen(6)) + operation := opHistory[0] + Expect(operation.OpType).To(Equal(test.MockDump)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + checkValuesForCorrelation(operation.CorrelateDump, []KVWithMetadata{ + { + Key: prefixA + baseValue1, + Value: test.NewArrayValue("item1", "item2"), + Metadata: nil, + Origin: FromNB, + }, + { + Key: prefixA + baseValue2, + Value: test.NewArrayValue("item1"), + Metadata: nil, + Origin: FromNB, + }, + }) + operation = opHistory[1] 
+ Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[2] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[3] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue2)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[4] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue2 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[5] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1 + "/item2")) + Expect(operation.Err).To(BeNil()) + + // single transaction consisted of 6 operations + txnHistory := scheduler.GetTransactionHistory(time.Time{}, time.Now()) + Expect(txnHistory).To(HaveLen(1)) + txn := txnHistory[0] + Expect(txn.PreRecord).To(BeFalse()) + Expect(txn.Start.After(startTime)).To(BeTrue()) + Expect(txn.Start.Before(txn.Stop)).To(BeTrue()) + Expect(txn.Stop.Before(stopTime)).To(BeTrue()) + Expect(txn.SeqNum).To(BeEquivalentTo(0)) + Expect(txn.TxnType).To(BeEquivalentTo(NBTransaction)) + Expect(txn.ResyncType).To(BeEquivalentTo(FullResync)) + Expect(txn.Description).To(Equal(description)) + checkRecordedValues(txn.Values, []RecordedKVPair{ + {Key: prefixA + baseValue1, Value: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), Origin: FromNB}, + {Key: prefixA + 
baseValue2, Value: utils.RecordProtoMessage(test.NewArrayValue("item1")), Origin: FromNB}, + }) + Expect(txn.PreErrors).To(BeEmpty()) + + txnOps := RecordedTxnOps{ + { + Operation: Add, + Key: prefixA + baseValue1, + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixA + baseValue1 + "/item1", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixA + baseValue2, + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixA + baseValue2 + "/item1", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixA + baseValue1 + "/item2", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + } + checkTxnOperations(txn.Planned, txnOps) + checkTxnOperations(txn.Executed, txnOps) + + // now remove everything using resync with empty data + startTime = time.Now() + seqNum, err = scheduler.StartNBTransaction().Commit(WithResync(context.Background(), FullResync, true)) + stopTime = time.Now() + Expect(seqNum).To(BeEquivalentTo(1)) + Expect(err).ShouldNot(HaveOccurred()) + + // check the state of SB + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + Expect(mockSB.GetValues(nil)).To(BeEmpty()) + + // check metadata + Expect(metadataMap.ListAllNames()).To(BeEmpty()) + + // check executed operations + opHistory = mockSB.PopHistoryOfOps() + Expect(opHistory).To(HaveLen(6)) + operation = opHistory[0] + Expect(operation.OpType).To(Equal(test.MockDump)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + checkValuesForCorrelation(operation.CorrelateDump, []KVWithMetadata{ 
+ { + Key: prefixA + baseValue1, + Value: test.NewArrayValue("item1", "item2"), + Metadata: &test.OnlyInteger{Integer: 0}, + Origin: FromNB, + }, + { + Key: prefixA + baseValue2, + Value: test.NewArrayValue("item1"), + Metadata: &test.OnlyInteger{Integer: 1}, + Origin: FromNB, + }, + }) + operation = opHistory[1] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1 + "/item2")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[2] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue2 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[3] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue2)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[4] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[5] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1)) + Expect(operation.Err).To(BeNil()) + + // this second transaction consisted of 6 operations + txnHistory = scheduler.GetTransactionHistory(time.Time{}, time.Now()) + Expect(txnHistory).To(HaveLen(2)) + txn = txnHistory[1] + Expect(txn.PreRecord).To(BeFalse()) + Expect(txn.Start.After(startTime)).To(BeTrue()) + Expect(txn.Start.Before(txn.Stop)).To(BeTrue()) + Expect(txn.Stop.Before(stopTime)).To(BeTrue()) + Expect(txn.SeqNum).To(BeEquivalentTo(1)) + 
Expect(txn.TxnType).To(BeEquivalentTo(NBTransaction)) + Expect(txn.ResyncType).To(BeEquivalentTo(FullResync)) + Expect(txn.Description).To(BeEmpty()) + checkRecordedValues(txn.Values, []RecordedKVPair{ + {Key: prefixA + baseValue1, Value: utils.RecordProtoMessage(nil), Origin: FromNB}, + {Key: prefixA + baseValue2, Value: utils.RecordProtoMessage(nil), Origin: FromNB}, + }) + Expect(txn.PreErrors).To(BeEmpty()) + + txnOps = RecordedTxnOps{ + { + Operation: Delete, + Key: prefixA + baseValue1 + "/item2", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Delete, + Key: prefixA + baseValue2 + "/item1", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Delete, + Key: prefixA + baseValue2, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Delete, + Key: prefixA + baseValue1 + "/item1", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Delete, + Key: prefixA + baseValue1, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + } + checkTxnOperations(txn.Planned, txnOps) + checkTxnOperations(txn.Executed, txnOps) + + // check flag stats + graphR := scheduler.graph.Read() + errorStats := graphR.GetFlagStats(ErrorFlagName, nil) + Expect(errorStats.TotalCount).To(BeEquivalentTo(0)) + pendingStats := graphR.GetFlagStats(PendingFlagName, nil) + Expect(pendingStats.TotalCount).To(BeEquivalentTo(0)) + derivedStats := graphR.GetFlagStats(DerivedFlagName, nil) + Expect(derivedStats.TotalCount).To(BeEquivalentTo(3)) + lastUpdateStats := graphR.GetFlagStats(LastUpdateFlagName, nil) + 
Expect(lastUpdateStats.TotalCount).To(BeEquivalentTo(5)) + lastChangeStats := graphR.GetFlagStats(LastChangeFlagName, nil) + Expect(lastChangeStats.TotalCount).To(BeEquivalentTo(2)) + descriptorStats := graphR.GetFlagStats(DescriptorFlagName, nil) + Expect(descriptorStats.TotalCount).To(BeEquivalentTo(5)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor1Name)) + Expect(descriptorStats.PerValueCount[descriptor1Name]).To(BeEquivalentTo(5)) + originStats := graphR.GetFlagStats(OriginFlagName, nil) + Expect(originStats.TotalCount).To(BeEquivalentTo(5)) + Expect(originStats.PerValueCount).To(HaveKey(FromNB.String())) + Expect(originStats.PerValueCount[FromNB.String()]).To(BeEquivalentTo(5)) + graphR.Release() + + // close scheduler + err = scheduler.Close() + Expect(err).To(BeNil()) +} + +func TestResyncWithNonEmptySB(t *testing.T) { + RegisterTestingT(t) + + // prepare KV Scheduler + scheduler := NewPlugin(UseDeps(func(deps *Deps) { + deps.HTTPHandlers = nil + })) + err := scheduler.Init() + Expect(err).To(BeNil()) + + // prepare mocks + mockSB := test.NewMockSouthbound() + // -> initial content: + mockSB.SetValue(prefixA+baseValue1, test.NewArrayValue("item1"), + &test.OnlyInteger{Integer: 0}, FromNB, false) + mockSB.SetValue(prefixA+baseValue1+"/item1", test.NewStringValue("item1"), + nil, FromNB, true) + mockSB.SetValue(prefixA+baseValue2, test.NewArrayValue("item1"), + &test.OnlyInteger{Integer: 1}, FromNB, false) + mockSB.SetValue(prefixA+baseValue2+"/item1", test.NewStringValue("item1"), + nil, FromNB, true) + mockSB.SetValue(prefixA+baseValue3, test.NewArrayValue("item1"), + &test.OnlyInteger{Integer: 2}, FromNB, false) + mockSB.SetValue(prefixA+baseValue3+"/item1", test.NewStringValue("item1"), + nil, FromNB, true) + // -> descriptor1: + descriptor1 := test.NewMockDescriptor(&KVDescriptor{ + Name: descriptor1Name, + NBKeyPrefix: prefixA, + KeySelector: prefixSelector(prefixA), + ValueTypeName: proto.MessageName(test.NewArrayValue()), + 
DerivedValues: test.ArrayValueDerBuilder, + Dependencies: func(key string, value proto.Message) []Dependency { + if key == prefixA+baseValue2+"/item1" { + depKey := prefixA + baseValue1 + return []Dependency{ + {Label: depKey, Key: depKey}, + } + } + if key == prefixA+baseValue2+"/item2" { + depKey := prefixA + baseValue1 + "/item1" + return []Dependency{ + {Label: depKey, Key: depKey}, + } + } + return nil + }, + ModifyWithRecreate: func(key string, oldValue, newValue proto.Message, metadata Metadata) bool { + return key == prefixA+baseValue3 + }, + WithMetadata: true, + }, mockSB, 3) + + // register descriptor with the scheduler + scheduler.RegisterKVDescriptor(descriptor1) + + // get metadata map created for the descriptor + metadataMap := scheduler.GetMetadataMap(descriptor1.Name) + nameToInteger, withMetadataMap := metadataMap.(test.NameToInteger) + Expect(withMetadataMap).To(BeTrue()) + + // run resync transaction with SB that already has some values added + startTime := time.Now() + schedulerTxn := scheduler.StartNBTransaction() + schedulerTxn.SetValue(prefixA+baseValue2, test.NewLazyArrayValue("item1", "item2")) + schedulerTxn.SetValue(prefixA+baseValue1, test.NewLazyArrayValue("item2")) + schedulerTxn.SetValue(prefixA+baseValue3, test.NewLazyArrayValue("item1", "item2")) + seqNum, err := schedulerTxn.Commit(WithResync(context.Background(), FullResync, true)) + stopTime := time.Now() + Expect(seqNum).To(BeEquivalentTo(0)) + Expect(err).ShouldNot(HaveOccurred()) + + // check the state of SB + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + // -> base value 1 + value := mockSB.GetValue(prefixA + baseValue1) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item2"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(0)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value 1 was removed + value = 
mockSB.GetValue(prefixA + baseValue1 + "/item1") + Expect(value).To(BeNil()) + // -> item2 derived from base value 1 + value = mockSB.GetValue(prefixA + baseValue1 + "/item2") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item2"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> base value 2 + value = mockSB.GetValue(prefixA + baseValue2) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item1", "item2"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(1)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value 2 + value = mockSB.GetValue(prefixA + baseValue2 + "/item1") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item1"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item2 derived from base value 2 is pending + value = mockSB.GetValue(prefixA + baseValue2 + "/item2") + Expect(value).To(BeNil()) + // -> base value 3 + value = mockSB.GetValue(prefixA + baseValue3) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item1", "item2"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(3)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value 3 + value = mockSB.GetValue(prefixA + baseValue3 + "/item1") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item1"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item2 derived from base value 3 + value = mockSB.GetValue(prefixA + baseValue3 + "/item2") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, 
test.NewStringValue("item2"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + Expect(mockSB.GetValues(nil)).To(HaveLen(7)) + + // check metadata + metadata, exists := nameToInteger.LookupByName(baseValue1) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(0)) + metadata, exists = nameToInteger.LookupByName(baseValue2) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(1)) + metadata, exists = nameToInteger.LookupByName(baseValue3) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(3)) + + // check operations executed in SB + opHistory := mockSB.PopHistoryOfOps() + Expect(opHistory).To(HaveLen(11)) + operation := opHistory[0] + Expect(operation.OpType).To(Equal(test.MockDump)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + checkValuesForCorrelation(operation.CorrelateDump, []KVWithMetadata{ + { + Key: prefixA + baseValue1, + Value: test.NewArrayValue("item2"), + Metadata: nil, + Origin: FromNB, + }, + { + Key: prefixA + baseValue2, + Value: test.NewArrayValue("item1", "item2"), + Metadata: nil, + Origin: FromNB, + }, + { + Key: prefixA + baseValue3, + Value: test.NewArrayValue("item1", "item2"), + Metadata: nil, + Origin: FromNB, + }, + }) + operation = opHistory[1] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue3 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[2] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue3)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[3] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + 
Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue3)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[4] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue3 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[5] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue3 + "/item2")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[6] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[7] + Expect(operation.OpType).To(Equal(test.MockModify)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[8] + Expect(operation.OpType).To(Equal(test.MockUpdate)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue2 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[9] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1 + "/item2")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[10] + Expect(operation.OpType).To(Equal(test.MockModify)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue2)) + Expect(operation.Err).To(BeNil()) + + // check transaction operations + txnHistory := scheduler.GetTransactionHistory(time.Time{}, time.Time{}) 
+ Expect(txnHistory).To(HaveLen(1)) + txn := txnHistory[0] + Expect(txn.PreRecord).To(BeFalse()) + Expect(txn.Start.After(startTime)).To(BeTrue()) + Expect(txn.Start.Before(txn.Stop)).To(BeTrue()) + Expect(txn.Stop.Before(stopTime)).To(BeTrue()) + Expect(txn.SeqNum).To(BeEquivalentTo(0)) + Expect(txn.TxnType).To(BeEquivalentTo(NBTransaction)) + Expect(txn.ResyncType).To(BeEquivalentTo(FullResync)) + Expect(txn.Description).To(BeEmpty()) + checkRecordedValues(txn.Values, []RecordedKVPair{ + {Key: prefixA + baseValue1, Value: utils.RecordProtoMessage(test.NewArrayValue("item2")), Origin: FromNB}, + {Key: prefixA + baseValue2, Value: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), Origin: FromNB}, + {Key: prefixA + baseValue3, Value: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), Origin: FromNB}, + }) + Expect(txn.PreErrors).To(BeEmpty()) + + txnOps := RecordedTxnOps{ + { + Operation: Delete, + Key: prefixA + baseValue3 + "/item1", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Delete, + Key: prefixA + baseValue3, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsPending: true, + }, + { + Operation: Add, + Key: prefixA + baseValue3, + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + WasPending: true, + }, + { + Operation: Add, + Key: prefixA + baseValue3 + "/item1", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixA + baseValue3 + "/item2", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Delete, + Key: prefixA + baseValue1 + "/item1", + Derived: true, + PrevValue: 
utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Modify, + Key: prefixA + baseValue1, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item1")), + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Update, + Key: prefixA + baseValue2 + "/item1", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixA + baseValue1 + "/item2", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Modify, + Key: prefixA + baseValue2, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item1")), + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixA + baseValue2 + "/item2", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsPending: true, + }, + } + checkTxnOperations(txn.Planned, txnOps) + checkTxnOperations(txn.Executed, txnOps) + + // check flag stats + graphR := scheduler.graph.Read() + errorStats := graphR.GetFlagStats(ErrorFlagName, nil) + Expect(errorStats.TotalCount).To(BeEquivalentTo(0)) + pendingStats := graphR.GetFlagStats(PendingFlagName, nil) + Expect(pendingStats.TotalCount).To(BeEquivalentTo(1)) + derivedStats := graphR.GetFlagStats(DerivedFlagName, nil) + Expect(derivedStats.TotalCount).To(BeEquivalentTo(5)) + lastUpdateStats := graphR.GetFlagStats(LastUpdateFlagName, nil) + Expect(lastUpdateStats.TotalCount).To(BeEquivalentTo(8)) + lastChangeStats := graphR.GetFlagStats(LastChangeFlagName, nil) + 
Expect(lastChangeStats.TotalCount).To(BeEquivalentTo(3)) + descriptorStats := graphR.GetFlagStats(DescriptorFlagName, nil) + Expect(descriptorStats.TotalCount).To(BeEquivalentTo(8)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor1Name)) + Expect(descriptorStats.PerValueCount[descriptor1Name]).To(BeEquivalentTo(8)) + originStats := graphR.GetFlagStats(OriginFlagName, nil) + Expect(originStats.TotalCount).To(BeEquivalentTo(8)) + Expect(originStats.PerValueCount).To(HaveKey(FromNB.String())) + Expect(originStats.PerValueCount[FromNB.String()]).To(BeEquivalentTo(8)) + graphR.Release() + + // close scheduler + err = scheduler.Close() + Expect(err).To(BeNil()) +} + +func TestResyncNotRemovingSBValues(t *testing.T) { + RegisterTestingT(t) + + // prepare KV Scheduler + scheduler := NewPlugin(UseDeps(func(deps *Deps) { + deps.HTTPHandlers = nil + })) + err := scheduler.Init() + Expect(err).To(BeNil()) + + // prepare mocks + mockSB := test.NewMockSouthbound() + // -> initial content: + mockSB.SetValue(prefixA+baseValue1, test.NewStringValue(baseValue1), + nil, FromSB, false) + // -> descriptor1: + descriptor1 := test.NewMockDescriptor(&KVDescriptor{ + Name: descriptor1Name, + KeySelector: prefixSelector(prefixA), + NBKeyPrefix: prefixA, + ValueTypeName: proto.MessageName(test.NewArrayValue()), + DerivedValues: test.ArrayValueDerBuilder, + Dependencies: func(key string, value proto.Message) []Dependency { + if key == prefixA+baseValue2 { + depKey := prefixA + baseValue1 + return []Dependency{ + {Label: depKey, Key: depKey}, + } + } + return nil + }, + WithMetadata: true, + }, mockSB, 0) + + // register descriptor with the scheduler + scheduler.RegisterKVDescriptor(descriptor1) + + // get metadata map created for the descriptor + metadataMap := scheduler.GetMetadataMap(descriptor1.Name) + nameToInteger, withMetadataMap := metadataMap.(test.NameToInteger) + Expect(withMetadataMap).To(BeTrue()) + + // run resync transaction that should keep values not managed by NB 
untouched + startTime := time.Now() + schedulerTxn := scheduler.StartNBTransaction() + schedulerTxn.SetValue(prefixA+baseValue2, test.NewLazyArrayValue("item1")) + seqNum, err := schedulerTxn.Commit(WithResync(context.Background(), FullResync, true)) + stopTime := time.Now() + Expect(seqNum).To(BeEquivalentTo(0)) + Expect(err).ShouldNot(HaveOccurred()) + + // check the state of SB + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + // -> base value 1 + value := mockSB.GetValue(prefixA + baseValue1) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue(baseValue1))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromSB)) + // -> base value 2 + value = mockSB.GetValue(prefixA + baseValue2) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item1"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(0)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value 2 + value = mockSB.GetValue(prefixA + baseValue2 + "/item1") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item1"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + Expect(mockSB.GetValues(nil)).To(HaveLen(3)) + + // check metadata + metadata, exists := nameToInteger.LookupByName(baseValue1) + Expect(exists).To(BeFalse()) + Expect(metadata).To(BeNil()) + metadata, exists = nameToInteger.LookupByName(baseValue2) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(0)) + + // check operations executed in SB + opHistory := mockSB.PopHistoryOfOps() + Expect(opHistory).To(HaveLen(3)) + operation := opHistory[0] + Expect(operation.OpType).To(Equal(test.MockDump)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + 
checkValuesForCorrelation(operation.CorrelateDump, []KVWithMetadata{ + { + Key: prefixA + baseValue2, + Value: test.NewArrayValue("item1"), + Metadata: nil, + Origin: FromNB, + }, + }) + operation = opHistory[1] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue2)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[2] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue2 + "/item1")) + Expect(operation.Err).To(BeNil()) + + // check transaction operations + txnHistory := scheduler.GetTransactionHistory(startTime, time.Now()) + Expect(txnHistory).To(HaveLen(1)) + txn := txnHistory[0] + Expect(txn.PreRecord).To(BeFalse()) + Expect(txn.Start.After(startTime)).To(BeTrue()) + Expect(txn.Start.Before(txn.Stop)).To(BeTrue()) + Expect(txn.Stop.Before(stopTime)).To(BeTrue()) + Expect(txn.SeqNum).To(BeEquivalentTo(0)) + Expect(txn.TxnType).To(BeEquivalentTo(NBTransaction)) + Expect(txn.ResyncType).To(BeEquivalentTo(FullResync)) + Expect(txn.Description).To(BeEmpty()) + checkRecordedValues(txn.Values, []RecordedKVPair{ + {Key: prefixA + baseValue1, Value: utils.RecordProtoMessage(test.NewStringValue(baseValue1)), Origin: FromSB}, + {Key: prefixA + baseValue2, Value: utils.RecordProtoMessage(test.NewArrayValue("item1")), Origin: FromNB}, + }) + Expect(txn.PreErrors).To(BeEmpty()) + + txnOps := RecordedTxnOps{ + { + Operation: Add, + Key: prefixA + baseValue2, + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixA + baseValue2 + "/item1", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + } + checkTxnOperations(txn.Planned, txnOps) + 
checkTxnOperations(txn.Executed, txnOps) + + // check flag stats + graphR := scheduler.graph.Read() + errorStats := graphR.GetFlagStats(ErrorFlagName, nil) + Expect(errorStats.TotalCount).To(BeEquivalentTo(0)) + pendingStats := graphR.GetFlagStats(PendingFlagName, nil) + Expect(pendingStats.TotalCount).To(BeEquivalentTo(0)) + derivedStats := graphR.GetFlagStats(DerivedFlagName, nil) + Expect(derivedStats.TotalCount).To(BeEquivalentTo(1)) + lastUpdateStats := graphR.GetFlagStats(LastUpdateFlagName, nil) + Expect(lastUpdateStats.TotalCount).To(BeEquivalentTo(3)) + lastChangeStats := graphR.GetFlagStats(LastChangeFlagName, nil) + Expect(lastChangeStats.TotalCount).To(BeEquivalentTo(2)) + descriptorStats := graphR.GetFlagStats(DescriptorFlagName, nil) + Expect(descriptorStats.TotalCount).To(BeEquivalentTo(3)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor1Name)) + Expect(descriptorStats.PerValueCount[descriptor1Name]).To(BeEquivalentTo(3)) + originStats := graphR.GetFlagStats(OriginFlagName, nil) + Expect(originStats.TotalCount).To(BeEquivalentTo(3)) + Expect(originStats.PerValueCount).To(HaveKey(FromNB.String())) + Expect(originStats.PerValueCount[FromNB.String()]).To(BeEquivalentTo(2)) + Expect(originStats.PerValueCount).To(HaveKey(FromSB.String())) + Expect(originStats.PerValueCount[FromSB.String()]).To(BeEquivalentTo(1)) + graphR.Release() + + // close scheduler + err = scheduler.Close() + Expect(err).To(BeNil()) +} + +func TestResyncWithMultipleDescriptors(t *testing.T) { + RegisterTestingT(t) + + // prepare KV Scheduler + scheduler := NewPlugin(UseDeps(func(deps *Deps) { + deps.HTTPHandlers = nil + })) + err := scheduler.Init() + Expect(err).To(BeNil()) + + // prepare mocks + mockSB := test.NewMockSouthbound() + // -> initial content: + mockSB.SetValue(prefixA+baseValue1, test.NewArrayValue("item1"), + &test.OnlyInteger{Integer: 0}, FromNB, false) + mockSB.SetValue(prefixA+baseValue1+"/item1", test.NewStringValue("item1"), + nil, FromNB, true) + 
mockSB.SetValue(prefixB+baseValue2, test.NewArrayValue("item1"), + &test.OnlyInteger{Integer: 0}, FromNB, false) + mockSB.SetValue(prefixB+baseValue2+"/item1", test.NewStringValue("item1"), + nil, FromNB, true) + mockSB.SetValue(prefixC+baseValue3, test.NewArrayValue("item1"), + &test.OnlyInteger{Integer: 0}, FromNB, false) + mockSB.SetValue(prefixC+baseValue3+"/item1", test.NewStringValue("item1"), + nil, FromNB, true) + // -> descriptor1: + descriptor1 := test.NewMockDescriptor(&KVDescriptor{ + Name: descriptor1Name, + NBKeyPrefix: prefixA, + KeySelector: prefixSelector(prefixA), + ValueTypeName: proto.MessageName(test.NewArrayValue()), + DerivedValues: test.ArrayValueDerBuilder, + WithMetadata: true, + }, mockSB, 1) + // -> descriptor2: + descriptor2 := test.NewMockDescriptor(&KVDescriptor{ + Name: descriptor2Name, + NBKeyPrefix: prefixB, + KeySelector: prefixSelector(prefixB), + ValueTypeName: proto.MessageName(test.NewArrayValue()), + DerivedValues: test.ArrayValueDerBuilder, + Dependencies: func(key string, value proto.Message) []Dependency { + if key == prefixB+baseValue2+"/item1" { + depKey := prefixA + baseValue1 + return []Dependency{ + {Label: depKey, Key: depKey}, + } + } + if key == prefixB+baseValue2+"/item2" { + depKey := prefixA + baseValue1 + "/item1" + return []Dependency{ + {Label: depKey, Key: depKey}, + } + } + return nil + }, + WithMetadata: true, + DumpDependencies: []string{descriptor1Name}, + }, mockSB, 1) + // -> descriptor3: + descriptor3 := test.NewMockDescriptor(&KVDescriptor{ + Name: descriptor3Name, + NBKeyPrefix: prefixC, + KeySelector: prefixSelector(prefixC), + ValueTypeName: proto.MessageName(test.NewArrayValue()), + DerivedValues: test.ArrayValueDerBuilder, + ModifyWithRecreate: func(key string, oldValue, newValue proto.Message, metadata Metadata) bool { + return key == prefixC+baseValue3 + }, + WithMetadata: true, + DumpDependencies: []string{descriptor2Name}, + }, mockSB, 1) + + // register all 3 descriptors with the scheduler 
+ scheduler.RegisterKVDescriptor(descriptor1) + scheduler.RegisterKVDescriptor(descriptor2) + scheduler.RegisterKVDescriptor(descriptor3) + nbPrefixes := scheduler.GetRegisteredNBKeyPrefixes() + Expect(nbPrefixes).To(HaveLen(3)) + Expect(nbPrefixes).To(ContainElement(prefixA)) + Expect(nbPrefixes).To(ContainElement(prefixB)) + Expect(nbPrefixes).To(ContainElement(prefixC)) + + // get metadata map created for each descriptor + metadataMap := scheduler.GetMetadataMap(descriptor1.Name) + nameToInteger1, withMetadataMap := metadataMap.(test.NameToInteger) + Expect(withMetadataMap).To(BeTrue()) + metadataMap = scheduler.GetMetadataMap(descriptor2.Name) + nameToInteger2, withMetadataMap := metadataMap.(test.NameToInteger) + Expect(withMetadataMap).To(BeTrue()) + metadataMap = scheduler.GetMetadataMap(descriptor3.Name) + nameToInteger3, withMetadataMap := metadataMap.(test.NameToInteger) + Expect(withMetadataMap).To(BeTrue()) + + // run resync transaction with SB that already has some values added + startTime := time.Now() + schedulerTxn := scheduler.StartNBTransaction() + schedulerTxn.SetValue(prefixB+baseValue2, test.NewLazyArrayValue("item1", "item2")) + schedulerTxn.SetValue(prefixA+baseValue1, test.NewLazyArrayValue("item2")) + schedulerTxn.SetValue(prefixC+baseValue3, test.NewLazyArrayValue("item1", "item2")) + seqNum, err := schedulerTxn.Commit(WithResync(context.Background(), FullResync, true)) + stopTime := time.Now() + Expect(seqNum).To(BeEquivalentTo(0)) + Expect(err).ShouldNot(HaveOccurred()) + + // check the state of SB + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + // -> base value 1 + value := mockSB.GetValue(prefixA + baseValue1) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item2"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(0)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value 1 
was removed + value = mockSB.GetValue(prefixA + baseValue1 + "/item1") + Expect(value).To(BeNil()) + // -> item2 derived from base value 1 + value = mockSB.GetValue(prefixA + baseValue1 + "/item2") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item2"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> base value 2 + value = mockSB.GetValue(prefixB + baseValue2) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item1", "item2"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(0)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value 2 + value = mockSB.GetValue(prefixB + baseValue2 + "/item1") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item1"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item2 derived from base value 2 is pending + value = mockSB.GetValue(prefixB + baseValue2 + "/item2") + Expect(value).To(BeNil()) + // -> base value 3 + value = mockSB.GetValue(prefixC + baseValue3) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item1", "item2"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(1)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value 3 + value = mockSB.GetValue(prefixC + baseValue3 + "/item1") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item1"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item2 derived from base value 3 + value = mockSB.GetValue(prefixC + baseValue3 + "/item2") + Expect(value).ToNot(BeNil()) + 
Expect(proto.Equal(value.Value, test.NewStringValue("item2"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + Expect(mockSB.GetValues(nil)).To(HaveLen(7)) + + // check metadata + metadata, exists := nameToInteger1.LookupByName(baseValue1) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(0)) + metadata, exists = nameToInteger2.LookupByName(baseValue2) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(0)) + metadata, exists = nameToInteger3.LookupByName(baseValue3) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(1)) + + // check operations executed in SB + opHistory := mockSB.PopHistoryOfOps() + Expect(opHistory).To(HaveLen(13)) + operation := opHistory[0] + Expect(operation.OpType).To(Equal(test.MockDump)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + checkValuesForCorrelation(operation.CorrelateDump, []KVWithMetadata{ + { + Key: prefixA + baseValue1, + Value: test.NewArrayValue("item2"), + Metadata: nil, + Origin: FromNB, + }, + }) + operation = opHistory[1] + Expect(operation.OpType).To(Equal(test.MockDump)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor2Name)) + checkValuesForCorrelation(operation.CorrelateDump, []KVWithMetadata{ + { + Key: prefixB + baseValue2, + Value: test.NewArrayValue("item1", "item2"), + Metadata: nil, + Origin: FromNB, + }, + }) + operation = opHistory[2] + Expect(operation.OpType).To(Equal(test.MockDump)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + checkValuesForCorrelation(operation.CorrelateDump, []KVWithMetadata{ + { + Key: prefixC + baseValue3, + Value: test.NewArrayValue("item1", "item2"), + Metadata: nil, + Origin: FromNB, + }, + }) + operation = opHistory[3] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC 
+ baseValue3 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[4] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[5] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[6] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[7] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor3Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixC + baseValue3 + "/item2")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[8] + Expect(operation.OpType).To(Equal(test.MockDelete)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[9] + Expect(operation.OpType).To(Equal(test.MockModify)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[10] + Expect(operation.OpType).To(Equal(test.MockUpdate)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor2Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixB + baseValue2 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[11] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + 
Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1 + "/item2")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[12] + Expect(operation.OpType).To(Equal(test.MockModify)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor2Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixB + baseValue2)) + Expect(operation.Err).To(BeNil()) + + // check transaction operations + txnHistory := scheduler.GetTransactionHistory(time.Time{}, time.Time{}) + Expect(txnHistory).To(HaveLen(1)) + txn := txnHistory[0] + Expect(txn.PreRecord).To(BeFalse()) + Expect(txn.Start.After(startTime)).To(BeTrue()) + Expect(txn.Start.Before(txn.Stop)).To(BeTrue()) + Expect(txn.Stop.Before(stopTime)).To(BeTrue()) + Expect(txn.SeqNum).To(BeEquivalentTo(0)) + Expect(txn.TxnType).To(BeEquivalentTo(NBTransaction)) + Expect(txn.ResyncType).To(BeEquivalentTo(FullResync)) + Expect(txn.Description).To(BeEmpty()) + checkRecordedValues(txn.Values, []RecordedKVPair{ + {Key: prefixA + baseValue1, Value: utils.RecordProtoMessage(test.NewArrayValue("item2")), Origin: FromNB}, + {Key: prefixB + baseValue2, Value: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), Origin: FromNB}, + {Key: prefixC + baseValue3, Value: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), Origin: FromNB}, + }) + Expect(txn.PreErrors).To(BeEmpty()) + + txnOps := RecordedTxnOps{ + { + Operation: Delete, + Key: prefixC + baseValue3 + "/item1", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Delete, + Key: prefixC + baseValue3, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsPending: true, + }, + { + Operation: Add, + Key: prefixC + baseValue3, + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + WasPending: true, + }, + { + Operation: Add, + Key: 
prefixC + baseValue3 + "/item1", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixC + baseValue3 + "/item2", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Delete, + Key: prefixA + baseValue1 + "/item1", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Modify, + Key: prefixA + baseValue1, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item1")), + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Update, + Key: prefixB + baseValue2 + "/item1", + Derived: true, + PrevValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixA + baseValue1 + "/item2", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Modify, + Key: prefixB + baseValue2, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item1")), + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixB + baseValue2 + "/item2", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsPending: true, + }, + } + checkTxnOperations(txn.Planned, txnOps) + checkTxnOperations(txn.Executed, txnOps) + + // check flag stats + graphR := scheduler.graph.Read() + errorStats := graphR.GetFlagStats(ErrorFlagName, nil) + Expect(errorStats.TotalCount).To(BeEquivalentTo(0)) + pendingStats := 
graphR.GetFlagStats(PendingFlagName, nil) + Expect(pendingStats.TotalCount).To(BeEquivalentTo(1)) + derivedStats := graphR.GetFlagStats(DerivedFlagName, nil) + Expect(derivedStats.TotalCount).To(BeEquivalentTo(5)) + lastUpdateStats := graphR.GetFlagStats(LastUpdateFlagName, nil) + Expect(lastUpdateStats.TotalCount).To(BeEquivalentTo(8)) + lastChangeStats := graphR.GetFlagStats(LastChangeFlagName, nil) + Expect(lastChangeStats.TotalCount).To(BeEquivalentTo(3)) + descriptorStats := graphR.GetFlagStats(DescriptorFlagName, nil) + Expect(descriptorStats.TotalCount).To(BeEquivalentTo(8)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor1Name)) + Expect(descriptorStats.PerValueCount[descriptor1Name]).To(BeEquivalentTo(2)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor2Name)) + Expect(descriptorStats.PerValueCount[descriptor2Name]).To(BeEquivalentTo(3)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor3Name)) + Expect(descriptorStats.PerValueCount[descriptor3Name]).To(BeEquivalentTo(3)) + originStats := graphR.GetFlagStats(OriginFlagName, nil) + Expect(originStats.TotalCount).To(BeEquivalentTo(8)) + Expect(originStats.PerValueCount).To(HaveKey(FromNB.String())) + Expect(originStats.PerValueCount[FromNB.String()]).To(BeEquivalentTo(8)) + graphR.Release() + + // close scheduler + err = scheduler.Close() + Expect(err).To(BeNil()) +} + +func TestResyncWithRetry(t *testing.T) { + RegisterTestingT(t) + + // prepare KV Scheduler + scheduler := NewPlugin(UseDeps(func(deps *Deps) { + deps.HTTPHandlers = nil + })) + err := scheduler.Init() + Expect(err).To(BeNil()) + + // prepare mocks + mockSB := test.NewMockSouthbound() + // -> initial content: + mockSB.SetValue(prefixA+baseValue1, test.NewArrayValue(), + &test.OnlyInteger{Integer: 0}, FromNB, false) + // -> descriptor1: + descriptor1 := test.NewMockDescriptor(&KVDescriptor{ + Name: descriptor1Name, + NBKeyPrefix: prefixA, + KeySelector: prefixSelector(prefixA), + ValueTypeName: 
proto.MessageName(test.NewArrayValue()), + DerivedValues: test.ArrayValueDerBuilder, + WithMetadata: true, + }, mockSB, 1) + // -> planned error + mockSB.PlanError(prefixA+baseValue1+"/item2", errors.New("failed to add value"), + func() { + mockSB.SetValue(prefixA+baseValue1, test.NewArrayValue("item1"), + &test.OnlyInteger{Integer: 0}, FromNB, false) + }) + + // register descriptor with the scheduler + scheduler.RegisterKVDescriptor(descriptor1) + + // subscribe to receive notifications about errors + errorChan := make(chan KeyWithError, 5) + scheduler.SubscribeForErrors(errorChan, prefixSelector(prefixA)) + + // get metadata map created for the descriptor + metadataMap := scheduler.GetMetadataMap(descriptor1.Name) + nameToInteger, withMetadataMap := metadataMap.(test.NameToInteger) + Expect(withMetadataMap).To(BeTrue()) + + // run resync transaction that will fail for one value + startTime := time.Now() + resyncTxn := scheduler.StartNBTransaction() + resyncTxn.SetValue(prefixA+baseValue1, test.NewLazyArrayValue("item1", "item2")) + description := "testing resync with retry" + ctx := context.Background() + ctx = WithRetry(ctx, 3*time.Second, false) + ctx = WithResync(ctx, FullResync, true) + ctx = WithDescription(ctx, description) + seqNum, err := resyncTxn.Commit(ctx) + stopTime := time.Now() + Expect(seqNum).To(BeEquivalentTo(0)) + Expect(err).ToNot(BeNil()) + txnErr := err.(*TransactionError) + Expect(txnErr.GetTxnInitError()).ShouldNot(HaveOccurred()) + kvErrors := txnErr.GetKVErrors() + Expect(kvErrors).To(HaveLen(1)) + Expect(kvErrors[0].TxnOperation).To(BeEquivalentTo(Add)) + Expect(kvErrors[0].Key).To(BeEquivalentTo(prefixA + baseValue1 + "/item2")) + Expect(kvErrors[0].Error.Error()).To(BeEquivalentTo("failed to add value")) + + // check the state of SB + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + // -> base value 1 + value := mockSB.GetValue(prefixA + baseValue1) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, 
test.NewArrayValue("item1"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(0)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value 1 + value = mockSB.GetValue(prefixA + baseValue1 + "/item1") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item1"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item2 derived from base value 1 failed to get added + value = mockSB.GetValue(prefixA + baseValue1 + "/item2") + Expect(value).To(BeNil()) + Expect(mockSB.GetValues(nil)).To(HaveLen(2)) + + // check metadata + metadata, exists := nameToInteger.LookupByName(baseValue1) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(0)) + + // check operations executed in SB + opHistory := mockSB.PopHistoryOfOps() + Expect(opHistory).To(HaveLen(5)) + operation := opHistory[0] + Expect(operation.OpType).To(Equal(test.MockDump)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + checkValuesForCorrelation(operation.CorrelateDump, []KVWithMetadata{ + { + Key: prefixA + baseValue1, + Value: test.NewArrayValue("item1", "item2"), + Metadata: nil, + Origin: FromNB, + }, + }) + operation = opHistory[1] + Expect(operation.OpType).To(Equal(test.MockModify)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[2] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1 + "/item1")) + Expect(operation.Err).To(BeNil()) + operation = opHistory[3] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + 
Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1 + "/item2")) + Expect(operation.Err).ToNot(BeNil()) + Expect(operation.Err.Error()).To(BeEquivalentTo("failed to add value")) + operation = opHistory[4] // refresh failed value + Expect(operation.OpType).To(Equal(test.MockDump)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + checkValuesForCorrelation(operation.CorrelateDump, []KVWithMetadata{ + { + Key: prefixA + baseValue1, + Value: test.NewArrayValue("item1", "item2"), + Metadata: &test.OnlyInteger{Integer: 0}, + Origin: FromNB, + }, + }) + + // check transaction operations + txnHistory := scheduler.GetTransactionHistory(time.Time{}, time.Time{}) + Expect(txnHistory).To(HaveLen(1)) + txn := txnHistory[0] + Expect(txn.PreRecord).To(BeFalse()) + Expect(txn.Start.After(startTime)).To(BeTrue()) + Expect(txn.Start.Before(txn.Stop)).To(BeTrue()) + Expect(txn.Stop.Before(stopTime)).To(BeTrue()) + Expect(txn.SeqNum).To(BeEquivalentTo(0)) + Expect(txn.TxnType).To(BeEquivalentTo(NBTransaction)) + Expect(txn.ResyncType).To(BeEquivalentTo(FullResync)) + Expect(txn.Description).To(Equal(description)) + checkRecordedValues(txn.Values, []RecordedKVPair{ + {Key: prefixA + baseValue1, Value: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), Origin: FromNB}, + }) + Expect(txn.PreErrors).To(BeEmpty()) + + txnOps := RecordedTxnOps{ + { + Operation: Modify, + Key: prefixA + baseValue1, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue()), + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixA + baseValue1 + "/item1", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item1")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + }, + { + Operation: Add, + Key: prefixA + baseValue1 + "/item2", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: 
FromNB, + }, + } + checkTxnOperations(txn.Planned, txnOps) + txnOps[2].IsPending = true + txnOps[2].NewErr = errors.New("failed to add value") + checkTxnOperations(txn.Executed, txnOps) + + // check flag stats + graphR := scheduler.graph.Read() + errorStats := graphR.GetFlagStats(ErrorFlagName, nil) + Expect(errorStats.TotalCount).To(BeEquivalentTo(1)) + pendingStats := graphR.GetFlagStats(PendingFlagName, nil) + Expect(pendingStats.TotalCount).To(BeEquivalentTo(1)) + derivedStats := graphR.GetFlagStats(DerivedFlagName, nil) + Expect(derivedStats.TotalCount).To(BeEquivalentTo(2)) + lastUpdateStats := graphR.GetFlagStats(LastUpdateFlagName, nil) + Expect(lastUpdateStats.TotalCount).To(BeEquivalentTo(3)) + lastChangeStats := graphR.GetFlagStats(LastChangeFlagName, nil) + Expect(lastChangeStats.TotalCount).To(BeEquivalentTo(1)) + descriptorStats := graphR.GetFlagStats(DescriptorFlagName, nil) + Expect(descriptorStats.TotalCount).To(BeEquivalentTo(3)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor1Name)) + Expect(descriptorStats.PerValueCount[descriptor1Name]).To(BeEquivalentTo(3)) + originStats := graphR.GetFlagStats(OriginFlagName, nil) + Expect(originStats.TotalCount).To(BeEquivalentTo(3)) + Expect(originStats.PerValueCount).To(HaveKey(FromNB.String())) + Expect(originStats.PerValueCount[FromNB.String()]).To(BeEquivalentTo(3)) + graphR.Release() + + // check error updates received through the channel + var errorNotif KeyWithError + Eventually(errorChan, time.Second).Should(Receive(&errorNotif)) + Expect(errorNotif.Key).To(Equal(prefixA + baseValue1 + "/item2")) + Expect(errorNotif.TxnOperation).To(BeEquivalentTo(Add)) + Expect(errorNotif.Error).ToNot(BeNil()) + Expect(errorNotif.Error.Error()).To(BeEquivalentTo("failed to add value")) + + // eventually the value should get "fixed" + Eventually(errorChan, 5*time.Second).Should(Receive(&errorNotif)) + Expect(errorNotif.Key).To(Equal(prefixA + baseValue1 + "/item2")) + 
Expect(errorNotif.Error).To(BeNil()) + + // check the state of SB after retry + Expect(mockSB.GetKeysWithInvalidData()).To(BeEmpty()) + // -> base value 1 + value = mockSB.GetValue(prefixA + baseValue1) + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewArrayValue("item1", "item2"))).To(BeTrue()) + Expect(value.Metadata).ToNot(BeNil()) + Expect(value.Metadata.(test.MetaWithInteger).GetInteger()).To(BeEquivalentTo(0)) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + // -> item1 derived from base value 1 + value = mockSB.GetValue(prefixA + baseValue1 + "/item1") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item1"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + Expect(mockSB.GetValues(nil)).To(HaveLen(3)) + // -> item2 derived from base value 1 was re-added + value = mockSB.GetValue(prefixA + baseValue1 + "/item2") + Expect(value).ToNot(BeNil()) + Expect(proto.Equal(value.Value, test.NewStringValue("item2"))).To(BeTrue()) + Expect(value.Metadata).To(BeNil()) + Expect(value.Origin).To(BeEquivalentTo(FromNB)) + Expect(mockSB.GetValues(nil)).To(HaveLen(3)) + + // check metadata + metadata, exists = nameToInteger.LookupByName(baseValue1) + Expect(exists).To(BeTrue()) + Expect(metadata.GetInteger()).To(BeEquivalentTo(0)) + + // check operations executed in SB during retry + opHistory = mockSB.PopHistoryOfOps() + Expect(opHistory).To(HaveLen(2)) + operation = opHistory[0] + Expect(operation.OpType).To(Equal(test.MockModify)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1)) + Expect(operation.Err).To(BeNil()) + operation = opHistory[1] + Expect(operation.OpType).To(Equal(test.MockAdd)) + Expect(operation.Descriptor).To(BeEquivalentTo(descriptor1Name)) + Expect(operation.Key).To(BeEquivalentTo(prefixA + baseValue1 + "/item2")) + Expect(operation.Err).To(BeNil()) + + // 
check retry transaction operations + txnHistory = scheduler.GetTransactionHistory(time.Time{}, time.Now()) + Expect(txnHistory).To(HaveLen(2)) + txn = txnHistory[1] + Expect(txn.PreRecord).To(BeFalse()) + Expect(txn.Start.After(stopTime)).To(BeTrue()) + Expect(txn.Start.Before(txn.Stop)).To(BeTrue()) + Expect(txn.Stop.Before(time.Now())).To(BeTrue()) + Expect(txn.SeqNum).To(BeEquivalentTo(1)) + Expect(txn.TxnType).To(BeEquivalentTo(RetryFailedOps)) + Expect(txn.ResyncType).To(BeEquivalentTo(NotResync)) + Expect(txn.Description).To(BeEmpty()) + checkRecordedValues(txn.Values, []RecordedKVPair{ + {Key: prefixA + baseValue1, Value: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), Origin: FromNB}, + }) + Expect(txn.PreErrors).To(BeEmpty()) + + txnOps = RecordedTxnOps{ + { + Operation: Modify, + Key: prefixA + baseValue1, + PrevValue: utils.RecordProtoMessage(test.NewArrayValue("item1")), + NewValue: utils.RecordProtoMessage(test.NewArrayValue("item1", "item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + IsRetry: true, + }, + { + Operation: Add, + Key: prefixA + baseValue1 + "/item2", + Derived: true, + NewValue: utils.RecordProtoMessage(test.NewStringValue("item2")), + PrevOrigin: FromNB, + NewOrigin: FromNB, + PrevErr: errors.New("failed to add value"), + IsRetry: true, + }, + } + checkTxnOperations(txn.Planned, txnOps) + checkTxnOperations(txn.Executed, txnOps) + + // check flag stats + graphR = scheduler.graph.Read() + errorStats = graphR.GetFlagStats(ErrorFlagName, nil) + Expect(errorStats.TotalCount).To(BeEquivalentTo(1)) + pendingStats = graphR.GetFlagStats(PendingFlagName, nil) + Expect(pendingStats.TotalCount).To(BeEquivalentTo(1)) + derivedStats = graphR.GetFlagStats(DerivedFlagName, nil) + Expect(derivedStats.TotalCount).To(BeEquivalentTo(4)) + lastUpdateStats = graphR.GetFlagStats(LastUpdateFlagName, nil) + Expect(lastUpdateStats.TotalCount).To(BeEquivalentTo(6)) + lastChangeStats = graphR.GetFlagStats(LastChangeFlagName, nil) + 
Expect(lastChangeStats.TotalCount).To(BeEquivalentTo(2)) + descriptorStats = graphR.GetFlagStats(DescriptorFlagName, nil) + Expect(descriptorStats.TotalCount).To(BeEquivalentTo(6)) + Expect(descriptorStats.PerValueCount).To(HaveKey(descriptor1Name)) + Expect(descriptorStats.PerValueCount[descriptor1Name]).To(BeEquivalentTo(6)) + originStats = graphR.GetFlagStats(OriginFlagName, nil) + Expect(originStats.TotalCount).To(BeEquivalentTo(6)) + Expect(originStats.PerValueCount).To(HaveKey(FromNB.String())) + Expect(originStats.PerValueCount[FromNB.String()]).To(BeEquivalentTo(6)) + graphR.Release() + + // close scheduler + err = scheduler.Close() + Expect(err).To(BeNil()) +} +*/ + +/* when graph dump is needed: +graphR := scheduler.graph.Read() +graphDump := graphR.Dump() +fmt.Print(graphDump) +graphR.Release() +*/ diff --git a/plugins/kvscheduler/txn_exec.go b/plugins/kvscheduler/txn_exec.go new file mode 100644 index 0000000000..b8f1475435 --- /dev/null +++ b/plugins/kvscheduler/txn_exec.go @@ -0,0 +1,877 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kvscheduler + +import ( + "fmt" + "sort" + "strings" + + "github.com/gogo/protobuf/proto" + + "github.com/ligato/cn-infra/logging" + + kvs "github.com/ligato/vpp-agent/plugins/kvscheduler/api" + "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/graph" + "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/utils" +) + +// applyValueArgs collects all arguments to applyValue method. +type applyValueArgs struct { + graphW graph.RWAccess + txn *transaction + kv kvForTxn + baseKey string + + isRetry bool + dryRun bool + + // set inside of the recursive chain of applyValue-s + isDepUpdate bool + isDerived bool + + // handling of dependency cycles + depth int + branch utils.KeySet +} + +// executeTransaction executes pre-processed transaction. +// If is enabled, Validate/Create/Delete/Update operations will not be executed +// and the graph will be returned to its original state at the end. +func (s *Scheduler) executeTransaction(txn *transaction, dryRun bool) (executed kvs.RecordedTxnOps) { + if s.logGraphWalk { + op := "execute transaction" + if dryRun { + op = "simulate transaction" + } + msg := fmt.Sprintf("%s (seqNum=%d)", op, txn.seqNum) + fmt.Printf("%s %s\n", nodeVisitBeginMark, msg) + defer fmt.Printf("%s %s\n", nodeVisitEndMark, msg) + } + downstreamResync := txn.txnType == kvs.NBTransaction && txn.nb.resyncType == kvs.DownstreamResync + graphW := s.graph.Write(!downstreamResync) + branch := utils.NewMapBasedKeySet() // branch of current recursive calls to applyValue used to handle cycles + + var revert bool + prevValues := make([]kvs.KeyValuePair, 0, len(txn.values)) + // execute transaction either in best-effort mode or with revert on the first failure + for _, kv := range txn.values { + ops, prevValue, err := s.applyValue( + &applyValueArgs{ + graphW: graphW, + txn: txn, + kv: kv, + baseKey: kv.key, + dryRun: dryRun, + isRetry: txn.txnType == kvs.RetryFailedOps, + branch: branch, + }) + executed = append(executed, ops...) 
+ prevValues = append(prevValues, kvs.KeyValuePair{}) + copy(prevValues[1:], prevValues) + prevValues[0] = prevValue + if err != nil { + if txn.txnType == kvs.NBTransaction && txn.nb.revertOnFailure { + // refresh failed value and trigger reverting + failedKey := utils.NewSingletonKeySet(kv.key) + s.refreshGraph(graphW, failedKey, nil, true) + graphW.Save() // certainly not dry-run + revert = true + break + } + } + } + + if revert { + // record graph state in-between failure and revert + graphW.Release() + graphW = s.graph.Write(true) + + // revert back to previous values + for _, kvPair := range prevValues { + ops, _, _ := s.applyValue( + &applyValueArgs{ + graphW: graphW, + txn: txn, + kv: kvForTxn{ + key: kvPair.Key, + value: kvPair.Value, + origin: kvs.FromNB, + isRevert: true, + }, + baseKey: kvPair.Key, + dryRun: dryRun, + branch: branch, + }) + executed = append(executed, ops...) + } + } + + // get rid of uninteresting intermediate pending Create/Delete operations + executed = s.compressTxnOps(executed) + + graphW.Release() + return executed +} + +// applyValue applies new value received from NB or SB. +// It returns the list of executed operations. 
+func (s *Scheduler) applyValue(args *applyValueArgs) (executed kvs.RecordedTxnOps, prevValue kvs.KeyValuePair, err error) { + // dependency cycle detection + if cycle := args.branch.Has(args.kv.key); cycle { + return executed, prevValue, err + } + args.branch.Add(args.kv.key) + defer args.branch.Del(args.kv.key) + + // verbose logging + if s.logGraphWalk { + endLog := s.logNodeVisit("applyValue", args) + defer endLog() + } + + // create new revision of the node for the given key-value pair + node := args.graphW.SetNode(args.kv.key) + + // remember previous value for a potential revert + prevValue = kvs.KeyValuePair{Key: node.GetKey(), Value: node.GetValue()} + + // remember previous value status to detect and notify about changes + prevState := getNodeState(node) + prevOp := getNodeLastOperation(node) + prevErr := getNodeErrorString(node) + prevDetails := getValueDetails(node) + + // prepare operation description - fill attributes that we can even before executing the operation + txnOp := s.preRecordTxnOp(args, node) + + // determine the operation type + if args.isDepUpdate { + s.determineDepUpdateOperation(node, txnOp) + if txnOp.Operation == kvs.TxnOperation_UNDEFINED { + // nothing needs to be updated + return + } + } else if args.kv.value == nil { + txnOp.Operation = kvs.TxnOperation_DELETE + } else if node.GetValue() == nil || !isNodeAvailable(node) { + txnOp.Operation = kvs.TxnOperation_CREATE + } else { + txnOp.Operation = kvs.TxnOperation_UPDATE + } + + // remaining txnOp attributes to fill: + // NewState bool + // NewErr error + // NOOP bool + // IsRecreate bool + + // update node flags + prevUpdate := getNodeLastUpdate(node) + lastUpdateFlag := &LastUpdateFlag{ + txnSeqNum: args.txn.seqNum, + txnOp: txnOp.Operation, + value: args.kv.value, + revert: args.kv.isRevert, + } + if args.txn.txnType == kvs.NBTransaction { + lastUpdateFlag.retryEnabled = args.txn.nb.retryEnabled + lastUpdateFlag.retryArgs = args.txn.nb.retryArgs + } else if prevUpdate != nil { + 
// inherit retry arguments from the last NB txn for this value + lastUpdateFlag.retryEnabled = prevUpdate.retryEnabled + lastUpdateFlag.retryArgs = prevUpdate.retryArgs + } + node.SetFlags(lastUpdateFlag) + + // if the value is already "broken" by this transaction, do not try to update + // anymore, unless this is a revert + // (needs to be refreshed first in the post-processing stage) + if (prevState == kvs.ValueState_FAILED || prevState == kvs.ValueState_RETRYING) && + !args.kv.isRevert && prevUpdate != nil && prevUpdate.txnSeqNum == args.txn.seqNum { + _, prevErr := getNodeError(node) + return executed, prevValue, prevErr + } + + // run selected operation + switch txnOp.Operation { + case kvs.TxnOperation_DELETE: + executed, err = s.applyDelete(node, txnOp, args, args.isDepUpdate) + case kvs.TxnOperation_CREATE: + executed, err = s.applyCreate(node, txnOp, args) + case kvs.TxnOperation_UPDATE: + executed, err = s.applyUpdate(node, txnOp, args) + } + + // detect value state changes + if !args.dryRun { + nodeR := args.graphW.GetNode(args.kv.key) + if prevUpdate == nil || prevState != getNodeState(nodeR) || prevOp != getNodeLastOperation(nodeR) || + prevErr != getNodeErrorString(nodeR) || !equalValueDetails(prevDetails, getValueDetails(nodeR)) { + s.updatedStates.Add(args.baseKey) + } + } + + return executed, prevValue, err +} + +// applyDelete removes value. 
+func (s *Scheduler) applyDelete(node graph.NodeRW, txnOp *kvs.RecordedTxnOp, args *applyValueArgs, pending bool) (executed kvs.RecordedTxnOps, err error) { + if s.logGraphWalk { + endLog := s.logNodeVisit("applyDelete", args) + defer endLog() + } + if !args.dryRun { + defer args.graphW.Save() + } + + if node.GetValue() == nil { + // remove value that does not exist => noop (do not even record) + args.graphW.DeleteNode(args.kv.key) + return executed, nil + } + + // reflect removal in the graph at the return + var ( + inheritedErr error + retriableErr bool + ) + defer func() { + if inheritedErr != nil { + // revert back to available, derived value failed instead + node.DelFlags(UnavailValueFlagName) + return + } + if err == nil { + node.DelFlags(ErrorFlagName) + if pending { + // deleted due to missing dependencies + txnOp.NewState = kvs.ValueState_PENDING + s.updateNodeState(node, txnOp.NewState, args) + } else { + // removed by request + txnOp.NewState = kvs.ValueState_REMOVED + if args.isDerived { + args.graphW.DeleteNode(args.kv.key) + } else { + s.updateNodeState(node, txnOp.NewState, args) + } + } + } else { + txnOp.NewErr = err + txnOp.NewState = s.markFailedValue(node, args, err, retriableErr) + } + executed = append(executed, txnOp) + }() + + if !isNodeAvailable(node) { + // removing value that was pending => just update the state in the graph + txnOp.NOOP = true + return + } + + // already mark as unavailable so that other nodes will not view it as satisfied + // dependency during removal + node.SetFlags(&UnavailValueFlag{}) + if !pending { + // state may still change if delete fails + s.updateNodeState(node, kvs.ValueState_REMOVED, args) + } + + // remove derived values + if !args.isDerived { + var derivedVals []kvForTxn + for _, derivedNode := range getDerivedNodes(node) { + derivedVals = append(derivedVals, kvForTxn{ + key: derivedNode.GetKey(), + value: nil, // delete + origin: args.kv.origin, + isRevert: args.kv.isRevert, + }) + } + derExecs, 
inheritedErr := s.applyDerived(derivedVals, args, false) + executed = append(executed, derExecs...) + if inheritedErr != nil { + err = inheritedErr + return + } + } + + // update values that depend on this kv-pair + executed = append(executed, s.runDepUpdates(node, args)...) + + // execute delete operation + descriptor := s.registry.GetDescriptorForKey(node.GetKey()) + handler := &descriptorHandler{descriptor} + if !args.dryRun && descriptor != nil { + if args.kv.origin != kvs.FromSB { + err = handler.delete(node.GetKey(), node.GetValue(), node.GetMetadata()) + } + if err != nil { + retriableErr = handler.isRetriableFailure(err) + } + if canNodeHaveMetadata(node) && descriptor.WithMetadata { + node.SetMetadata(nil) + } + } + return +} + +// applyCreate creates new value which previously didn't exist or was unavailable. +func (s *Scheduler) applyCreate(node graph.NodeRW, txnOp *kvs.RecordedTxnOp, args *applyValueArgs) (executed kvs.RecordedTxnOps, err error) { + if s.logGraphWalk { + endLog := s.logNodeVisit("applyCreate", args) + defer endLog() + } + if !args.dryRun { + defer args.graphW.Save() + } + node.SetValue(args.kv.value) + + // get descriptor + descriptor := s.registry.GetDescriptorForKey(args.kv.key) + handler := &descriptorHandler{descriptor} + if descriptor != nil { + node.SetFlags(&DescriptorFlag{descriptor.Name}) + node.SetLabel(handler.keyLabel(args.kv.key)) + } + + // handle unimplemented value + unimplemented := args.kv.origin == kvs.FromNB && !args.isDerived && descriptor == nil + if unimplemented { + if getNodeState(node) == kvs.ValueState_UNIMPLEMENTED { + // already known + return + } + node.SetFlags(&UnavailValueFlag{}) + node.DelFlags(ErrorFlagName) + txnOp.NOOP = true + txnOp.NewState = kvs.ValueState_UNIMPLEMENTED + s.updateNodeState(node, txnOp.NewState, args) + return kvs.RecordedTxnOps{txnOp}, nil + } + + // mark derived value + if args.isDerived { + node.SetFlags(&DerivedFlag{baseKey: args.baseKey}) + } + + // validate value + if 
!args.dryRun && args.kv.origin == kvs.FromNB { + err = handler.validate(node.GetKey(), node.GetValue()) + if err != nil { + node.SetFlags(&UnavailValueFlag{}) + txnOp.NewErr = err + txnOp.NewState = kvs.ValueState_INVALID + txnOp.NOOP = true + s.updateNodeState(node, txnOp.NewState, args) + node.SetFlags(&ErrorFlag{err: err, retriable: false}) + return kvs.RecordedTxnOps{txnOp}, err + } + } + + // apply new relations + _, updateExecs, inheritedErr := s.applyNewRelations(node, handler, args) + executed = append(executed, updateExecs...) + if inheritedErr != nil { + // error is not expected here, executed operations should be NOOPs + err = inheritedErr + return + } + + derives := handler.derivedValues(node.GetKey(), node.GetValue()) + dependencies := handler.dependencies(node.GetKey(), node.GetValue()) + node.SetTargets(constructTargets(dependencies, derives)) + + if !isNodeReady(node) { + // if not ready, nothing to do + node.SetFlags(&UnavailValueFlag{}) + node.DelFlags(ErrorFlagName) + txnOp.NewState = kvs.ValueState_PENDING + txnOp.NOOP = true + s.updateNodeState(node, txnOp.NewState, args) + return kvs.RecordedTxnOps{txnOp}, nil + } + + // execute Create operation + if !args.dryRun && descriptor != nil { + var metadata interface{} + + if args.kv.origin != kvs.FromSB { + metadata, err = handler.create(node.GetKey(), node.GetValue()) + } else { + // already created in SB + metadata = args.kv.metadata + } + + if err != nil { + // create failed => assume the value is unavailable + node.SetFlags(&UnavailValueFlag{}) + retriableErr := handler.isRetriableFailure(err) + txnOp.NewErr = err + txnOp.NewState = s.markFailedValue(node, args, err, retriableErr) + return kvs.RecordedTxnOps{txnOp}, err + } + + // add metadata to the map + if canNodeHaveMetadata(node) && descriptor.WithMetadata { + node.SetMetadataMap(descriptor.Name) + node.SetMetadata(metadata) + } + } + + // finalize node and save before going to derived values + dependencies + node.DelFlags(ErrorFlagName, 
UnavailValueFlagName) + if args.kv.origin == kvs.FromSB { + txnOp.NewState = kvs.ValueState_OBTAINED + } else { + txnOp.NewState = kvs.ValueState_CONFIGURED + } + s.updateNodeState(node, txnOp.NewState, args) + executed = append(executed, txnOp) + if !args.dryRun { + args.graphW.Save() + } + + // update values that depend on this kv-pair + executed = append(executed, s.runDepUpdates(node, args)...) + + // created derived values + if !args.isDerived { + var derivedVals []kvForTxn + for _, derivedVal := range derives { + derivedVals = append(derivedVals, kvForTxn{ + key: derivedVal.Key, + value: derivedVal.Value, + origin: args.kv.origin, + isRevert: args.kv.isRevert, + }) + } + derExecs, inheritedErr := s.applyDerived(derivedVals, args, true) + executed = append(executed, derExecs...) + if inheritedErr != nil { + err = inheritedErr + } + } + return +} + +// applyUpdate applies new value to existing non-pending value. +func (s *Scheduler) applyUpdate(node graph.NodeRW, txnOp *kvs.RecordedTxnOp, args *applyValueArgs) (executed kvs.RecordedTxnOps, err error) { + if s.logGraphWalk { + endLog := s.logNodeVisit("applyUpdate", args) + defer endLog() + } + if !args.dryRun { + defer args.graphW.Save() + } + + // validate new value + descriptor := s.registry.GetDescriptorForKey(args.kv.key) + handler := &descriptorHandler{descriptor} + if !args.dryRun && args.kv.origin == kvs.FromNB { + err = handler.validate(node.GetKey(), node.GetValue()) + if err != nil { + node.SetValue(args.kv.value) // save the invalid value + node.SetFlags(&UnavailValueFlag{}) + txnOp.NewErr = err + txnOp.NewState = kvs.ValueState_INVALID + txnOp.NOOP = true + s.updateNodeState(node, txnOp.NewState, args) + node.SetFlags(&ErrorFlag{err: err, retriable: false}) + return kvs.RecordedTxnOps{txnOp}, err + } + } + + // compare new value with the old one + equivalent := handler.equivalentValues(node.GetKey(), node.GetValue(), args.kv.value) + + // re-create the value if required by the descriptor + recreate 
:= !equivalent && + args.kv.origin != kvs.FromSB && + handler.updateWithRecreate(args.kv.key, node.GetValue(), args.kv.value, node.GetMetadata()) + + if recreate { + // record operation as two - delete followed by create + delOp := s.preRecordTxnOp(args, node) + delOp.Operation = kvs.TxnOperation_DELETE + delOp.NewValue = nil + delOp.IsRecreate = true + createOp := s.preRecordTxnOp(args, node) + createOp.Operation = kvs.TxnOperation_CREATE + createOp.PrevValue = nil + createOp.IsRecreate = true + // remove obsolete value + delExec, inheritedErr := s.applyDelete(node, delOp, args, false) + executed = append(executed, delExec...) + if inheritedErr != nil { + err = inheritedErr + return + } + // create the new revision of the value + createExec, inheritedErr := s.applyCreate(node, createOp, args) + executed = append(executed, createExec...) + err = inheritedErr + return + } + + // save the new value + prevValue := node.GetValue() + node.SetValue(args.kv.value) + + // apply new relations + derives, updateExecs, inheritedErr := s.applyNewRelations(node, handler, args) + executed = append(executed, updateExecs...) + if inheritedErr != nil { + err = inheritedErr + return + } + + // if the new dependencies are not satisfied => delete and set as pending with the new value + if !isNodeReady(node) { + delExec, inheritedErr := s.applyDelete(node, txnOp, args, true) + executed = append(executed, delExec...) 
+ if inheritedErr != nil { + err = inheritedErr + } + return + } + + // execute update operation + if !args.dryRun && !equivalent && descriptor != nil { + var newMetadata interface{} + + // call Update handler + if args.kv.origin != kvs.FromSB { + newMetadata, err = handler.update(node.GetKey(), prevValue, node.GetValue(), node.GetMetadata()) + } else { + // already modified in SB + newMetadata = args.kv.metadata + } + + if err != nil { + retriableErr := handler.isRetriableFailure(err) + txnOp.NewErr = err + txnOp.NewState = s.markFailedValue(node, args, err, retriableErr) + executed = append(executed, txnOp) + return + } + + // update metadata + if canNodeHaveMetadata(node) && descriptor.WithMetadata { + node.SetMetadata(newMetadata) + } + } + + // finalize node and save before going to new/modified derived values + dependencies + node.DelFlags(ErrorFlagName, UnavailValueFlagName) + if args.kv.origin == kvs.FromSB { + txnOp.NewState = kvs.ValueState_OBTAINED + } else { + txnOp.NewState = kvs.ValueState_CONFIGURED + } + s.updateNodeState(node, txnOp.NewState, args) + + // if the value was modified or the state changed, record operation + if !equivalent || txnOp.PrevState != txnOp.NewState { + // do not record transition if it only confirms that the value is in sync + confirmsInSync := equivalent && + txnOp.PrevState == kvs.ValueState_DISCOVERED && + txnOp.NewState == kvs.ValueState_CONFIGURED + if !confirmsInSync { + txnOp.NOOP = equivalent + executed = append(executed, txnOp) + } + } + + // save before going into derived values + if !args.dryRun { + args.graphW.Save() + } + + if !args.isDerived { + // update/create derived values + var derivedVals []kvForTxn + for _, derivedVal := range derives { + derivedVals = append(derivedVals, kvForTxn{ + key: derivedVal.Key, + value: derivedVal.Value, + origin: args.kv.origin, + isRevert: args.kv.isRevert, + }) + } + derExecs, inheritedErr := s.applyDerived(derivedVals, args, true) + executed = append(executed, derExecs...) 
+ if inheritedErr != nil { + err = inheritedErr + } + } + return +} + +// applyNewRelations updates relation definitions and removes obsolete derived +// values. +func (s *Scheduler) applyNewRelations(node graph.NodeRW, handler *descriptorHandler, + args *applyValueArgs) (derivedVals []kvs.KeyValuePair, executed kvs.RecordedTxnOps, err error) { + + // get the set of derived keys before update + prevDerived := getDerivedKeys(node) + + // set new targets + derivedVals = nil + if !args.isDerived { + derivedVals = handler.derivedValues(node.GetKey(), node.GetValue()) + } + dependencies := handler.dependencies(node.GetKey(), node.GetValue()) + node.SetTargets(constructTargets(dependencies, derivedVals)) + + if args.isDerived { + return + } + + // remove obsolete derived values + var obsoleteDerVals []kvForTxn + prevDerived.Subtract(getDerivedKeys(node)) + for _, obsolete := range prevDerived.Iterate() { + obsoleteDerVals = append(obsoleteDerVals, kvForTxn{ + key: obsolete, + value: nil, // delete + origin: args.kv.origin, + isRevert: args.kv.isRevert, + }) + } + executed, err = s.applyDerived(obsoleteDerVals, args, false) + return +} + +// applyDerived (re-)applies the given list of derived values. 
+func (s *Scheduler) applyDerived(derivedVals []kvForTxn, args *applyValueArgs, check bool) (executed kvs.RecordedTxnOps, err error) { + var wasErr error + + // order derivedVals by key (just for deterministic behaviour which simplifies testing) + sort.Slice(derivedVals, func(i, j int) bool { return derivedVals[i].key < derivedVals[j].key }) + + for _, derived := range derivedVals { + if check && !s.validDerivedKV(args.graphW, derived, args.txn.seqNum) { + continue + } + ops, _, err := s.applyValue( + &applyValueArgs{ + graphW: args.graphW, + txn: args.txn, + kv: derived, + baseKey: args.baseKey, + isRetry: args.isRetry, + dryRun: args.dryRun, + isDerived: true, // <- is derived + branch: args.branch, + depth: args.depth, + }) + if err != nil { + wasErr = err + } + executed = append(executed, ops...) + } + return executed, wasErr +} + +// runDepUpdates triggers dependency updates on all nodes that depend on the given node. +func (s *Scheduler) runDepUpdates(node graph.Node, args *applyValueArgs) (executed kvs.RecordedTxnOps) { + depNodes := node.GetSources(DependencyRelation) + + // order depNodes by key (just for deterministic behaviour which simplifies testing) + sort.Slice(depNodes, func(i, j int) bool { return depNodes[i].GetKey() < depNodes[j].GetKey() }) + + for _, depNode := range depNodes { + if getNodeOrigin(depNode) != kvs.FromNB { + continue + } + var value proto.Message + if lastUpdate := getNodeLastUpdate(depNode); lastUpdate != nil { + value = lastUpdate.value + } else { + // state=DISCOVERED + value = depNode.GetValue() + } + ops, _, _ := s.applyValue( + &applyValueArgs{ + graphW: args.graphW, + txn: args.txn, + kv: kvForTxn{ + key: depNode.GetKey(), + value: value, + origin: getNodeOrigin(depNode), + isRevert: args.kv.isRevert, + }, + baseKey: getNodeBaseKey(depNode), + isRetry: args.isRetry, + dryRun: args.dryRun, + isDerived: isNodeDerived(depNode), + isDepUpdate: true, // <- dependency update + branch: args.branch, + depth: args.depth, + }) + 
executed = append(executed, ops...) + } + return executed +} + +// determineDepUpdateOperation determines if the value needs update wrt. dependencies +// and what operation to execute. +func (s *Scheduler) determineDepUpdateOperation(node graph.NodeRW, txnOp *kvs.RecordedTxnOp) { + // create node if dependencies are now all met + if !isNodeAvailable(node) { + if !isNodeReady(node) { + // nothing to do + return + } + txnOp.Operation = kvs.TxnOperation_CREATE + } else if !isNodeReady(node) { + // node should not be available anymore + txnOp.Operation = kvs.TxnOperation_DELETE + } +} + +// compressTxnOps removes uninteresting intermediate pending Create/Delete operations. +func (s *Scheduler) compressTxnOps(executed kvs.RecordedTxnOps) kvs.RecordedTxnOps { + // compress Create operations + compressed := make(kvs.RecordedTxnOps, 0, len(executed)) + for i, op := range executed { + compressedOp := false + if op.Operation == kvs.TxnOperation_CREATE && op.NewState == kvs.ValueState_PENDING { + for j := i + 1; j < len(executed); j++ { + if executed[j].Key == op.Key { + if executed[j].Operation == kvs.TxnOperation_CREATE { + // compress + compressedOp = true + executed[j].PrevValue = op.PrevValue + executed[j].PrevErr = op.PrevErr + executed[j].PrevState = op.PrevState + } + break + } + } + } + if !compressedOp { + compressed = append(compressed, op) + } + } + + // compress Delete operations + length := len(compressed) + for i := length - 1; i >= 0; i-- { + op := compressed[i] + compressedOp := false + if op.Operation == kvs.TxnOperation_DELETE && op.PrevState == kvs.ValueState_PENDING { + for j := i - 1; j >= 0; j-- { + if compressed[j].Key == op.Key { + if compressed[j].Operation == kvs.TxnOperation_DELETE { + // compress + compressedOp = true + compressed[j].NewValue = op.NewValue + compressed[j].NewErr = op.NewErr + compressed[j].NewState = op.NewState + } + break + } + } + } + if compressedOp { + copy(compressed[i:], compressed[i+1:]) + length-- + } + } + compressed = 
compressed[:length] + return compressed +} + +// updateNodeState updates node state if it is really necessary. +func (s *Scheduler) updateNodeState(node graph.NodeRW, newState kvs.ValueState, args *applyValueArgs) { + if getNodeState(node) != newState { + if s.logGraphWalk { + indent := strings.Repeat(" ", (args.depth+1)*2) + fmt.Printf("%s-> change value state from %v to %v\n", indent, getNodeState(node), newState) + } + node.SetFlags(&ValueStateFlag{valueState: newState}) + } +} + +func (s *Scheduler) markFailedValue(node graph.NodeRW, args *applyValueArgs, err error, + retriableErr bool) (newState kvs.ValueState) { + + // decide value state between FAILED and RETRYING + newState = kvs.ValueState_FAILED + toBeReverted := args.txn.txnType == kvs.NBTransaction && args.txn.nb.revertOnFailure && !args.kv.isRevert + if retriableErr && !toBeReverted { + // consider operation retry + var alreadyRetried bool + if args.txn.txnType == kvs.RetryFailedOps { + baseKey := getNodeBaseKey(node) + _, alreadyRetried = args.txn.retry.keys[baseKey] + } + attempt := 1 + if alreadyRetried { + attempt = args.txn.retry.attempt + 1 + } + lastUpdate := getNodeLastUpdate(node) + if lastUpdate.retryEnabled && lastUpdate.retryArgs != nil && + (lastUpdate.retryArgs.MaxCount == 0 || attempt <= lastUpdate.retryArgs.MaxCount) { + // retry is allowed + newState = kvs.ValueState_RETRYING + } + } + s.updateNodeState(node, newState, args) + node.SetFlags(&ErrorFlag{err: err, retriable: retriableErr}) + return newState +} + +func (s *Scheduler) logNodeVisit(operation string, args *applyValueArgs) func() { + var msg string + if args.isDepUpdate { + msg = fmt.Sprintf("%s (key = %s, dep-update)", operation, args.kv.key) + } else { + msg = fmt.Sprintf("%s (key = %s)", operation, args.kv.key) + } + args.depth++ + indent := strings.Repeat(" ", args.depth*2) + fmt.Printf("%s%s %s\n", indent, nodeVisitBeginMark, msg) + return func() { + args.depth-- + fmt.Printf("%s%s %s\n", indent, nodeVisitEndMark, msg) + 
} +} + +// validDerivedKV check validity of a derived KV pair. +func (s *Scheduler) validDerivedKV(graphR graph.ReadAccess, kv kvForTxn, txnSeqNum uint64) bool { + node := graphR.GetNode(kv.key) + if kv.value == nil { + s.Log.WithFields(logging.Fields{ + "txnSeqNum": txnSeqNum, + "key": kv.key, + }).Warn("Derived nil value") + return false + } + if node != nil { + if !isNodeDerived(node) { + s.Log.WithFields(logging.Fields{ + "txnSeqNum": txnSeqNum, + "value": kv.value, + "key": kv.key, + }).Warn("Skipping derived value colliding with a base value") + return false + } + } + return true +} diff --git a/plugins/kvscheduler/txn_order.go b/plugins/kvscheduler/txn_order.go new file mode 100644 index 0000000000..83e7b0a736 --- /dev/null +++ b/plugins/kvscheduler/txn_order.go @@ -0,0 +1,65 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kvscheduler + +import ( + "sort" +) + +// orderValuesByOp orders values by operations (in average should yield the shortest +// sequence of operations): +// 1. delete +// 2. update with re-create +// 3. create +// 4. 
update +func (s *Scheduler) orderValuesByOp(values []kvForTxn) []kvForTxn { + graphR := s.graph.Read() + defer graphR.Release() + + // first order values alphabetically by keys to get deterministic behaviour and + // output that is easier to read + sort.Slice(values, func(i, j int) bool { + return values[i].key < values[j].key + }) + + // sort values by operations + var delete, recreate, create, update []kvForTxn + for _, kv := range values { + descriptor := s.registry.GetDescriptorForKey(kv.key) + handler := &descriptorHandler{descriptor} + node := graphR.GetNode(kv.key) + + if kv.value == nil { + delete = append(delete, kv) + continue + } + if node == nil || node.GetFlag(UnavailValueFlagName) != nil { + create = append(create, kv) + continue + } + if handler.updateWithRecreate(kv.key, node.GetValue(), kv.value, node.GetMetadata()) { + recreate = append(recreate, kv) + } else { + update = append(update, kv) + } + } + + ordered := make([]kvForTxn, 0, len(values)) + ordered = append(ordered, delete...) + ordered = append(ordered, recreate...) + ordered = append(ordered, create...) + ordered = append(ordered, update...) + return ordered +} diff --git a/plugins/kvscheduler/txn_process.go b/plugins/kvscheduler/txn_process.go new file mode 100644 index 0000000000..6a65e5644d --- /dev/null +++ b/plugins/kvscheduler/txn_process.go @@ -0,0 +1,518 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kvscheduler + +import ( + "time" + + "github.com/gogo/protobuf/proto" + + "github.com/ligato/cn-infra/logging" + + kvs "github.com/ligato/vpp-agent/plugins/kvscheduler/api" + "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/graph" + "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/utils" +) + +// transaction represents kscheduler transaction that is being queued/processed. +// Once finalized, it is recorded as instance of RecordedTxn and these data +// are thrown away. +type transaction struct { + seqNum uint64 + txnType kvs.TxnType + values []kvForTxn + nb *nbTxn // defined for NB transactions + retry *retryTxn // defined for retry of failed operations +} + +// kvForTxn represents a new value for a given key to be applied in a transaction. +type kvForTxn struct { + key string + value proto.Message + metadata kvs.Metadata + origin kvs.ValueOrigin + isRevert bool +} + +// nbTxn encapsulates data for NB transaction. +type nbTxn struct { + resyncType kvs.ResyncType + verboseRefresh bool + isBlocking bool + + retryEnabled bool + retryArgs *kvs.RetryOpt + + revertOnFailure bool + description string + resultChan chan txnResult +} + +// retryTxn encapsulates data for retry of failed operations. +type retryTxn struct { + retryTxnMeta + keys map[string]uint64 // key -> value revision (last update) when the retry was enqueued +} + +// retryTxnMeta contains metadata for Retry transaction. +type retryTxnMeta struct { + txnSeqNum uint64 + delay time.Duration + attempt int +} + +// txnResult represents transaction result. +type txnResult struct { + err error + txnSeqNum uint64 +} + +// consumeTransactions pulls the oldest queued transaction and starts the processing. +func (s *Scheduler) consumeTransactions() { + defer s.wg.Done() + for { + txn, canceled := s.dequeueTxn() + if canceled { + return + } + s.processTransaction(txn) + } +} + +// processTransaction processes transaction in 6 steps: +// 1. 
Pre-processing: transaction parameters are initialized, retry operations +// are filtered from the obsolete ones and for the resync the graph is refreshed +// 2. Simulation: simulating transaction without actually executing any of the +// Create/Delete/Update operations in order to obtain the "execution plan" +// 3. Pre-recording: logging transaction arguments + plan before execution to +// persist some information in case there is a crash during execution +// 4. Execution: executing the transaction, collecting errors +// 5. Recording: recording the finalized transaction (log + in-memory) +// 6. Post-processing: scheduling retry for failed operations, propagating value +// state updates to the subscribers and returning error/nil to the caller +// of blocking commit +func (s *Scheduler) processTransaction(txn *transaction) { + var ( + simulatedOps kvs.RecordedTxnOps + executedOps kvs.RecordedTxnOps + startTime time.Time + stopTime time.Time + ) + s.txnLock.Lock() + defer s.txnLock.Unlock() + + // 1. Pre-processing: + startTime = time.Now() + skipTxnExec := s.preProcessTransaction(txn) + + // 2. Ordering: + if !skipTxnExec { + txn.values = s.orderValuesByOp(txn.values) + } + + // 3. Simulation: + if !skipTxnExec { + simulatedOps = s.executeTransaction(txn, true) + } + + // 4. Pre-recording + preTxnRecord := s.preRecordTransaction(txn, simulatedOps) + + // 5. Execution: + if !skipTxnExec { + executedOps = s.executeTransaction(txn, false) + } + stopTime = time.Now() + + // 6. Recording: + s.recordTransaction(preTxnRecord, executedOps, startTime, stopTime) + + // 7. Post-processing: + s.postProcessTransaction(txn, executedOps) +} + +// preProcessTransaction initializes transaction parameters, filters obsolete retry +// operations and refreshes the graph for resync. 
+func (s *Scheduler) preProcessTransaction(txn *transaction) (skip bool) { + // allocate new transaction sequence number + txn.seqNum = s.txnSeqNumber + s.txnSeqNumber++ + + switch txn.txnType { + case kvs.SBNotification: + skip = s.preProcessNotification(txn) + case kvs.NBTransaction: + skip = s.preProcessNBTransaction(txn) + case kvs.RetryFailedOps: + skip = s.preProcessRetryTxn(txn) + } + + return skip +} + +// preProcessNotification filters out non-valid SB notification. +func (s *Scheduler) preProcessNotification(txn *transaction) (skip bool) { + graphR := s.graph.Read() + defer graphR.Release() + + kv := txn.values[0] + skip = s.filterNotification(graphR, kv.key, kv.value, txn.seqNum) + return +} + +// preProcessNBTransaction refreshes the graph for resync. +func (s *Scheduler) preProcessNBTransaction(txn *transaction) (skip bool) { + if txn.nb.resyncType == kvs.NotResync { + // nothing to do in the pre-processing stage + return false + } + + // for resync refresh the graph + collect deletes + graphW := s.graph.Write(false) + defer graphW.Release() + defer graphW.Save() + s.resyncCount++ + + if txn.nb.resyncType == kvs.DownstreamResync { + // for downstream resync it is assumed that scheduler is in-sync with NB + currentNodes := graphW.GetNodes(nil, nbBaseValsSelectors()...) 
+ for _, node := range currentNodes { + lastUpdate := getNodeLastUpdate(node) + txn.values = append(txn.values, + kvForTxn{ + key: node.GetKey(), + value: lastUpdate.value, + origin: kvs.FromNB, + isRevert: lastUpdate.revert, + }) + } + } + + // build the set of keys currently in NB + nbKeys := utils.NewMapBasedKeySet() + for _, kv := range txn.values { + nbKeys.Add(kv.key) + } + + // unless this is only UpstreamResync, refresh the graph with the current + // state of SB + if txn.nb.resyncType != kvs.UpstreamResync { + s.refreshGraph(graphW, nil, &resyncData{ + first: s.resyncCount == 1, + values: txn.values, + }, txn.nb.verboseRefresh) + } + + // collect deletes for obsolete values + currentNodes := graphW.GetNodes(nil, nbBaseValsSelectors()...) + for _, node := range currentNodes { + if nbKey := nbKeys.Has(node.GetKey()); nbKey { + continue + } + txn.values = append(txn.values, + kvForTxn{ + key: node.GetKey(), + value: nil, // remove + origin: kvs.FromNB, + }) + } + + // update (record) SB values + sbNodes := graphW.GetNodes(nil, sbBaseValsSelectors()...) + for _, node := range sbNodes { + if nbKey := nbKeys.Has(node.GetKey()); nbKey { + continue + } + txn.values = append(txn.values, + kvForTxn{ + key: node.GetKey(), + value: node.GetValue(), + origin: kvs.FromSB, + }) + } + + skip = len(txn.values) == 0 + return +} + +// preProcessRetryTxn filters out obsolete retry operations. 
+func (s *Scheduler) preProcessRetryTxn(txn *transaction) (skip bool) { + graphR := s.graph.Read() + defer graphR.Release() + + for key, retryRev := range txn.retry.keys { + node := graphR.GetNode(key) + if node == nil { + continue + } + lastUpdate := getNodeLastUpdate(node) + if lastUpdate == nil || lastUpdate.txnSeqNum > retryRev { + // obsolete retry, the value has been updated since the failure + continue + } + txn.values = append(txn.values, + kvForTxn{ + key: key, + value: lastUpdate.value, + origin: kvs.FromNB, + isRevert: lastUpdate.revert, + }) + } + skip = len(txn.values) == 0 + return +} + +// postProcessTransaction schedules retry for failed operations and propagates +// value state updates to the subscribers and error/nil to the caller of a blocking +// commit. +func (s *Scheduler) postProcessTransaction(txn *transaction, executed kvs.RecordedTxnOps) { + // collect new failures (combining derived with base) + toRetry := utils.NewSliceBasedKeySet() + toRefresh := utils.NewSliceBasedKeySet() + var verboseRefresh bool + graphR := s.graph.Read() + for _, op := range executed { + node := graphR.GetNode(op.Key) + if node == nil { + continue + } + state := getNodeState(node) + baseKey := getNodeBaseKey(node) + if state == kvs.ValueState_UNIMPLEMENTED { + continue + } + if state == kvs.ValueState_FAILED { + toRefresh.Add(baseKey) + verboseRefresh = true + } + if state == kvs.ValueState_RETRYING { + toRefresh.Add(baseKey) + toRetry.Add(baseKey) + verboseRefresh = true + } + if s.verifyMode { + toRefresh.Add(baseKey) + } + } + graphR.Release() + + // refresh base values which themselves are in a failed state or have derived failed values + // - in verifyMode all updated values are re-freshed + if toRefresh.Length() > 0 { + graphW := s.graph.Write(false) + s.refreshGraph(graphW, toRefresh, nil, verboseRefresh) + graphW.Save() + + // split values based on the retry metadata + retryTxns := make(map[retryTxnMeta]*retryTxn) + for _, retryKey := range 
toRetry.Iterate() { + node := graphW.GetNode(retryKey) + lastUpdate := getNodeLastUpdate(node) + // did retry fail? + var alreadyRetried bool + if txn.txnType == kvs.RetryFailedOps { + _, alreadyRetried = txn.retry.keys[retryKey] + } + // determine how long to delay the retry + delay := lastUpdate.retryArgs.Period + if alreadyRetried && lastUpdate.retryArgs.ExpBackoff { + delay = txn.retry.delay * 2 + } + // determine which attempt this is + attempt := 1 + if alreadyRetried { + attempt = txn.retry.attempt + 1 + } + // determine which transaction this retry is for + seqNum := txn.seqNum + if alreadyRetried { + seqNum = txn.retry.txnSeqNum + } + // add key into the set to retry within a single transaction + retryMeta := retryTxnMeta{ + txnSeqNum: seqNum, + delay: delay, + attempt: attempt, + } + if _, has := retryTxns[retryMeta]; !has { + retryTxns[retryMeta] = &retryTxn{ + retryTxnMeta: retryMeta, + keys: make(map[string]uint64), + } + } + retryTxns[retryMeta].keys[retryKey] = lastUpdate.txnSeqNum + } + + // schedule a series of re-try transactions for failed values + for _, retryTxn := range retryTxns { + s.enqueueRetry(retryTxn) + } + graphW.Release() + } + + // collect state updates + var stateUpdates []*kvs.BaseValueStatus + removed := utils.NewSliceBasedKeySet() + graphR = s.graph.Read() + for _, key := range s.updatedStates.Iterate() { + node := graphR.GetNode(key) + status := getValueStatus(node, key) + if status.Value.State == kvs.ValueState_REMOVED { + removed.Add(key) + } + stateUpdates = append(stateUpdates, status) + } + graphR.Release() + // clear the set of updated states + s.updatedStates = utils.NewSliceBasedKeySet() + + // if enabled, verify transaction effects + var kvErrors []kvs.KeyWithError + if s.verifyMode { + graphR = s.graph.Read() + for _, op := range executed { + key := op.Key + node := graphR.GetNode(key) + if node == nil { + continue + } + state := getNodeState(node) + if state == kvs.ValueState_RETRYING || state == kvs.ValueState_FAILED 
{ + // effects of failed operations are uncertain and cannot be therefore verified + continue + } + expValue := getNodeLastAppliedValue(node) + lastOp := getNodeLastOperation(node) + expToNotExist := expValue == nil || state == kvs.ValueState_PENDING || state == kvs.ValueState_INVALID + if expToNotExist && isNodeAvailable(node) { + kvErrors = append(kvErrors, kvs.KeyWithError{ + Key: key, + Error: kvs.NewVerificationError(key, kvs.ExpectedToNotExist), + TxnOperation: lastOp, + }) + continue + } + if expValue == nil { + // properly removed + continue + } + if !expToNotExist && !isNodeAvailable(node) { + kvErrors = append(kvErrors, kvs.KeyWithError{ + Key: key, + Error: kvs.NewVerificationError(key, kvs.ExpectedToExist), + TxnOperation: lastOp, + }) + continue + } + descriptor := s.registry.GetDescriptorForKey(key) + handler := &descriptorHandler{descriptor} + equivalent := handler.equivalentValues(key, node.GetValue(), expValue) + if !equivalent { + kvErrors = append(kvErrors, kvs.KeyWithError{ + Key: key, + Error: kvs.NewVerificationError(key, kvs.NotEquivalent), + TxnOperation: lastOp, + }) + s.Log.WithFields( + logging.Fields{ + "applied": expValue, + "refreshed": node.GetValue(), + }).Warn("Detected non-equivalent applied vs. refreshed values") + } + } + graphR.Release() + } + + // build transaction error + var txnErr error + for _, txnOp := range executed { + if txnOp.NewErr == nil { + continue + } + kvErrors = append(kvErrors, + kvs.KeyWithError{ + Key: txnOp.Key, + TxnOperation: txnOp.Operation, + Error: txnOp.NewErr, + }) + } + if len(kvErrors) > 0 { + txnErr = kvs.NewTransactionError(nil, kvErrors) + } + if txn.txnType == kvs.NBTransaction && txn.nb.isBlocking { + // for blocking txn, send non-nil errors to the resultChan + select { + case txn.nb.resultChan <- txnResult{txnSeqNum: txn.seqNum, err: txnErr}: + default: + s.Log.WithField("txnSeq", txn.seqNum). 
+ Warn("Failed to deliver transaction result to the caller") + } + } else { + // for asynchronous events, just log the transaction error + if txnErr == nil { + s.Log.Infof("Transaction %d successful!", txn.seqNum) + } else { + s.Log.Error(txnErr.Error()) + } + } + + // send value status updates to the watchers + for _, watcher := range s.valStateWatchers { + for _, stateUpdate := range stateUpdates { + if watcher.selector == nil || watcher.selector(stateUpdate.Value.Key) { + select { + case watcher.channel <- stateUpdate: + default: + s.Log.WithField("txnSeq", txn.seqNum). + Warn("Failed to deliver value status update to a watcher") + } + } + } + } + + // delete removed values from the graph after the notifications have been sent + if removed.Length() > 0 { + graphW := s.graph.Write(true) + for _, key := range removed.Iterate() { + graphW.DeleteNode(key) + } + graphW.Save() + graphW.Release() + } +} + +// filterNotification checks if the received notification should be filtered +// or normally applied. +func (s *Scheduler) filterNotification(graphR graph.ReadAccess, key string, value proto.Message, txnSeqNum uint64) bool { + descriptor := s.registry.GetDescriptorForKey(key) + if descriptor == nil { + s.Log.WithFields(logging.Fields{ + "txnSeqNum": txnSeqNum, + "key": key, + }).Debug("Ignoring unimplemented notification") + return true + } + node := graphR.GetNode(key) + if node != nil { + if getNodeOrigin(node) == kvs.FromNB { + s.Log.WithFields(logging.Fields{ + "txnSeqNum": txnSeqNum, + "key": key, + }).Debug("Ignoring notification for a NB-managed value") + return true + } + } + return false +} diff --git a/plugins/kvscheduler/txn_queue.go b/plugins/kvscheduler/txn_queue.go new file mode 100644 index 0000000000..0d3e09c584 --- /dev/null +++ b/plugins/kvscheduler/txn_queue.go @@ -0,0 +1,78 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kvscheduler + +import ( + "time" + + "github.com/ligato/cn-infra/logging" + + kvs "github.com/ligato/vpp-agent/plugins/kvscheduler/api" +) + +// enqueueTxn adds transaction into the FIFO queue (channel) for execution. +func (s *Scheduler) enqueueTxn(txn *transaction) error { + if txn.txnType == kvs.NBTransaction && txn.nb.isBlocking { + select { + case <-s.ctx.Done(): + return kvs.ErrClosedScheduler + case s.txnQueue <- txn: + return nil + } + } + select { + case <-s.ctx.Done(): + return kvs.ErrClosedScheduler + case s.txnQueue <- txn: + return nil + default: + return kvs.ErrTxnQueueFull + } +} + +// dequeueTxn pulls the oldest queued transaction. +func (s *Scheduler) dequeueTxn() (txn *transaction, canceled bool) { + select { + case <-s.ctx.Done(): + return nil, true + case txn = <-s.txnQueue: + return txn, false + } +} + +// enqueueRetry schedules retry for failed operations. +func (s *Scheduler) enqueueRetry(args *retryTxn) { + go s.delayRetry(args) +} + +// delayRetry postpones retry until a given time period has elapsed. 
+func (s *Scheduler) delayRetry(args *retryTxn) { + s.wg.Add(1) + defer s.wg.Done() + + select { + case <-s.ctx.Done(): + return + case <-time.After(args.delay): + err := s.enqueueTxn(&transaction{txnType: kvs.RetryFailedOps, retry: args}) + if err != nil { + s.Log.WithFields(logging.Fields{ + "txnSeqNum": args.txnSeqNum, + "err": err, + }).Warn("Failed to enqueue re-try for failed operations") + s.enqueueRetry(args) // try again with the same time period + } + } +} diff --git a/plugins/kvscheduler/txn_record.go b/plugins/kvscheduler/txn_record.go new file mode 100644 index 0000000000..0f8d6bdf58 --- /dev/null +++ b/plugins/kvscheduler/txn_record.go @@ -0,0 +1,220 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kvscheduler + +import ( + "fmt" + "sort" + "strings" + "time" + + kvs "github.com/ligato/vpp-agent/plugins/kvscheduler/api" + "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/graph" + "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/utils" +) + +// GetTransactionHistory returns history of transactions started within the specified +// time window, or the full recorded history if the timestamps are zero values. 
+func (s *Scheduler) GetTransactionHistory(since, until time.Time) (history kvs.RecordedTxns) { + s.historyLock.Lock() + defer s.historyLock.Unlock() + + if !since.IsZero() && !until.IsZero() && until.Before(since) { + // invalid time window + return + } + + lastBefore := -1 + firstAfter := len(s.txnHistory) + + if !since.IsZero() { + for ; lastBefore+1 < len(s.txnHistory); lastBefore++ { + if !s.txnHistory[lastBefore+1].Start.Before(since) { + break + } + } + } + + if !until.IsZero() { + for ; firstAfter > 0; firstAfter-- { + if !s.txnHistory[firstAfter-1].Start.After(until) { + break + } + } + } + + return s.txnHistory[lastBefore+1 : firstAfter] +} + +// GetRecordedTransaction returns record of a transaction referenced by the sequence number. +func (s *Scheduler) GetRecordedTransaction(SeqNum uint64) (txn *kvs.RecordedTxn) { + s.historyLock.Lock() + defer s.historyLock.Unlock() + + for _, txn := range s.txnHistory { + if txn.SeqNum == SeqNum { + return txn + } + } + return nil +} + +// preRecordTxnOp prepares txn operation record - fills attributes that we can even +// before executing the operation. +func (s *Scheduler) preRecordTxnOp(args *applyValueArgs, node graph.Node) *kvs.RecordedTxnOp { + prevOrigin := getNodeOrigin(node) + if prevOrigin == kvs.UnknownOrigin { + // new value + prevOrigin = args.kv.origin + } + _, prevErr := getNodeError(node) + return &kvs.RecordedTxnOp{ + Key: args.kv.key, + PrevValue: utils.RecordProtoMessage(node.GetValue()), + NewValue: utils.RecordProtoMessage(args.kv.value), + PrevState: getNodeState(node), + PrevErr: prevErr, + IsDerived: args.isDerived, + IsProperty: args.isDerived && s.registry.GetDescriptorForKey(args.kv.key) == nil, + IsRevert: args.kv.isRevert, + IsRetry: args.isRetry, + } +} + +// preRecordTransaction logs transaction arguments + plan before execution to +// persist some information in case there is a crash during execution. 
+func (s *Scheduler) preRecordTransaction(txn *transaction, planned kvs.RecordedTxnOps) *kvs.RecordedTxn { + // allocate new transaction record + record := &kvs.RecordedTxn{ + PreRecord: true, + SeqNum: txn.seqNum, + TxnType: txn.txnType, + Planned: planned, + } + if txn.txnType == kvs.NBTransaction { + record.ResyncType = txn.nb.resyncType + record.Description = txn.nb.description + } + if txn.txnType == kvs.RetryFailedOps { + record.RetryForTxn = txn.retry.txnSeqNum + record.RetryAttempt = txn.retry.attempt + } + + // build header for the log + var downstreamResync bool + txnInfo := fmt.Sprintf("%s", txn.txnType.String()) + if txn.txnType == kvs.NBTransaction && txn.nb.resyncType != kvs.NotResync { + ResyncType := "Full Resync" + if txn.nb.resyncType == kvs.DownstreamResync { + ResyncType = "SB Sync" + downstreamResync = true + } + if txn.nb.resyncType == kvs.UpstreamResync { + ResyncType = "NB Sync" + } + txnInfo = fmt.Sprintf("%s (%s)", txn.txnType.String(), ResyncType) + } + + // record values sorted alphabetically by keys + if !downstreamResync { + for _, kv := range txn.values { + record.Values = append(record.Values, kvs.RecordedKVPair{ + Key: kv.key, + Value: utils.RecordProtoMessage(kv.value), + Origin: kv.origin, + }) + } + sort.Slice(record.Values, func(i, j int) bool { + return record.Values[i].Key < record.Values[j].Key + }) + } + + // send to the log + var buf strings.Builder + buf.WriteString("+======================================================================================================================+\n") + msg := fmt.Sprintf("Transaction #%d", record.SeqNum) + n := 115 - len(msg) + buf.WriteString(fmt.Sprintf("| %s %"+fmt.Sprint(n)+"s |\n", msg, txnInfo)) + buf.WriteString("+======================================================================================================================+\n") + buf.WriteString(record.StringWithOpts(false, false, 2)) + fmt.Println(buf.String()) + + return record +} + +// recordTransaction records the 
finalized transaction (log + in-memory). +func (s *Scheduler) recordTransaction(txnRecord *kvs.RecordedTxn, executed kvs.RecordedTxnOps, start, stop time.Time) { + txnRecord.PreRecord = false + txnRecord.Start = start + txnRecord.Stop = stop + txnRecord.Executed = executed + + var buf strings.Builder + buf.WriteString("o----------------------------------------------------------------------------------------------------------------------o\n") + buf.WriteString(txnRecord.StringWithOpts(true, false, 2)) + buf.WriteString("x----------------------------------------------------------------------------------------------------------------------x\n") + msg := fmt.Sprintf("#%d", txnRecord.SeqNum) + msg2 := fmt.Sprintf("took %v", stop.Sub(start).Round(time.Millisecond)) + buf.WriteString(fmt.Sprintf("x %s %"+fmt.Sprint(115-len(msg))+"s x\n", msg, msg2)) + buf.WriteString("x----------------------------------------------------------------------------------------------------------------------x\n") + fmt.Println(buf.String()) + + // add transaction record into the history + if s.config.RecordTransactionHistory { + s.historyLock.Lock() + s.txnHistory = append(s.txnHistory, txnRecord) + s.historyLock.Unlock() + } +} + +// transactionHistoryTrimming runs in a separate go routine and periodically removes +// transaction records too old to keep (by the configuration). 
+func (s *Scheduler) transactionHistoryTrimming() { + defer s.wg.Done() + + for { + select { + case <-s.ctx.Done(): + return + case <-time.After(txnHistoryTrimmingPeriod): + s.historyLock.Lock() + now := time.Now() + ageLimit := time.Duration(s.config.TransactionHistoryAgeLimit) * time.Minute + initPeriod := time.Duration(s.config.PermanentlyRecordedInitPeriod) * time.Minute + var i, j int // i = first after init period, j = first after init period to keep + for i = 0; i < len(s.txnHistory); i++ { + sinceStart := s.txnHistory[i].Start.Sub(s.startTime) + if sinceStart > initPeriod { + break + } + } + for j = i; j < len(s.txnHistory); j++ { + elapsed := now.Sub(s.txnHistory[j].Stop) + if elapsed <= ageLimit { + break + } + } + if j > i { + copy(s.txnHistory[i:], s.txnHistory[j:]) + newLen := len(s.txnHistory) - (j - i) + for k := newLen; k < len(s.txnHistory); k++ { + s.txnHistory[k] = nil + } + s.txnHistory = s.txnHistory[:newLen] + } + s.historyLock.Unlock() + } + } +} diff --git a/plugins/kvscheduler/utils_for_test.go b/plugins/kvscheduler/utils_for_test.go new file mode 100644 index 0000000000..3a6c4dfec9 --- /dev/null +++ b/plugins/kvscheduler/utils_for_test.go @@ -0,0 +1,132 @@ +// Copyright (c) 2018 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kvscheduler + +import ( + "strings" + + "github.com/gogo/protobuf/proto" + . "github.com/onsi/gomega" + + . 
"github.com/ligato/vpp-agent/plugins/kvscheduler/api" + "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/test" +) + +const ( + descriptor1Name = "descriptor1" + descriptor2Name = "descriptor2" + descriptor3Name = "descriptor3" + + prefixA = "/prefixA/" + prefixB = "/prefixB/" + prefixC = "/prefixC/" + + baseValue1 = "base-value1" + baseValue2 = "base-value2" + baseValue3 = "base-value3" + baseValue4 = "base-value4" +) + +func prefixSelector(prefix string) func(key string) bool { + return func(key string) bool { + return strings.HasPrefix(key, prefix) + } +} + +func checkValues(received, expected []KeyValuePair) { + Expect(len(received)).To(Equal(len(expected))) + for _, kv := range expected { + found := false + for _, kv2 := range received { + if kv2.Key == kv.Key { + Expect(proto.Equal(kv2.Value, kv.Value)).To(BeTrue()) + found = true + } + } + Expect(found).To(BeTrue()) + } +} + +func checkRecordedValues(recorded, expected []RecordedKVPair) { + Expect(len(recorded)).To(Equal(len(expected))) + for _, kv := range expected { + found := false + for _, kv2 := range recorded { + if kv2.Key == kv.Key { + found = true + Expect(proto.Equal(kv2.Value, kv.Value)).To(BeTrue()) + Expect(kv2.Origin).To(Equal(kv.Origin)) + } + } + Expect(found).To(BeTrue()) + } +} + +func checkTxnOperation(recorded, expected *RecordedTxnOp) { + Expect(recorded.Operation).To(Equal(expected.Operation)) + Expect(recorded.Key).To(Equal(expected.Key)) + Expect(proto.Equal(recorded.PrevValue, expected.PrevValue)).To(BeTrue()) + Expect(proto.Equal(recorded.NewValue, expected.NewValue)).To(BeTrue()) + Expect(recorded.PrevState).To(Equal(expected.PrevState)) + Expect(recorded.NewState).To(Equal(expected.NewState)) + if expected.PrevErr == nil { + Expect(recorded.PrevErr).To(BeNil()) + } else { + Expect(recorded.PrevErr).ToNot(BeNil()) + Expect(recorded.PrevErr.Error()).To(BeEquivalentTo(expected.PrevErr.Error())) + } + if expected.NewErr == nil { + Expect(recorded.NewErr).To(BeNil()) + } else { 
+ Expect(recorded.NewErr).ToNot(BeNil()) + Expect(recorded.NewErr.Error()).To(BeEquivalentTo(expected.NewErr.Error())) + } + Expect(recorded.NOOP).To(Equal(expected.NOOP)) + Expect(recorded.IsDerived).To(Equal(expected.IsDerived)) + Expect(recorded.IsProperty).To(Equal(expected.IsProperty)) + Expect(recorded.IsRevert).To(Equal(expected.IsRevert)) + Expect(recorded.IsRetry).To(Equal(expected.IsRetry)) + Expect(recorded.IsRecreate).To(Equal(expected.IsRecreate)) +} + +func checkTxnOperations(recorded, expected RecordedTxnOps) { + Expect(recorded).To(HaveLen(len(expected))) + for idx, recordedOp := range recorded { + checkTxnOperation(recordedOp, expected[idx]) + } +} + +func checkValuesForCorrelation(received, expected []KVWithMetadata) { + Expect(received).To(HaveLen(len(expected))) + for _, kv := range expected { + found := false + for _, kv2 := range received { + if kv2.Key == kv.Key { + found = true + Expect(kv2.Origin).To(BeEquivalentTo(kv.Origin)) + Expect(proto.Equal(kv2.Value, kv.Value)).To(BeTrue()) + if kv.Metadata == nil { + Expect(kv2.Metadata).To(BeNil()) + } else { + Expect(kv2.Metadata).ToNot(BeNil()) + expIntMeta := kv.Metadata.(*test.OnlyInteger) + receivedMeta := kv2.Metadata.(*test.OnlyInteger) + Expect(receivedMeta.GetInteger()).To(BeEquivalentTo(expIntMeta.GetInteger())) + } + } + } + Expect(found).To(BeTrue()) + } +} diff --git a/plugins/kvscheduler/value_flags.go b/plugins/kvscheduler/value_flags.go new file mode 100644 index 0000000000..bd8c280592 --- /dev/null +++ b/plugins/kvscheduler/value_flags.go @@ -0,0 +1,168 @@ +// Copyright (c) 2019 Cisco and/or its affiliates. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kvscheduler + +import ( + "fmt" + + "github.com/gogo/protobuf/proto" + + kvs "github.com/ligato/vpp-agent/plugins/kvscheduler/api" +) + +const ( + ////// updated by transactions: + + // LastUpdateFlagName is the name of the LastUpdate flag. + LastUpdateFlagName = "last-update" + + // ErrorFlagName is the name of the Error flag. + ErrorFlagName = "error" + + ////// updated by transactions + refresh: + + // ValueStateFlagName is the name of the Value-State flag. + ValueStateFlagName = "value-state" + + // UnavailValueFlagName is the name of the Unavailable-Value flag. + UnavailValueFlagName = "unavailable" + + // DescriptorFlagName is the name of the Descriptor flag. + DescriptorFlagName = "descriptor" + + // DerivedFlagName is the name of the Derived flag. + DerivedFlagName = "derived" +) + +/****************************** LastUpdate Flag *******************************/ + +// LastUpdateFlag is set to remember the last transaction which has +// changed/updated the value. +// Not set to values just discovered by refresh (state = DISCOVERED). +type LastUpdateFlag struct { + txnSeqNum uint64 + txnOp kvs.TxnOperation + value proto.Message + + // updated only when the value content is being modified + revert bool + + // set by NB txn, inherited by Retry and SB notifications + retryEnabled bool + retryArgs *kvs.RetryOpt +} + +// GetName return name of the LastUpdate flag. +func (flag *LastUpdateFlag) GetName() string { + return LastUpdateFlagName +} + +// GetValue describes the last update (txn-seq number only). 
+func (flag *LastUpdateFlag) GetValue() string { + return fmt.Sprintf("TXN-%d", flag.txnSeqNum) +} + +/******************************* Error Flag ***********************************/ + +// ErrorFlag is used to store error returned from the last operation, including +// validation errors. +type ErrorFlag struct { + err error + retriable bool +} + +// GetName return name of the Origin flag. +func (flag *ErrorFlag) GetName() string { + return ErrorFlagName +} + +// GetValue returns the error as string. +func (flag *ErrorFlag) GetValue() string { + if flag.err == nil { + return "" + } + return flag.err.Error() +} + +/***************************** Value State Flag *******************************/ + +// ValueStateFlag stores current state of the value. +// Assigned to every value. +type ValueStateFlag struct { + valueState kvs.ValueState +} + +// GetName returns name of the ValueState flag. +func (flag *ValueStateFlag) GetName() string { + return ValueStateFlagName +} + +// GetValue returns the string representation of the state. +func (flag *ValueStateFlag) GetValue() string { + return flag.valueState.String() +} + +/************************** Unavailable Value Flag ****************************/ + +// UnavailValueFlag is used to mark NB values which should not be considered +// when resolving dependencies of other values (for various possible reasons). +type UnavailValueFlag struct { +} + +// GetName return name of the UnavailValue flag. +func (flag *UnavailValueFlag) GetName() string { + return UnavailValueFlagName +} + +// GetValue return empty string (presence of the flag is the only information). +func (flag *UnavailValueFlag) GetValue() string { + return "" +} + +/*************************** Descriptor Value Flag ****************************/ + +// DescriptorFlag is used to lookup values by their descriptor. +// Not assigned to properties and UNIMPLEMENTED values. 
+type DescriptorFlag struct { + descriptorName string +} + +// GetName return name of the Descriptor flag. +func (flag *DescriptorFlag) GetName() string { + return DescriptorFlagName +} + +// GetValue returns the descriptor name. +func (flag *DescriptorFlag) GetValue() string { + return flag.descriptorName +} + +/**************************** Derived Value Flag ******************************/ + +// DerivedFlag is used to mark derived values. +type DerivedFlag struct { + baseKey string +} + +// GetName return name of the Derived flag. +func (flag *DerivedFlag) GetName() string { + return DerivedFlagName +} + +// GetValue returns the key of the base value from which the given derived value +// is derived from (directly or transitively). +func (flag *DerivedFlag) GetValue() string { + return flag.baseKey +} diff --git a/plugins/linux/README.md b/plugins/linux/README.md deleted file mode 100644 index 864d3477e7..0000000000 --- a/plugins/linux/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Linux Plugin - -The `linuxplugin` is a core Agent Plugin for the management of a subset of the Linux -network configuration. Configuration of VETH (virtual ethernet pair) interfaces, linux routes and ARP entries -is currently supported. Detailed description can be found in particular READMEs: - - [ifplugin](ifplugin) - - [l3plugin](l3plugin) - - [nsplugin](nsplugin) - -In general, the northbound configuration is translated to a sequence of Netlink API -calls (using `github.com/vishvananda/netlink` and `github.com/vishvananda/netns` libraries). \ No newline at end of file diff --git a/plugins/linux/data_change.go b/plugins/linux/data_change.go deleted file mode 100644 index c4e051e0a3..0000000000 --- a/plugins/linux/data_change.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package linux - -import ( - "strings" - - "github.com/ligato/cn-infra/datasync" - "github.com/ligato/vpp-agent/plugins/linux/model/interfaces" - "github.com/ligato/vpp-agent/plugins/linux/model/l3" -) - -func (plugin *Plugin) changePropagateRequest(dataChng datasync.ChangeEvent) error { - var err error - key := dataChng.GetKey() - - plugin.Log.WithField("revision", dataChng.GetRevision()). - Debugf("Processing change for key: %q", key) - - if strings.HasPrefix(key, interfaces.InterfaceKeyPrefix()) { - var value, prevValue interfaces.LinuxInterfaces_Interface - err = dataChng.GetValue(&value) - if err != nil { - return err - } - var diff bool - diff, err = dataChng.GetPrevValue(&prevValue) - if err == nil { - err = plugin.dataChangeIface(diff, &value, &prevValue, dataChng.GetChangeType()) - } - } else if strings.HasPrefix(key, l3.StaticArpKeyPrefix()) { - var value, prevValue l3.LinuxStaticArpEntries_ArpEntry - err = dataChng.GetValue(&value) - if err != nil { - return err - } - var diff bool - diff, err = dataChng.GetPrevValue(&prevValue) - if err == nil { - err = plugin.dataChangeArp(diff, &value, &prevValue, dataChng.GetChangeType()) - } - } else if strings.HasPrefix(key, l3.StaticRouteKeyPrefix()) { - var value, prevValue l3.LinuxStaticRoutes_Route - err = dataChng.GetValue(&value) - if err != nil { - return err - } - var diff bool - diff, err = dataChng.GetPrevValue(&prevValue) - if err == nil { - err = plugin.dataChangeRoute(diff, &value, &prevValue, dataChng.GetChangeType()) - } - } else { - plugin.Log.Warn("ignoring change ", dataChng) 
//NOT ERROR! - } - return err -} - -// DataChangeIface propagates data change to the ifConfigurator. -func (plugin *Plugin) dataChangeIface(diff bool, value *interfaces.LinuxInterfaces_Interface, prevValue *interfaces.LinuxInterfaces_Interface, - changeType datasync.Op) error { - plugin.Log.Debug("dataChangeIface ", diff, " ", changeType, " ", value, " ", prevValue) - - var err error - if datasync.Delete == changeType { - err = plugin.ifConfigurator.DeleteLinuxInterface(prevValue) - } else if diff { - err = plugin.ifConfigurator.ModifyLinuxInterface(value, prevValue) - } else { - err = plugin.ifConfigurator.ConfigureLinuxInterface(value) - } - return plugin.ifConfigurator.LogError(err) -} - -// DataChangeArp propagates data change to the arpConfigurator -func (plugin *Plugin) dataChangeArp(diff bool, value *l3.LinuxStaticArpEntries_ArpEntry, prevValue *l3.LinuxStaticArpEntries_ArpEntry, - changeType datasync.Op) error { - plugin.Log.Debug("dataChangeArp ", diff, " ", changeType, " ", value, " ", prevValue) - - var err error - if datasync.Delete == changeType { - err = plugin.arpConfigurator.DeleteLinuxStaticArpEntry(prevValue) - } else if diff { - err = plugin.arpConfigurator.ModifyLinuxStaticArpEntry(value, prevValue) - } else { - err = plugin.arpConfigurator.ConfigureLinuxStaticArpEntry(value) - } - return plugin.arpConfigurator.LogError(err) -} - -// DataChangeRoute propagates data change to the routeConfigurator -func (plugin *Plugin) dataChangeRoute(diff bool, value *l3.LinuxStaticRoutes_Route, prevValue *l3.LinuxStaticRoutes_Route, - changeType datasync.Op) error { - plugin.Log.Debug("dataChangeRoute ", diff, " ", changeType, " ", value, " ", prevValue) - - var err error - if datasync.Delete == changeType { - err = plugin.routeConfigurator.DeleteLinuxStaticRoute(prevValue) - } else if diff { - err = plugin.routeConfigurator.ModifyLinuxStaticRoute(value, prevValue) - } else { - err = plugin.routeConfigurator.ConfigureLinuxStaticRoute(value) - } - return 
plugin.routeConfigurator.LogError(err) -} diff --git a/plugins/linux/data_resync.go b/plugins/linux/data_resync.go deleted file mode 100644 index b9503400bd..0000000000 --- a/plugins/linux/data_resync.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright (c) 2017 Cisco and/or its affiliates. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at: -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package linux - -import ( - "fmt" - "strings" - - "github.com/ligato/cn-infra/datasync" - "github.com/ligato/vpp-agent/plugins/linux/model/interfaces" - "github.com/ligato/vpp-agent/plugins/linux/model/l3" -) - -// DataResyncReq is used to transfer expected configuration of the Linux network stack to the plugins. -type DataResyncReq struct { - // Interfaces is a list af all interfaces that are expected to be in Linux after RESYNC. - Interfaces []*interfaces.LinuxInterfaces_Interface - // ARPs is a list af all arp entries that are expected to be in Linux after RESYNC. - ARPs []*l3.LinuxStaticArpEntries_ArpEntry - // Routes is a list af all routes that are expected to be in Linux after RESYNC. - Routes []*l3.LinuxStaticRoutes_Route -} - -// NewDataResyncReq is a constructor of object requirements which are expected to be re-synced. -func NewDataResyncReq() *DataResyncReq { - return &DataResyncReq{ - // Interfaces is a list af all interfaces that are expected to be in Linux after RESYNC. 
- Interfaces: []*interfaces.LinuxInterfaces_Interface{}, - // ARPs is a list af all arp entries that are expected to be in Linux after RESYNC. - ARPs: []*l3.LinuxStaticArpEntries_ArpEntry{}, - // Routes is a list af all routes that are expected to be in Linux after RESYNC. - Routes: []*l3.LinuxStaticRoutes_Route{}, - } -} - -// DataResync delegates resync request linuxplugin configurators. -func (plugin *Plugin) resyncPropageRequest(req *DataResyncReq) error { - plugin.Log.Info("resync the Linux Configuration") - - // store all resync errors - var resyncErrs []error - - if err := plugin.ifConfigurator.Resync(req.Interfaces); err != nil { - resyncErrs = append(resyncErrs, plugin.ifConfigurator.LogError(err)) - } - - if err := plugin.arpConfigurator.Resync(req.ARPs); err != nil { - resyncErrs = append(resyncErrs, plugin.arpConfigurator.LogError(err)) - } - - if err := plugin.routeConfigurator.Resync(req.Routes); err != nil { - resyncErrs = append(resyncErrs, plugin.routeConfigurator.LogError(err)) - } - - // log errors if any - if len(resyncErrs) == 0 { - return nil - } - for _, err := range resyncErrs { - plugin.Log.Error(err) - } - - return fmt.Errorf("%v errors occured during linuxplugin resync", len(resyncErrs)) -} - -func (plugin *Plugin) resyncParseEvent(resyncEv datasync.ResyncEvent) *DataResyncReq { - req := NewDataResyncReq() - for key, resyncData := range resyncEv.GetValues() { - plugin.Log.Debug("Received RESYNC key ", key) - if strings.HasPrefix(key, interfaces.InterfaceKeyPrefix()) { - plugin.resyncAppendInterface(resyncData, req) - } else if strings.HasPrefix(key, l3.StaticArpKeyPrefix()) { - plugin.resyncAppendARPs(resyncData, req) - } else if strings.HasPrefix(key, l3.StaticRouteKeyPrefix()) { - plugin.resyncAppendRoutes(resyncData, req) - } else { - plugin.Log.Warn("ignoring ", resyncEv) - } - } - return req -} - -func (plugin *Plugin) resyncAppendInterface(iterator datasync.KeyValIterator, req *DataResyncReq) { - num := 0 - for { - if interfaceData, 
stop := iterator.GetNext(); stop { - break - } else { - value := &interfaces.LinuxInterfaces_Interface{} - if err := interfaceData.GetValue(value); err != nil { - plugin.Log.Errorf("error getting value of Linux interface: %v", err) - continue - } - req.Interfaces = append(req.Interfaces, value) - num++ - - plugin.Log.WithField("revision", interfaceData.GetRevision()). - Debugf("Processing resync for key: %q", interfaceData.GetKey()) - } - } - - plugin.Log.Debugf("Received RESYNC Linux interface values %d", num) -} - -func (plugin *Plugin) resyncAppendARPs(iterator datasync.KeyValIterator, req *DataResyncReq) { - num := 0 - for { - if arpData, stop := iterator.GetNext(); stop { - break - } else { - value := &l3.LinuxStaticArpEntries_ArpEntry{} - if err := arpData.GetValue(value); err != nil { - plugin.Log.Errorf("error getting value of Linux ARP: %v", err) - continue - } - req.ARPs = append(req.ARPs, value) - num++ - - plugin.Log.WithField("revision", arpData.GetRevision()). - Debugf("Processing resync for key: %q", arpData.GetKey()) - } - } - - plugin.Log.Debugf("Received RESYNC Linux ARP entry values %d", num) -} - -func (plugin *Plugin) resyncAppendRoutes(iterator datasync.KeyValIterator, req *DataResyncReq) { - num := 0 - for { - if routeData, stop := iterator.GetNext(); stop { - break - } else { - value := &l3.LinuxStaticRoutes_Route{} - if err := routeData.GetValue(value); err != nil { - plugin.Log.Errorf("error getting value of Linux ARP: %v", err) - continue - } - req.Routes = append(req.Routes, value) - num++ - - plugin.Log.WithField("revision", routeData.GetRevision()). - Debugf("Processing resync for key: %q", routeData.GetKey()) - } - } - - plugin.Log.Debugf("Received RESYNC Linux Route values %d", num) -} - -func (plugin *Plugin) subscribeWatcher() (err error) { - plugin.Log.Debug("subscribeWatcher begin") - plugin.ifIndexes.WatchNameToIdx(plugin.String(), plugin.ifIndexesWatchChan) - plugin.watchDataReg, err = plugin.Watcher. 
- Watch("linuxplugin", plugin.changeChan, plugin.resyncChan, - interfaces.InterfaceKeyPrefix(), - l3.StaticArpKeyPrefix(), - l3.StaticRouteKeyPrefix()) - if err != nil { - return err - } - - plugin.Log.Debug("data watcher watch finished") - - return nil -} diff --git a/plugins/linux/ifplugin/README.md b/plugins/linux/ifplugin/README.md deleted file mode 100644 index 230b606cec..0000000000 --- a/plugins/linux/ifplugin/README.md +++ /dev/null @@ -1,91 +0,0 @@ -# Linux interface Plugin - - -The plugin watches the northbound configuration of Linux network interfaces, -which is modelled by [interfaces proto file](../model/interfaces/interfaces.proto) -and stored in ETCD under the following key: - -``` -/vnf-agent//linux/config/v1/interface/ -``` - -Linux interface is uniquely identified in the northbound configuration by its name. The same string is also used -in the Linux network stack to label the interface and it has to be unique across all network namespaces. -It is therefore recommended for the northbound applications to prefix interface names with namespace -identifiers in case there is a chance of collisions across namespaces. - -The re-synchronization procedure is a bit simpler than that from the VPP plugins. Linux plugin will not touch existing -Linux interfaces which are not part of the northbound configuration. Managed interfaces which are already present when -the agent starts will be re-created from the scratch. - -The current version does not yet support interface notifications and statistics. - -Linux plugin is also an optional dependency for `ifplugin` from VPP plugins. If `linuxplugin` is loaded, `ifplugin` will -be watching for newly created and removed Linux interfaces. This is useful because `AFPACKET` interface will not function -properly if it gets created when the target host interface is not available. `ifplugin` will ensure that `AFPACKET` interfaces -are always created *after* the associated host interfaces. 
If the host interface gets removed so will the associated -afpacket. `ifplugin` will keep the afpacket configuration in the cache and re-create it once the host interface is available again. -To enable this feature, `linuxplugin` must be loaded *before* VPP default plugins. - -*VETH* - -Virtual Ethernet interfaces come in pairs, and they are connected like a tube — whatever comes in one VETH -interface will come out the other peer VETH interface. As a result, you can use VETH interfaces to connect -a network namespace to the outside world via the “default” or “global” namespace where physical interfaces -exist. - -VETH pair is configured through the northbound API as two separate interfaces, both of the type `LinuxInterfaces_VETH` -and pointing to each other through the `veth.peer_if_name` reference. -Agent will physically create the pair only after both sides are configured and the target namespaces are available. -Similarly, to maintain the symmetry, VETH pair gets removed from the Linux network stack as soon as any of the sides is -un-configured or a target namespace disappears (e.g. the destination microservice has terminated). The agent, however, -will not forget a partial configuration and once all the requirements are met again the VETH will get automatically recreated. - -*VETH usage example* - -Consider a scenario in which we need to connect VPP running in the host (i.e. "default") -namespace with a VPP running inside a Docker container. This can be achieved by both memif interface -as well as through a combination of a Linux VETH pair with AF packet interfaces from VPP (confusingly called `host` interface in VPP). -First you would supply northbound configurations for both sides of the VETH pair. That is two interfaces -of type `LinuxInterfaces_VETH` with one end in the default namespace (`namespace.Name=""`), making it visible for the host VPP, -and the other end inserted into the namespace of the container with the other VPP. 
This can be achieved by either directly referencing -the PID of the container (`namespace.type=PID_REF_NS; namespace.pid=`), or, if the container is actually a microservice, -a more convenient way is to reference the namespace by the microservice label -(`namespace.type=MICROSERVICE_REF_NS; namespace.microservice=