diff --git a/.github/coverage/cpp.develop.coverage_report.txt b/.github/coverage/cpp.develop.coverage_report.txt index 0d3fe874..b5a45de9 100644 --- a/.github/coverage/cpp.develop.coverage_report.txt +++ b/.github/coverage/cpp.develop.coverage_report.txt @@ -14,7 +14,7 @@ src/CommunicationManager.cc 46 0 0% 42-43,46-47,49 src/DescriptorsCommand.cc 735 455 61% 95,101,114,167-169,173,187-191,243-254,258-259,263-268,278,286-289,303-305,309,324-333,335,355-358,365-369,402,405-408,414,416-417,420-422,424-425,427-428,431-432,434-436,459-461,465-469,476,482,484,486-487,490,524,563-565,575,585-590,596,598,600-601,603-604,644,672,692,696,698,701,703-706,709-714,716-720,722,725,727,729,732,735-736,738,740,742-747,750,752-754,757-758,760,762,764-765,767,769,771-773,775,777-782,787-788,791,793,795,820-822,841-842,845-846,859-862,877,879,882-883,886,916-918,935-937,965-967,973-974,1004-1007,1023-1024,1026-1027,1029,1031-1032,1034,1049,1052,1060,1065,1086-1088,1092-1095,1104,1106,1108-1112,1115-1116,1119-1122,1124,1126-1133,1158-1162,1167-1171,1181-1184,1198-1201,1213-1214,1255-1258,1288,1291,1311-1315,1331-1332 src/DescriptorsManager.cc 25 23 92% 49-50 src/ExceptionsCommand.cc 6 0 0% 35-40 -src/ImageCommand.cc 309 139 45% 55,58-59,62-69,71-76,78,80-81,83,90,93,95-96,98-99,101-102,104-105,108,135,146-147,158-159,161,166-169,179-180,182,187-190,197-199,205-213,215-217,230-231,241-251,253-254,256-257,262,270,281,288,292,295,297,299,321,323-324,327-332,334,336,358-360,363-365,369-372,378,380,387-390,404,411,417-420,424-425,436-439,442-447,452-454,465-468,473-477,482-483,485-486,488-492,495,497-501,504-507,510-511,514,516,521 +src/ImageCommand.cc 323 157 48% 65,69,73,75,77-79,81,83-86,101-102,107,112,114-115,123,125,132,135,137-138,140-141,143-144,146-147,150,177,188-189,200-201,203,208-211,221-222,224,229-232,247-255,257-259,272-273,283-293,295-296,298-299,304,312,323,330,334,337,339,341,363,365-366,369-374,376,378,400-402,405-407,411-414,420,422,429-432,446,453,459-462,466-467,478-481,484-489,494-496,507-510,515-519,524-525,527-528,530-534,537,539-543,546-549,552-553,556,558,563 src/ImageLoop.cc 253 232 91% 63,130,182-185,215,221,265,285,288,297-298,300,307-308,322-323,330,334,338 src/Neo4jBaseCommands.cc 36 23 63% 17,21,23-24,26,33,53,57-59,66-68 src/Neo4JDescriptorCommands.cc 551 405 73% 90,96-99,101-102,104-105,107,110-111,113,116-117,135-136,138-139,141,143-144,146,151,154-155,158-161,163-164,168,170-173,175-178,231,234,313-324,329-331,355-357,361-362,376-385,387,404-408,510,513-516,539-541,545-550,556-557,614-616,628-633,693,738-740,745-746,749-750,765-768,815-817,847-849,856-857,876-879,904-906,910-913,975-976,1028-1029,1032-1033,1054-1058 @@ -44,10 +44,10 @@ src/vcl/TDBDescriptorSet.cc 51 46 90% 127,148,150,15 src/vcl/TDBImage.cc 471 370 78% 164,186,209,255-257,268-271,276,300-302,305-306,308,325,341-344,346-350,352-354,364,366,386,406-411,414-417,421-424,428-431,433-435,437-439,523-524,551,553-556,558-559,561-562,564-567,578,580,583,585,644,664-668,750-754,756-758,760,762-767,770-772,785 src/vcl/TDBObject.cc 326 271 83% 112-114,116,118,120,219,221-222,258,321-322,386-388,398,432-433,462-463,493-494,496,500-501,503,621-632,638-651,661-663 src/vcl/TDBSparseDescriptorSet.cc 245 230 93% 163,190-191,230-232,252,294-296,308-309,380-381,441 -src/vcl/utils.cc 73 65 89% 55-56,66,72,80,92,94,122 -src/vcl/Video.cc 776 584 75% 
67,128,134,139,161,167-168,190,192-196,199-202,219-224,232,234-240,242-246,249-251,255-256,261-262,264-265,267,269,272-273,275-276,279-280,297,315-328,345,347,349-351,380,413-415,457,466,489,495,505,528,649,651,662,673,679-682,691,710,713,715-716,719-720,749-750,771-773,776-777,779-780,783-784,803-806,834-836,839,842-844,859-862,870,872,874-879,891-894,928,950,965,988,992,1022,1024-1026,1028-1029,1048-1049,1070,1107,1115,1132,1147,1151,1158,1162,1181,1184,1189-1190,1193-1194,1196,1207,1211,1216,1234-1237,1290,1296-1298,1309,1311,1315,1319,1325-1337 -src/vdms.cc 118 0 0% 40,42-43,45-48,50-51,53-56,58-59,62-64,66-67,69,71,74-76,79,84,86-89,91,93-99,101-104,106-107,109-112,114-116,118-121,123-125,127-130,132-133,135-138,140-141,143-144,148-150,153-156,159,162-164,167,169,172,175-181,188,190,193-194,198,201,207-208,211,217-220,223-232,235 -src/VDMSConfig.cc 225 210 93% 109-110,137-139,214,216,219-220,226-227,231-232,343-344 +src/vcl/utils.cc 71 63 88% 54-55,65,71,79,91,93,121 +src/vcl/Video.cc 746 565 75% 66,127,133,138,160,166-167,189,191-195,198-201,218-223,231,233-239,241-245,248-250,254-255,260-261,263-264,266,268,271-272,274-275,278-279,296,314-327,344,346,348-350,379,412-414,456,465,488,494,517,637,639,650,656,660-661,670,689,692,694-695,698-699,728-729,750-752,755-756,758-759,762-763,782-785,813-815,818,821-823,838-841,849,851,853-858,870-873,907,929,944,967,971,1000-1001,1020-1021,1042,1079,1087,1104,1119,1123,1130,1134,1151,1154,1159-1160,1163-1164,1166,1177,1181,1186,1204-1207,1260,1264,1266-1267,1269-1270,1272,1274,1277-1278,1281-1282,1288-1292 +src/vdms.cc 108 0 0% 39,41-42,44-47,49-50,52-55,57-58,61-63,65-66,68,70,73-75,78,82,84-87,89,91-97,99-102,104-105,107-110,112-114,116-119,121-122,124-127,129-130,132-135,137-138,140-141,145-147,150-153,156,158-160,163,165,168,171-177,183,185,188-189,193,196,202-203,206,212-215,218,220 +src/VDMSConfig.cc 226 213 94% 131-133,209,211,214-215,221-222,226-227,338-339 src/VideoCommand.cc 474 117 24% 50,53-54,56-58,60,62,65-66,68-69,72,74-76,78-80,82,84-87,89-90,92-93,95,97-99,102,109,111,116,121-124,130,132,158-161,167-168,170,181,184,201,213,217-220,227-229,231-233,239,241-247,249-250,253-255,257-259,261-262,264,266-278,280-282,284-285,296,300,325,329,331,333,335,337,340-341,343,346,350,352,357-358,380-381,383-384,387-392,394,396,398-399,405,407,429-431,436,442-445,449-454,456-463,467-473,475,480-485,488,490-491,494-496,504,509,527-532,535-539,555,558,560-562,565-567,569-570,572-576,579-580,583-585,587,589-591,594-597,601-606,611-612,614-615,617-621,624-626,628,630-632,634-637,640-641,644,646,651,664,666-673,677,680,683,688-689,691-695,698-699,701,703,705,708,712,714,716-719,721-723,726,728,730,732-733,735-736,740,745,748-749,751-753,755,757,759-761,763-764,767-769,773-776,780-786,790-794,798,801,803,805,807,809-813,817-821,824-825,827-830,833-836,841-842,846-851,855-856,859-860 src/VideoLoop.cc 250 217 86% 33,81,98-101,103-109,180,188,197,201,207,211,217,220,290,312,315,320-321,324-325,327,334-335,354,370 utils/src/comm/ConnClient.cc 69 57 82% 49,55,59-60,98,103,108,114,120,127,130,149 diff --git a/.github/requirements.txt b/.github/requirements.txt index 663ab463..c36918fa 100644 --- a/.github/requirements.txt +++ b/.github/requirements.txt @@ -2,21 +2,23 @@ blinker==1.9.0 cffi==1.17.1 click==8.1.8 colorlog==6.9.0 -coverage==7.6.12 -cryptography==44.0.1 +coverage==7.7.0 +cryptography==44.0.2 Flask==3.1.0 gcovr==8.3 imutils==0.5.4 itsdangerous==2.2.0 -Jinja2==3.1.5 +Jinja2==3.1.6 lxml==5.3.1 MarkupSafe==3.0.2 numpy==1.26.4 
-opencv-python-headless==4.9.0.80 +opencv-python-headless==4.11.0.86 +pillow==11.1.0 protobuf==4.24.2 pycparser==2.22 Pygments==2.19.1 -pyzmq==26.0.3 +pyzmq==26.3.0 scipy==1.15.2 sk-video==1.1.10 -Werkzeug==3.1.3 \ No newline at end of file +Werkzeug==3.1.3 +zmq==0.0.0 diff --git a/.github/scripts/Dockerfile.checkin b/.github/scripts/Dockerfile.checkin index 0e0ae270..1416b663 100644 --- a/.github/scripts/Dockerfile.checkin +++ b/.github/scripts/Dockerfile.checkin @@ -15,6 +15,7 @@ ARG NEO4J_PASS="" ARG NEO4J_ENDPOINT="" ARG AWS_API_PORT=9000 ARG AWS_CONSOLE_PORT=9001 +ARG USE_K8S="OFF" ENV DEBIAN_FRONTEND=noninteractive ENV DEBCONF_NOWARNINGS="yes" @@ -32,6 +33,11 @@ ENV NEO4J_ENDPOINT="${NEO4J_ENDPOINT}" ENV AWS_API_PORT="${AWS_API_PORT}" ENV AWS_CONSOLE_PORT="${AWS_CONSOLE_PORT}" +# Convert ARG value to uppercase and set ENV +RUN export USE_K8S=$(echo "${USE_K8S^^}") && \ + echo "export USE_K8S=${USE_K8S}" >> /etc/profile.d/envvars.sh +ENV USE_K8S="${USE_K8S}" + ############################################################ # BUILD DEPENDENCIES FROM base AS build @@ -42,14 +48,14 @@ RUN apt-get update -y && apt-get upgrade -y && \ apt-get install -o 'Acquire::Retries=3' -y --no-install-suggests \ --no-install-recommends --fix-broken --fix-missing \ apt-transport-https automake bison build-essential bzip2 ca-certificates \ - cppzmq-dev curl ed flex g++ gcc git gnupg-agent javacc libarchive-tools libatlas-base-dev \ - libavcodec-dev libavformat-dev libavutil-dev libboost-all-dev libbz2-dev libc-ares-dev \ + cppzmq-dev curl ed flex g++ bazel-bootstrap gcc git gnupg-agent javacc libarchive-tools libatlas-base-dev \ + libavcodec-dev libavformat-dev libavutil-dev libbison-dev libboost-all-dev libbz2-dev libc-ares-dev \ libcurl4-openssl-dev libdc1394-dev libgflags-dev libgoogle-glog-dev \ libgtk-3-dev libgtk2.0-dev libhdf5-dev libjpeg-dev libjpeg62-turbo-dev libjsoncpp-dev \ libleveldb-dev liblmdb-dev liblz4-dev libncurses5-dev libopenblas-dev libopenmpi-dev \ libpng-dev librdkafka-dev libsnappy-dev libssl-dev libswscale-dev libtbb-dev libtbbmalloc2 \ - libtiff-dev libtiff5-dev libtool linux-libc-dev mpich openjdk-17-jdk-headless \ - pkg-config procps software-properties-common swig unzip uuid-dev && \ + libtiff-dev libtiff5-dev libtool libwebsockets-dev linux-libc-dev mpich openjdk-17-jdk-headless \ + pkg-config procps software-properties-common swig uncrustify unzip uuid-dev && \ apt-get --purge remove -y python3.11 && apt-get autoremove -y && \ apt-get clean && rm -rf /var/lib/apt/lists/* @@ -163,6 +169,29 @@ RUN curl -L -O https://github.com/gpakosz/peg/releases/download/${PEG_VERSION}/p ./configure --disable-werror --prefix=/opt/dist/usr && \ make clean check && make install -w --debug +RUN if [ "${USE_K8S}" = "ON" ]; then \ + git clone --depth 1 https://github.com/yaml/libyaml.git /dependencies/libyaml && \ + cd /dependencies/libyaml && \ + mkdir build && \ + cd build && \ + cmake -DCMAKE_INSTALL_PREFIX=/usr/local -DBUILD_TESTING=OFF -DBUILD_SHARED_LIBS=ON .. && \ + make ${BUILD_THREADS} && \ + make install; \ + fi + +#Kubernetes +RUN if [ "${USE_K8S}" = "ON" ]; then \ + git clone https://github.com/kubernetes-client/c.git /dependencies/k8s && \ + ls /dependencies/k8s && \ + CLIENT_REPO_ROOT=/dependencies/k8s && \ + cd ${CLIENT_REPO_ROOT}/kubernetes && \ + mkdir build && \ + cd build && \ + cmake -DCMAKE_PREFIX_PATH=/usr/local -DCMAKE_INSTALL_PREFIX=/usr/local .. 
&& \ + make ${BUILD_THREADS} && \ + make install; \ + fi + # CLEANUP RUN rm -rf /dependencies /usr/local/share/doc /usr/local/share/man && \ mkdir -p /opt/dist/usr/include/x86_64-linux-gnu && \ @@ -172,7 +201,11 @@ RUN rm -rf /dependencies /usr/local/share/doc /usr/local/share/man && \ ############################################################ # FINAL IMAGE FROM base -ARG BUILD_COVERAGE="on" +ARG BUILD_COVERAGE="ON" + +# Convert ARG value to uppercase and set ENV +RUN export BUILD_COVERAGE=$(echo "${BUILD_COVERAGE^^}") && \ + echo "export BUILD_COVERAGE=${BUILD_COVERAGE}" >> /etc/profile.d/envvars.sh ENV BUILD_COVERAGE="${BUILD_COVERAGE}" # COPY FILES @@ -190,22 +223,29 @@ COPY user_defined_operations /vdms/user_defined_operations COPY utils /vdms/utils COPY CMakeLists.txt /vdms/CMakeLists.txt COPY config-vdms.json /vdms/config-vdms.json +COPY kubernetes/global_vdms_setup_script.sh /vdms/kubernetes/global_vdms_setup_script.sh +COPY kubernetes/kubeConfig.json /vdms/kubernetes/kubeConfig.json COPY docker/override_default_config.py /vdms/override_default_config.py COPY --from=build /opt/dist / COPY --from=build /usr/local/bin/python${PYTHON_BASE} /usr/local/bin/python${PYTHON_BASE} COPY --from=build /usr/local/lib/python${PYTHON_BASE} /usr/local/lib/python${PYTHON_BASE} COPY --from=build ${VIRTUAL_ENV} ${VIRTUAL_ENV} +COPY --from=build /usr/local/include/kubernete[s] /usr/local/include/kubernetes +COPY --from=build /usr/include/libwebsocket[s] /usr/include/libwebsockets +COPY --from=build /usr/local/lib/libkubernetes.s[o] /usr/local/lib/libkubernetes.so +COPY --from=build /usr/local/lib/libyaml.s[o] /usr/local/lib/libyaml.so +COPY --from=build /usr/lib/x86_64-linux-gnu/libwebsockets.s[o] /usr/lib/x86_64-linux-gnu/libwebsockets.so ENV PATH="$VIRTUAL_ENV/bin:$PATH" # hadolint ignore=DL3008,SC2086 RUN apt-get update -y && apt-get upgrade -y && \ apt-get install -o 'Acquire::Retries=3' -y --no-install-suggests \ --no-install-recommends --fix-broken --fix-missing \ - build-essential bzip2 cppzmq-dev curl g++ gcc git javacc libarchive-tools libavcodec-dev \ + build-essential bzip2 cppzmq-dev curl g++ gcc git javacc libarchive-tools libavcodec-dev bazel-bootstrap \ libavformat-dev libcurl4-openssl-dev libdc1394-dev libgoogle-glog-dev libgtk-3-dev \ libhdf5-dev libjpeg62-turbo-dev libjsoncpp-dev libopenblas-dev libpng-dev librdkafka-dev \ - libssl-dev libswscale-dev libtbb-dev libtbbmalloc2 libtiff5-dev libzip-dev openjdk-17-jdk-headless \ - procps && \ + libssl-dev libswscale-dev libtbb-dev libtbbmalloc2 libtiff5-dev libwebsockets-dev libzip-dev \ + openjdk-17-jdk-headless procps uncrustify && \ apt-get --purge remove -y python3.11 && apt-get autoremove -y && \ apt-get clean && rm -rf /var/lib/apt/lists/* && \ echo "/usr/local/lib" >> /etc/ld.so.conf.d/all-libs.conf && ldconfig && \ @@ -215,7 +255,7 @@ RUN apt-get update -y && apt-get upgrade -y && \ # COVERAGE TESTING WORKDIR /vdms # hadolint ignore=DL3008,SC2086 -RUN if [ "${BUILD_COVERAGE}" = "on" ]; then \ +RUN if [ "${BUILD_COVERAGE}" = "ON" ]; then \ apt-get update -y ; \ apt-get install -y --no-install-suggests --no-install-recommends gdb ; \ apt-get clean ; \ @@ -240,11 +280,12 @@ RUN git submodule update --init --recursive && \ sed -i "s|#include |#include \n#include |" /vdms/src/pmgd/test/neighbortest.cc && \ sed -i "s|#include |#include \n#include |" /vdms/src/pmgd/tools/mkgraph.cc && \ mkdir -p /vdms/build && cd /vdms/build && \ - cmake -DCODE_COVERAGE="${BUILD_COVERAGE}" .. 
&& make ${BUILD_THREADS} && \ + cmake -DUSE_K8S="${USE_K8S}" -DCODE_COVERAGE="${BUILD_COVERAGE}" .. && make ${BUILD_THREADS} VERBOSE=1 && \ echo '#!/bin/bash' > /start.sh && echo 'cd /vdms/build' >> /start.sh && \ - echo 'python /vdms/override_default_config.py -i /vdms/config-vdms.json -o /vdms/build/config-vdms.json' >> /start.sh && \ + echo 'python3 /vdms/override_default_config.py -i /vdms/config-vdms.json -o /vdms/build/config-vdms.json' >> /start.sh && \ echo './vdms' >> /start.sh && chmod 755 /start.sh -ENV PYTHONPATH=/vdms/client/python:${PYTHONPATH} +ENV PYTHONPATH=/vdms/client/python + HEALTHCHECK CMD echo "This is a healthcheck test." || exit 1 CMD ["/start.sh"] diff --git a/.github/scripts/docker-compose.yml b/.github/scripts/docker-compose.yml index 24e34abc..acc20ef7 100644 --- a/.github/scripts/docker-compose.yml +++ b/.github/scripts/docker-compose.yml @@ -4,7 +4,8 @@ services: dockerfile: .github/scripts/Dockerfile.checkin context: ../.. args: - - BUILD_COVERAGE=on + - BUILD_COVERAGE=ON + - USE_K8S=ON image: vdms:${SOURCE_CONTAINER_NAME} container_name: ${SOURCE_CONTAINER_NAME} networks: [backend, frontend] diff --git a/CMakeLists.txt b/CMakeLists.txt index 6fab21e6..f9eb35fe 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -3,11 +3,16 @@ cmake_minimum_required (VERSION 3.17) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall") set(CMAKE_CXX_STANDARD 17) +option(USE_K8S "Enable kubernetes client libraries" OFF) +string(TOUPPER ${USE_K8S} USE_K8S) + +option(CLIENT "Built client library." OFF) IF(CODE_COVERAGE) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O0 -Wall -coverage -fprofile-abs-path") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O0 -Wall -coverage -fprofile-abs-path") - enable_testing() + string(TOUPPER ${CODE_COVERAGE} CODE_COVERAGE) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O0 -Wall -coverage -fprofile-abs-path") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O0 -Wall -coverage -fprofile-abs-path") + enable_testing() ENDIF() project(vdms_application) @@ -41,7 +46,6 @@ protobuf_generate( PROTOC_OUT_DIR "${PROTO_BINARY_DIR}" ) -option(CLIENT "Built client library." 
OFF) if (CLIENT) add_definitions("-D CLIENT") @@ -59,7 +63,14 @@ else() add_subdirectory(distributed) link_directories(/usr/local/lib /usr/lib/x86_64-linux-gnu/) - include_directories(/usr/include/jsoncpp utils/include/ src/pmgd/include src/pmgd/util include/ src/vcl /usr/include ${CMAKE_CURRENT_BINARY_DIR}/utils/src/protobuf) + + if(USE_K8S) + include_directories(/usr/include/jsoncpp utils/include/ src/pmgd/include src/pmgd/util include/ src/vcl /usr/include ${CMAKE_CURRENT_BINARY_DIR}/utils/src/protobuf /usr/local/include/kubernetes /usr/include/libwebsockets) + add_definitions("-D HAS_KUBERNETES_CLIENT") + else() + include_directories(/usr/include/jsoncpp utils/include/ src/pmgd/include src/pmgd/util include/ src/vcl /usr/include ${CMAKE_CURRENT_BINARY_DIR}/utils/src/protobuf) + endif() + add_library(dms SHARED src/BackendNeo4j.cc src/BoundingBoxCommand.cc @@ -90,7 +101,13 @@ else() src/ImageLoop.cc src/VideoLoop.cc ) + target_link_libraries(dms vcl pmgd pmgd-util protobuf tbb tiledb vdms-utils pthread -lcurl -lzmq -lzip ${AWSSDK_LINK_LIBRARIES} neo4j-client) + add_executable(vdms src/vdms.cc) + target_link_libraries(vdms dms vdms_protobuf vcl tiledb faiss flinng jsoncpp ${OpenCV_LIBS} ${AWSSDK_LINK_LIBRARIES}) endif () + +message("Coverage:" ${CODE_COVERAGE}) +message("USE_K8S:" ${USE_K8S}) diff --git a/INSTALL.md b/INSTALL.md index aed88745..6fefadfe 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -275,8 +275,29 @@ cd $VDMS_DEP_DIR/libomni make clean check sudo make install -w --debug ``` +
+ +#### **Kubernetes Client** +Installation is required only if you plan to use the Kubernetes environment. +Follow the [Kubernetes README](kubernetes/README.md) for how to set up the environment. +```bash +git clone --depth 1 https://github.com/yaml/libyaml.git $VDMS_DEP_DIR/libyaml +cd $VDMS_DEP_DIR/libyaml +mkdir build && cd build +cmake -DCMAKE_INSTALL_PREFIX=/usr/local/ -DBUILD_TESTING=OFF -DBUILD_SHARED_LIBS=ON .. +make +sudo make install + +CLIENT_REPO_ROOT=$VDMS_DEP_DIR/k8s +git clone https://github.com/kubernetes-client/c.git ${CLIENT_REPO_ROOT} +cd ${CLIENT_REPO_ROOT}/kubernetes +mkdir build && cd build +cmake .. +make +sudo make install +``` + ## Install VDMS This version of VDMS treats PMGD as a submodule so both libraries are compiled at one time. After entering the vdms directory, the command `git submodule update --init --recursive` will pull pmgd into the appropriate directory. Furthermore, Cmake is used to compile all directories. ```bash @@ -297,7 +318,7 @@ sed -i "s|#include ||" include/vcl/KeyFrame.h sed -i "s|#include ||" include/vcl/KeyFrame.h ``` -When compiling on a target without Optane persistent memory, use the following: +When compiling on a target without Optane persistent memory and without Kubernetes, use the following: ```bash mkdir build && cd build cmake .. @@ -313,6 +334,14 @@ make ${BUILD_THREADS} cp ../config-vdms.json . ``` +If you plan on setting up the Kubernetes environment with VDMS and remote operations (experimental), use the following: +```bash +mkdir build && cd build +cmake -DUSE_K8S=ON .. +make ${BUILD_THREADS} +cp ../config-vdms.json . +``` + ***NOTE:*** If error similar to `cannot open shared object file: No such file or directory` obtained during loading shared libraries, such as `libpmgd.so` or `libvcl.so`, add the correct directories to `LD_LIBRARY_PATH`. This may occur for non-root users. To find the correct directory, run `find` command for missing object file. An example solution for missing `libpmgd.so` and `libvcl.so` is: ```bash find / -name "libpmgd*so*" # /build/src/pmgd/src diff --git a/config-vdms.json b/config-vdms.json index f6920b62..b0170998 100755 --- a/config-vdms.json +++ b/config-vdms.json @@ -13,5 +13,6 @@ // use_endpoint: [true|false] in case of "storage_type" is equals to "aws", this key is used to specify whether it is going to use a "mocked" AWS connection "use_endpoint": false, "bucket_name": "minio-bucket", + "k8s_container": false, "more-info": "github.com/IntelLabs/vdms" } diff --git a/docker/README.md b/docker/README.md index cc0a2b0e..1a9c8201 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,26 +1,61 @@ # VDMS Docker +Here are the instructions for building and running a docker image of the VDMS Server. -Here are the instructions to build a docker image of the VDMS Server. - -The build command is: +## Build VDMS Server +Here are a few methods for building the docker image as `vdms:latest`: +* Build image as-is: + ```bash cd base Dockerfile | docker build -t vdms:latest . + ``` -If you are under a proxy, use: + or build from main VDMS directory using: + ```bash + docker build -f docker/base/Dockerfile -t vdms:latest . + ``` +* Build image under a proxy: + ```bash cd base Dockerfile | docker build -t vdms:latest --build-arg=http_proxy --build-arg=https_proxy . + ``` -To run the docker image as a container, include the --net flag. -This flag is needed as the server will be accepting connections on the default VDMS port (55555).
+ or build from main VDMS directory using: + ```bash + docker build --build-arg=http_proxy --build-arg=https_proxy -f docker/base/Dockerfile -t vdms:latest . + ``` - // Run the image interactively - docker run -it --net=host vdms:latest +* Build image with Kubernetes client support: + ```bash + cd base + Dockerfile | docker build -t vdms:latest --build-arg USE_K8S="on" . + ``` - // or + or build from main VDMS directory using: + ```bash + docker build --build-arg=http_proxy --build-arg=https_proxy --build-arg USE_K8S="on" -f docker/base/Dockerfile -t vdms:latest . + ``` +
- // Run the image and deattach it from your bash - docker run -d --net=host vdms:latest +## Run VDMS Server +To run the docker image as a container, include `--net=host` argument or specify a port to map to the default VDMS port (55555). +Here are a few methods for running the container: + +* Run the image interactively in Host Mode: + ```bash + docker run -it --net=host vdms:latest + ``` + The `--net=host` argument runs container in Host Mode which shares the container's network namespace with the host. + When using Host Mode, it can provide near bare-metal speed but be cautious of port conflicts. + +* Run the image in Host Mode but detached from your bash: + ```bash + docker run -d --net=host vdms:latest + ``` +* Run the image detached from your bash with host port 55555 mapped to VDMS default port. + ```bash + docker run -d -p 55555:55555 vdms:latest + ``` diff --git a/docker/base/Dockerfile b/docker/base/Dockerfile index b65174ed..b0397d74 100644 --- a/docker/base/Dockerfile +++ b/docker/base/Dockerfile @@ -15,6 +15,7 @@ ARG NEO4J_PASS="" ARG NEO4J_ENDPOINT="" ARG AWS_API_PORT=9000 ARG AWS_CONSOLE_PORT=9001 +ARG USE_K8S="OFF" ENV DEBIAN_FRONTEND=noninteractive ENV DEBCONF_NOWARNINGS="yes" @@ -32,6 +33,11 @@ ENV NEO4J_ENDPOINT="${NEO4J_ENDPOINT}" ENV AWS_API_PORT="${AWS_API_PORT}" ENV AWS_CONSOLE_PORT="${AWS_CONSOLE_PORT}" +# Convert ARG value to uppercase and set ENV +RUN export USE_K8S=$(echo "${USE_K8S^^}") && \ + echo "export USE_K8S=${USE_K8S}" >> /etc/profile.d/envvars.sh +ENV USE_K8S="${USE_K8S}" + ############################################################ # BUILD DEPENDENCIES FROM base AS build @@ -42,14 +48,14 @@ RUN apt-get update -y && apt-get upgrade -y && \ apt-get install -o 'Acquire::Retries=3' -y --no-install-suggests \ --no-install-recommends --fix-broken --fix-missing \ apt-transport-https automake bison build-essential bzip2 ca-certificates \ - cppzmq-dev curl ed flex g++ gcc git gnupg-agent javacc libarchive-tools libatlas-base-dev \ - libavcodec-dev libavformat-dev libavutil-dev libboost-all-dev libbz2-dev libc-ares-dev \ + cppzmq-dev curl ed flex g++ bazel-bootstrap gcc git gnupg-agent javacc libarchive-tools libatlas-base-dev \ + libavcodec-dev libavformat-dev libavutil-dev libbison-dev libboost-all-dev libbz2-dev libc-ares-dev \ libcurl4-openssl-dev libdc1394-dev libgflags-dev libgoogle-glog-dev \ libgtk-3-dev libgtk2.0-dev libhdf5-dev libjpeg-dev libjpeg62-turbo-dev libjsoncpp-dev \ libleveldb-dev liblmdb-dev liblz4-dev libncurses5-dev libopenblas-dev libopenmpi-dev \ libpng-dev librdkafka-dev libsnappy-dev libssl-dev libswscale-dev libtbb-dev libtbbmalloc2 \ - libtiff-dev libtiff5-dev libtool linux-libc-dev mpich openjdk-17-jdk-headless \ - pkg-config procps software-properties-common swig unzip uuid-dev && \ + libtiff-dev libtiff5-dev libtool libwebsockets-dev linux-libc-dev mpich openjdk-17-jdk-headless \ + pkg-config procps software-properties-common swig uncrustify unzip uuid-dev && \ apt-get --purge remove -y python3.11 && apt-get autoremove -y && \ apt-get clean && rm -rf /var/lib/apt/lists/* @@ -163,6 +169,29 @@ RUN curl -L -O https://github.com/gpakosz/peg/releases/download/${PEG_VERSION}/p ./configure --disable-werror --prefix=/opt/dist/usr && \ make clean check && make install -w --debug +RUN if [ "${USE_K8S}" = "ON" ]; then \ + git clone --depth 1 https://github.com/yaml/libyaml.git /dependencies/libyaml && \ + cd /dependencies/libyaml && \ + mkdir build && \ + cd build && \ + cmake -DCMAKE_INSTALL_PREFIX=/usr/local -DBUILD_TESTING=OFF 
-DBUILD_SHARED_LIBS=ON .. && \ + make ${BUILD_THREADS} && \ + make install; \ + fi + +#Kubernetes +RUN if [ "${USE_K8S}" = "ON" ]; then \ + git clone https://github.com/kubernetes-client/c.git /dependencies/k8s && \ + ls /dependencies/k8s && \ + CLIENT_REPO_ROOT=/dependencies/k8s && \ + cd ${CLIENT_REPO_ROOT}/kubernetes && \ + mkdir build && \ + cd build && \ + cmake -DCMAKE_PREFIX_PATH=/usr/local -DCMAKE_INSTALL_PREFIX=/usr/local .. && \ + make ${BUILD_THREADS} && \ + make install; \ + fi + # CLEANUP RUN rm -rf /dependencies /usr/local/share/doc /usr/local/share/man && \ mkdir -p /opt/dist/usr/include/x86_64-linux-gnu && \ @@ -176,17 +205,22 @@ COPY --from=build /opt/dist / COPY --from=build /usr/local/bin/python${PYTHON_BASE} /usr/local/bin/python${PYTHON_BASE} COPY --from=build /usr/local/lib/python${PYTHON_BASE} /usr/local/lib/python${PYTHON_BASE} COPY --from=build ${VIRTUAL_ENV} ${VIRTUAL_ENV} +COPY --from=build /usr/local/include/kubernete[s] /usr/local/include/kubernetes +COPY --from=build /usr/include/libwebsocket[s] /usr/include/libwebsockets +COPY --from=build /usr/local/lib/libkubernetes.s[o] /usr/local/lib/libkubernetes.so +COPY --from=build /usr/local/lib/libyaml.s[o] /usr/local/lib/libyaml.so +COPY --from=build /usr/lib/x86_64-linux-gnu/libwebsockets.s[o] /usr/lib/x86_64-linux-gnu/libwebsockets.so ENV PATH="$VIRTUAL_ENV/bin:$PATH" # hadolint ignore=DL3008,SC2086 RUN apt-get update -y && apt-get upgrade -y && \ apt-get install -o 'Acquire::Retries=3' -y --no-install-suggests \ --no-install-recommends --fix-broken --fix-missing \ - build-essential bzip2 cppzmq-dev curl g++ gcc git javacc libarchive-tools libavcodec-dev \ + build-essential bzip2 cppzmq-dev curl g++ gcc git javacc libarchive-tools libavcodec-dev bazel-bootstrap \ libavformat-dev libcurl4-openssl-dev libdc1394-dev libgoogle-glog-dev libgtk-3-dev \ libhdf5-dev libjpeg62-turbo-dev libjsoncpp-dev libopenblas-dev libpng-dev librdkafka-dev \ - libssl-dev libswscale-dev libtbb-dev libtbbmalloc2 libtiff5-dev libzip-dev openjdk-17-jdk-headless \ - procps && \ + libssl-dev libswscale-dev libtbb-dev libtbbmalloc2 libtiff5-dev libwebsockets-dev libzip-dev \ + openjdk-17-jdk-headless procps uncrustify && \ apt-get --purge remove -y python3.11 && apt-get autoremove -y && \ apt-get clean && rm -rf /var/lib/apt/lists/* && \ echo "/usr/local/lib" >> /etc/ld.so.conf.d/all-libs.conf && ldconfig && \ @@ -200,13 +234,14 @@ RUN git clone -b master --recurse-submodules https://github.com/IntelLabs/vdms.g sed -i "s|java-11-openjdk|java-17-openjdk|g" /vdms/src/pmgd/java/CMakeLists.txt && \ sed -i "s|#include |#include \n#include |" /vdms/src/pmgd/test/neighbortest.cc && \ sed -i "s|#include |#include \n#include |" /vdms/src/pmgd/tools/mkgraph.cc && \ - cp /vdms/docker/override_default_config.py /vdms/override_default_config.py && \ + cp /vdms/docker/override_default_config.py /vdms/override_default_config.py && \ mkdir -p /vdms/build && cd /vdms/build && \ - cmake .. && make ${BUILD_THREADS} && \ + cmake -DUSE_K8S="${USE_K8S}" .. 
&& make ${BUILD_THREADS} VERBOSE=1 && \ + echo '#!/bin/bash' > /start.sh && echo 'cd /vdms/build' >> /start.sh && \ - echo 'python /vdms/override_default_config.py -i /vdms/config-vdms.json -o /vdms/build/config-vdms.json' >> /start.sh && \ + echo 'python3 /vdms/override_default_config.py -i /vdms/config-vdms.json -o /vdms/build/config-vdms.json' >> /start.sh && \ echo './vdms' >> /start.sh && chmod 755 /start.sh -ENV PYTHONPATH=/vdms/client/python:${PYTHONPATH} +ENV PYTHONPATH=/vdms/client/python + HEALTHCHECK CMD echo "This is a healthcheck test." || exit 1 CMD ["/start.sh"] diff --git a/kubernetes/README.md b/kubernetes/README.md new file mode 100644 index 00000000..76c3769c --- /dev/null +++ b/kubernetes/README.md @@ -0,0 +1,125 @@ +# Prerequisites: + +Use the following steps to create the VDMS tar file. + ++ Change to the docker/base/ directory ++ Follow the README to generate the VDMS docker image ++ Run the following command to create the tar file `sudo docker save -o vdms.tar vdms` + +Use the following steps to create the remote UDF tar file. + ++ Change to the remote_function directory ++ Follow the README to generate the remote UDF docker image ++ Run the following command to create the tar file `sudo docker save -o remote_segment.tar rudf` + +# Configure kubeConfig.json # +Sample kubeConfig file that can be used to add the details of the Master and Worker nodes. + +```json +{ + "MasterNodeDetail": { + "_HOST-NAME-OF-MASTER-NODE_": "_IPADDRESS-OF-MASTER-NODE_" + }, + "WorkerNodeDetail": [ + {"_HOST-NAME-OF-WORKER-NODE_1": "_IPADDRESS-OF-WORKER-NODE_1"}, + {"_HOST-NAME-OF-WORKER-NODE_2" : "_IPADDRESS-OF-WORKER-NODE_2"}, + {"_use-similar-blocks-to-add-more-node_"} + ] +} +``` + +# Proxy setting for running containerd behind a proxy # + +Follow the steps below for containerd + +```bash + sudo mkdir -p /etc/systemd/system/containerd.service.d + sudo touch /etc/systemd/system/containerd.service.d/http-proxy.conf + sudo nano /etc/systemd/system/containerd.service.d/http-proxy.conf +``` + +Edit the http-proxy.conf as below, adding the proxy details for containerd as appropriate for your system + +```bash + [Service] + Environment="HTTP_PROXY=http://proxy.example.com" + Environment="HTTPS_PROXY=http://proxy.example.com" + Environment="NO_PROXY=localhost" +``` + +Restart the services as mentioned below + +```bash + sudo systemctl daemon-reload + sudo systemctl restart containerd +``` + +Follow the steps below for Docker + +```bash + sudo mkdir -p /etc/systemd/system/docker.service.d + sudo touch /etc/systemd/system/docker.service.d/http-proxy.conf + sudo nano /etc/systemd/system/docker.service.d/http-proxy.conf +``` + +Edit the http-proxy.conf as below, adding the proxy details for Docker as appropriate for your system + +```bash + [Service] + Environment="HTTP_PROXY=http://proxy.example.com" + Environment="HTTPS_PROXY=http://proxy.example.com" + Environment="NO_PROXY=localhost" +``` + +Restart the services as mentioned below + +```bash + sudo systemctl daemon-reload + sudo systemctl restart docker +``` +# Bringing up a multi-node cluster for the VDMS application # + +Clone the VDMS GitHub repository on the Master and Worker nodes.
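+
+Before running the setup script, it can help to see a populated kubeConfig.json. The hostnames and IP addresses below are only illustrative (the repository ships a minimal kubernetes/kubeConfig.json that points both entries at 127.0.0.1):
+
+```json
+{
+    "MasterNodeDetail": {"vdms-master": "192.168.1.10"},
+    "WorkerNodeDetail": [
+        {"vdms-worker-1": "192.168.1.11"},
+        {"vdms-worker-2": "192.168.1.12"}
+    ]
+}
+```
+
+Each worker entry holds a single hostname-to-IP pair; the setup script appends these pairs to /etc/hosts and uses the hostnames to label the worker nodes.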
+ +On the Master node, follow the steps below after downloading the VDMS image: ```bash + cd kubernetes/ + chmod +x global_vdms_setup_script.sh + ./global_vdms_setup_script.sh -m master -i yes +``` +On the Worker node, follow the steps below after downloading/creating the remote UDF: + +```bash + cd kubernetes/ + chmod +x global_vdms_setup_script.sh + ./global_vdms_setup_script.sh -m remote -i yes +``` + +Now update the kubeConfig.json file with the Master and Worker node details, as described in the first section + +On the Worker node, follow the steps below to load the remote UDF image locally + +```bash + ./global_vdms_setup_script.sh -m remote -s yes +``` + +## Setting up the multi-node cluster and running the VDMS application ## + +On the Master node, execute the following command +```bash + ./global_vdms_setup_script.sh -m master -s yes -j +``` + +The file join_vdms_cluster.sh will be created in the kubernetes/ folder; copy it to the kubernetes/ folder on the Worker nodes + +On the Worker node, execute the following command +```bash + ./global_vdms_setup_script.sh -m remote -k yes +``` + +As a final step, on the Master node execute the following command +```bash + ./global_vdms_setup_script.sh -m master -k yes -j +``` + +Use ipconfig/ip addr to get the IP address of the control plane. \ No newline at end of file diff --git a/kubernetes/global_vdms_setup_script.sh b/kubernetes/global_vdms_setup_script.sh new file mode 100644 index 00000000..38d20cf5 --- /dev/null +++ b/kubernetes/global_vdms_setup_script.sh @@ -0,0 +1,348 @@ +#!/bin/bash + +helpFunction() +{ + echo "" + echo "Usage: $0 -m machinetype -i install -s setup_the_node -k k8s_setup -c clear_the_node -j local_kubeConfig" + echo -e "\t-m Input the machine type either 'remote' or 'master'" + echo -e "\t-i Input the installation task as 'yes' or 'no'" + echo -e "\t-s Input the status for the Node setup as - 'yes' or 'no'" + echo -e "\t-k Input the status for the Kubernetes setup on Node as - 'yes' or 'no'" + echo -e "\t-c Input to clear the node of the Kubernetes setup on Node as - 'yes' or 'no'" + echo -e "\t-j Path to the master node kubeConfig.json" + exit 1 # Exit script after printing help +} + +jsonparserFunction() +{ + echo "Now parsing the file KubeConfig.json to get worker node info" +} +remoteSetupFunction() +{ + echo "Setting up the docker image and local registry on the remote machine" + sudo docker image load < remote_segment.tar + sudo docker run -d -p 5000:5000 --name registry registry:2 + sudo docker tag rudf:latest localhost:5000/remote-udf-1 + sudo docker push localhost:5000/remote-udf-1 +} +remoteInstallFunction() +{ + echo "Dependency installations will now be done on the remote machine" + + ##install containerd + curl -L https://github.com/containerd/containerd/releases/download/v1.6.2/containerd-1.6.2-linux-amd64.tar.gz -o containerd-1.6.2-linux-amd64.tar.gz + sudo tar Cxzvf /usr/local containerd-1.6.2-linux-amd64.tar.gz + curl -L https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 -o runc.amd64 + sudo install -m 755 runc.amd64 /usr/local/sbin/runc + sudo mkdir -p /etc/containerd + containerd config default | sudo tee /etc/containerd/config.toml + sudo sed -i 's/SystemdCgroup \= false/SystemdCgroup \= true/g' /etc/containerd/config.toml + sudo curl -L https://raw.githubusercontent.com/containerd/containerd/main/containerd.service -o /etc/systemd/system/containerd.service + sudo systemctl daemon-reload + sudo systemctl enable --now containerd + + #install docker engine + # Add Docker's
official GPG key: + sudo apt-get update + sudo apt-get install ca-certificates curl jq + sudo install -m 0755 -d /etc/apt/keyrings + sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc + sudo chmod a+r /etc/apt/keyrings/docker.asc + + # Add the repository to Apt sources: + echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ + $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + sudo apt-get update + + sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin + sudo apt-get install conntrack + ## install kubeadm, kubelet, kubectl + CNI_PLUGINS_VERSION="v1.3.0" + ARCH="amd64" + DEST="/opt/cni/bin" + sudo mkdir -p "$DEST" + curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_PLUGINS_VERSION}/cni-plugins-linux-${ARCH}-${CNI_PLUGINS_VERSION}.tgz" | sudo tar -C "$DEST" -xz + DOWNLOAD_DIR="/usr/local/bin" + sudo mkdir -p "$DOWNLOAD_DIR" + CRICTL_VERSION="v1.31.0" + curl -L "https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-${ARCH}.tar.gz" | sudo tar -C $DOWNLOAD_DIR -xz + RELEASE="$(curl -sSL https://dl.k8s.io/release/stable.txt)" + CDIR=$(pwd) + cd $DOWNLOAD_DIR + sudo curl -L --remote-name-all https://dl.k8s.io/release/${RELEASE}/bin/linux/${ARCH}/{kubeadm,kubelet} + sudo chmod +x {kubeadm,kubelet} + RELEASE_VERSION="v0.16.2" + curl -sSL "https://raw.githubusercontent.com/kubernetes/release/${RELEASE_VERSION}/cmd/krel/templates/latest/kubelet/kubelet.service" | sed "s:/usr/bin:${DOWNLOAD_DIR}:g" | sudo tee /usr/lib/systemd/system/kubelet.service + sudo mkdir -p /usr/lib/systemd/system/kubelet.service.d + curl -sSL "https://raw.githubusercontent.com/kubernetes/release/${RELEASE_VERSION}/cmd/krel/templates/latest/kubeadm/10-kubeadm.conf" | sed "s:/usr/bin:${DOWNLOAD_DIR}:g" | sudo tee /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf + sudo systemctl enable --now kubelet + cd $CDIR +} + + + +masterInstallFunction() +{ + echo "Dependency Installation will now be done on the VDMS Master node" + ##install containerd + curl -L https://github.com/containerd/containerd/releases/download/v1.6.2/containerd-1.6.2-linux-amd64.tar.gz -o containerd-1.6.2-linux-amd64.tar.gz + sudo tar Cxzvf /usr/local containerd-1.6.2-linux-amd64.tar.gz + curl -L https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64 -o runc.amd64 + sudo install -m 755 runc.amd64 /usr/local/sbin/runc + sudo mkdir /etc/containerd + containerd config default | sudo tee /etc/containerd/config.toml + sudo sed -i 's/SystemdCgroup \= false/SystemdCgroup \= true/g' /etc/containerd/config.toml + sudo curl -L https://raw.githubusercontent.com/containerd/containerd/main/containerd.service -o /etc/systemd/system/containerd.service + sudo systemctl daemon-reload + sudo systemctl enable --now containerd + + #install docker engine + # Add Docker's official GPG key: + sudo apt-get update + sudo apt-get install ca-certificates curl + sudo install -m 0755 -d /etc/apt/keyrings + sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc + sudo chmod a+r /etc/apt/keyrings/docker.asc + + # Add the repository to Apt sources: + echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ + $(. 
/etc/os-release && echo "$VERSION_CODENAME") stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + sudo apt-get update + + sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin + sudo apt-get install conntrack + + ## install kubeadm, kubelet, kubectl + CNI_PLUGINS_VERSION="v1.3.0" + ARCH="amd64" + DEST="/opt/cni/bin" + sudo mkdir -p "$DEST" + curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_PLUGINS_VERSION}/cni-plugins-linux-${ARCH}-${CNI_PLUGINS_VERSION}.tgz" | sudo tar -C "$DEST" -xz + + DOWNLOAD_DIR="/usr/local/bin" + sudo mkdir -p "$DOWNLOAD_DIR" + + CRICTL_VERSION="v1.31.0" + curl -L "https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-${ARCH}.tar.gz" | sudo tar -C $DOWNLOAD_DIR -xz + + RELEASE="$(curl -sSL https://dl.k8s.io/release/stable.txt)" + CDIR=$(pwd) + cd $DOWNLOAD_DIR + sudo curl -L --remote-name-all https://dl.k8s.io/release/${RELEASE}/bin/linux/${ARCH}/{kubeadm,kubelet} + sudo chmod +x {kubeadm,kubelet} + + RELEASE_VERSION="v0.16.2" + curl -sSL "https://raw.githubusercontent.com/kubernetes/release/${RELEASE_VERSION}/cmd/krel/templates/latest/kubelet/kubelet.service" | sed "s:/usr/bin:${DOWNLOAD_DIR}:g" | sudo tee /usr/lib/systemd/system/kubelet.service + sudo mkdir -p /usr/lib/systemd/system/kubelet.service.d + curl -sSL "https://raw.githubusercontent.com/kubernetes/release/${RELEASE_VERSION}/cmd/krel/templates/latest/kubeadm/10-kubeadm.conf" | sed "s:/usr/bin:${DOWNLOAD_DIR}:g" | sudo tee /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf + sudo systemctl enable --now kubelet + + #Install Cillium + cd $CDIR + CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt) + CLI_ARCH=amd64 + if [ "$(uname -m)" = "aarch64" ]; then CLI_ARCH=arm64; fi + curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum} + sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum + sudo tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin + rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum} + + sudo docker image load < vdms.tar + sudo docker run -d -p 5000:5000 --name registry registry:2 + sudo docker tag vdms localhost:5000/vdms + sudo docker push localhost:5000/vdms + + +} + +masterSetupFunction() +{ + sudo kubeadm reset -f --cri-socket=unix:///var/run/cri-dockerd.sock + sudo rm -rf $HOME/.kube + sudo rm -rf /etc/cni/net.d + sudo kubeadm init --cri-socket=unix:///var/run/cri-dockerd.sock + + mkdir -p $HOME/.kube + export KUBECONFIG=$HOME/.kube/config + sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + + cilium install --version 1.16.0 +} + +jsonparserFunction_remote() +{ + json_data=$(cat $1) + workers=$(echo $json_data | jq ".WorkerNodeDetail") + num_workers=$(echo $workers | jq length) + one=1 + count=$(($num_workers-$one)) + for i in $(seq 0 $count); + do + node=$(echo $workers | jq -r ".[$i]") + dict_string="${node#\{}" + dict_string="${dict_string%\}}" + key=$(echo "$dict_string" | grep -o '[^:,]*:' | tr -d ':' | tr ',' '\n') + value=$(echo "$dict_string" | grep -o ':[^:,]*' | tr -d ':' | tr ',' '\n') + key=$(echo "$key" | sed 's/"//g') + value=$(echo "$value" | sed 's/"//g') + key=${key// /} + value=${value// /} + kubectl label --overwrite node ${key} nodeSelector=${key} + done +} + +jsonparserFunction_setup() +{ + 
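+    # Parse the kubeConfig.json passed in $1: for every entry under WorkerNodeDetail,
+    # extract the hostname/IP pair and append "<IP> <hostname>" to /etc/hosts so that
+    # worker hostnames resolve from this node.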
json_data=$(cat $1) + workers=$(echo $json_data | jq ".WorkerNodeDetail") + num_workers=$(echo $workers | jq length) + one=1 + count=$(($num_workers-$one)) + for i in $(seq 0 $count); + do + node=$(echo $workers | jq -r ".[$i]") + dict_string="${node#\{}" + dict_string="${dict_string%\}}" + key=$(echo "$dict_string" | grep -o '[^:,]*:' | tr -d ':' | tr ',' '\n') + value=$(echo "$dict_string" | grep -o ':[^:,]*' | tr -d ':' | tr ',' '\n') + key=$(echo "$key" | sed 's/"//g') + value=$(echo "$value" | sed 's/"//g') + key=${key// /} + value=${value// /} + sudo -- sh -c -e "echo '${value} ${key}' >> /etc/hosts"; + done +} + +jsonparserFunction_master() +{ + json_data=$(cat $1) + masternode=$(echo $json_data | jq ".MasterNodeDetail") + dict_string="${masternode#\{}" + dict_string="${dict_string%\}}" + MASTER=$(echo "$dict_string" | grep -o '[^:,]*:' | tr -d ':' | tr ',' '\n') + MASTER_IP=$(echo "$dict_string" | grep -o ':[^:,]*' | tr -d ':' | tr ',' '\n') + MASTER=$(echo "$MASTER" | sed 's/"//g') + MASTER_IP=$(echo "$MASTER_IP" | sed 's/"//g') + MASTER=${MASTER// /} + MASTER_IP=${MASTER_IP// /} +} + +masterVDMSk8setupFunction() +{ + echo "Setup the VDMS on the master node and generate the keys" + ## use the json parser here + jsonparserFunction_master $1 + kubectl label node ${MASTER} vdmstype=vdmsmaster + kubectl create clusterrolebinding serviceaccounts-cluster-admin \ + --clusterrole=cluster-admin \ + --group=system:serviceaccounts + kubectl create configmap node-map --from-file=kubeConfig.json + kubectl taint node ${MASTER} node-role.kubernetes.io/control-plane:NoSchedule- + kubectl apply -f vdms-config.yaml + kubectl apply -f service-config.yaml + jsonparserFunction_remote $1 +} + + + +OPTSTRING=":m:i:s:k:c:j:p" + +while getopts ${OPTSTRING} opt; do + case ${opt} in + m) + echo "The type of machine is - ${OPTARG}" + machinetype="$OPTARG" + machinetype=${machinetype// /} + ;; + i) + install_arg="$OPTARG" + echo "Are we going to install the dependencies - ${OPTARG}" + install_arg=${install_arg// /} + ;; + s) + setup_arg="$OPTARG" + echo "Do we setup the $machinetype ? - ${OPTARG}" + setup_arg=${setup_arg// /} + ;; + k) + k8s_setup_arg="$OPTARG" + echo "Do we configure the k8s cluster on $machinetype ? - ${OPTARG}" + k8s_setup_arg=${k8s_setup_arg// /} + ;; + c) + clean_up="$OPTARG" + clean_up=${clean_up// /} + echo "Do we clean up the k8s cluster? - ${OPTARG}" + ;; + j) + config_path="$OPTARG" + config_path=${config_path// /} + echo "Path to the kubeConfig.json - ${OPTARG}" + ;; + \?) + echo "Invalid option: -$OPTARG" >&2 + usage + ;; + :) + echo "Option -$OPTARG requires an argument." 
>&2 + usage + ;; + esac +done + +if [ -z "$machinetype" ]; +then + echo "Please mention the type of machine - either remote or master"; + helpFunction +fi + +if [ "$install_arg" == "yes" ]; then + if [ "$machinetype" == "remote" ]; then + echo "Installing Dependencies on the remote Node" + remoteInstallFunction + fi + if [ "$machinetype" == "master" ]; then + echo "Installing Dependencies on the master Node" + masterInstallFunction + fi +fi + +if [ "$setup_arg" == "yes" ]; then + if [ "$machinetype" == "remote" ]; then + echo "setup the remote Node" + remoteSetupFunction + fi + if [ "$machinetype" == "master" ]; then + echo "setup the master Node" + masterSetupFunction + jsonparserFunction_setup $config_path + echo "sudo $(kubeadm token create --print-join-command)" > join_vdms_cluster.sh + fi +fi + +if [ "$k8s_setup_arg" == "yes" ]; then + if [ "$machinetype" == "remote" ]; then + echo "setup the k8s on remote Node" + chmod +x join_vdms_cluster.sh + ./join_vdms_cluster.sh + fi + if [ "$machinetype" == "master" ]; then + echo "setup the k8s on master Node" + masterVDMSk8setupFunction $config_path + fi +fi + +if [ "$clean_up" == "yes" ]; then + sudo kubeadm reset -f + if [ "$machinetype" == "master" ]; then + sudo rm -rf $HOME/.kube + sudo rm -f join_vdms_cluster.sh + else + sudo rm -f join_vdms_cluster.sh + fi +fi diff --git a/kubernetes/kubeConfig.json b/kubernetes/kubeConfig.json new file mode 100644 index 00000000..5d251e45 --- /dev/null +++ b/kubernetes/kubeConfig.json @@ -0,0 +1,4 @@ +{ + "MasterNodeDetail": {"vdms": "127.0.0.1"}, + "WorkerNodeDetail": [{"worker": "127.0.0.1"}] +} \ No newline at end of file diff --git a/kubernetes/service-config.yaml b/kubernetes/service-config.yaml new file mode 100644 index 00000000..fe69f8e2 --- /dev/null +++ b/kubernetes/service-config.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: my-nodeport-service +spec: + type: NodePort + selector: + app: vdms + ports: + - protocol: TCP + port: 55555 + targetPort: 55555 + nodePort: 30001 # Specify the node port here, you can choose any available port \ No newline at end of file diff --git a/kubernetes/vdms-config.yaml b/kubernetes/vdms-config.yaml new file mode 100644 index 00000000..d5f63686 --- /dev/null +++ b/kubernetes/vdms-config.yaml @@ -0,0 +1,34 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: vdms-deployment + labels: + app: vdms +spec: + selector: + matchLabels: + app: vdms + template: + metadata: + labels: + app: vdms + spec: + containers: + - name: vdms-container + image: localhost:5000/vdms + volumeMounts: + - name: config-volume + mountPath: /etc/config + imagePullPolicy: Always + command: ["/start.sh"] + ports: + - containerPort: 55555 + env: + - name: no_proxy + value: "kubernetes.default.svc,.svc,.cluster.local,127.0.0.1,localhost,docker.io" + volumes: + - name: config-volume + configMap: + name: node-map + nodeSelector: + vdmstype : vdmsmaster diff --git a/remote_function/Dockerfile.txt b/remote_function/Dockerfile.txt new file mode 100644 index 00000000..6fc61f3a --- /dev/null +++ b/remote_function/Dockerfile.txt @@ -0,0 +1,9 @@ +FROM python:3.10 +RUN /bin/bash -c "python3 -m venv venv" +RUN /bin/bash -c "source venv/bin/activate" +WORKDIR /remoteUDF +COPY . . 
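+# Upgrade pip, install the UDF server's Python dependencies, and start udf_server.py on port 5030 at runtime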
+RUN /bin/bash -c "python3 -m pip install pip --upgrade" +RUN pip install -r requirements.txt + +CMD ["python3","udf_server.py","5030"] \ No newline at end of file diff --git a/remote_function/README.md b/remote_function/README.md index 3f9710d1..95cb33ad 100644 --- a/remote_function/README.md +++ b/remote_function/README.md @@ -147,4 +147,7 @@ python3 udf_server.py 5010 [path_tmp_dir] } ] } -``` \ No newline at end of file +``` +## To compile Docker Image of Remote UDF +1. Use the command given below +```sudo docker build -t rudf:latest --build-arg=http_proxy --build-arg=https_proxy --file=Dockerfile.txt .``` \ No newline at end of file diff --git a/remote_function/requirements.txt b/remote_function/requirements.txt index 03e6998f..aab16828 100644 --- a/remote_function/requirements.txt +++ b/remote_function/requirements.txt @@ -1,5 +1,6 @@ flask>=3.0.2 imutils>=0.5.4 numpy<2.0.0 -opencv-python-headless==4.9.0.80 -sk-video==1.1.10 \ No newline at end of file +opencv-python-headless>=4.9.0.80 +pillow>=11.1.0 +sk-video>=1.1.10 \ No newline at end of file diff --git a/src/ExceptionsCommand.h b/src/ExceptionsCommand.h index 19a6afd0..a66c5a49 100644 --- a/src/ExceptionsCommand.h +++ b/src/ExceptionsCommand.h @@ -78,4 +78,4 @@ struct ExceptionCommand { ExceptionCommand(VDMS::name, #name, ##__VA_ARGS__, __FILE__, __LINE__) }; // namespace VDMS -extern void print_exception(const VDMS::ExceptionCommand &e, FILE *f = stdout); +extern void print_exception(const VDMS::ExceptionCommand &e, FILE *f = stdout); \ No newline at end of file diff --git a/src/ImageCommand.cc b/src/ImageCommand.cc index 796d8858..2439f3a1 100644 --- a/src/ImageCommand.cc +++ b/src/ImageCommand.cc @@ -30,13 +30,23 @@ */ #include - +#include #include "ImageCommand.h" #include "VDMSConfig.h" #include "defines.h" +#include +#include +#include + #include "ImageLoop.h" +#ifdef HAS_KUBERNETES_CLIENT +#include "../utils/include/kubernetes/KubeHelper.h" +using namespace kubernetes; +static kubernetes::KubeHelper kubernetes_get_url; +#endif + using namespace VDMS; //========= AddImage definitions ========= @@ -75,7 +85,23 @@ int ImageCommand::enqueue_operations(VCL::Image &img, const Json::Value &ops, options["ingestion"] = 1; img.syncremoteOperation(get_value(op, "url"), options); } else { - img.remoteOperation(get_value(op, "url"), options); + #ifdef HAS_KUBERNETES_CLIENT + bool kube_cfg = VDMS::VDMSConfig::instance()->get_k8s_flag(); + if(kube_cfg){ + // Use the url generator from the utils path by creating the object + std::string url_k8s = kubernetes_get_url.query_scheduler("image"); + img.remoteOperation(url_k8s, get_value(op, "options")); + } + else{ + // In case of absence of Kubernetes Infrastructure + img.remoteOperation(get_value(op, "url"), + get_value(op, "options")); + } + #else + img.remoteOperation(get_value(op, "url"), + get_value(op, "options")); + #endif + } } else if (type == "userOp") { img.userOperation(get_value(op, "options")); @@ -522,4 +548,4 @@ Json::Value FindImage::construct_responses(Json::Value &responses, } ret[_cmd_name].swap(findImage); return ret; -} +} \ No newline at end of file diff --git a/src/VDMSConfig.cc b/src/VDMSConfig.cc index 6722c283..822cd5bc 100644 --- a/src/VDMSConfig.cc +++ b/src/VDMSConfig.cc @@ -63,6 +63,7 @@ const std::string KEY_NOT_FOUND = "KEY_NOT_FOUND"; const std::string DEFAULT_ENDPOINT = "http://127.0.0.1:9000"; const std::string DEFAULT_AWS_LOG_LEVEL = "off"; const bool DEFAULT_USE_ENDPOINT = false; +const bool DEFAULT_KUBERNETES_CONTAINER = false; using namespace VDMS; @@ 
-113,6 +114,7 @@ VDMSConfig::VDMSConfig(std::string config_file) { cfg = nullptr; storage_type = StorageType::LOCAL; aws_flag = false; + k8s_flag = false; use_endpoint = false; aws_log_level = Aws::Utils::Logging::LogLevel::Off; endpoint_override = std::nullopt; @@ -140,6 +142,7 @@ VDMSConfig::VDMSConfig(std::string config_file) { } build_dirs(); + set_kubernetes_config(); } int VDMSConfig::get_int_value(std::string val, int def) { @@ -452,6 +455,10 @@ void VDMSConfig::build_dirs() { } +void VDMSConfig::set_kubernetes_config() { + k8s_flag = get_bool_value(PARAM_KUBERNETES_CONTAINER, DEFAULT_KUBERNETES_CONTAINER); +} + bool VDMSConfig::exists_key(const std::string &key) { return (json_config[key] != Json::nullValue); } diff --git a/src/VDMSConfig.h b/src/VDMSConfig.h index 69634c88..7682214a 100644 --- a/src/VDMSConfig.h +++ b/src/VDMSConfig.h @@ -89,6 +89,7 @@ const std::string PARAM_PROXY_PORT = "proxy_port"; const std::string PARAM_PROXY_SCHEME = "proxy_scheme"; const std::string PARAM_USE_ENDPOINT = "use_endpoint"; const std::string PARAM_AWS_LOG_LEVEL = "aws_log_level"; +const std::string PARAM_KUBERNETES_CONTAINER = "use_k8s_container"; const std::string PARAM_FLINNG_NUM_ROWS = "flinng_num_rows"; const std::string PARAM_FLINNG_CELLS_PER_ROW = "flinng_cells_per_row"; @@ -146,6 +147,7 @@ class VDMSConfig { const Aws::Utils::Logging::LogLevel get_aws_log_level() { return aws_log_level; } + const bool &get_k8s_flag() { return k8s_flag; } // Descriptor Optional Parameters const std::optional &get_flinng_num_rows() { return flinng_num_rows; } @@ -196,6 +198,8 @@ class VDMSConfig { std::string aws_bucket_name; // aws bucket name bool use_endpoint; // Use Mocked S3 server or real AWS S3 + bool k8s_flag; + std::optional endpoint_override; std::optional proxy_host; std::optional proxy_port; @@ -224,12 +228,14 @@ class VDMSConfig { void build_dirs(); void check_or_create(std::string path); int create_dir(std::string path); + void set_kubernetes_config(); VDMSConfig *getCfg() { return cfg; } VDMSConfig() { cfg = nullptr; storage_type = StorageType::LOCAL; aws_flag = false; + k8s_flag = false; use_endpoint = false; aws_log_level = Aws::Utils::Logging::LogLevel::Off; endpoint_override = std::nullopt; diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 13622ee3..7fc0bf6e 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -2,13 +2,11 @@ cmake_minimum_required (VERSION 3.17) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall") set(CMAKE_CXX_STANDARD 17) -option(CODE_COVERAGE "Collect coverage" OFF) IF(CODE_COVERAGE) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O0 -Wall -coverage -fprofile-abs-path") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O0 -Wall -coverage -fprofile-abs-path") enable_testing() ENDIF() -message("Coverage:" ${CODE_COVERAGE}) project(tests LANGUAGES "CXX" @@ -17,6 +15,10 @@ find_package( OpenCV REQUIRED ) find_package( Threads REQUIRED ) find_package(AWSSDK REQUIRED COMPONENTS core s3) +if(USE_K8S) + add_definitions("-D HAS_KUBERNETES_CLIENT") +endif() + link_directories(/usr/local/lib/ /usr/lib/x86_64-linux-gnu/) include_directories( ../src @@ -90,3 +92,4 @@ target_link_libraries(unit_tests ${AWSSDK_LINK_LIBRARIES} neo4j-client ) + diff --git a/tests/cleandbs.sh b/tests/cleandbs.sh index 4b05eba8..e34f3c4a 100755 --- a/tests/cleandbs.sh +++ b/tests/cleandbs.sh @@ -5,4 +5,5 @@ rm -rf test_db_client || true rm -rf test_db_1 || true rm -rf db || true rm -rf db_backup || true -rm -rf /tmp/rpathimage.jpg || true \ No newline at end of file +rm -rf /tmp/rpathimage.jpg || 
true +rm -rf /tmp/kubeconfig || true \ No newline at end of file diff --git a/tests/run_all_tests.py b/tests/run_all_tests.py index 761fa75b..f7d6d4b7 100755 --- a/tests/run_all_tests.py +++ b/tests/run_all_tests.py @@ -1634,6 +1634,12 @@ def setup_for_local_udf_message_queue_tests(self, tmp_dir, stderrFD, stdoutFD): + str(e) ) + def setup_k8s_test_files(self): + if not os.path.exists("/tmp/kubeconfig"): + os.mkdir("/tmp/kubeconfig") + global DEFAULT_DIR_REPO + shutil.copy2("../kubernetes/kubeConfig.json", "/tmp/kubeconfig") + def fill_default_arguments(self, testingArgs: TestingArgs) -> TestingArgs: """ Fills in default arguments for the NonRemoteTest object. @@ -1785,6 +1791,9 @@ def run(self, testingArgs: TestingArgs): # Prepare the TLS environment for testing self.run_prep_certs_script(tlsStderrFD, tlsStdoutFD) + # Copy K8s test files + self.setup_k8s_test_files() + # Start an instance of the VDMS server per each config file given as argument self.run_vdms_server(testingArgs, vdmsStderrFD, vdmsStdoutFD) diff --git a/tests/run_tests.sh b/tests/run_tests.sh index 55f7700a..3e5424c4 100755 --- a/tests/run_tests.sh +++ b/tests/run_tests.sh @@ -65,6 +65,8 @@ function execute_commands() { # set in the test files in unit_test dir cp unit_tests/config-tests.json /tmp/tests_output_dir/config-tests.json cp unit_tests/config-client-tests.json /tmp/tests_output_dir/config-client-tests.json + mkdir /tmp/kubeconfig || true + cp ../kubernetes/kubeConfig.json /tmp/kubeconfig # Stop UDF Queue and Remote Server if already running pkill -9 -f udf_server.py || true diff --git a/tests/unit_tests/client_image.cc b/tests/unit_tests/client_image.cc index 773179d4..6c65ee9e 100644 --- a/tests/unit_tests/client_image.cc +++ b/tests/unit_tests/client_image.cc @@ -2,6 +2,11 @@ #include "meta_data_helper.h" +#ifdef HAS_KUBERNETES_CLIENT +#include "kubernetes/KubeHelper.h" +using namespace kubernetes; +#endif + void add_image_util(Meta_Data *meta_obj) { EXPECT_TRUE(nullptr != meta_obj); @@ -206,7 +211,6 @@ TEST(CLIENT_CPP, add_image_dynamic_metadata) { meta_obj->_aclient->query(meta_obj->_fastwriter.write(tuple), blobs); Json::Value result; meta_obj->_reader.parse(response.json.c_str(), result); - int status1 = result[0]["AddImage"]["status"].asInt(); EXPECT_EQ(status1, 0); delete meta_obj; @@ -235,7 +239,6 @@ TEST(CLIENT_CPP, add_image_dynamic_metadata_remote) { meta_obj->_aclient->query(meta_obj->_fastwriter.write(tuple), blobs); Json::Value result; meta_obj->_reader.parse(response.json.c_str(), result); - int status1 = result[0]["AddImage"]["status"].asInt(); EXPECT_EQ(status1, 0); delete meta_obj; @@ -281,4 +284,14 @@ TEST(CLIENT_CPP, find_image_dynamic_metadata) { EXPECT_EQ(status_b, 0); EXPECT_STREQ(objectId.data(), "face"); delete meta_obj; +} + +TEST(CLIENT_CPP, kubehelper_url) { + #ifdef HAS_KUBERNETES_CLIENT + static kubernetes::KubeHelper kubernetes_get_url; + kubernetes_get_url.query_counter++; + std::string url_k8s = kubernetes_get_url.query_scheduler("image"); + + EXPECT_STREQ(url_k8s.data(), "rudf0svc:5050/image"); + #endif } \ No newline at end of file diff --git a/user_defined_operations/requirements.txt b/user_defined_operations/requirements.txt index d40f7ba0..6d1ae6d3 100644 --- a/user_defined_operations/requirements.txt +++ b/user_defined_operations/requirements.txt @@ -1,2 +1,5 @@ -opencv-python-headless==4.9.0.80 -pyzmq==26.0.3 \ No newline at end of file +opencv-python-headless>=4.9.0.80 +pyzmq>=26.0.3 +sk-video>=1.1.10 +imutils>=0.5.4 +zmq>=0.0.0 \ No newline at end of file diff --git 
diff --git a/utils/CMakeLists.txt b/utils/CMakeLists.txt
index 66288fb3..d6bd7f1f 100644
--- a/utils/CMakeLists.txt
+++ b/utils/CMakeLists.txt
@@ -1,4 +1,12 @@
 cmake_minimum_required (VERSION 3.10)
 project(vdms-utils)
-include_directories(include/comm include/chrono include/stats include/timers)
-add_library(vdms-utils SHARED src/timers/TimerMap.cc src/comm/ConnClient.cc src/comm/Connection.cc src/comm/Exception.cc src/comm/ConnServer.cc src/stats/SystemStats.cc)
+
+if(USE_K8S)
+    message(STATUS "Including kubernetes client libraries and kubernetes helper class")
+    include_directories(include/comm include/chrono include/stats include/timers include/kubernetes /usr/include/libwebsockets)
+    add_library(vdms-utils SHARED src/timers/TimerMap.cc src/comm/ConnClient.cc src/comm/Connection.cc src/comm/Exception.cc src/comm/ConnServer.cc src/stats/SystemStats.cc src/kubernetes/KubeHelper.cc)
+    target_link_libraries(vdms-utils -L/usr/local/lib/ -lkubernetes)
+else()
+    include_directories(include/comm include/chrono include/stats include/timers)
+    add_library(vdms-utils SHARED src/timers/TimerMap.cc src/comm/ConnClient.cc src/comm/Connection.cc src/comm/Exception.cc src/comm/ConnServer.cc src/stats/SystemStats.cc)
+endif()
diff --git a/utils/include/kubernetes/KubeHelper.h b/utils/include/kubernetes/KubeHelper.h
new file mode 100644
index 00000000..afae093d
--- /dev/null
+++ b/utils/include/kubernetes/KubeHelper.h
@@ -0,0 +1,21 @@
+#include <string>
+#include <vector>
+#include
+
+namespace kubernetes {
+// Helper class for handling Kubernetes API calls.
+// pod_creator() populates the fields required to create a pod within the
+// cluster, taking the pod name as an argument; service_creator() populates
+// the fields required to create a service, taking the app selector and
+// service name as arguments.
+class KubeHelper {
+public:
+  // Pod/service creation and query-scheduling helpers
+  int pod_creator(char *PodName, std::string worker_node_name);
+  int service_creator(char *ServiceName, char *AppSelector);
+  std::vector<std::string> get_workernode();
+  std::string k8s_objects_creator(char *appname, std::string node_name);
+  std::string query_scheduler(std::string mediaType);
+  static inline int query_counter = 0;
+};
+} // namespace kubernetes
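Before the implementation that follows, a minimal usage sketch of the helper declared above, mirroring the new CLIENT_CPP.kubehelper_url unit test earlier in this diff. It assumes a build with USE_K8S (so HAS_KUBERNETES_CLIENT is defined), the kubernetes C client available at link time, and a kubeConfig.json reachable at one of the paths get_workernode() probes; the program name and output text are illustrative only.

    // Sketch only: exercises the public KubeHelper API declared above.
    #include <iostream>
    #include <string>

    #include "kubernetes/KubeHelper.h"

    int main() {
      kubernetes::KubeHelper helper;

      // query_counter is the static counter the scheduler uses to pick a worker.
      kubernetes::KubeHelper::query_counter++;

      // Expected to return a "<app>svc:5050/<mediaType>" URL, e.g.
      // "rudf0svc:5050/image" in the unit test.
      std::string url = helper.query_scheduler("image");
      std::cout << "scheduled UDF endpoint: " << url << std::endl;
      return 0;
    }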
diff --git a/utils/src/kubernetes/KubeHelper.cc b/utils/src/kubernetes/KubeHelper.cc
new file mode 100644
index 00000000..c83c4a66
--- /dev/null
+++ b/utils/src/kubernetes/KubeHelper.cc
@@ -0,0 +1,250 @@
+#include "KubeHelper.h"
+#include <chrono>
+#include <filesystem>
+#include <fstream>
+#include <iostream>
+#include <thread>
+#include
+#include
+
+extern "C" {
+#include
+#include
+#include
+#include
+}
+
+using namespace kubernetes;
+using namespace std::chrono;
+using namespace std::this_thread;
+
+// This function loads the authentication configuration needed to interact
+// with the kube-api-server, populates the required fields, and then creates
+// a pod.
+int KubeHelper::pod_creator(char *PodName, std::string worker_node_name) {
+  char *basePath = NULL;
+  sslConfig_t *sslConfig = NULL;
+  list_t *apiKeys = NULL;
+  int rc = load_incluster_config(&basePath, &sslConfig, &apiKeys);
+  if (rc != 0) {
+    printf("Cannot load kubernetes configuration in cluster.\n");
+    return -1;
+  }
+  apiClient_t *apiClient = apiClient_create_with_base_path(basePath, sslConfig, apiKeys);
+  if (!apiClient) {
+    printf("Cannot create a kubernetes client.\n");
+    return -1;
+  } else {
+    v1_pod_t *podinfo = (v1_pod_t *)calloc(1, sizeof(v1_pod_t));
+    list_t *conportList = list_createList();
+    podinfo->api_version = strdup("v1");
+    podinfo->kind = strdup("Pod");
+    podinfo->spec = (v1_pod_spec_t *)calloc(1, sizeof(v1_pod_spec_t));
+    podinfo->metadata = (v1_object_meta_t *)calloc(1, sizeof(v1_object_meta_t));
+    list_t *labelList = list_createList();
+    keyValuePair_t *label = keyValuePair_create(strdup("app"), PodName);
+    list_addElement(labelList, label);
+    const char *node = worker_node_name.c_str();
+    keyValuePair_t *nodeSelect = keyValuePair_create(strdup("nodeSelector"), strdup(node));
+    list_t *node_select_list = list_createList();
+    list_addElement(node_select_list, nodeSelect);
+    /* set pod name */
+    podinfo->metadata->name = PodName;
+    podinfo->metadata->labels = labelList;
+    v1_container_port_t *containerPort = (v1_container_port_t *)calloc(1, sizeof(v1_container_port_t));
+    containerPort->container_port = 5050;
+    /* set containers for pod */
+    list_t *containerlist = list_createList();
+    v1_container_t *con = (v1_container_t *)calloc(1, sizeof(v1_container_t));
+    con->name = strdup("my-container-fnfdnvnoimdk");
+    con->image = strdup("localhost:5000/remote-udf-1");
+    con->image_pull_policy = strdup("Always");
+    con->ports = conportList;
+    list_addElement(containerlist, con);
+    podinfo->spec->containers = containerlist;
+    podinfo->spec->node_selector = node_select_list;
+    char *ns = "default";
+
+    /* call API in libkubernetes to create pod */
+    v1_pod_t *apod = CoreV1API_createNamespacedPod(apiClient, ns, podinfo, NULL, NULL, NULL, NULL);
+    if (apiClient->response_code != 200 && apiClient->response_code != 201) {
+      fprintf(stderr, "Failed to create pod: %ld\n", apiClient->response_code);
+      free(podinfo);
+      free(apod);
+      free_client_config(basePath, sslConfig, apiKeys);
+      basePath = NULL;
+      sslConfig = NULL;
+      apiKeys = NULL;
+      free(apiClient);
+      apiClient_unsetupGlobalEnv();
+      return -1;
+    } else {
+      printf("Pod created successfully\n");
+      free(podinfo);
+      free(apod);
+      free_client_config(basePath, sslConfig, apiKeys);
+      basePath = NULL;
+      sslConfig = NULL;
+      apiKeys = NULL;
+      free(apiClient);
+      apiClient_unsetupGlobalEnv();
+      return 0;
+    }
+  }
+  return -1;
+}
+
+// This function loads the authentication configuration needed to interact
+// with the kube-api-server, populates the required fields, and then creates
+// a service in the cluster.
+int KubeHelper::service_creator(char *ServiceName, char *AppSelector) {
+  char *basePath = NULL;
+  sslConfig_t *sslConfig = NULL;
+  list_t *apiKeys = NULL;
+
+  int rc = load_incluster_config(&basePath, &sslConfig, &apiKeys);
+
+  if (rc != 0) {
+    printf("Cannot load kubernetes configuration in cluster.\n");
+    return -1;
+  }
+  apiClient_t *apiClient = apiClient_create_with_base_path(basePath, sslConfig, apiKeys);
+  if (!apiClient) {
+    printf("Cannot create a kubernetes client.\n");
+    return -1;
+  }
+  if (apiClient) {
+    v1_service_port_t *servicePort = (v1_service_port_t *)calloc(1, sizeof(v1_service_port_t));
+    if (!servicePort) {
+      fprintf(stderr, "Memory allocation for servicePort failed\n");
+      return -1;
+    }
+    int prt = 5050;
+    servicePort->port = prt;
+    // Add the ServicePort to a list
+    list_t *servicePortList = list_createList();
+    if (!servicePortList) {
+      fprintf(stderr, "Memory allocation for servicePortList failed\n");
+      free(servicePort);
+      return -1;
+    }
+    list_addElement(servicePortList, servicePort);
+    // Create a ServiceSpec
+    v1_service_spec_t *serviceSpec = (v1_service_spec_t *)calloc(1, sizeof(v1_service_spec_t));
+    if (!serviceSpec) {
+      fprintf(stderr, "Memory allocation for serviceSpec failed\n");
+      free(servicePort);
+      list_freeList(servicePortList);
+      return -1;
+    }
+    keyValuePair_t *selectorPair = keyValuePair_create(strdup("app"), strdup(AppSelector));
+    list_t *selectorlist = list_createList();
+    list_addElement(selectorlist, selectorPair);
+    serviceSpec->selector = selectorlist;
+    serviceSpec->ports = servicePortList;
+    if (!serviceSpec->selector) {
+      fprintf(stderr, "Memory allocation for serviceSpec->selector failed\n");
+      free(servicePort);
+      list_freeList(servicePortList);
+      free(serviceSpec);
+      return -1;
+    }
+
+    // Create a Service
+    v1_service_t *service = (v1_service_t *)calloc(1, sizeof(v1_service_t));
+    if (!service) {
+      fprintf(stderr, "Memory allocation for service failed\n");
+    }
+    service->api_version = strdup("v1");
+    service->kind = strdup("Service");
+    service->metadata = (v1_object_meta_t *)calloc(1, sizeof(v1_object_meta_t));
+    service->metadata->name = strdup(ServiceName);
+    service->spec = serviceSpec;
+    char *response = NULL;
+    // Create the service in the specified namespace
+    CoreV1API_createNamespacedService(apiClient, "default", service, NULL, NULL, NULL, NULL);
+    // Check the response code
+    if (apiClient->response_code != 200 && apiClient->response_code != 201) {
+      fprintf(stderr, "Failed to create service: %ld\n", apiClient->response_code);
+      v1_service_free(service);
+      return -1;
+    } else {
+      printf("Service created successfully\n");
+      v1_service_free(service);
+      return 0;
+    }
+  }
+  return -1;
+}
+
+std::vector<std::string> KubeHelper::get_workernode() {
+  std::string filename = "/etc/config/kubeConfig.json";
+  if (!std::filesystem::exists(filename)) {
+    filename = "/tmp/kubeconfig/kubeConfig.json";
+  }
+  std::ifstream fileStream(filename);
+  if (!fileStream.is_open()) {
+    std::cerr << "Failed to open " << filename << std::endl;
+  }
+  Json::Value root;
+  Json::CharReaderBuilder builder;
+  JSONCPP_STRING errs;
+  if (!Json::parseFromStream(builder, fileStream, &root, &errs)) {
+    std::cerr << "Failed to parse JSON: " << errs << std::endl;
+  }
+  fileStream.close();
+  std::vector<std::string> node_names;
+  if (root.isMember("WorkerNodeDetail") && root["WorkerNodeDetail"].isArray()) {
+    const Json::Value workerNodeDetail = root["WorkerNodeDetail"];
+    for (const auto &nodeDetail : workerNodeDetail) {
+      for (Json::ValueConstIterator it = nodeDetail.begin(); it != nodeDetail.end(); ++it) {
+        std::string nodeName = it.key().asString();
+        std::string nodeIP = it->asString();
+        node_names.push_back(nodeName);
+      }
+    }
+  } else {
+    std::cout << "Cannot parse the kubernetes config for VDMS";
+  }
+  return node_names;
+}
+
+// Orchestration creator
+std::string KubeHelper::k8s_objects_creator(char *appname, std::string node_name) {
+  KubeHelper k8s_object;
+  int status_pod = k8s_object.pod_creator(appname, node_name);
+  sleep_for(nanoseconds(10));
+  sleep_until(system_clock::now() + seconds(5));
+  if (status_pod != -1) {
+    std::string app_str(appname);
+    std::string svc_name_str = app_str + "svc";
+    char *svc_name = &svc_name_str[0];
+    int status_svc = k8s_object.service_creator(svc_name, appname);
+    sleep_for(nanoseconds(10));
+    sleep_until(system_clock::now() + seconds(2));
+    if (status_svc != -1) {
+      std::string url_final = svc_name_str + ":5050/image";
+      return url_final;
+    }
+  }
+  // Pod or service creation failed: return an empty URL.
+  return std::string();
+}
+
+// Scheduler
+std::string KubeHelper::query_scheduler(std::string mediaType) {
+  // The logic below builds the parameters for creating new pods in the
+  // cluster with the help of the kube-api-server.
+  std::string url_final;
+  KubeHelper k8s_object;
+
+  static std::vector<std::string> worker_nodes_list = k8s_object.get_workernode();
+  static int num_worker_node = worker_nodes_list.size();
+  if ((KubeHelper::query_counter) >= 0 && (KubeHelper::query_counter)