From 75a373c98f654438eedcdcbf14140bf9b51d0823 Mon Sep 17 00:00:00 2001 From: Lalith Kota Date: Sun, 4 Feb 2024 14:11:30 +0000 Subject: [PATCH] GITBOOK-163: Deployment Documentation 1.2 Organize --- SUMMARY.md | 43 +++-- .../deployment-architecture.md | 0 .../external-components-setup/README.md | 16 ++ .../e-signet-deployment.md | 35 ++++ .../kafka-deployment.md | 24 +++ .../keycloak-deployment.md | 25 +++ .../keymanager-deployment.md | 27 +++ .../logging-and-opensearch-deployment.md | 67 +++++++ .../minio-deployment.md | 30 +++ .../odk-central-deployment.md | 42 +++++ .../postgresql-server-deployment.md | 25 +++ deployment/infrastructure-setup/README.md | 9 + .../k8s-cluster-requirements.md | 0 .../infrastructure-setup/k8s-cluster.md | 165 +++++++++++++++++ .../loadbalancer-setup.md | 10 + deployment/infrastructure-setup/nfs-server.md | 21 +++ .../infrastructure-setup/rancher.md | 50 ++++- .../wireguard-server-setup.md | 27 +++ .../openg2p-modules-deployment/README.md | 13 ++ .../gctb-deployment.md | 37 ++++ .../pbms-deployment/README.md | 39 ++++ .../post-install-instructions.md | 0 .../openg2p-modules-deployment/reporting.md | 57 ++++++ .../social-registry-deployment.md | 0 .../spar-deployment/README.md | 29 +++ .../spar-post-installation-configuration.md | 16 ++ guides/deployment-guide/README.md | 2 +- .../deployment-on-kubernetes/README.md | 173 ------------------ .../gctb-deployment.md | 2 - .../k8s-infrastructure-setup/README.md | 2 - .../k8s-infrastructure-setup/cluster-setup.md | 152 --------------- .../nfs-server-setup.md | 5 - .../pbms-deployment/README.md | 30 --- .../postgresql-server.md | 19 -- .../spar-deployment.md | 2 - .../ssl-certificates-using-letsencrypt.md | 0 .../g2p-cash-transfer-bridge/README.md | 4 +- .../social-payments-account-registry-spar.md | 21 ++- platform/releases/1.1.0/release-notes.md | 2 +- 39 files changed, 800 insertions(+), 421 deletions(-) rename {guides/deployment-guide => deployment}/deployment-architecture.md (100%) create 
mode 100644 deployment/external-components-setup/README.md create mode 100644 deployment/external-components-setup/e-signet-deployment.md create mode 100644 deployment/external-components-setup/kafka-deployment.md create mode 100644 deployment/external-components-setup/keycloak-deployment.md create mode 100644 deployment/external-components-setup/keymanager-deployment.md create mode 100644 deployment/external-components-setup/logging-and-opensearch-deployment.md create mode 100644 deployment/external-components-setup/minio-deployment.md create mode 100644 deployment/external-components-setup/odk-central-deployment.md create mode 100644 deployment/external-components-setup/postgresql-server-deployment.md create mode 100644 deployment/infrastructure-setup/README.md rename {guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup => deployment/infrastructure-setup}/k8s-cluster-requirements.md (100%) create mode 100644 deployment/infrastructure-setup/k8s-cluster.md create mode 100644 deployment/infrastructure-setup/loadbalancer-setup.md create mode 100644 deployment/infrastructure-setup/nfs-server.md rename guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/rancher-server-setup.md => deployment/infrastructure-setup/rancher.md (57%) create mode 100644 deployment/infrastructure-setup/wireguard-server-setup.md create mode 100644 deployment/openg2p-modules-deployment/README.md create mode 100644 deployment/openg2p-modules-deployment/gctb-deployment.md create mode 100644 deployment/openg2p-modules-deployment/pbms-deployment/README.md rename {guides/deployment-guide/deployment-on-kubernetes => deployment/openg2p-modules-deployment}/pbms-deployment/post-install-instructions.md (100%) create mode 100644 deployment/openg2p-modules-deployment/reporting.md rename {guides/deployment-guide/deployment-on-kubernetes => deployment/openg2p-modules-deployment}/social-registry-deployment.md (100%) create mode 100644 
deployment/openg2p-modules-deployment/spar-deployment/README.md create mode 100644 deployment/openg2p-modules-deployment/spar-deployment/spar-post-installation-configuration.md delete mode 100644 guides/deployment-guide/deployment-on-kubernetes/README.md delete mode 100644 guides/deployment-guide/deployment-on-kubernetes/gctb-deployment.md delete mode 100644 guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/README.md delete mode 100644 guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/cluster-setup.md delete mode 100644 guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/nfs-server-setup.md delete mode 100644 guides/deployment-guide/deployment-on-kubernetes/pbms-deployment/README.md delete mode 100644 guides/deployment-guide/deployment-on-kubernetes/postgresql-server.md delete mode 100644 guides/deployment-guide/deployment-on-kubernetes/spar-deployment.md rename guides/deployment-guide/{deployment-on-kubernetes/k8s-infrastructure-setup => }/ssl-certificates-using-letsencrypt.md (100%) diff --git a/SUMMARY.md b/SUMMARY.md index 9d6bf44e..f21b6b96 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -60,6 +60,34 @@ * [Registration in Low Connectivity Areas](use-cases/use-cases/registration-in-low-connectivity-areas.md) * [Service Provider Reimbursement](use-cases/use-cases/service-provider-reimbursement.md) +## 🗄 DEPLOYMENT + +* [Deployment Architecture](deployment/deployment-architecture.md) +* [Infrastructure Setup](deployment/infrastructure-setup/README.md) + * [K8s Cluster Requirements](deployment/infrastructure-setup/k8s-cluster-requirements.md) + * [Wireguard Server Setup](deployment/infrastructure-setup/wireguard-server-setup.md) + * [Rancher Setup](deployment/infrastructure-setup/rancher.md) + * [NFS Server Setup](deployment/infrastructure-setup/nfs-server.md) + * [OpenG2P K8s Cluster Setup](deployment/infrastructure-setup/k8s-cluster.md) + * [Loadbalancer 
Setup](deployment/infrastructure-setup/loadbalancer-setup.md) +* [External Components Setup](deployment/external-components-setup/README.md) + * [PostgreSQL Server Deployment](deployment/external-components-setup/postgresql-server-deployment.md) + * [Keycloak Deployment](deployment/external-components-setup/keycloak-deployment.md) + * [Minio Deployment](deployment/external-components-setup/minio-deployment.md) + * [ODK Central Deployment](deployment/external-components-setup/odk-central-deployment.md) + * [Kafka Deployment](deployment/external-components-setup/kafka-deployment.md) + * [Logging & OpenSearch Deployment](deployment/external-components-setup/logging-and-opensearch-deployment.md) + * [Keymanager Deployment](deployment/external-components-setup/keymanager-deployment.md) + * [e-Signet Deployment](deployment/external-components-setup/e-signet-deployment.md) +* [OpenG2P Modules Deployment](deployment/openg2p-modules-deployment/README.md) + * [PBMS Deployment](deployment/openg2p-modules-deployment/pbms-deployment/README.md) + * [Post Install Configuration](deployment/openg2p-modules-deployment/pbms-deployment/post-install-instructions.md) + * [Social Registry Deployment](deployment/openg2p-modules-deployment/social-registry-deployment.md) + * [GCTB Deployment](deployment/openg2p-modules-deployment/gctb-deployment.md) + * [SPAR Deployment](deployment/openg2p-modules-deployment/spar-deployment/README.md) + * [SPAR Post Installation Configuration](deployment/openg2p-modules-deployment/spar-deployment/spar-post-installation-configuration.md) + * [Reporting](deployment/openg2p-modules-deployment/reporting.md) + ## 👨💻 DEVELOPER ZONE * [Getting Started](developer-zone/getting-started-1/README.md) @@ -188,22 +216,9 @@ * [Documentation Guidelines](guides/documentation-guides/documentation-guidelines.md) * [OpenG2P Module Doc Template](guides/documentation-guides/openg2p-module-doc-template.md) * [Deployment Guides](guides/deployment-guide/README.md) - * [Deployment 
Architecture](guides/deployment-guide/deployment-architecture.md) - * [Deployment on Kubernetes](guides/deployment-guide/deployment-on-kubernetes/README.md) - * [K8s Infrastructure Setup](guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/README.md) - * [K8s Cluster Requirements](guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/k8s-cluster-requirements.md) - * [K8s Cluster Setup](guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/cluster-setup.md) - * [Rancher Server Setup](guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/rancher-server-setup.md) - * [NFS Server Setup](guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/nfs-server-setup.md) - * [SSL Certificates using Letsencrypt](guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/ssl-certificates-using-letsencrypt.md) - * [PostgreSQL Server](guides/deployment-guide/deployment-on-kubernetes/postgresql-server.md) - * [PBMS Deployment](guides/deployment-guide/deployment-on-kubernetes/pbms-deployment/README.md) - * [Post Install Configuration](guides/deployment-guide/deployment-on-kubernetes/pbms-deployment/post-install-instructions.md) - * [Social Registry Deployment](guides/deployment-guide/deployment-on-kubernetes/social-registry-deployment.md) - * [GCTB Deployment](guides/deployment-guide/deployment-on-kubernetes/gctb-deployment.md) - * [SPAR Deployment](guides/deployment-guide/deployment-on-kubernetes/spar-deployment.md) * [Giving Access to Users](guides/deployment-guide/access-to-deployed-setup.md) * [Packaging OpenG2P Docker](guides/deployment-guide/packaging-openg2p-docker.md) + * [SSL Certificates using Letsencrypt](guides/deployment-guide/ssl-certificates-using-letsencrypt.md) ## BLOG diff --git a/guides/deployment-guide/deployment-architecture.md b/deployment/deployment-architecture.md similarity index 100% rename from guides/deployment-guide/deployment-architecture.md 
rename to deployment/deployment-architecture.md diff --git a/deployment/external-components-setup/README.md b/deployment/external-components-setup/README.md new file mode 100644 index 00000000..97005bd4 --- /dev/null +++ b/deployment/external-components-setup/README.md @@ -0,0 +1,16 @@ +# External Components Setup + +## Introduction + +This guide provides instructions to deploy external components on the Kubernetes (K8s) cluster upon which OpenG2P components reply (Refer to [Deployment Architecture](../deployment-architecture.md)). + +| Module/Component | Comments | +| ------------------------------------------------------------ | ---------------------------------------------------------------------------------------- | +| [PostgreSQL](postgresql-server-deployment.md) | Required for all components. A single server instance may be used housing all databases. | +| [Keycloak](keycloak-deployment.md) | Required for PBMS, Social Registry | +| [MinIO](minio-deployment.md) | Required for PBMS and GCTB only | +| [ODK Central](odk-central-deployment.md) | Required for Registration Toolkit | +| [Kafka](kafka-deployment.md) | Required for Monitoring & Reporting | +| [Logging & OpenSearch](logging-and-opensearch-deployment.md) | Required for Monitoring & Reporting | +| [MOSIP Key Manager](keymanager-deployment.md) | Required for PBMS, Social Registry | +| [e-Signet](e-signet-deployment.md) | Required for SPAR and optionally for PBMS | diff --git a/deployment/external-components-setup/e-signet-deployment.md b/deployment/external-components-setup/e-signet-deployment.md new file mode 100644 index 00000000..7626268c --- /dev/null +++ b/deployment/external-components-setup/e-signet-deployment.md @@ -0,0 +1,35 @@ +# e-Signet Deployment + +## Introduction + +This doc provides instructions on installing e-Signet on the OpenG2P cluster. + +This is only required for sandbox/pilot environments. Or when e-Signet is not present or is not provided by the ID Provider. 
If an e-Signet instance is already available, OpenG2P Modules can just connect to that instance. + +This doc only provides instructions to install e-Signet with Mock ID System (for integration with real ID system, refer to [e-Signet docs](https://docs.esignet.io)). + +## Prerequisites + +* The following utilities/tools must be present on the user's machine. + * `kubectl`, `istioctl`, `helm`, `jq`, `curl`, `wget`, `git`, `bash`, `envsubst`. +* [PostgreSQL](postgresql-server-deployment.md) +* [Keycloak](keycloak-deployment.md) for API Authentication +* [Keymanager](keymanager-deployment.md) + +## Installation + +* Clone the [https://github.com/openg2p/openg2p-deployment](https://github.com/openg2p/openg2p-deployment) repo and navigate to [kubernetes/esignet](https://github.com/OpenG2P/openg2p-deployment/tree/main/kubernetes/esignet) directory. +* Run: + + ```bash + SANDBOX_HOSTNAME="openg2p.sandbox.net" \ + ./install.sh + ``` + +## Post-installation + +After installation is successful, e-Signet can be accessed at https://esignet.openg2p.sandbox.net, depending on the hostname given above. + +To seed more data of beneficiaries into the mock ID system APIs, use the APIs available at https://esignet.openg2p.sandbox.net/v1/mock-identity-system/swagger-ui/index.html. + +Or edit and use this script [https://github.com/OpenG2P/openg2p-data/blob/develop/scripts/upload\_data\_to\_mock\_esignet.py](https://github.com/OpenG2P/openg2p-data/blob/develop/scripts/upload\_data\_to\_mock\_esignet.py) to upload data. TODO: elaborate. diff --git a/deployment/external-components-setup/kafka-deployment.md b/deployment/external-components-setup/kafka-deployment.md new file mode 100644 index 00000000..b619b053 --- /dev/null +++ b/deployment/external-components-setup/kafka-deployment.md @@ -0,0 +1,24 @@ +# Kafka Deployment + +## Introduction + +Skip this if the [realtime reporting framework](https://github.com/openg2p/openg2p-reporting) is not being used. 
+ +## Prerequisites + +* The following utilities/tools must be present on the user's machine. + * `kubectl`, `istioctl`, `helm`, `jq`, `curl`, `wget`, `git`, `bash`, `envsubst`. + +## Installation + +* Clone the [https://github.com/openg2p/openg2p-deployment](https://github.com/openg2p/openg2p-deployment) repo and navigate to [kubernetes/kafka](https://github.com/OpenG2P/openg2p-deployment/tree/main/kubernetes/kafka) directory. +* Run: + + ```bash + SANDBOX_HOSTNAME="openg2p.sandbox.net" \ + ./install.sh + ``` + +## Post-installation + +After installation is successful, Kafka UI can be accessed at https://kafka.openg2p.sandbox.net, depending on the hostname given above. diff --git a/deployment/external-components-setup/keycloak-deployment.md b/deployment/external-components-setup/keycloak-deployment.md new file mode 100644 index 00000000..e8b58f83 --- /dev/null +++ b/deployment/external-components-setup/keycloak-deployment.md @@ -0,0 +1,25 @@ +# Keycloak Deployment + +## Introduction + +Keycloak is used in OpenG2P to provide single sign-on to some of the apps. + +## Prerequisites + +* The following utilities/tools must be present on the user's machine. + * `kubectl`, `istioctl`, `helm`, `jq`, `curl`, `wget`, `git`, `bash`, `envsubst`. +* [PostgreSQL](postgresql-server-deployment.md) + +## Installation + +* Clone the [https://github.com/openg2p/openg2p-deployment](https://github.com/openg2p/openg2p-deployment) repo and navigate to [kubernetes/keycloak](https://github.com/OpenG2P/openg2p-deployment/tree/main/kubernetes/keycloak) directory. +* Run: + + ```bash + SANDBOX_HOSTNAME="openg2p.sandbox.net" \ + ./install.sh + ``` + +## Post-installation + +After installation is successful, Keycloak Admin console will be accessible at https://keycloak.openg2p.sandbox.net, depending on the hostname given above. 
diff --git a/deployment/external-components-setup/keymanager-deployment.md b/deployment/external-components-setup/keymanager-deployment.md new file mode 100644 index 00000000..3f4fccc4 --- /dev/null +++ b/deployment/external-components-setup/keymanager-deployment.md @@ -0,0 +1,27 @@ +# Keymanager Deployment + +## Introduction + +MOSIP's Keymanager component is used by some OpenG2P modules (like PBMS and social registry) to store keys and perform cryptography operations. + +## Prerequisites + +* The following utilities/tools must be present on the user's machine. + * `kubectl`, `istioctl`, `helm`, `jq`, `curl`, `wget`, `git`, `bash`, `envsubst`. +* [PostgreSQL](postgresql-server-deployment.md) +* [Keycloak](keycloak-deployment.md) for API Authentication +* HSM. By default, Softhsm will be installed, unless real HSM is available. + +## Installation + +* Clone the [https://github.com/openg2p/openg2p-deployment](https://github.com/openg2p/openg2p-deployment) repo and navigate to [kubernetes/keymanager](https://github.com/OpenG2P/openg2p-deployment/tree/main/kubernetes/keymanager) directory. +* Run: + + ```bash + SANDBOX_HOSTNAME="openg2p.sandbox.net" \ + ./install.sh + ``` + +## Post-installation + +After installation is successful, Keymanager APIs will be accessible at https://openg2p.sandbox.net/v1/keymanager, depending on the hostname given above. diff --git a/deployment/external-components-setup/logging-and-opensearch-deployment.md b/deployment/external-components-setup/logging-and-opensearch-deployment.md new file mode 100644 index 00000000..2ac5a0b9 --- /dev/null +++ b/deployment/external-components-setup/logging-and-opensearch-deployment.md @@ -0,0 +1,67 @@ +# Logging & OpenSearch Deployment + +## Introduction + +Logs from different components present on the cluster will be pulled into OpenSearch to display dashboards and compute reports. Fluentd is used to pull capture logs and put into OpenSearch. 
+ +## Prerequisites + +* The following utilities/tools must be present on the user's machine. + * `kubectl`, `istioctl`, `helm`, `jq`, `curl`, `wget`, `git`, `bash`, `envsubst`. +* [Keycloak](keycloak-deployment.md) for Authentication and Sign-in to UI + +## Installation + +Clone the [https://github.com/openg2p/openg2p-deployment](https://github.com/openg2p/openg2p-deployment) repo and navigate to [kubernetes/logging](https://github.com/OpenG2P/openg2p-deployment/tree/main/kubernetes/logging) directory. + +### Install OpenSearch (and related components) + +* Run this to install OpenSearch and related components. + + ```bash + SANDBOX_HOSTNAME="openg2p.sandbox.net" \ + ./install.sh + ``` +* After installation is successful, OpenSearch Dashboards will be accessible at https://opensearch.openg2p.sandbox.net, depending on the hostname given above. + +### Install Rancher Logging (Fluentd) + +1. On Rancher UI, navigate to Apps (or Apps & Marketplace) -> Charts +2. Search and install Logging from the list, with default values. + +### Add _Index State Policy_ on OpenSearch + +* Run this to add ISM Policy (This is responsible for automatically deleting logstash indices after 3 days. Configure the minimum age to delete indices, in the same script below.) + + ``` + ./opensearch-ism-script.sh + ``` + +### Configure Rancher FluentD + +* Run this to create _ClusterOutput_ (This is responsible for redirecting all logs to OpenSearch.) + + ``` + kubectl apply -f clusterflow-opensearch.yaml + ``` +* Run this to create a _ClusterFlow_ (This is responsible for filtering OpenG2P service logs, from the logs of all pods.) + + ``` + kubectl apply -f clusterflow-all.yaml + ``` + +### Filters + +Note the filters applied in [clusterflow-all.yaml](https://github.com/OpenG2P/openg2p-deployment/blob/main/kubernetes/logging/clusterflow-all.yaml). You may update the same for your install if required, and rerun the apply command. 
+ +### Dashboards + +* TODO + +### TraceId + +* TODO + +### Troubleshooting + +* TODO diff --git a/deployment/external-components-setup/minio-deployment.md b/deployment/external-components-setup/minio-deployment.md new file mode 100644 index 00000000..da34b5e8 --- /dev/null +++ b/deployment/external-components-setup/minio-deployment.md @@ -0,0 +1,30 @@ +# Minio Deployment + +## Introduction + +MinIO is used by some components of OpenG2P to store documents. + +## Prerequisites + +* The following utilities/tools must be present on the user's machine. + * `kubectl`, `istioctl`, `helm`, `jq`, `curl`, `wget`, `git`, `bash`, `envsubst`. +* [Keycloak](keycloak-deployment.md) for Authentication and Sign-in to UI + +## Installation + +* Clone the [https://github.com/openg2p/openg2p-deployment](https://github.com/openg2p/openg2p-deployment) repo and navigate to [kubernetes/minio](https://github.com/OpenG2P/openg2p-deployment/tree/main/kubernetes/minio) directory. +* Run: + + ```bash + SANDBOX_HOSTNAME="openg2p.sandbox.net" \ + ./install.sh + ``` + +## Post-installation + +After installation is successful, MinIO console will be accessible at https://minio.openg2p.sandbox.net, depending on the hostname given above. + +Once OpenG2P PBMS is installed, do the following: + +* Navigate to OpenG2P Documents (From OpenG2P Menu) -> Document Store. +* Configure URL and password for this backend service (Like `http://minio.minio:9000`). Password and account-id/username can be obtained from the secrets in minio namespace. diff --git a/deployment/external-components-setup/odk-central-deployment.md b/deployment/external-components-setup/odk-central-deployment.md new file mode 100644 index 00000000..2a6d260f --- /dev/null +++ b/deployment/external-components-setup/odk-central-deployment.md @@ -0,0 +1,42 @@ +# ODK Central Deployment + +## Introduction + +ODK is used mainly by the Registration Toolkit to collect data offline and online. 
+ +## Prerequisites + +* The following utilities/tools must be present on the user's machine. + * `kubectl`, `istioctl`, `helm`, `jq`, `curl`, `wget`, `git`, `bash`, `envsubst`. +* [PostgreSQL](postgresql-server-deployment.md) + +## Installation + +* Clone the [https://github.com/openg2p/openg2p-deployment](https://github.com/openg2p/openg2p-deployment) repo and navigate to [kubernetes/odk-central](https://github.com/OpenG2P/openg2p-deployment/tree/main/kubernetes/odk-central) directory. +* Run: + + ```bash + SANDBOX_HOSTNAME="openg2p.sandbox.net" \ + ./install.sh + ``` +* Note: The above helm chart uses the following docker images built from [https://github.com/getodk/central/tree/v2023.1.0](https://github.com/getodk/central/tree/v2023.1.0), since ODK Central doesn't provide pre-built docker images for these. + + ``` + openg2p/odk-central-backend:v2023.1.0 + openg2p/odk-central-frontend:v2023.1.0 + openg2p/odk-central-enketo:v2023.1.0 + ``` + +## Post-installation + +After installation is successful, ODK Central will be accessible at https://odk.openg2p.sandbox.net, depending on the hostname given above. + +To create the first user, do this (Subsequent users can be created through UI.): + +* Exec into the service pod, and create a user (and promote if required). + + ```bash + kubectl exec -it -- odk-cmd -u user-create + kubectl exec -it -- odk-cmd -u user-promote + ``` + diff --git a/deployment/external-components-setup/postgresql-server-deployment.md b/deployment/external-components-setup/postgresql-server-deployment.md new file mode 100644 index 00000000..d7dff345 --- /dev/null +++ b/deployment/external-components-setup/postgresql-server-deployment.md @@ -0,0 +1,25 @@ +# PostgreSQL Server Deployment + +## Introduction + +This guide provides instructions to install PostgreSQL Server on the Kubernetes cluster. However, if you already have PostgresSQL server installed, or are using Cloud hosted Postgres, then you may skip the server installation. 
The instructions to initialize OpenG2P component databases are provided as part of the component installation instructions. + +## Databases + +Module/component-wise listing of databases is given below + +
Module/ComponentDatabase Name
PBMSopeng2pdb
Keycloakkeycloakdb
ODKodkdb
SPARspardb
G2P Cash Transfer Bridgegctbdb
MOSIP Key Managermosip_keymgr
+ +## Prerequisites + +* The following utilities/tools must be present on the user's machine. + * `kubectl`, `istioctl`, `helm`, `jq`, `curl`, `wget`, `git`, `bash`, `envsubst`. + +## Installation + +* Clone the [https://github.com/openg2p/openg2p-deployment](https://github.com/openg2p/openg2p-deployment) repo and navigate to [kubernetes/postgresql](https://github.com/OpenG2P/openg2p-deployment/tree/main/kubernetes/postgresql) directory. +* Run: + + ```bash + ./install.sh + ``` diff --git a/deployment/infrastructure-setup/README.md b/deployment/infrastructure-setup/README.md new file mode 100644 index 00000000..52bb7571 --- /dev/null +++ b/deployment/infrastructure-setup/README.md @@ -0,0 +1,9 @@ +# Infrastructure Setup + +| Infra | Comments | +| -------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | +| [Wireguard](wireguard-server-setup.md) | Only one per all the environments | +| [Rancher](rancher.md) | Only one per all the environments | +| [NFS Server](nfs-server.md) | One for each environment like sandbox, pilot, staging, production | +| [OpenG2P K8s Cluster](k8s-cluster.md) | One for each environment | +| [Loadbalancer](loadbalancer-setup.md) | One for each environment. For non cloud-native Kubernetes clusters either create a VM with Nginx or create a cloud LB. 
| diff --git a/guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/k8s-cluster-requirements.md b/deployment/infrastructure-setup/k8s-cluster-requirements.md similarity index 100% rename from guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/k8s-cluster-requirements.md rename to deployment/infrastructure-setup/k8s-cluster-requirements.md diff --git a/deployment/infrastructure-setup/k8s-cluster.md b/deployment/infrastructure-setup/k8s-cluster.md new file mode 100644 index 00000000..3ad93640 --- /dev/null +++ b/deployment/infrastructure-setup/k8s-cluster.md @@ -0,0 +1,165 @@ +# OpenG2P K8s Cluster Setup + +## Introduction + +OpenG2P modules and components are recommended to be run on Kubernetes (K8s), because of ease-of-use, management, and security features that K8s provides. + +This document provides instructions to set up a K8s Cluster on which OpenG2P Modules and other components can be installed. + +## Prerequisites + +* Hardware that meets [K8s Cluster Requirements](k8s-cluster-requirements.md). +* The following tools are installed on all the nodes and the client machine. + * `wget` , `curl` , `kubectl` , `istioctl` , `helm` , `jq` + +## Firewall Requirements + +* Set up firewall rules on each node according to the following table. The exact method to set up the firewall rules will vary from cloud to cloud and on-prem. (For example on AWS, EC2 security groups can be used. For on-prem cluster, ufw can be used. Etc.) + +
ProtocolPortShould be accessible by onlyDescription
TCP22SSH
TCP80HTTP
TCP443HTTPS
TCP5432Postgres port
TCP9345RKE2 agent nodesKubernetes API
TCP6443RKE2 agent nodesKubernetes API
UDP8472RKE2 server and agent nodesRequired only for Flannel VXLAN
TCP10250RKE2 server and agent nodeskubelet
TCP2379RKE2 server nodesetcd client port
TCP2380RKE2 server nodesetcd peer port
TCP30000:32767RKE2 server and agent nodesNodePort port range
+ +* For example; this is how you can use `ufw` to set up the firewall on each cluster node. + * SSH into each node, and change to superuser. + * Run the following command for each rule in the above table. + + ``` + ufw allow from to any port proto + ``` + * Example: + + ``` + ufw allow from any to any port 22 proto tcp + ufw allow from 10.3.4.0/24 to any port 9345 proto tcp + ``` + * Enable ufw: + + ``` + ufw enable + ufw default deny incoming + ``` +* Additional Reference: [RKE2 Networking Requirements](https://docs.rke2.io/install/requirements#networking) + +## Installation + +### On AWS Cloud + +If you are using AWS only to get EC2 nodes, and you want to set up the K8s cluster manually, move to the [On-premises Setup](k8s-cluster.md#on-premises-cluster-on-prem). + +TODO + +### On-premises Cluster (on-prem) + +#### K8s Cluster Setup + +The following section uses [RKE2](https://docs.rke2.io) to set up the K8s cluster. + +* Decide the number of K8s Control plane nodes(server nodes) and worker nodes(agent nodes). + * Choose an odd number of control-plane nodes. For example, for a 3-node k8s cluster, choose 1 control-plane node and 2 worker nodes. For a 7-node k8s cluster, choose 3 control-plane nodes and 4 worker nodes. +* The following setup has to be done on each node on the cluster: + * SSH into the node. + * Create the rke2 config directory: + + ``` + mkdir -p /etc/rancher/rke2 + ``` + * Create a `config.yaml` file in the above directory, using one of the following config file templates: + * For the first control-plane node, use [rke2-server.conf.primary.template](https://github.com/OpenG2P/openg2p-deployment/blob/main/kubernetes/rke2/rke2-server.conf.primary.template). + * For subsequent control-plane nodes, use [rke2-server.conf.subsequent.template](https://github.com/OpenG2P/openg2p-deployment/blob/main/kubernetes/rke2/rke2-server.conf.primary.template). 
+ * For worker nodes, use [rke2-agent.conf.template](https://github.com/OpenG2P/openg2p-deployment/blob/main/kubernetes/rke2/rke2-agent.conf.template). + * Edit the above `config.yaml` file with the appropriate names, IPs, and tokens. + * Run this to download rke2. + + ``` + curl -sfL https://get.rke2.io | sh - + ``` + * Run this to start rke2: + * On the control-plane node, run: + + ``` + systemctl enable rke2-server + systemctl start rke2-server + ``` + * On the worker node, run: + + ``` + systemctl enable rke2-agent + systemctl start rke2-agent + ``` +* To export KUBECONFIG, run (only on control-plane nodes): + * ``` + echo -e 'export PATH="$PATH:/var/lib/rancher/rke2/bin"\nexport KUBECONFIG="/etc/rancher/rke2/rke2.yaml"' >> ~/.bashrc + source ~/.bashrc + ``` + * ``` + kubectl get nodes + ``` +* Additional Reference: [RKE2 High Availability Installation](https://docs.rke2.io/install/ha). + +#### Cluster import into Rancher. + +This section assumes a Rancher server has already been set up and operational. [Rancher Server Setup](rancher.md) in case not already done. + +* Navigate to Cluster Management section in Rancher. +* Click on `Import Existing` cluster. And follow the steps to import the new OpenG2P cluster. +* After importing, download kubeconfig for the new cluster from rancher (top right on the main page), to access the cluster through kubectl from user's machine (client), without SSH. + +#### NFS provisioner setup + +This section assumes an NFS Server has already been set up and operational for providing storage volumes to this K8s cluster, with requirements as given in [NFS Server Setup](nfs-server.md)This section assumes an NFS server has already been set up and operational, which meets the requirements, as given in [NFS Server Setup](nfs-server.md). This NFS server is used to provide persistent storage volumes to this K8s cluster. + +#### Longhorn setup + +Recommended to use NFS provisioner. Ignore this section if using NFS. 
+ +Use this to install Longhorn. [Longhorn Install as a Rancher App](https://longhorn.io/docs/1.3.2/deploy/install/install-with-rancher/) + +#### Istio setup + +* The following setup can be done from the client machine. This install Istio Operator, Istio Service Mesh, Istio Ingressgateway components. +* From [kubernetes/istio](https://github.com/OpenG2P/openg2p-deployment/tree/main/kubernetes/istio) directory, configure the istio-operator.yaml, and run; + + ``` + istioctl operator init + kubectl apply -f istio-operator.yaml + ``` + + * If an external Loadbalancer is being used, then use the `istio-operator-external-lb.yaml` file. + + ``` + kubectl apply -f istio-operator-external-lb.yaml + ``` + * Configure the operator.yaml with any further configuration. +* Gather Wildcard TLS certificate and key and run; + + ``` + kubectl create secret tls tls-openg2p-ingress -n istio-system \ + --cert= \ + --key= + ``` +* Create istio gateway for all hosts using this command: + + ``` + kubectl apply -f istio-gateway.yaml + ``` + + * If using external loadbalancer/external TLS termination, use the `istio-gateway-no-tls.yaml` file. + + ``` + kubectl apply -f istio-gateway-no-tls.yaml + ``` + +#### Adding new nodes + +* From [kubernetes/rke2](https://github.com/OpenG2P/openg2p-deployment/tree/1.1.0/kubernetes/rke2) directory, take either the `rke2-server.conf.subsequent.template` or `rke2-agent.conf.template` based on whether the new node is control plane node or Worker node. Copy this file to `/etc/rancher/rke2/config.yaml` in the new node. +* Configure the the config.yaml with relevant values. +* Run this to download rke2. 
+ + ``` + curl -sfL https://get.rke2.io | sh - + ``` +* Run this to start rke2 node: + + ``` + systemctl enable rke2-server + systemctl start rke2-server + ``` diff --git a/deployment/infrastructure-setup/loadbalancer-setup.md b/deployment/infrastructure-setup/loadbalancer-setup.md new file mode 100644 index 00000000..1f047d19 --- /dev/null +++ b/deployment/infrastructure-setup/loadbalancer-setup.md @@ -0,0 +1,10 @@ +# Loadbalancer Setup + +## Introduction + +## Installation + +### On AWS + +### On-prem + diff --git a/deployment/infrastructure-setup/nfs-server.md b/deployment/infrastructure-setup/nfs-server.md new file mode 100644 index 00000000..931b5ab3 --- /dev/null +++ b/deployment/infrastructure-setup/nfs-server.md @@ -0,0 +1,21 @@ +# NFS Server Setup + +## Introduction + +NFS-based storage is recommended for providing persistent storage volumes to Kubernetes Clusters and backing up data of sandbox/pilot environments. + +## Prerequisites + +* One Virtual machine running on the same network as the rest of the nodes, and is accessible by them. For recommended configuration of the VM refer to [Cluster Requirements](k8s-cluster-requirements.md). +* Use this [Storage size estimator](k8s-cluster-requirements.md#storage-requirements-for-pilot-environments) to decide storage requirements. + +## Installation + +* Download/copy this install script from [https://github.com/mosip/k8s-infra/blob/main/nfs/install-nfs-server.sh](https://github.com/mosip/k8s-infra/blob/main/nfs/install-nfs-server.sh) into the NFS Server VM. +* Edit the script to change the local path for NFS Storage, under the variable `nfsStorage`. +* Run this (with root privileges): + + ``` + ./install-nfs-server.sh + ``` +* Make sure to edit the firewall rules of this VM to enable incoming traffic to the NFS server port `tcp 2049` and disable incoming traffic on all other ports (excluding SSH). 
diff --git a/guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/rancher-server-setup.md b/deployment/infrastructure-setup/rancher.md similarity index 57% rename from guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/rancher-server-setup.md rename to deployment/infrastructure-setup/rancher.md index d5c39051..7ea218f5 100644 --- a/guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/rancher-server-setup.md +++ b/deployment/infrastructure-setup/rancher.md @@ -1,20 +1,48 @@ -# Rancher Server Setup +# Rancher Setup ## Introduction Rancher is used to manage multiple clusters. Being a critical component of cluster administration it is highly recommended that Rancher itself runs on a Kubernetes cluster with sufficient replication for high availability and avoiding a single point of failure. -## Kubernetes cluster setup +## Prerequisites -* Set up a new RKE2 cluster. Refer to the [K8s Cluster Setup](cluster-setup.md) guide. - * Do not remove the stock ingress controller in the server config. - * No need to install Istio. +* One Virtual machine running on the same network as the rest of the nodes, and has access to them. For recommended configuration of the VM refer to [Cluster Requirements](k8s-cluster-requirements.md). + +## Installation using Kubernetes + +### Kubernetes cluster setup + +* SSH into the node. +* Create the rke2 config directory: + + ``` + mkdir -p /etc/rancher/rke2 + ``` +* Create a `config.yaml` file in the above directory, using this config file template; [rke2-server.conf.primary.template](https://github.com/OpenG2P/openg2p-deployment/blob/main/kubernetes/rke2/rke2-server.conf.primary.template). +* Edit the above config.yaml file with the appropriate names and IPs. IMPORTANT: Remove the section for disabling ingress-nginx in this config file. 
+ + ``` + curl -sfL https://get.rke2.io | sh - + ``` +* Start rke2 using this + + ``` + systemctl enable rke2-server + ``` +* Download and install `kubectl` and `helm`. And execute this: + * ``` + echo -e 'export PATH="$PATH:/var/lib/rancher/rke2/bin"\nexport KUBECONFIG="/etc/rancher/rke2/rke2.yaml"' >> ~/.bashrc + source ~/.bashrc + ``` + * ``` + kubectl get nodes + ``` {% hint style="info" %} It is recommended to set up a double-node cluster for high availability. However, for the non-production environments, you may create a single node cluster to conserve resources {% endhint %} -## Rancher installation +### Rancher installation * To install Rancher use this (hostname to be edited in the below command): @@ -36,11 +64,11 @@ It is recommended to set up a double-node cluster for high availability. However --key=path/to/key/file ``` -## Longhorn Setup +### Longhorn Setup * Install[ Longhorn as a Rancher App](https://longhorn.io/docs/1.3.2/deploy/install/install-with-rancher/). -## Keycloak setup +### Keycloak setup * From [kubernetes/rancher](https://github.com/OpenG2P/openg2p-deployment/tree/1.1.0/kubernetes/rancher) folder, run the following to install Keycloak (hostname to be edited in the below command). * ```bash @@ -55,6 +83,10 @@ It is recommended to set up a double-node cluster for high availability. However -f keycloak-values.yaml ``` -## Integrate Rancher and Keycloak +### Integrate Rancher and Keycloak Integrate Rancher and Keycloak using [Rancher Auth - Keycloak (SAML)](https://docs.ranchermanager.rancher.io/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-keycloak-saml) guide. + +## Installation using Docker + +Refer to [Installing Rancher using Docker](https://ranchermanager.docs.rancher.com/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker) guide. 
diff --git a/deployment/infrastructure-setup/wireguard-server-setup.md b/deployment/infrastructure-setup/wireguard-server-setup.md new file mode 100644 index 00000000..a1a93550 --- /dev/null +++ b/deployment/infrastructure-setup/wireguard-server-setup.md @@ -0,0 +1,27 @@ +# Wireguard Server Setup + +## Introduction + +Wireguard is the recommended VPN to get private channel access to your OpenG2P clusters and resources. Wireguard is a fast secure & open-source VPN, with P2P traffic encryption. + +The rest of the document talks about setting up a Wireguard bastion host (Wireguard server) to enable a private channel to your network. + +## Prerequisites + +* One Virtual machine running on the same network as the rest of the nodes, and has access to them. For recommended configuration of the VM refer to [Cluster Requirements](k8s-cluster-requirements.md). +* Docker installed on the VM. + +## Installation + +* Clone the [openg2p-deployment](https://github.com/OpenG2P/openg2p-deployment) repo and navigate to the [kubernetes/wireguard](https://github.com/OpenG2P/openg2p-deployment/tree/main/kubernetes/wireguard) directory. +* Run this with root privileges: + + ```bash + ./wg.sh + ``` +* For Example: + + ```bash + ./wg.sh wireguard 10.15.0.0/16 51820 200 172.16.0.0/24 + ``` +* Make sure to edit the firewall rules of this VM to enable incoming traffic on the above UDP port (Default 51820) and disable incoming traffic on all other ports (excluding SSH). diff --git a/deployment/openg2p-modules-deployment/README.md b/deployment/openg2p-modules-deployment/README.md new file mode 100644 index 00000000..a47508c7 --- /dev/null +++ b/deployment/openg2p-modules-deployment/README.md @@ -0,0 +1,13 @@ +# OpenG2P Modules Deployment + +## Introduction + +This guide provides instructions to deploy OpenG2P components on the Kubernetes (K8s) cluster (Refer to [Deployment Architecture](../deployment-architecture.md)). 
The following components are installed: + +| Module/Component | Comments | +| -------------------------- | --------------------------------------------------------------------- | +| [PBMS](pbms-deployment/) | Helm chart that installs Odoo, SMTP server | +| [SPAR](spar-deployment/) | Helm chart that installs ID Mapper, Self Service Portal, SPAR Service | +| [GCTB](gctb-deployment.md) | Helm chart | +| [Reporting](reporting.md) | Helm charts | + diff --git a/deployment/openg2p-modules-deployment/gctb-deployment.md b/deployment/openg2p-modules-deployment/gctb-deployment.md new file mode 100644 index 00000000..bd3f3da2 --- /dev/null +++ b/deployment/openg2p-modules-deployment/gctb-deployment.md @@ -0,0 +1,37 @@ +# GCTB Deployment + +## Introduction + +Refer to [GCTB Concepts](../../platform/modules/g2p-cash-transfer-bridge/) to understand more. + +## Prerequisites + +* The following utilities/tools must be present on the user's machine. + * `kubectl`, `istioctl`, `helm`, `jq`, `curl`, `wget`, `git`, `bash`, `envsubst`. +* [PostgreSQL](./#postgresql) + +## Installation + +* Clone the [https://github.com/openg2p/openg2p-deployment](https://github.com/openg2p/openg2p-deployment) repo and navigate to [kubernetes/g2p-cash-transfer-bridge](https://github.com/OpenG2P/openg2p-deployment/tree/main/kubernetes/g2p-cash-transfer-bridge) directory. +* Run: (This installs the reference package dockers) + + ```bash + SANDBOX_HOSTNAME=openg2p.sandbox.net \ + ./install.sh + ``` +* Clone the [https://github.com/OpenG2P/g2p-cash-transfer-bridge](https://github.com/OpenG2P/g2p-cash-transfer-bridge/) repo and navigate to [gctb-mojaloop-sdk-payment-backend](https://github.com/OpenG2P/g2p-cash-transfer-bridge/tree/develop/gctb-mojaloop-sdk-payment-backend). 
And run: + + ```bash + kubectl -n gctb create cm gctb-mojaloop-scripts --from-file=payment_backend.py + kubectl apply -f k8s-mojaloop-payment-backend.yaml + ``` +* Navigate to [gctb-simple-mpesa-payment-backend](https://github.com/OpenG2P/g2p-cash-transfer-bridge/tree/develop/gctb-simple-mpesa-payment-backend). + + ```bash + kubectl -n gctb create cm gctb-simple-mpesa-scripts --from-file=payment_backend.py + kubectl apply -f k8s-simple-mpesa-payment-backend.yaml + ``` + +## Post-installation + +TODO diff --git a/deployment/openg2p-modules-deployment/pbms-deployment/README.md b/deployment/openg2p-modules-deployment/pbms-deployment/README.md new file mode 100644 index 00000000..e5e94959 --- /dev/null +++ b/deployment/openg2p-modules-deployment/pbms-deployment/README.md @@ -0,0 +1,39 @@ +# PBMS Deployment + +## Introduction + +OpenG2P PBMS is based on Odoo. + +If you want to customize the Odoo addons in your OpenG2P PBMS, create a custom-packaged PBMS docker, using [Packaging Instructions](../../../guides/deployment-guide/packaging-openg2p-docker.md). \[Optional] + +## Prerequisites + +* The following utilities/tools must be present on the user's machine. + * `kubectl`, `istioctl`, `helm`, `jq`, `curl`, `wget`, `git`, `bash`, `envsubst`. +* [PostgreSQL](../#postgresql) +* [MinIO](../#minio) + +## Installation + +* Clone the [https://github.com/openg2p/openg2p-deployment](https://github.com/openg2p/openg2p-deployment) repo and navigate to [kubernetes/openg2p](https://github.com/OpenG2P/openg2p-deployment/tree/main/kubernetes/openg2p) directory. 
+* Run: (This installs the reference package dockers) + + ```bash + SANDBOX_HOSTNAME=openg2p.sandbox.net \ + ./install.sh + ``` + + * If you already have a custom-packaged docker image or tag, use: + + ```bash + SANDBOX_HOSTNAME=openg2p.sandbox.net \ + OPENG2P_ODOO_IMAGE_REPO= \ + OPENG2P_ODOO_IMAGE_TAG= \ + ./install.sh + ``` + +## Post-installation + +After installation is successful, PBMS will be accessible directly at https://openg2p.sandbox.net, depending on the hostname given above. + +Refer to [Post Install Configuration](post-install-instructions.md). diff --git a/guides/deployment-guide/deployment-on-kubernetes/pbms-deployment/post-install-instructions.md b/deployment/openg2p-modules-deployment/pbms-deployment/post-install-instructions.md similarity index 100% rename from guides/deployment-guide/deployment-on-kubernetes/pbms-deployment/post-install-instructions.md rename to deployment/openg2p-modules-deployment/pbms-deployment/post-install-instructions.md diff --git a/deployment/openg2p-modules-deployment/reporting.md b/deployment/openg2p-modules-deployment/reporting.md new file mode 100644 index 00000000..596921b7 --- /dev/null +++ b/deployment/openg2p-modules-deployment/reporting.md @@ -0,0 +1,57 @@ +# Reporting + +## Introduction + +There are two models of generating Reporting and visualizing dashboards in OpenG2P currently. One is through [Apache Superset](https://superset.apache.org/) (which performs direct SQL queries on the Database to generate reports). Second is through [reporting framework](https://github.com/openg2p/openg2p-reporting), extended from MOSIP (which replicates all the data into OpenSearch in real-time). Weigh the pros and cons of both approaches here, before choosing one model. 
+ +TODO + +## Installation using Superset + +WIP + +### Prerequisites + +* [PostgreSQL](../external-components-setup/postgresql-server-deployment.md) +* [Keycloak](../external-components-setup/keycloak-deployment.md) for Authentication and Sign-in to OpenSearch Dashboards + +### Installation + +* Clone the [https://github.com/openg2p/openg2p-deployment](https://github.com/openg2p/openg2p-deployment) repo and navigate to [kubernetes/superset](https://github.com/OpenG2P/openg2p-deployment/tree/main/kubernetes/superset) directory. +* Run: + + ```bash + SANDBOX_HOSTNAME=openg2p.sandbox.net \ + ./install.sh + ``` + +### Post-installation + +After installation is successful, Superset can be accessed at https://superset.openg2p.sandbox.net, depending on the hostname given above. + +Follow instructions given here to install sample [dashboards](https://github.com/OpenG2P/openg2p-deployment/tree/main/kubernetes/superset/dashboards). + +## Installation using Reporting Framework + +WIP + +### Prerequisites + +* [PostgreSQL](../external-components-setup/postgresql-server-deployment.md) +* [Kafka](../external-components-setup/kafka-deployment.md) +* [Keycloak](../external-components-setup/keycloak-deployment.md) for Authentication and Sign-in to OpenSearch Dashboards +* [OpenSearch](../external-components-setup/logging-and-opensearch-deployment.md) + +### Installation + +* Clone the [https://github.com/openg2p/openg2p-reporting](https://github.com/openg2p/openg2p-reporting) repository, and navigate to [scripts](https://github.com/openg2p/openg2p-reporting/tree/develop/scripts) folder. +* Run: + + ```bash + SANDBOX_HOSTNAME=openg2p.sandbox.net \ + ./install.sh + ``` + +### Post-installation + +* Import Sample Dashboards from [dashboards](https://github.com/openg2p/openg2p-reporting/tree/develop/dashboards) folder into OpenSearch Dashboards through UI. 
diff --git a/guides/deployment-guide/deployment-on-kubernetes/social-registry-deployment.md b/deployment/openg2p-modules-deployment/social-registry-deployment.md similarity index 100% rename from guides/deployment-guide/deployment-on-kubernetes/social-registry-deployment.md rename to deployment/openg2p-modules-deployment/social-registry-deployment.md diff --git a/deployment/openg2p-modules-deployment/spar-deployment/README.md b/deployment/openg2p-modules-deployment/spar-deployment/README.md new file mode 100644 index 00000000..7a27f33d --- /dev/null +++ b/deployment/openg2p-modules-deployment/spar-deployment/README.md @@ -0,0 +1,29 @@ +# SPAR Deployment + +## Introduction + +SPAR deployment mainly comprises 3 microservices: SPAR Service (which serves the self-service APIs), SPAR G2P Connect ID Mapper (which SPAR Service will connect to), and Self Service Portal UI (which serves the JS content for the frontend). The following script will install all three of them. + +## Prerequisites + +* The following utilities/tools must be present on the user's machine. + * `kubectl`, `istioctl`, `helm`, `jq`, `curl`, `wget`, `git`, `bash`, `envsubst`. +* [PostgreSQL](../#postgresql) +* SPAR Self Service Portal needs an e-Signet instance to allow login through national ID. To install eSignet on the OpenG2P K8s cluster with mock ID system, use the [e-Signet guide](../../external-components-setup/e-signet-deployment.md). + +## Installation + +* Clone the [https://github.com/openg2p/openg2p-deployment](https://github.com/openg2p/openg2p-deployment) repo and navigate to [kubernetes/social-payments-account-registry](https://github.com/OpenG2P/openg2p-deployment/tree/main/kubernetes/social-payments-account-registry) directory. +* Configure the values.yaml in this folder according to the components needed. Go over the comments to check what can be added/edited/removed. 
+* Run: + + ```bash + SANDBOX_HOSTNAME=openg2p.sandbox.net \ + ./install.sh + ``` + +## Post-installation + +After installation, SPAR Self Service portal will be accessible at https://spar.openg2p.sandbox.net, SPAR Service APIs will be accessible at https://spar.openg2p.sandbox.net/spar/v1, and SPAR ID Mapper APIs will be accessible at https://spar.openg2p.sandbox.net/mapper/v1, depending on the hostname given above. + +Follow [SPAR Post Installation](spar-post-installation-configuration.md) Guide to finish setup. diff --git a/deployment/openg2p-modules-deployment/spar-deployment/spar-post-installation-configuration.md b/deployment/openg2p-modules-deployment/spar-deployment/spar-post-installation-configuration.md new file mode 100644 index 00000000..65674f00 --- /dev/null +++ b/deployment/openg2p-modules-deployment/spar-deployment/spar-post-installation-configuration.md @@ -0,0 +1,16 @@ +# SPAR Post Installation Configuration + +## Post-installation + + + +Onboard SPAR on eSignet: + +* Create OIDC Client for SPAR in e-Signet. Follow the method suggested by the ID Provider. + * If using mock e-Signet, use this API to create OIDC client [https://esignet.dev.openg2p.net/v1/esignet/swagger-ui/index.html#/client-management-controller/createOAuthClient](https://esignet.dev.openg2p.net/v1/esignet/swagger-ui/index.html#/client-management-controller/createOAuthClient). +* During OIDC client creation, you will be asked for (or given) a client ID and private key JWK as client secret. +* Edit the SPAR DB, `login_provider` table and modify the `authorization_parameters` row of the first entry, with: + * appropriate URLs for `authorize_endpoint` , `token_endpoint` , `validate_endpoint`, `jwks_endpoint`, and `redirect_uri` fields. + * above client ID under the `client_id` field. + * and above private key jwk under the `client_assertion_jwk` field. +* Seed/edit metadata of banks, wallets, branches, etc for the SPAR self-service portal in database. TODO: Elaborate. 
diff --git a/guides/deployment-guide/README.md b/guides/deployment-guide/README.md index f5ca38a0..dd19bde2 100644 --- a/guides/deployment-guide/README.md +++ b/guides/deployment-guide/README.md @@ -2,6 +2,6 @@ ## Introduction -To deploy OpenG2P for sandbox, staging and production environments refer to the [Deployment on Kubernetes](deployment-on-kubernetes/) guide. +To deploy OpenG2P for sandbox, staging and production environments refer to the [Deployment on Kubernetes](../../deployment/openg2p-modules-deployment/) guide. To install OpenG2P on your work machine for development refer to the [Getting Started](../../developer-zone/getting-started-1/) guide in the Developer Zone. diff --git a/guides/deployment-guide/deployment-on-kubernetes/README.md b/guides/deployment-guide/deployment-on-kubernetes/README.md deleted file mode 100644 index ad460da3..00000000 --- a/guides/deployment-guide/deployment-on-kubernetes/README.md +++ /dev/null @@ -1,173 +0,0 @@ -# Deployment on Kubernetes - -## Introduction - -The guide here provides instructions to deploy OpenG2P components on Kubernetes (K8s) cluster (Refer to [Deployment Architecture](../deployment-architecture.md)). The following components are installed: - -| Module/Component | Comments | -| -------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -|

Kubernetes cluster

Rancher

NFS server

| Required for all components as this is the common infrastructure used by all. | -| PostgreSQL | Required for all components. A single server instance may be used housing all databases. | -| Keycloak | Required for PBMS, Social Registry | -| MinIO | Required for PBMS and GCTB only | -| ODK Central | Required for Registration Toolkit | -| Kafka | Required for Monitoring & Reporting | -| Logging & OpenSearch | Required for Monitoring & Reporting | -| MOSIP Key Manager | Required for PBMS, Social Registry | -| e-Signet | Required for SPAR and optionally for PBMS | -| OpenG2P PBMS | Helm chart that installs Odoo, SMTP server | -| OpenG2P SPAR | Helm chart that installs ID Mapper, Self Service Portal, SPAR Service | -| OpenG2P GCTB | Helm chart | -| Reporting | Helm charts | - -## Prerequisites - -* K8s infrastructure is set up as given [here](k8s-infrastructure-setup/). -* The following utilities/tools must be present on the user's machine. - * `kubectl`, `istioctl`, `helm`, `jq`, `curl`, `wget`, `git`, `bash`, `envsubst`. - -## Installation - -Clone the [https://github.com/openg2p/openg2p-deployment](https://github.com/OpenG2P/openg2p-deployment/tree/1.1.0) repository, and continue the installation of each of the following components from the [kubernetes](https://github.com/OpenG2P/openg2p-deployment/tree/1.1.0/kubernetes) directory. - -Choose and install the components needed for your cluster from the following. - -### Kubernetes Infra Setup - -TODO - -### PostgreSQL - -* Navigate to [kubernetes/postgresql ](https://github.com/OpenG2P/openg2p-deployment/tree/1.1.0/kubernetes/postgresql)directory. -* Run: - - ```bash - ./install.sh - ``` - -### Keycloak - -* Prerequisites: - * [PostgreSQL](./#postgresql) \[REQUIRED]. -* Navigate to [kubernetes/keycloak](https://github.com/OpenG2P/openg2p-deployment/tree/1.1.0/kubernetes/keycloak) directory. 
-* Run: - - ```bash - SANDBOX_HOSTNAME=openg2p.sandbox.net \ - ./install.sh - ``` -* After installation is successful, Keycloak Admin console will be accessible at https://keycloak.openg2p.sandbox.net, depending on the hostname given above. - -### MinIO - -* Navigate to [kubernetes/minio](https://github.com/OpenG2P/openg2p-deployment/tree/1.1.0/kubernetes/minio) directory. -* Run: - - ```bash - SANDBOX_HOSTNAME=openg2p.sandbox.net \ - ./install.sh - ``` -* After installation is successful, MinIO console will be accessible at https://minio.openg2p.sandbox.net, depending on the hostname given above. -* Post-installation: - * Once OpenG2P PBMS is installed, do the following: - * Navigate to OpenG2P Documents (From OpenG2P Menu) -> Document Store. - * Configure URL and password for this backend service (Like `http://minio.minio:9000`). Password and account-id/username can be obtained from the secrets in minio namespace. - -### ODK Central - -* Prerequisites: - * [PostgreSQL](./#postgresql) \[REQUIRED]. -* Navigate to [kubernetes/odk-central](https://github.com/OpenG2P/openg2p-deployment/tree/1.1.0/kubernetes/odk-central) directory. -* Run the following to install ODK helm chart. - - ```bash - SANDBOX_HOSTNAME=openg2p.sandbox.net \ - ./install.sh - ``` -* After installation is successful, ODK Central will be accessible at https://odk.openg2p.sandbox.net, depending on the hostname given above. -* Note: The above helm chart uses the following docker images built from [https://github.com/getodk/central/tree/v2023.1.0](https://github.com/getodk/central/tree/v2023.1.0), since ODK Central doesn't provide pre-built docker images for these. - - ``` - openg2p/odk-central-backend:v2023.1.0 - openg2p/odk-central-frontend:v2023.1.0 - openg2p/odk-central-enketo:v2023.1.0 - ``` -* Post-installation: - * Exec into the service pod, and create a user (and promote if required). 
- - ```bash - kubectl exec -it -- odk-cmd -u user-create - kubectl exec -it -- odk-cmd -u user-promote - ``` - -### Kafka - -* Navigate to [kubernetes/kafka](https://github.com/OpenG2P/openg2p-deployment/tree/1.1.0/kubernetes/kafka) directory. -* Run: - - ```bash - SANDBOX_HOSTNAME=openg2p.sandbox.net \ - ./install.sh - ``` -* After installation is successful, Kafka UI can be accessed at https://kafka.openg2p.sandbox.net, depending on the hostname given above. - -### Logging and OpenSearch - -* Navigate to [kubernetes/logging](https://github.com/OpenG2P/openg2p-deployment/tree/1.1.0/kubernetes/logging) directory. -* Run: - - ```bash - SANDBOX_HOSTNAME=openg2p.sandbox.net \ - ./install.sh - ``` -* After installation is successful, OpenSearch Dashboards will be accessible at https://opensearch.openg2p.sandbox.net, depending on the hostname given above. -* Post-installation: TODO - -### Keymanager Deployment - -TODO - -### Mock e-Signet Deployment - -TODO - -### PBMS - -TODO - -### Social Payments Account Registry Deployment - -TODO - -### G2P Cash Transfer Bridge Deployment - -TODO - -### Reporting - -* Prerequisites: - * Kafka \[REQUIRED] - * PostgreSQL \[REQUIRED] - * OpenG2P \[REQUIRED] - * Logging \[REQUIRED]. (At least Elasticsearch is required) -* Clone [https://github.com/OpenG2P/openg2p-reporting](https://github.com/OpenG2P/openg2p-reporting/tree/1.1.0). -* Navigate to [scripts](https://github.com/OpenG2P/openg2p-reporting/tree/1.1.0/scripts) directory inside the above reporting repo. -* Run the following to install reporting - - ```sh - ./install.sh - ``` -* Do the following to import the dashboards present in [dashboards](https://github.com/OpenG2P/openg2p-reporting/tree/1.1.0/dashboards) folder: - * Navigate to Kibana Stack Management -> Kibana Section -> Saved Objects. - * Import all files in [dashboards](https://github.com/OpenG2P/openg2p-reporting/tree/1.1.0/dashboards) folder. - -### All - -WIP. 
If you wish to install all the components, run this from the [kubernetes](https://github.com/OpenG2P/openg2p-deployment/tree/1.1.0/kubernetes) directory.: - -``` -SANDBOX_HOSTNAME=openg2p.sandbox.net \ - ./install-all.sh -``` - -This includes the following components : TODO diff --git a/guides/deployment-guide/deployment-on-kubernetes/gctb-deployment.md b/guides/deployment-guide/deployment-on-kubernetes/gctb-deployment.md deleted file mode 100644 index c1df2745..00000000 --- a/guides/deployment-guide/deployment-on-kubernetes/gctb-deployment.md +++ /dev/null @@ -1,2 +0,0 @@ -# GCTB Deployment - diff --git a/guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/README.md b/guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/README.md deleted file mode 100644 index b8be31c3..00000000 --- a/guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/README.md +++ /dev/null @@ -1,2 +0,0 @@ -# K8s Infrastructure Setup - diff --git a/guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/cluster-setup.md b/guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/cluster-setup.md deleted file mode 100644 index f8b7ee95..00000000 --- a/guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/cluster-setup.md +++ /dev/null @@ -1,152 +0,0 @@ -# K8s Cluster Setup - -## Introduction - -The following guide uses [RKE2](https://docs.rke2.io) to set up the Kubernetes (K8s) cluster. - -## Prerequisites - -* The requirements for setting up the cluster are met as given [here](k8s-cluster-requirements.md). -* The following tools are installed on all the nodes and the client machine. - * `ufw` , `wget` , `curl` , `kubectl` , `istioctl` , `helm` , `jq` - -## Firewall setup - -* Set up firewall rules on each node. The following uses `ufw` to setup firewall. - - * SSH into each node, and change to superuser. 
- * Run the following command for each rule in the following table - - ``` - ufw allow from to any port proto - ``` - - * Example - - ``` - ufw allow from any to any port 22 proto tcp - ufw allow from 10.3.4.0/24 to any port 9345 proto tcp - ``` - - * Enable ufw. - - ```bash - ufw enable - ufw default deny incoming - ``` - - * Additional Reference: [RKE2 Networking Requirements](https://docs.rke2.io/install/requirements#networking) - -
ProtocolPortShould be accessible by onlyDescription
TCP22SSH
TCP80Postgres ports
TCP443Postgres ports
TCP5432Postgres port
TCP9345RKE2 agent nodesKubernetes API
TCP6443RKE2 agent nodesKubernetes API
UDP8472RKE2 server and agent nodesRequired only for Flannel VXLAN
TCP10250RKE2 server and agent nodeskubelet
TCP2379RKE2 server nodesetcd client port
TCP2380RKE2 server nodesetcd peer port
TCP30000:32767RKE2 server and agent nodesNodePort port range
- -## K8s setup - -* The following setup has to be done for each cluster node. -* Choose odd number of server nodes. Example if there are 3 nodes, choose 1 server node and two agent nodes. If there are 7 nodes, choose 3 server nodes and 4 agent nodes. -* Clone the [https://github.com/OpenG2P/openg2p-deployment](https://github.com/OpenG2P/openg2p-deployment) and go to [kuberenetes/rke2](https://github.com/OpenG2P/openg2p-deployment/tree/1.1.0/kubernetes/rke2) directory. -* For the first server node: - * Configure `rke2-server.conf.primary.template`, - * SSH into the node. Place the file to this path: `/etc/rancher/rke2/config.yaml`. Create the directory if not present already. `mkdir -p /etc/rancher/rke2` . - * Run this to download rke2. - - ``` - curl -sfL https://get.rke2.io | sh - - ``` - * Run this to start rke2 server: - - ``` - systemctl enable rke2-server - systemctl start rke2-server - ``` -* For subsequent server and agent nodes: - * Configure `rke2-server.conf.subsequent.template` or `rke2-agent.conf.template`, with relevant ips for each node. - * SSH into each node place the relevant file to this path: `/etc/rancher/rke2/config.yaml`, based on whether its a worker node, or control-plane node. (If worker use agent file. If control-plane use server file). - * Run this to get download rke2. - - ``` - curl -sfL https://get.rke2.io | sh - - ``` - * To start rke2, use this - - ``` - systemctl enable rke2-server - systemctl start rke2-server - ``` - - or, based on server or agent. - - ``` - systemctl enable rke2-agent - systemctl start rke2-agent - ``` -* Execute these commands on a server node. - * ``` - echo -e 'export PATH="$PATH:/var/lib/rancher/rke2/bin"\nexport KUBECONFIG="/etc/rancher/rke2/rke2.yaml"' >> ~/.bashrc - source ~/.bashrc - ``` - * ``` - kubectl get nodes - ``` -* Additional Reference: [RKE2 High Availabilty Installation](https://docs.rke2.io/install/ha) - -## Cluster import into Rancher. 
- -* This section assumes a Rancher server has already been setup and operational. [Rancher Server Setup](rancher-server-setup.md) in case not already done. -* Navigate to Cluster Management section in Rancher. -* Click on `Import Existing` cluster. And follow the steps to import the newly created cluster. -* After Rancher import, do not use the the kubeconfig from server anymore. Use it only via downloading kubeconfig from rancher. - -## Longhorn setup - -* Use this to install longhorn. [Longhorn Install as a Rancher App](https://longhorn.io/docs/1.3.2/deploy/install/install-with-rancher/) - -## Istio setup - -* The following setup can be done from the client machine. This install Istio Operator, Istio Service Mesh, Istio Ingressgateway components. -* From [kuberenetes/istio](https://github.com/OpenG2P/openg2p-deployment/tree/main/kubernetes/istio) directory, configure the istio-operator.yaml, and run; - - ``` - istioctl operator init - kubectl apply -f istio-operator.yaml - ``` - - * If an external Loadbalancer is being used, then use the `istio-operator-external-lb.yaml` file. - - ``` - kubectl apply -f istio-operator-external-lb.yaml - ``` - * Configure the operator.yaml with any further configuration. -* Gather Wildcard TLS certificate and key and run; - - ``` - kubectl create secret tls tls-openg2p-ingress -n istio-system \ - --cert= \ - --key= - ``` -* Create istio gateway for all hosts using this command: - - ``` - kubectl apply -f istio-gateway.yaml - ``` - - * If using external loadbalancer/external TLS termination, use the `istio-gateway-no-tls.yaml` file. - - ``` - kubectl apply -f istio-gateway-no-tls.yaml - ``` - -## Adding new nodes - -* From [kuberenetes/rke2](https://github.com/OpenG2P/openg2p-deployment/tree/1.1.0/kubernetes/rke2) directory, take either the `rke2-server.conf.subsequent.template` or `rke2-agent.conf.template` based on whether the new node is control plane node or Worker node. 
Copy this file to `/etc/rancher/rke2/config.yaml` in the new node. -* Configure the the config.yaml with relevant values. -* Run this to download rke2. - - ``` - curl -sfL https://get.rke2.io | sh - - ``` -* Run this to start rke2 node: - - ``` - systemctl enable rke2-server - systemctl start rke2-server - ``` diff --git a/guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/nfs-server-setup.md b/guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/nfs-server-setup.md deleted file mode 100644 index a46f0336..00000000 --- a/guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/nfs-server-setup.md +++ /dev/null @@ -1,5 +0,0 @@ -# NFS Server Setup - -## Introduction - -NFS-based storage is recommended for backing up DB and other data in developer/UAT/pilot environments. diff --git a/guides/deployment-guide/deployment-on-kubernetes/pbms-deployment/README.md b/guides/deployment-guide/deployment-on-kubernetes/pbms-deployment/README.md deleted file mode 100644 index 46e0055b..00000000 --- a/guides/deployment-guide/deployment-on-kubernetes/pbms-deployment/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# PBMS Deployment - -## Prerequisites - -The following components are required to be present on the cluster. - -* [PostgreSQL](../#postgresql) \[REQUIRED] -* [MinIO](../#minio) \[Optional] - -If you want to customize the Odoo addons in your OpenG2P PBMS, create a custom-packaged PBMS docker, using [Packaging Instructions](../../packaging-openg2p-docker.md). \[Optional] - -## Installation - -* Navigate to [kubernetes/openg2p](https://github.com/OpenG2P/openg2p-deployment/tree/1.1.0/kubernetes/openg2p) directory. 
-* Run: (This installs the reference package dockers) - - ```bash - SANDBOX_HOSTNAME=openg2p.sandbox.net \ - ./install.sh - ``` - - * If use already have a custom-packaged docker image or tag use: - - ```bash - OPENG2P_HOSTNAME=openg2p.sandbox.net \ - OPENG2P_ODOO_IMAGE_REPO= \ - OPENG2P_ODOO_IMAGE_TAG= \ - ./install.sh - ``` -* Post-installation: Refer to [Post Install Configuration](post-install-instructions.md) diff --git a/guides/deployment-guide/deployment-on-kubernetes/postgresql-server.md b/guides/deployment-guide/deployment-on-kubernetes/postgresql-server.md deleted file mode 100644 index 0a103f73..00000000 --- a/guides/deployment-guide/deployment-on-kubernetes/postgresql-server.md +++ /dev/null @@ -1,19 +0,0 @@ -# PostgreSQL Server - -## Introduction - -This guide provides instructions to install PostgreSQL Server on the Kubernetes cluster. However, if you already have PostgresSQL server installed, or are using Cloud hosted Postgres, then you may skip the server installation. The instructions to initialize OpenG2P component databases are provided as part of the component installation instructions. 
- -## Databases - -Module/component-wise listing of databases is given below - -| Module/Component | Database Name | -| ------------------------ | -------------- | -| PBMS | `openg2pdb` | -| Keycloak | `keycloakdb` | -| ODK | `odkdb` | -| SPAR | `spardb` | -| G2P Cash Transfer Bridge | `gctbdb` | -| MOSIP Key Manager | `mosip_keymgr` | - diff --git a/guides/deployment-guide/deployment-on-kubernetes/spar-deployment.md b/guides/deployment-guide/deployment-on-kubernetes/spar-deployment.md deleted file mode 100644 index a5a81af3..00000000 --- a/guides/deployment-guide/deployment-on-kubernetes/spar-deployment.md +++ /dev/null @@ -1,2 +0,0 @@ -# SPAR Deployment - diff --git a/guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/ssl-certificates-using-letsencrypt.md b/guides/deployment-guide/ssl-certificates-using-letsencrypt.md similarity index 100% rename from guides/deployment-guide/deployment-on-kubernetes/k8s-infrastructure-setup/ssl-certificates-using-letsencrypt.md rename to guides/deployment-guide/ssl-certificates-using-letsencrypt.md diff --git a/platform/modules/g2p-cash-transfer-bridge/README.md b/platform/modules/g2p-cash-transfer-bridge/README.md index 395af805..7d57d44f 100644 --- a/platform/modules/g2p-cash-transfer-bridge/README.md +++ b/platform/modules/g2p-cash-transfer-bridge/README.md @@ -22,8 +22,6 @@ The module will support the following functionalities at a high level ## Architecture - -

G2P cash transfer bridge

## Concepts @@ -60,7 +58,7 @@ Currently available payment backends: Configuration Guide - TODO Link -[GCTB Kubernetes Deployment guide](../../../guides/deployment-guide/deployment-on-kubernetes/#g2p-cash-transfer-bridge-deployment) +[GCTB Kubernetes Deployment guide](../../../deployment/openg2p-modules-deployment/#g2p-cash-transfer-bridge-deployment) GCTB Local Installation Guide - TODO diff --git a/platform/modules/social-payments-account-registry-spar.md b/platform/modules/social-payments-account-registry-spar.md index 096fee81..0e54c0fc 100644 --- a/platform/modules/social-payments-account-registry-spar.md +++ b/platform/modules/social-payments-account-registry-spar.md @@ -6,7 +6,7 @@ description: Work in progress ## Introduction -The Social Payments Account Registry (SPAR) maintains a mapping of a user ID and Financial Address (FA) like bank code, account details, mobile wallet number etc., primarily aimed at cash transfers in a social benefit delivery system. The SPAR offers a user-facing portal for adding/updating FSP account details after authentication. +The Social Payments Account Registry (SPAR) maintains a mapping of a user ID and Financial Address (FA) like bank code, account details, mobile wallet number, etc., primarily aimed at cash transfers in a social benefit delivery system. The SPAR offers a user-facing portal for adding/updating FSP account details after authentication. 
@@ -23,7 +23,11 @@ The SPAR is compliant with [G2P Connect interfaces](https://github.com/G2P-Conne * One ID mapped to 1 FA * Multiple IDs may be added for the same user\* * G2P Connect APIs to query and update FA -* Bulk upload by Admin or Financial Service Providers (FSPs) like bank after authentication +* Bulk upload by Admin, Financial Service Providers (FSPs) such as banks, or government departments, after authentication +* Notification to the user via SMS/email - Planned +* Change log - TBD +* Transaction log - Planned +* Signature verification for clients (partners) - Planned, via integrations with Partnermanager & Keymanager with a common library in OpenG2P. ## Concepts @@ -76,7 +80,7 @@ TODO - Technical Concept links. Configuration Guide - TODO Link -[SPAR Kubernetes Deployment Guide](../../guides/deployment-guide/deployment-on-kubernetes/#social-payments-account-registry-deployment) +[SPAR Kubernetes Deployment Guide](../../deployment/openg2p-modules-deployment/spar-deployment/) ## How-To Guides @@ -88,16 +92,18 @@ SPAR API Usage Guide - TODO ### SPAR Service REST API Docs -* Stoplight Link for [Social Payments Account Registry](https://openg2p.stoplight.io/docs/social-payments-account-regsitry). +* Stoplight Link for [Social Payments Account Registry](https://openg2p.stoplight.io/docs/social-payments-account-registry). * Swagger UI using [OpenAPI for SPAR](https://validator.swagger.io/?url=https://raw.githubusercontent.com/OpenG2P/social-payments-account-registry/develop/api-docs/generated/openapi.json). 
-* Swagger UI for [Sunbird-RC's Financial Address Mapper](https://validator.swagger.io/?url=https://raw.githubusercontent.com/Sunbird-RC/g2p-mapper-registry/main/services/mapper-service/swagger.yml) (which is used by SPAR, by default unless configured otherwise) -* Swagger UI for [G2P-Connect Financial Address Mapper](https://validator.swagger.io/?url=https://raw.githubusercontent.com/g2p-connect/specs/draft/release/yaml/mapper\_core\_api\_v1.0.0.yaml) (which Sunbird-RC's Financial Address Mapper implements) +* Swagger UI for [SPAR G2PConnect ID Mapper](https://validator.swagger.io/?url=https://raw.githubusercontent.com/OpenG2P/social-payments-account-registry/develop/spar-g2pconnect-id-mapper/api-docs/generated/openapi.json) (which is used by SPAR, by default unless configured otherwise) +* Swagger UI for [G2P-Connect Financial Address Mapper](https://validator.swagger.io/?url=https://raw.githubusercontent.com/g2p-connect/specs/draft/release/yaml/mapper\_core\_api\_v1.0.0.yaml). +* Swagger UI for [Sunbird-RC's Financial Address Mapper](https://validator.swagger.io/?url=https://raw.githubusercontent.com/Sunbird-RC/g2p-mapper-registry/main/services/mapper-service/swagger.yml) (an alternative implementation of the G2P Connect ID Mapper) ## Source Code * Social Payments Account Registry Source Code - [https://github.com/OpenG2P/social-payments-account-registry](https://github.com/OpenG2P/social-payments-account-registry). * SPAR UI Components Source Code - [https://github.com/OpenG2P/spar-ui](https://github.com/OpenG2P/spar-ui). -* Sunbird-RC's Financial Address Mapper Source code - [https://github.com/Sunbird-RC/g2p-mapper-registry](https://github.com/Sunbird-RC/g2p-mapper-registry). SPAR uses this ID Mapper implementation by default. +* SPAR G2P Connect ID Mapper Source Code - [https://github.com/OpenG2P/social-payments-account-registry/tree/develop/spar-g2pconnect-id-mapper](https://github.com/OpenG2P/social-payments-account-registry/tree/develop/spar-g2pconnect-id-mapper). 
+* Sunbird-RC's Financial Address Mapper Source code - [https://github.com/Sunbird-RC/g2p-mapper-registry](https://github.com/Sunbird-RC/g2p-mapper-registry). An alternative implementation of the ID Mapper, which can be used in place of the SPAR G2P Connect ID Mapper. ## Roadmap @@ -112,7 +118,6 @@ Onboarding of consumer apps (like OpenG2P) * Linking of SPAR PSUT with Application PSUT. * Consent page for users to map token for a time period specifically for an app (like OpenG2P) * Automatic deletion of records based on expiry set -* Notification to the user via SMS/email * Maintaining linkage status (reflected on the portal for the user) #### Expiry handling diff --git a/platform/releases/1.1.0/release-notes.md b/platform/releases/1.1.0/release-notes.md index 19d8b4e2..95bf4ee2 100644 --- a/platform/releases/1.1.0/release-notes.md +++ b/platform/releases/1.1.0/release-notes.md @@ -32,7 +32,7 @@ OpenG2P 1.1.0 focuses on the core needs of our stakeholders from start to finish ## Build and deploy * To build and run this release as a developer refer to the guide [here](../../../developer-zone/getting-started-1/). -* To deploy this release on Kubernetes refer to the guide [here](../../../guides/deployment-guide/deployment-on-kubernetes/). +* To deploy this release on Kubernetes refer to the guide [here](../../../deployment/openg2p-modules-deployment/). ## Test report