---
# Installs a fixed set of Kong plugins globally via the Admin API.
# Idempotent-ish: 409 (plugin already exists) is accepted alongside 201.
- name: Install Specified Kong Plugins Globally
  hosts: localhost
  tasks:
    - name: Define Kong Plugins with Configurations
      set_fact:
        kong_plugins:
          - name: "correlation-id"
            config:
              header_name: "Kong-Request-ID"
              generator: "uuid"
              echo_downstream: true
          - name: "http-log"
            config:
              http_endpoint: "http://your-logging-service.local/logs"
              method: "POST"
              timeout: 10000
              keepalive: 60000
          - name: "jwt"
            config: {}
          - name: "ldap-auth-advanced"
            config:
              ldap_host: "ldap.yourdomain.com"
              ldap_port: 389
              start_tls: false
              verify_ldap_host: true
              base_dn: "dc=yourdomain,dc=com"
              attribute: "uid"
              cache_ttl: 60
              timeout: 10000
              keepalive: 60000
          - name: "mtls-auth"
            config:
              ca_certificates: ["<CA_CERTIFICATE>"]
              depth: 1
              cache_ttl: 60
          - name: "openid-connect"
            config:
              issuer: "https://your-oidc-provider.com"
              client_id: "your-client-id"
              client_secret: "your-client-secret"
              redirect_uri: "https://your-kong-instance.com/redirect"
              scopes: ["openid", "profile", "email"]
          - name: "request-termination"
            config:
              status_code: 403
              message: "Forbidden"
              content_type: "text/plain"

    - name: Ensure Specified Plugins Are Installed Globally
      uri:
        url: "http://localhost:8001/plugins"
        method: POST
        body_format: json
        body:
          name: "{{ plugin.name }}"
          config: "{{ plugin.config | default({}) }}"
        status_code: [201, 409]  # 201 = Created, 409 = Conflict (already exists)
      loop: "{{ kong_plugins }}"
      loop_control:
        loop_var: plugin
      register: plugin_response
      ignore_errors: true  # keep going even if one plugin (e.g. enterprise-only) is unavailable

    - name: Debug Plugin Deployment Responses
      debug:
        var: plugin_response.results
Ansible playbook to install MapR (the matching inventory file appears later in these notes)
---
# mapr_cluster_installation.yml
# Ansible playbook to install MapR 7.8 on a 3-node on-premises cluster
- name: Prepare all nodes for MapR 7.8 installation
  hosts: mapr_cluster
  become: true
  gather_facts: true
  vars:
    mapr_version: "7.8.0"
    mapr_mep_version: "10.1.0"
    java_version: "java-11-openjdk"
    cluster_name: "mapr78cluster"
  tasks:
    - name: Update apt cache
      apt:
        update_cache: true
      when: ansible_os_family == "Debian"

    - name: Update yum cache
      yum:
        update_cache: true
      when: ansible_os_family == "RedHat"

    - name: Install required packages
      package:
        name: "{{ item }}"
        state: present
      loop:
        - "{{ java_version }}"
        - "{{ java_version }}-devel"
        - ntp
        - openssh-server
        - sysstat
        - wget
        - net-tools
        - tar
        - lsof
        - python3
        - chrony
        - dnsmasq

    - name: Set system limits for MapR
      pam_limits:
        domain: '*'
        limit_type: "{{ item.limit_type }}"
        limit_item: "{{ item.limit_item }}"
        value: "{{ item.value }}"
      loop:
        - { limit_type: 'soft', limit_item: 'nofile', value: '64000' }
        - { limit_type: 'hard', limit_item: 'nofile', value: '64000' }
        - { limit_type: 'soft', limit_item: 'nproc', value: '64000' }
        - { limit_type: 'hard', limit_item: 'nproc', value: '64000' }
        - { limit_type: 'soft', limit_item: 'stack', value: 'unlimited' }
        - { limit_type: 'hard', limit_item: 'stack', value: 'unlimited' }

    - name: Disable SELinux
      selinux:
        state: disabled
      when: ansible_os_family == "RedHat"

    - name: Disable firewall
      service:
        name: "{{ firewall_service }}"
        state: stopped
        enabled: false
      vars:
        firewall_service: "{{ 'ufw' if ansible_os_family == 'Debian' else 'firewalld' }}"

    - name: Set kernel parameters for MapR
      sysctl:
        name: "{{ item.name }}"
        value: "{{ item.value }}"
        state: present
        reload: true
      loop:
        - { name: 'vm.swappiness', value: '1' }
        - { name: 'net.core.somaxconn', value: '4096' }
        - { name: 'net.ipv4.tcp_fin_timeout', value: '30' }
        - { name: 'net.ipv4.tcp_keepalive_time', value: '60' }
        - { name: 'net.ipv4.tcp_keepalive_intvl', value: '10' }
        - { name: 'net.ipv4.tcp_keepalive_probes', value: '3' }
        - { name: 'vm.overcommit_memory', value: '0' }

    - name: Disable transparent hugepages
      # NOTE: the original passed `args: warn: no`; the `warn` argument was
      # removed from command/shell in ansible-core 2.14 and now causes a hard
      # failure, so it has been dropped.
      shell: |
        echo never > /sys/kernel/mm/transparent_hugepage/enabled
        echo never > /sys/kernel/mm/transparent_hugepage/defrag

    - name: Make transparent hugepages setting persistent
      # lineinfile's `line` must be a single line; the original embedded "\n",
      # producing a two-line value the module can never match again (so the
      # task was not idempotent). blockinfile manages both lines correctly.
      blockinfile:
        path: /etc/rc.local
        create: true
        mode: "0755"
        block: |
          echo never > /sys/kernel/mm/transparent_hugepage/enabled
          echo never > /sys/kernel/mm/transparent_hugepage/defrag

    # Repository URLs are derived from mapr_version / mapr_mep_version so a
    # version bump only has to touch the vars block.
    - name: Add MapR 7.8 repository for RedHat
      yum_repository:
        name: mapr-core
        description: "MapR {{ mapr_version }} Core Repository"
        baseurl: "https://package.mapr.hpe.com/releases/v{{ mapr_version }}/redhat/"
        gpgcheck: false
        enabled: true
      when: ansible_os_family == "RedHat"

    - name: Add MapR 7.8 MEP repository for RedHat
      yum_repository:
        name: mapr-mep
        description: "MapR {{ mapr_version }} MEP Repository"
        baseurl: "https://package.mapr.hpe.com/releases/MEP/MEP-{{ mapr_mep_version }}/redhat/"
        gpgcheck: false
        enabled: true
      when: ansible_os_family == "RedHat"

    - name: Add MapR 7.8 ecosystem repository for RedHat
      yum_repository:
        name: mapr-ecosystem
        description: "MapR {{ mapr_version }} Ecosystem Repository"
        baseurl: "https://package.mapr.hpe.com/releases/ecosystem-{{ mapr_version }}/redhat/"
        gpgcheck: false
        enabled: true
      when: ansible_os_family == "RedHat"

    - name: Add MapR 7.8 repository for Debian
      apt_repository:
        repo: "deb https://package.mapr.hpe.com/releases/v{{ mapr_version }}/ubuntu binary focal"
        state: present
      when: ansible_os_family == "Debian"

    - name: Add MapR 7.8 MEP repository for Debian
      apt_repository:
        repo: "deb https://package.mapr.hpe.com/releases/MEP/MEP-{{ mapr_mep_version }}/ubuntu binary focal"
        state: present
      when: ansible_os_family == "Debian"

    - name: Add MapR 7.8 ecosystem repository for Debian
      apt_repository:
        repo: "deb https://package.mapr.hpe.com/releases/ecosystem-{{ mapr_version }}/ubuntu binary focal"
        state: present
      when: ansible_os_family == "Debian"

    - name: Create MapR user
      user:
        name: mapr
        createhome: true
        shell: /bin/bash

    - name: Set password for MapR user
      # NOTE(review): a hard-coded plaintext password in the playbook — move
      # this into Ansible Vault or a lookup before production use.
      user:
        name: mapr
        password: "{{ 'mapr123' | password_hash('sha512') }}"

    - name: Create MapR install directory
      file:
        path: /opt/mapr
        state: directory
        owner: mapr
        group: mapr
        mode: "0755"

    - name: Setup Chrony
      template:
        src: chrony.conf.j2
        dest: /etc/chrony.conf
      notify: restart chrony
      when: inventory_hostname in groups['mapr_ntp']

    - name: Start Chrony service
      service:
        name: chronyd
        state: started
        enabled: true

  handlers:
    - name: restart chrony
      service:
        name: chronyd
        state: restarted
# Package-installation plays: the package module accepts a list, which issues
# a single transaction per task instead of one per looped item.
- name: Install MapR Core on all nodes
  hosts: mapr_cluster
  become: true
  tasks:
    - name: Install MapR core packages
      package:
        name:
          - mapr-core
          - mapr-fileserver
        state: present

    - name: Install ZooKeeper package
      package:
        name: mapr-zookeeper
        state: present
      when: inventory_hostname in groups['mapr_zookeeper']

- name: Configure CLDB nodes
  hosts: mapr_cldb
  become: true
  tasks:
    - name: Install CLDB package
      package:
        name: mapr-cldb
        state: present

- name: Configure MapR control system node
  hosts: mapr_webserver
  become: true
  tasks:
    - name: Install MapR Control System (MCS)
      package:
        name:
          - mapr-webserver
          - mapr-apiserver
          - mapr-adminui
        state: present

    - name: Install MapR Monitoring components
      package:
        name:
          - mapr-grafana
          - mapr-collectd
          - mapr-opentsdb
        state: present
# Ecosystem components are installed per-role: each task is gated on
# membership in the corresponding inventory group.
- name: Install additional components on appropriate nodes
  hosts: mapr_cluster
  become: true
  tasks:
    - name: Install Hadoop ecosystem components
      package:
        name:
          - mapr-hadoop-core
          - mapr-hadoop-client
        state: present
      when: inventory_hostname in groups['mapr_hadoop']

    - name: Install Hive components
      package:
        name:
          - mapr-hive
          - mapr-hiveserver2
          - mapr-hivemetastore
        state: present
      when: inventory_hostname in groups['mapr_hive']

    - name: Install Spark components
      package:
        name:
          - mapr-spark
          - mapr-spark-historyserver
        state: present
      when: inventory_hostname in groups['mapr_spark']

    - name: Install Drill components
      package:
        name: mapr-drill
        state: present
      when: inventory_hostname in groups['mapr_drill']

    - name: Install Hbase components
      package:
        name:
          - mapr-hbase
          - mapr-hbasethrift
        state: present
      when: inventory_hostname in groups['mapr_hbase']

    - name: Install data science components
      package:
        name:
          - mapr-data-access-gateway
          - mapr-asynchbase
        state: present
      when: inventory_hostname in groups['mapr_datascience']
- name: Configure disks for MapR-FS
  hosts: mapr_cluster
  become: true
  tasks:
    - name: Create disk list file
      copy:
        content: |
          {% for disk in mapr_disks %}
          {{ disk }} -force
          {% endfor %}
        dest: /tmp/disks.txt
      vars:
        # Adjust to the actual raw devices on each node before running.
        mapr_disks:
          - /dev/sdb
          - /dev/sdc

    - name: Initialize the disks for MapR-FS
      shell: /opt/mapr/server/disksetup -F /tmp/disks.txt
      args:
        # disksetup is destructive; `creates` makes the task a no-op on reruns.
        creates: /opt/mapr/logs/disksetup.log
- name: Configure and start the MapR cluster
  hosts: mapr_cldb[0]
  become: true
  vars:
    cluster_name: "mapr78cluster"
    mapr_security: "disabled"  # Change to "enabled" for secure clusters
  tasks:
    - name: Configure the cluster
      shell: >
        /opt/mapr/server/configure.sh -N {{ cluster_name }}
        -C {{ groups['mapr_cldb'] | join(',') }}
        -Z {{ groups['mapr_zookeeper'] | join(',') }}
        -no-autostart
        {% if mapr_security == "enabled" %}
        -secure -dare
        {% endif %}
      args:
        creates: /opt/mapr/conf/mapr-clusters.conf

    # NOTE(review): these notes claim maprcli lives under /opt/mapr/server,
    # but it is commonly installed at /opt/mapr/bin/maprcli — confirm the
    # path on your nodes before relying on these tasks.
    - name: Start ZooKeeper services on ZooKeeper nodes
      shell: /opt/mapr/server/maprcli node services -name zookeeper -action start
      register: start_zk
      failed_when: start_zk.rc != 0 and "already running" not in start_zk.stderr

    - name: Wait for ZooKeeper to start
      pause:
        seconds: 30

    - name: Start CLDB services on CLDB nodes
      shell: /opt/mapr/server/maprcli node services -name cldb -action start
      register: start_cldb
      failed_when: start_cldb.rc != 0 and "already running" not in start_cldb.stderr

    - name: Wait for CLDB to start
      pause:
        seconds: 60

    - name: Start MFS services
      shell: /opt/mapr/server/maprcli node services -name fileserver -action start
      register: start_services
      failed_when: start_services.rc != 0 and "already running" not in start_services.stderr

    - name: Start API server services
      shell: /opt/mapr/server/maprcli node services -name apiserver -action start
      register: start_apiserver
      failed_when: start_apiserver.rc != 0 and "already running" not in start_apiserver.stderr
      ignore_errors: true

    - name: Start webserver services
      shell: /opt/mapr/server/maprcli node services -name webserver -action start
      register: start_webserver
      failed_when: start_webserver.rc != 0 and "already running" not in start_webserver.stderr
      ignore_errors: true

    - name: Start additional services
      shell: /opt/mapr/server/maprcli node services -name drill,hivemeta,hs2 -action start
      ignore_errors: true

- name: Configure monitoring services
  hosts: mapr_webserver
  become: true
  tasks:
    - name: Start monitoring services
      shell: /opt/mapr/server/maprcli node services -name collectd,opentsdb,grafana -action start
      ignore_errors: true

- name: Configure Data Access Gateway
  hosts: mapr_datascience
  become: true
  tasks:
    - name: Start Data Access Gateway
      shell: /opt/mapr/server/maprcli node services -name mapr-gateway -action start
      ignore_errors: true

- name: Verify installation
  hosts: mapr_cldb[0]
  become: true
  tasks:
    - name: Check cluster status
      shell: /opt/mapr/server/maprcli dashboard info -json
      register: cluster_status
      become_user: mapr

    - name: Display cluster status
      debug:
        var: cluster_status.stdout

    - name: Check node status
      shell: /opt/mapr/server/maprcli node list -json
      register: node_status
      become_user: mapr

    - name: Display node status
      debug:
        var: node_status.stdout
Key updates for MapR 7.8 compared to 7.7:
- Updated to MEP 10.1.0 (from 9.1.0 in 7.7)
- Added proper time synchronization with Chrony (preferred over NTP in newer deployments)
- Added the new API Server component that’s required in MapR 7.8
- Added Data Science components including the Data Access Gateway
- Improved system parameters tuning with more optimized values for 7.8
- Updated paths to use /opt/mapr/server/maprcli instead of just maprcli for more reliability
- Added additional system limits for MapR 7.8’s resource requirements
- Added node status verification at the end of installation
Before running the playbook:
- Adjust the disk definitions in the “Configure disks for MapR-FS” task to match your actual disk configuration
- Set the cluster_name variable to your desired cluster name
- Update mapr_security to “enabled” if you want a secure cluster
- Create the chrony.conf.j2 template file in the same directory as your playbook
Run the playbook with:
ansible-playbook -i inventory.ini mapr_cluster_installation.yml
This playbook should successfully install MapR 7.8 on your 3-node cluster with all the necessary components and optimizations.
# Ansible inventory for the 3-node MapR 7.8 cluster.
# mapr_cluster is the umbrella group; every role group below is one of its
# children, so plays targeting mapr_cluster reach all three nodes.
[mapr_cluster:children]
mapr_cldb
mapr_zookeeper
mapr_webserver
mapr_hadoop
mapr_hbase
mapr_hive
mapr_spark
mapr_drill
mapr_datascience
mapr_ntp
# CLDB (container location database) nodes — node1 doubles as webserver/NTP.
[mapr_cldb]
node1.example.com
node2.example.com
# ZooKeeper quorum — all three nodes (odd count required for quorum).
[mapr_zookeeper]
node1.example.com
node2.example.com
node3.example.com
# MapR Control System / API server host.
[mapr_webserver]
node1.example.com
# Hadoop core + client on every node.
[mapr_hadoop]
node1.example.com
node2.example.com
node3.example.com
[mapr_hbase]
node2.example.com
node3.example.com
[mapr_hive]
node2.example.com
[mapr_spark]
node3.example.com
[mapr_drill]
node3.example.com
# Data Access Gateway / asynchbase host.
[mapr_datascience]
node2.example.com
# Node that gets the chrony.conf template (time source for the cluster).
[mapr_ntp]
node1.example.com
The mapr_datascience group in the playbook refers to nodes that will have data science components installed. In the MapR 7.8 context, this includes:
mapr-data-access-gateway – This is a key component in MapR 7.8 that provides RESTful API access to MapR services and data. It allows data scientists and applications to interact with MapR data through REST APIs, which is particularly useful for:
- Integration with Python, R, and other data science tools
- Building web applications that need to access MapR data
- Supporting microservices architecture that interacts with MapR
mapr-asynchbase – This is an asynchronous HBase client library that provides non-blocking access to HBase. It’s particularly useful for:
- High-throughput applications that need to interact with HBase
- Applications that need to perform many operations in parallel
- Avoiding blocking I/O in data science applications
These packages create an environment more suitable for data scientists to work with MapR data using their preferred tools. The Data Access Gateway in particular is becoming increasingly important in modern MapR deployments as it enables easier integration with modern data science tools and workflows.
Ansible playbook to install Java
---
# Installs OpenJDK 11 on RHEL 8 hosts and exports JAVA_HOME system-wide.
- name: Install Java on Red Hat 8 Nodes
  hosts: redhat_nodes
  become: true
  tasks:
    - name: Ensure the system is up to date
      yum:
        name: '*'
        state: latest

    - name: Install OpenJDK 11 runtime and development kit
      yum:
        name:
          - java-11-openjdk
          - java-11-openjdk-devel
        state: present

    - name: Set JAVA_HOME environment variable
      lineinfile:
        path: /etc/profile
        line: 'export JAVA_HOME=/usr/lib/jvm/java-11-openjdk'
        create: true
    # The original "command: source /etc/profile" task was removed:
    # `source` is a shell builtin (the command module cannot execute it),
    # and even via the shell module it would only affect that child process.
    # /etc/profile is read automatically by the next login shell.
python3 -c "import jmespath; print(jmespath.__version__)"
Kong Gateway HA Setup Across Two Sites (Active-Active or Active-Passive)
Kong Gateway HA Setup Across Two Sites (Active-Active or Active-Passive)
To set up Kong Gateway in High Availability (HA) mode across two sites, each with two servers, you need a shared database, load balancing, and synchronization between sites.
🔹 Deployment Overview
🔹 2 Sites: Each site has 2 Kong nodes
🔹 Database: Shared PostgreSQL or Cassandra for synchronization
🔹 Load Balancer: Needed at each site for traffic distribution
🔹 Service Discovery: DNS or Consul for routing
🔹 Architecture Options
1️. Active-Active (Multi-Region Load Balancing)
- All four Kong nodes are active and share traffic
- Requires database replication between sites
- Needs global load balancing (GSLB)
2️. Active-Passive (Failover Setup)
- Site 1 is active, Site 2 is standby
- Failover handled by DNS or Load Balancer
- PostgreSQL in replication mode
Step-by-Step Kong Gateway HA Setup
1️. Install Kong on All Nodes
Install Kong on all four servers across the two sites:
curl -Lo kong.rpm "https://download.konghq.com/gateway-3.x-rpm/kong-3.x.el7.amd64.rpm"
sudo yum install -y kong.rpm
or for Debian-based systems:
curl -Lo kong.deb "https://download.konghq.com/gateway-3.x-ubuntu/kong-3.x.all.deb"
sudo dpkg -i kong.deb
2️. Setup Shared Database (PostgreSQL Recommended)
Install PostgreSQL on a separate database cluster
Enable replication between Site 1 and Site 2
On the primary DB node (Site 1):
sudo -u postgres psql
CREATE USER kong WITH PASSWORD 'kongpass';
CREATE DATABASE kong OWNER kong;
On all Kong nodes, update kong.conf:
database = postgres
pg_host = <DB-PRIMARY-IP>
pg_port = 5432
pg_user = kong
pg_password = kongpass
Then, run migrations (only once):
kong migrations bootstrap
On replica DB node (Site 2): Enable PostgreSQL replication.
3️. Start Kong on All Nodes
After configuring kong.conf, start Kong on all four nodes:
kong start
Verify the setup:
kong health
4️. Configure Load Balancing for Kong Nodes
Each site should have a local load balancer (e.g., Nginx, HAProxy, AWS ELB, or F5) that distributes traffic to Kong nodes.
HAProxy Config:
frontend kong_frontend
bind *:8000
default_backend kong_backend
backend kong_backend
balance roundrobin
server kong1 <KONG_NODE1>:8000 check
server kong2 <KONG_NODE2>:8000 check
For Active-Active, use GSLB or Anycast to balance across regions.
For Active-Passive, failover is managed by health checks.
5️. Synchronize Configuration Across Sites
Use Kong Config Sync to keep both sites in sync:
kong config db_export kong.yaml
scp kong.yaml site2:/etc/kong/kong.yaml
kong config db_import kong.yaml
Alternatively, use deck (DecK) for automated sync:
deck dump --output kong-config.yaml
deck sync --state kong-config.yaml
6️. Monitor & Maintain HA Setup
Use Prometheus, Grafana, or Kong Manager for monitoring.
Enable health checks:
curl -i http://<kong-ip>:8001/status
Set up PostgreSQL monitoring to detect failover events.
🚀 Final Setup Summary
– 2 sites, 2 Kong nodes per site
– Shared PostgreSQL with replication
– Local Load Balancer for each site
– Global Load Balancer for Active-Active
– Automated config sync using DecK
MapR Migration
Why Snapshots Are NOT Ideal for Migration?
- Snapshots stay within the same cluster – They cannot be directly copied to another cluster.
- No built-in transfer mechanism – Snapshots are stored as metadata; you still need another tool (like DistCp) to move data.
- Cannot directly restore on a different system – The target cluster must support the same snapshot system (e.g., HDFS or MapR-FS).
Best Way to Migrate Data Between Clusters
Use DistCp for large-scale data migration:
hadoop distcp -update hdfs://source-cluster/data hdfs://destination-cluster/data
or for cloud migration:
hadoop distcp hdfs://source/data s3a://bucket-name/
Create & Restore Snapshot (MapR-FS or HDFS)
hdfs dfsadmin -allowSnapshot /mydata
hdfs dfs -createSnapshot /mydata my_snapshot
hdfs dfs -restoreSnapshot /mydata/my_snapshot
DistCp for Data Migration
hadoop distcp -update -delete hdfs://source-cluster/data hdfs://destination-cluster/data
jks – issue
Here are a few steps you can try to resolve the issue:
- Check Keystore and Truststore: Ensure that your keystore and truststore files are correctly set up and contain the necessary certificates. You can use the keytool command to inspect the contents of your keystore and truststore.
- Set SSL Properties: Make sure you have set the necessary SSL properties in your Java code. For example:
java
System.setProperty("javax.net.ssl.keyStore", "path/to/keystore.jks");
System.setProperty("javax.net.ssl.keyStorePassword", "keystorePassword");
System.setProperty("javax.net.ssl.trustStore", "path/to/truststore.jks");
System.setProperty("javax.net.ssl.trustStorePassword", "truststorePassword");
- Initialize SSLContext: Ensure that you are correctly initializing the SSLContext. Here’s an example:
java
SSLContext sslContext = SSLContext.getInstance("TLS");
KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
KeyStore keyStore = KeyStore.getInstance("JKS");
keyStore.load(new FileInputStream("path/to/keystore.jks"), "keystorePassword".toCharArray());
KeyStore trustStore = KeyStore.getInstance("JKS");
trustStore.load(new FileInputStream("path/to/truststore.jks"), "truststorePassword".toCharArray());
kmf.init(keyStore, "keystorePassword".toCharArray());
tmf.init(trustStore);
sslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom());
- Verify Certificate Entries: Ensure that the certificates in your keystore and truststore are correctly imported and have the necessary trust entries. You can refer to this guide for more details on importing certificates.
- Custom TrustStore: If you are using a custom truststore, make sure it is correctly configured. You can find more information on using a custom truststore in Java here.
If you still encounter issues, please provide more details about your setup and any error messages you are receiving. This will help in diagnosing the problem more accurately.
A trustCertEntry in a Java KeyStore (JKS) is an entry that contains a trusted certificate, typically used to establish trust in SSL/TLS connections. Here’s a quick guide on how to add a trustCertEntry to your JKS:
- Obtain the Certificate: Ensure you have the certificate file (usually with a .cer or .crt extension) that you want to add to the truststore.
- Import the Certificate: Use the keytool command to import the certificate into your JKS. Here’s an example command:
  keytool -import -trustcacerts -alias mycert -file path/to/certificate.cer -keystore path/to/truststore.jks
  -import: Specifies that you want to import a certificate.
  -trustcacerts: Indicates that the certificate is a trusted CA certificate.
  -alias mycert: Sets an alias for the certificate entry.
  -file path/to/certificate.cer: Specifies the path to the certificate file.
  -keystore path/to/truststore.jks: Specifies the path to the truststore file.
- Verify the Entry: After importing the certificate, you can verify that it has been added to the truststore using the keytool -list command:
  keytool -list -keystore path/to/truststore.jks
Kong HA
Setting up Kong Gateway with high availability (HA) on-premise on bare metal servers involves several steps. Below is a comprehensive guide to achieve this setup:
Prerequisites
- Bare Metal Servers: Ensure you have multiple physical servers available.
- Network Configuration: Ensure all servers are on the same network and can communicate with each other.
- Data Store: Kong Gateway requires a shared data store like PostgreSQL or Cassandra. Ensure you have a highly available setup for your data store.
- Load Balancer: A hardware or software load balancer to distribute traffic across Kong Gateway nodes.
Step-by-Step Guide
1. Install PostgreSQL for the Shared Data Store
- Install PostgreSQL:
sudo apt-get update
sudo apt-get install -y postgresql postgresql-contrib
- Configure PostgreSQL for High Availability:
- Set up replication between multiple PostgreSQL instances.
- Ensure that the primary and standby instances are configured correctly.
- Create a Kong Database:
sudo -u postgres psql
CREATE DATABASE kong;
CREATE USER kong WITH PASSWORD 'yourpassword';
GRANT ALL PRIVILEGES ON DATABASE kong TO kong;
\q
2. Install Kong Gateway on Each Server
- Install Kong Gateway:
sudo apt-get update
sudo apt-get install -y apt-transport-https
curl -s https://packages.konghq.com/keys/kong.key | sudo apt-key add -
echo "deb https://packages.konghq.com/debian/ $(lsb_release -sc) main" | sudo tee -a /etc/apt/sources.list
sudo apt-get update
sudo apt-get install -y kong
- Configure Kong Gateway:
- Create a kong.conf file on each server with the following configuration:
database = postgres
pg_host = <primary_postgresql_host>
pg_port = 5432
pg_user = kong
pg_password = yourpassword
pg_database = kong
- Start Kong Gateway:
kong migrations bootstrap
kong start
3. Configure Load Balancer
- Set Up a Load Balancer:
- Configure your load balancer to distribute traffic across the Kong Gateway nodes.
- Ensure the load balancer is set up for high availability (e.g., using a failover IP or DNS).
- Configure Health Checks:
- Configure health checks on the load balancer to monitor the health of each Kong Gateway node.
- Ensure that traffic is only sent to healthy nodes.
4. Set Up Failover Mechanism
- Database Failover:
- Ensure your PostgreSQL setup has a failover mechanism in place (e.g., using Patroni or pgpool-II).
- Kong Gateway Failover:
- Ensure that the load balancer can detect when a Kong Gateway node is down and redirect traffic to other nodes.
5. Implement Monitoring and Alerts
- Set Up Monitoring:
- Use tools like Prometheus and Grafana to monitor the health and performance of your Kong Gateway nodes and PostgreSQL database.
- Set Up Alerts:
- Configure alerts to notify you of any issues with the Kong Gateway nodes or the PostgreSQL database.
Example Configuration Files
PostgreSQL Configuration (pg_hba.conf):
# TYPE DATABASE USER ADDRESS METHOD
host kong kong 192.168.1.0/24 md5
Kong Gateway Configuration (kong.conf):
database = postgres
pg_host = 192.168.1.10
pg_port = 5432
pg_user = kong
pg_password = yourpassword
pg_database = kong
Summary
By following these steps, you can set up a highly available Kong Gateway on bare metal servers. This setup ensures that your API gateway remains reliable and performs well under various conditions. Make sure to thoroughly test your setup to ensure that failover and load balancing work as expected.
Install KONG PLugin
---
# Reads every *.json plugin definition from a directory and POSTs each one
# to the Kong Admin API.
- name: Deploy Kong Plugins
  hosts: kong_servers
  vars:
    plugin_configs_path: "/path/to/plugin/configs/"
  tasks:
    - name: Read JSON configuration files
      find:
        paths: "{{ plugin_configs_path }}"
        patterns: "*.json"
      register: json_files

    - name: Apply Kong plugins configuration
      uri:
        url: "http://localhost:8001/plugins/"
        method: POST
        body: "{{ lookup('file', item.path) | from_json }}"
        body_format: json
        # Kong's Admin API returns 201 on creation (and 409 if the plugin
        # already exists); the uri module's default of 200 would mark every
        # successful create as a failure.
        status_code: [201, 409]
      with_items: "{{ json_files.files }}"
      register: plugin_results

    - name: Display results
      debug:
        var: plugin_results
second playbook
---
# Attaches the rate-limiting plugin (loaded from rate_limit_plugin.json)
# to a specific Kong service via the Admin API.
- name: Deploy Kong Rate Limiting Plugin
  hosts: localhost
  tasks:
    - name: Deploy Kong Rate Limiting Plugin
      uri:
        url: "http://localhost:8001/services/238feb34-597a-44c1-a797-5001ef0d2aba/plugins"
        method: POST
        headers:
          Content-Type: "application/json"
        body: "{{ lookup('file', 'rate_limit_plugin.json') }}"
        body_format: json
        status_code: 201  # Created; rerunning will 409 once the plugin exists
json file
[root@instance-test-luks co]# more rate_limit_plugin.json
{
"name": "rate-limiting",
"config": {
"minute": 20,
"hour": 100,
"day": 2000
}
}
Setting Up a YUM Repository (RHEL/CentOS)
A. Setting Up a YUM Repository (RHEL/CentOS)
1. Install and Configure a Web Server
Most repositories are served via HTTP. In this example, we’ll use Apache (httpd):
- Install Apache:
  sudo yum install httpd -y
- Enable and start Apache:
  sudo systemctl enable httpd
  sudo systemctl start httpd
- Verify that Apache is running:
  Open your browser and navigate to http://<your-server-IP>/ to see the Apache welcome page.
2. Create the Repository Directory
- Make a directory to hold your repository files:bashCopyEdit
sudo mkdir -p /var/www/html/myrepo - Copy your RPM packages into this directory:bashCopyEdit
sudo cp /path/to/your/rpms/*.rpm /var/www/html/myrepo/
3. Generate Repository Metadata
- Install the
createrepotool:bashCopyEditsudo yum install createrepo -y - Run
createrepoin your repository directory:bashCopyEditsudo createrepo /var/www/html/myrepoThis command creates arepodatadirectory with all the necessary metadata for the repository.
4. Set Proper Permissions
- Ensure Apache can read the files:bashCopyEdit
sudo chmod -R 755 /var/www/html/myrepo - If SELinux is enabled, adjust the file context:bashCopyEdit
sudo chcon -R -t httpd_sys_content_t /var/www/html/myrepo
5. (Optional) Configure the Firewall
Make sure HTTP traffic is allowed:
- For firewalld:
  sudo firewall-cmd --permanent --add-service=http
  sudo firewall-cmd --reload
mTLS cloud flare integration with kong
integrate kong with cloudflare with mtls
Reasoned about Kong Cloudflare integration for 8 seconds
Integrating Kong with Cloudflare via mTLS
The user is asking about how to integrate Kong with Cloudflare using mTLS, which stands for mutual TLS. Kong is an API gateway, and the goal is to set up mTLS for secure communication with Cloudflare. This likely involves configuring Cloudflare as a reverse proxy for Kong. I’ll need to include steps for setting up mTLS on both ends, including cert management. The answer will detail how to configure mTLS in Cloudflare and how to handle it on the Kong side for a secure connection.
Configuring Kong and Cloudflare with mTLS
To integrate Kong with Cloudflare using mTLS, we need a few key steps. Cloudflare supports mTLS via its Zero Trust/Access solutions, and Kong can use mTLS for upstream services. Here’s a simplified approach:
- Generate certificates (either Cloudflare’s Origin or client certificate).
- On Kong’s end, configure the mTLS plugin or settings to verify the client certificate.
- On Cloudflare, configure mTLS to forward certificates or use “Authenticated Origin Pulls” to validate requests with a client certificate from Cloudflare to Kong.
Below is an overview of how you can integrate Kong with Cloudflare using mutual TLS (mTLS) to secure the connection between Cloudflare (as your edge proxy) and Kong (as your API gateway):
1. Overview of the Integration
- Cloudflare Authenticated Origin Pulls:
Cloudflare can be configured to present a client certificate when it connects to your origin server. This proves to your origin that the connection is coming through Cloudflare. - Kong as the Origin:
Kong (which runs on Nginx) can be configured to require and verify client certificates on incoming TLS connections. In this scenario, Kong will be set to trust only the client certificate (or its issuing CA) that Cloudflare uses.
The integration ensures that only requests that have passed through Cloudflare—and that present the proper client certificate—are accepted by Kong.
2. Prerequisites
- A Cloudflare account with your domain configured.
- Cloudflare’s Authenticated Origin Pulls enabled (this is available in your Cloudflare dashboard).
- A publicly trusted server certificate installed on Kong for its TLS listener.
- Administrative access to your Kong instance so you can modify TLS settings (via configuration file or custom Nginx snippets).
3. Enable Cloudflare Authenticated Origin Pulls
- Activate Authenticated Origin Pulls in Cloudflare:
In the Cloudflare dashboard, go to SSL/TLS → Edge Certificates and ensure that Authenticated Origin Pulls is turned on. - Download the Cloudflare Origin Pull CA Certificate:
Cloudflare publishes a CA certificate that it uses to sign its client certificates for origin pulls. You can download it from Cloudflare’s documentation (typically named something likeorigin-pull-ca.pem).
4. Configure Kong to Require mTLS from Cloudflare
Since Kong uses Nginx as its underlying proxy, you can require client certificate verification by adjusting its TLS configuration. There are two common approaches:
A. Using a Custom Nginx Configuration Snippet in Kong
- Locate or Create a Custom Nginx Snippet File:
Depending on your Kong version, you can add custom configuration to Kong’s Nginx configuration (for example, using thenginx_http_ssl_verify_clientdirective). - Configure Client Certificate Verification:
In your custom snippet (which might be included via Kong’s configuration file or a custom plugin), add directives similar to the following:# This snippet is applied to the HTTPS server block in Kong’s Nginx config ssl_verify_client on; ssl_client_certificate /path/to/cloudflare-origin-pull-ca.pem; - Reload Kong:
After updating the configuration, reload or restart Kong so that the new settings take effect.
B. Configuring Kong (Enterprise) with Built-In mTLS Features
If you are using Kong Enterprise or a version that supports mTLS configuration natively for incoming connections, you can use its configuration options (or plugins) to require client certificates. Check the Kong documentation for any native mTLS support where you can specify a trusted CA.
5. Test the Setup
- Verify the TLS Handshake:
Use tools like OpenSSL’s s_client to simulate a connection to Kong and check that it requires a client certificate:openssl s_client -connect your-kong-host:443 -CAfile /path/to/cloudflare-origin-pull-ca.pemWithout a proper client certificate, the handshake should fail. - Send a Request Through Cloudflare:
Ensure that when Cloudflare forwards requests to Kong, it presents its client certificate and that Kong accepts the connection. - Monitor Kong Logs:
Review Kong (or Nginx) logs to ensure that client certificate verification is working as expected.
6. Additional Considerations
- Certificate Management:
Maintain and update your Cloudflare Origin Pull CA certificate on Kong when required (though Cloudflare’s published CA rarely changes). - Fallback and Testing:
During testing, you might want to temporarily setssl_verify_client optional;so you can observe behavior without blocking all connections, then switch toonfor production. - Security and Performance:
Ensure that all certificate files are stored securely on your Kong server and that proper file permissions are set. - Documentation and Updates:
Always refer to the latest Cloudflare documentation on Authenticated Origin Pulls and Kong documentation for any version-specific instructions.