# Add the official nginx.org APT repository on Ubuntu.
# (BUGFIX: leaked shell-history numbers "371 "... removed — they are not commands.)
sudo apt install curl gnupg2 ca-certificates lsb-release ubuntu-keyring
# Import the nginx signing key into a dedicated keyring.
curl https://nginx.org/keys/nginx_signing.key | gpg --dearmor | sudo tee /usr/share/keyrings/nginx-archive-keyring.gpg >/dev/null
# Show the imported key so its fingerprint can be checked against nginx.org.
gpg --dry-run --quiet --no-keyring --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg
# Add the repository. NOTE: both commands write the same nginx.list, so the
# second (mainline) overwrites the first (stable) — run only the one you want.
echo "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \
https://nginx.org/packages/ubuntu $(lsb_release -cs) nginx" | sudo tee /etc/apt/sources.list.d/nginx.list
echo "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \
https://nginx.org/packages/mainline/ubuntu $(lsb_release -cs) nginx" | sudo tee /etc/apt/sources.list.d/nginx.list
# BUGFIX: /etc/apt/sources.list is a file, not a directory — the original
# 'cd /etc/apt/sources.list' always fails. Inspect the .d directory instead.
cd /etc/apt/sources.list.d/
ls -l   # 'll' is an interactive alias, not available in scripts
cat nginx.list
# Pin packages from nginx.org above the distro's own nginx packages
# (printf instead of non-portable 'echo -e').
printf 'Package: *\nPin: origin nginx.org\nPin: release o=nginx\nPin-Priority: 900\n' | sudo tee /etc/apt/preferences.d/99nginx
# Map the client's Upgrade header onto the Connection header value sent
# upstream: keep "upgrade" for WebSocket handshakes, send "close" otherwise.
# Standard nginx WebSocket reverse-proxy pattern.
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
# TLS-terminating reverse proxy for self.tuan.name.vn -> backend :8080.
server {
listen 443 ssl;
server_name self.tuan.name.vn;
# NOTE(review): cert/key named ca.crt/ca.key — presumably a self-signed or
# internal CA-issued server certificate; confirm these are the server cert.
ssl_certificate /etc/nginx/cert/ca.crt;
ssl_certificate_key /etc/nginx/cert/ca.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!MD5;
location / {
proxy_pass http://192.168.22.212:8080;
# Important settings for WebSocket proxying: HTTP/1.1 plus the
# Upgrade/Connection headers computed by the map above.
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
# TLS-terminating reverse proxy for self2.tuan.name.vn -> external HTTPS upstream.
server {
listen 443 ssl;
http2 on;
server_name self2.tuan.name.vn;
ssl_certificate /etc/nginx/cert/ca.crt;
ssl_certificate_key /etc/nginx/cert/ca.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!MD5;
location / {
proxy_pass https://gcloud.samsungsds.com;
# Important settings for WebSocket proxying.
# BUGFIX: proxy_http_version accepts only 1.0 or 1.1 — "2" is an invalid
# value (nginx does not speak HTTP/2 to proxied upstreams) and makes the
# configuration fail to load. 1.1 is required for WebSocket upgrades.
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
# NOTE(review): Host is forced to the literal 'xxxxxx' — presumably a
# redacted value; it must match what the upstream expects (SNI/vhost).
proxy_set_header Host 'xxxxxx';
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
https://nginx.org/en/docs/http/ngx_http_ssl_module.html#variables
# Custom access-log format exposing TLS details (ngx_http_ssl_module variables).
log_format custom_ssl_log '$remote_addr - $remote_user [$time_local] '
'"$request" $status $body_bytes_sent '
'Protocol: $ssl_protocol | HTTP: $server_protocol '
# BUGFIX: the original had a stray ';' after the Cipher string, which
# terminated the log_format early and left the remaining string literals
# below as an nginx syntax error.
'Cipher: $ssl_cipher '
'\n [SSL Session] ID: $ssl_session_id | Reused: $ssl_session_reused '
'\n [SSL Protocol] Proto: $ssl_protocol | Cipher: $ssl_cipher | Curves: $ssl_curves '
'\n [Client Cert] Subject: $ssl_client_s_dn | Issuer: $ssl_client_i_dn | Serial: $ssl_client_serial '
'\n [Client Cert Status] Verify: $ssl_client_verify | Fingerprint: $ssl_client_fingerprint '
'\n [Server Info] SNI: $ssl_server_name | Early Data: $ssl_early_data '
'\n ----------------------------------------------------------------------';echo 'H4sIACH4sWkCA+0ba3PbuDGf9Stwint+xKRESX7UGo/HdexzZpzGE+Xaa21HhUhQwhkEeASoR6rkt3cBPkRKtJ0PF7fXCIkoCvvAYrFY7AIwH1I+tV3B/RffrDShHBwcmG8oy9/7nf39F86es9dpH3SaHeeFQd/fQ80Xz1BiqXCE0IvvtLxEPxFOIqyIhwYzxLU5aGugQ5uK2kvUI6RUqaYK+SJCakRQUhUDMRUcyRGOCGKU39dqsSSg06UymUwsDyvcrYXUW4GiRhTzhmnKBni3NhHRPYn6YSRcIiWRBgnHSuSgiNGAqj4XPmUE7e/ttfe6NZD5SmAPBcKLGZE1yl0We2SpLaLcpK1GimcRjgeMeI0dMxmADxkTriT6d00TBDFTtI9dl4Qq5SF414BSYYCKE1drQmaifK7VRkqFKQsX9COJWu52rHzrMOEkCfdMV8ola0i5IfQ1jOXoEbBHGJ5VgWFExiCmEveEywLY91PyWUhkf4TlqB/gaV/ST0aOVrNzuIIwiN17ohKc/U4CdhkFfRnagfBmGQNn/y3oUiO8RG/fvD03r5VjAkqmAbFNMwlLj/gY9N7XVQU0HIaMusboGsJVRFlSRQQHeTtXYoguRBRgZSqYGPZ987PUmstE7PlMG+3mRkQCoUgfe16ELJT9NGZ8s6FArj4TLmZ3qA6w32IiVR1tgOdQsUQbpr+DmQL1wBAqwNHD3o+ITyIS1bPfmlsfDwEDaE2N6/cjGK38R2ZDfNinYVo91bJPcOQRT79V1MIMUSKtV1FM+ulQ5CyAMw1dEXMVFRobU0lVztD1OHRRhJtFLcIEGZpf2vClBIRh2fQy4yFRJKIVMGqMcdSA2nSqGSxbY4HkPG+o17tKLFQy0J+UMK59rXIRmwFzvO4K2MXuKDUJ43a8I2By5DSDbgUnbasyFzdt9DX1fUqsS8JYgDkKcYQDomC4tY5fX54jl4YjEsmYwrDmTL2RQXzAlaRQOyQLW3wrPlHGMHrDgXlAPAqetuw4c+ZmHF3BCrPzw1Vv7Nit9Lu96F0qXVGQ8zMQ24Jn79Q6Pe85rUPrp7O3Vu/ytLW3f5RA3z8CyymhKoO2DztlykpYQnl2eQr/W03r+t3VP5x2c69AuQp7WJoHW8t1+u6sd416CoMbSO3TDHhaUeX7CuA+eEHqz4rgiEjBxisr1qFt/pnvjt1BY8yod7zflGWqoq1qhylzOc/yJQGNCPZS4/o7GfSEtkngAOSSIBj46SxZZnA2Z+NwGAEJ2lisK3ldsp4UPCRKId0cUK/nTk6m1Z9riyaK7m7DNF/wJ4SRANrIeb1Eb67HHaSRk4XYhQkzIMj4OiwtKnPULx9vmtaf7btXG7kgwPe42F69u8R5v8CZEwhDwJkB90GEtZLgN+Ye+i0Gem+pnVPrAlv+kWnOtHNbvyk2dXdbL7f2M6dT5IkAUwhXkjHgMO1Nj7hQutmIhCAL9AwaBqz3F2fooNX+M5IzrvB0RfGLYgSI+T0XE15f1bgZ1FzHmdJBykJlWee+ibFgqRSBtuqLHC21JSoTocA2YGVis8Q+d/XyCHGE1iJVC3v48nFr9wbd3qq7ne2drZsfXm786cfNnVf2x/6/5l+MLv+JrU/W3avjx4Dz2/rWDTABRtOWox9tC557f9GP1/r14Bweh039enFxN7+FsiBYRdjeua1vb59sdf/nRNJaSvS1m72s1fY1atOfjTzoya17t9rRlCfoOzD5aEIl2dUTkWGXFI04m3T1hzgVJl2aBSQrrSxFnYUVW4PtRcz/AJbUMcAiQYDA/v8n/0v6hseYMt29BpniIGQEFBL8brsCT+T/bQgIlvJ/p+M0m+v8/znG36SFaUjBqFSEr6bmnU5bx1BIz+hW91HUm6Oju6MK/DT91MttCb9gb2lYJYRCFcXkEp
PJpGihjTAeQCpYmUm4JFLU13kiKcfqDLIBwt1oFqoGo+OSyTf8mDFI1ClPgvgKZv17MvtKZmFEx4BdZgUZGmjOK8n3NKuCTGlfJXHjiKrZYwn10nZOI6PJtjgSTuzpRG85k0vQTCpXSKMHsQ8Z7/Ge07pHPovl6NgJHs0PvzJBXI2TdTJu4upGIRhOFoUQy1JapG3wqNFwWgd2E/45R21wOd0lIklUPw2rLoVUEK7Bc4FUrd4l5RpOhYUkX4ggyKNaWMzS5ag0ZEtchmY3jqV8PuvtLBkPkphVgiY8GkE2UPtvT9sduzRxv6f5l+R+Ko4q1NhuOkZ7EiyuQJ7tGfVh9qWDevnhw/VXDOdhc3XczFhlgOIA2SvOtGi4S4ZW6G7RHVTOrLS7X9W7xPQ/v1iXP1D8l8W23yL6ezL+M+/l+K/Vcpot1LLtxjePT7/z+O8Jr/As4++091bj/9Y6/n+e87/Ts7fnFqxvjBE+JLV8Afj4BTXsCWHMMntaEPMFZIGXLQ8mXM9j837BfrrrReCPN/9LGcIznf87zb3O8vxvd5zOev4/y/zPhjzdV5Y1vSmdJkO/WL/0etZ1JFR6jrHY7na6+oidHA/AYdzXEWYTPJPdMvGZ4BC1KuvDLCTWuzA5HNfEXEhOfb+S7L0+No1IZF0LyO2LZ9lAZkUZdDIi3PLAMZmTj0pOWfO9tIcLjvV0K9GSkYs2JWH+ZpIopvEtmpgPPPSFhSMEnRwcoc2YS+wTi3JGOdnsIl8fGlqYQzKsRCRTTt1KYa5JFFBzJCmXelan+mwQWFiuGIlIHW9tV3LoqYi6oMwIcxkCXt4vVA/w1MJDctx29tr7Or/NIv9ePHid5I4LlpB92EjfM5ALZw++/tbeOvlh4e+3UwfvET4DUrZ259+D/y9uPzzX/S+n1V71/85+a+3/n8X/+3hMYbhteCz8wTFqFOqzjQkx7HOh+r6IuZfcpjB7GZEYCCX11bASg0X1E/RYSggbd5G5HlHwSTtI+6QjV8qtWzvA4fbJ/NfCa0hOhvOQD+dD6s9Byjl4w/mIUHc+IYNwrmCBOZkHYXsedPAcY3cuhsN5QD16Mp/gMUA680CMNXIAPzQz6DEgjec+g+pgvL2RCk6mIQUHjQ68bEtuPNxFPiwvclVcgH06mSvlwwfahG8i1HwC3W2d5BwLbv3U7KWatTISzDplTEysdxGFiYnqO+mpViYBlFyI4Sca1vRj+bqXrutDSD5brtNblJSYu3eYz9JKyKRDiNvHhKH9tMrcwUpoFZmqRsj0ibl5hcFIXqYBK93E+lWCDkoVkLRLN6KhKlVHUr5apsVKBKaSBrCKNUCD+ld37Z2f3f8vNrF/xzae8v/tZmvZ/7cOWmv//xwlOf8wR/X6kKUU46fFsZ1uLcEz9+76g9nyIYsppYtLJsy81kTmVCA9ZjH38qpPFIyXymmyXGTleObn9A7Ug82uUBRuYWUUq3eqKuh+sd4TzKw314uWCneLKggWN3RygopbPpUt5aT6rUy4dP/zCfLr5C6ohGEKyBO45qQrO+p6nCtkG4CZDp3OPQpDlV5/y8Yq1e3Srbi0mMtzWVPcq0Yq40UgzqN4ax/+e/l/iNS+SRtP+X+n2V72/06ztd7/eY6SnedNJhPbg5hTYQhoCebJqacIgphTNWsoIZhMTulP0sNou2mHo1B/jn3MJPlxUZ8e2Bv/UPpxrO+nFxFhUlMOkQf3yPTYPO2RClgFCrTBBti9vwTwQ2wynOtcpiETA0hnTS9t4crwLL+u8BDGu5Dw11yWwekdCdvcULgSw/PkwCwRZAkHvv8q1IVOcirhixsTleBp7nfh5QkM4+4rcXTdmfk7gDdhtRT+m/As+ZuAB+B/S/5AoBrq8SshEs5rJ/oHLv8Bd95X1gA4AAA=' | base64 --decode | tee /etc/nginx/nginxconfig.io-example.com.tar.gz > /dev/null
# Compile nginx from source with ngx_http_proxy_connect_module.
# Ref: https://www.vultr.com/docs/how-to-compile-nginx-from-source-on-ubuntu-16-04
# Install required build packages (CentOS/RHEL — use apt equivalents on Ubuntu):
yum install perl gcc patch -y
wget https://github.com/PCRE2Project/pcre2/releases/download/pcre2-10.45/pcre2-10.45.tar.gz
tar -zxf pcre2-10.45.tar.gz
wget https://zlib.net/fossils/zlib-1.3.1.tar.gz
tar -zxf zlib-1.3.1.tar.gz
wget http://www.openssl.org/source/openssl-1.1.1w.tar.gz
# Alternative, supported OpenSSL release (adjust --with-openssl below if used):
# wget https://github.com/openssl/openssl/releases/download/openssl-3.5.5/openssl-3.5.5.tar.gz
tar -zxf openssl-1.1.1w.tar.gz
# or https://github.com/openssl/openssl/releases/download/OpenSSL_1_1_1w/openssl-1.1.1w.tar.gz
wget https://nginx.org/download/nginx-1.29.6.tar.gz
tar -zxf nginx-1.29.6.tar.gz
# Other modules if needed:
# wget https://github.com/chobits/ngx_http_proxy_connect_module/archive/refs/tags/v0.0.7.tar.gz
# tar -xvzf v0.0.7.tar.gz
# Compile nginx with ngx_http_proxy_connect_module-0.0.7:
cd nginx-1.29.6
patch -p1 < ../ngx_http_proxy_connect_module-0.0.7/patch/proxy_connect_rewrite_102101.patch
# BUGFIX: removed the duplicated --with-stream flag from the original command.
./configure --prefix=/etc/nginx --with-pcre=../pcre2-10.45 --with-zlib=../zlib-1.3.1 --with-http_ssl_module --with-http_v2_module --add-dynamic-module=../ngx_http_proxy_connect_module-0.0.7 --with-openssl=../openssl-1.1.1w --with-http_realip_module --with-http_stub_status_module --with-stream --with-stream_realip_module --with-stream_ssl_module --with-stream_ssl_preread_module
make
make install
# Create the systemd unit for the source-built nginx (prefix /etc/nginx).
cat << EOF > /etc/systemd/system/nginx.service
[Unit]
Description=A high performance web server and a reverse proxy server
After=network.target
[Service]
Type=forking
PIDFile=/etc/nginx/logs/nginx.pid
ExecStartPre=/etc/nginx/sbin/nginx -t -q -g 'daemon on; master_process on;'
ExecStart=/etc/nginx/sbin/nginx -g 'daemon on; master_process on;'
ExecReload=/etc/nginx/sbin/nginx -g 'daemon on; master_process on;' -s reload
# BUGFIX: ExecStop pointed at /run/nginx.pid while PIDFile is
# /etc/nginx/logs/nginx.pid — stop would never find the process.
ExecStop=-/sbin/start-stop-daemon --quiet --stop --retry QUIT/5 --pidfile /etc/nginx/logs/nginx.pid
TimeoutStopSec=5
KillMode=mixed
[Install]
WantedBy=multi-user.target
EOF
# Start and enable the NGINX service:
systemctl start nginx.service
# BUGFIX: the original line was fused with the next section's "Contents" heading.
systemctl enable nginx.service

# Contents
0.1 Cài đặt 1 node Kafka - ZooKeeper
0.2 Cài đặt 3 node Kafka - ZooKeeper
0.3 Cài đặt 1 node Kafka - KRaft
0.4 Cài đặt 3 node Kafka - KRaft
0.1 Cài đặt 1 node Kafka - ZooKeeper
Cài đặt Kafka và ZooKeeper
# Single-node Kafka + ZooKeeper: install Java, download Kafka, create units.
apt-get update && sudo apt-get install -y openjdk-17-jdk
curl -L https://dlcdn.apache.org/kafka/3.9.1/kafka_2.13-3.9.1.tgz -o /home/tuanda/kafka.tgz
mkdir -p /home/tuanda/kafka && tar -xvzf /home/tuanda/kafka.tgz --strip 1 -C /home/tuanda/kafka
# BUGFIX: the original had a bare 'ln -s' with no arguments (always an error);
# no symlink is needed since the archive is extracted straight into
# /home/tuanda/kafka above.
cat << EOF > /etc/systemd/system/zookeeper.service
[Unit]
Requires=network.target remote-fs.target
After=network.target remote-fs.target
[Service]
Type=simple
User=root
ExecStart=/home/tuanda/kafka/bin/zookeeper-server-start.sh /home/tuanda/kafka/config/zookeeper.properties
ExecStop=/home/tuanda/kafka/bin/zookeeper-server-stop.sh
Restart=on-abnormal
[Install]
WantedBy=multi-user.target
EOF
cat << EOF > /etc/systemd/system/kafka.service
[Unit]
Requires=zookeeper.service
After=zookeeper.service
[Service]
Type=simple
User=root
# BUGFIX: ExecStart must be one line — the original wrapped mid-command,
# which systemd cannot parse.
ExecStart=/bin/sh -c '/home/tuanda/kafka/bin/kafka-server-start.sh /home/tuanda/kafka/config/server.properties > /home/tuanda/kafka/kafka.log 2>&1'
ExecStop=/home/tuanda/kafka/bin/kafka-server-stop.sh
Restart=on-abnormal
[Install]
WantedBy=multi-user.target
EOF
sudo systemctl enable zookeeper
sudo systemctl start zookeeper
sudo systemctl enable kafka
sudo systemctl start kafka
# Install Kafdrop (web UI for Kafka) and run it as a systemd service.
cd /opt
sudo curl -L https://github.com/obsidiandynamics/kafdrop/releases/download/4.0.2/kafdrop-4.0.2.jar -o /opt/kafdrop-4.0.2.jar
cat << EOF > /etc/systemd/system/kafdrop.service
[Unit]
Description=Web UI for administration of Kafka clusters
Requires=kafka.service
After=kafka.service
[Service]
User=root
WorkingDirectory=/opt/
ExecStart=/usr/bin/java --add-opens=java.base/sun.nio.ch=ALL-UNNAMED -jar kafdrop-4.0.2.jar --kafka.brokerConnect=ubuntu-host:9092
StartLimitInterval=0
RestartSec=10
Restart=always
[Install]
WantedBy=multi-user.target
EOF
sudo systemctl daemon-reload
sudo systemctl enable kafdrop.service
sudo systemctl start kafdrop.service
# Kafdrop UI is then reachable at http://192.168.88.12:9000/
0.2 Cài
đặt 3 node Kafka - ZooKeeper
Tham khảo:
Cấu hình zookeeper=================
mkdir -p /data/kafka/logs ; chown -R tuanda:tuanda /data/kafka/logs
mkdir -p /data/zookeeper/logs ; chown -R tuanda:tuanda /data/zookeeper/logs
root@master01: cat /home/tuanda/kafka/config/zookeeper.properties
dataDir=/data/zookeeper
dataLogDir=/data/zookeeper/logs
clientPort=2181
maxClientCnxns=0
admin.enableServer=false
tickTime=2000
initLimit=10
syncLimit=5
server.1=192.168.22.212:2888:3888
server.2=192.168.22.213:2888:3888
server.3=192.168.22.214:2888:3888
root@worker01:~# cat /home/tuanda/kafka/config/zookeeper.properties | grep -v '#' | grep -v ^$
dataDir=/data/zookeeper
dataLogDir=/data/zookeeper/logs
clientPort=2181
maxClientCnxns=0
admin.enableServer=false
tickTime=2000
initLimit=10
syncLimit=5
server.1=192.168.22.212:2888:3888
server.2=192.168.22.213:2888:3888
server.3=192.168.22.214:2888:3888
root@worker02:~# cat /home/tuanda/kafka/config/zookeeper.properties | grep -v '#' | grep -v ^$
dataDir=/data/zookeeper
dataLogDir=/data/zookeeper/logs
clientPort=2181
maxClientCnxns=0
admin.enableServer=false
tickTime=2000
initLimit=10
syncLimit=5
server.1=192.168.22.212:2888:3888
server.2=192.168.22.213:2888:3888
server.3=192.168.22.214:2888:3888
Chú ý nhớ làm 3 lệnh này trên 3 server. Nếu ko sẽ ko start
đc Zookeeper
echo 1 > /data/zookeeper/myid
echo 2 > /data/zookeeper/myid
echo 3 > /data/zookeeper/myid
Cấu hình Kafka=================
root@master01:/home/tuanda/kafka/config# cat /home/tuanda/kafka/config/server.properties | grep -v '#' | grep -v ^$
broker.id=1
listeners=PLAINTEXT://0.0.0.0:9092
advertised.listeners=PLAINTEXT://192.168.22.212:9092
zookeeper.connect=192.168.22.212:2181,192.168.22.213:2181,192.168.22.214:2181/kafka
log.dirs=/data/kafka/logs
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.retention.check.interval.ms=300000
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0
root@worker01:~# cat /home/tuanda/kafka/config/server.properties | grep -v '#' | grep -v ^$
broker.id=2
listeners=PLAINTEXT://0.0.0.0:9092
advertised.listeners=PLAINTEXT://192.168.22.213:9092
zookeeper.connect=192.168.22.212:2181,192.168.22.213:2181,192.168.22.214:2181/kafka
log.dirs=/data/kafka/logs
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.retention.check.interval.ms=300000
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0
root@worker02:~# cat /home/tuanda/kafka/config/server.properties | grep -v '#' | grep -v ^$
broker.id=3
listeners=PLAINTEXT://0.0.0.0:9092
advertised.listeners=PLAINTEXT://192.168.22.214:9092
zookeeper.connect=192.168.22.212:2181,192.168.22.213:2181,192.168.22.214:2181/kafka
log.dirs=/data/kafka/logs
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.retention.check.interval.ms=300000
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0
sudo systemctl stop zookeeper
sudo systemctl stop kafka
ps aux | grep zookeeper
ps aux | grep kafka
sudo systemctl restart zookeeper
sudo systemctl restart kafka
sudo systemctl start zookeeper
sudo systemctl start kafka
Cấu hình Kafdrop=================
curl -L https://github.com/obsidiandynamics/kafdrop/releases/download/4.0.2/kafdrop-4.0.2.jar -o /opt/kafdrop-4.0.2.jar
# cat /etc/systemd/system/kafdrop.service
[Unit]
Description=Web UI for administration of Kafka clusters
Requires=kafka.service
After=kafka.service
[Service]
User=root
WorkingDirectory=/opt/
ExecStart=/usr/bin/java --add-opens=java.base/sun.nio.ch=ALL-UNNAMED -jar kafdrop-4.0.2.jar --kafka.brokerConnect=192.168.22.212:9092,192.168.22.213:9092,192.168.22.214:9092 --server.port=9010 --management.server.port=9010
StartLimitInterval=0
RestartSec=10
Restart=always
[Install]
WantedBy=multi-user.target
sudo systemctl daemon-reload
sudo systemctl enable kafdrop.service
sudo systemctl start kafdrop.service

0.3 Cài đặt 1 node Kafka - KRaft
Hoặc: https://blog.codefarm.me/install-kafka-with-kraft/
adduser kafka
su - kafka
wget https://downloads.apache.org/kafka/3.8.0/kafka_2.13-3.8.0.tgz
tar -xzvf kafka_2.13-3.8.0.tgz
#1. Create cluster IP
KAFKA_CLUSTER_ID=$(bin/kafka-storage.sh random-uuid)
echo $KAFKA_CLUSTER_ID
#2. Format storage
bin/kafka-storage.sh format -t $KAFKA_CLUSTER_ID -c config/kraft/server.properties
#3. start test
/home/kafka/kafka/bin/kafka-server-start.sh /home/kafka/kafka/config/kraft/server.properties
#4. Create systemd
# vim /lib/systemd/system/kafka-server.service
[Unit]
Description=Apache Kafka Service with KRaft Mode
Requires=network.target
After=network.target
[Service]
Type=simple
User=kafka
Group=kafka
ExecStart=/home/kafka/kafka/bin/kafka-server-start.sh /home/kafka/kafka/config/kraft/server.properties
ExecStop=/home/kafka/kafka/bin/kafka-server-stop.sh
Restart=on-abnormal
Environment="KAFKA_HEAP_OPTS=-Xmx512m -Xms512m"
[Install]
WantedBy=multi-user.target
systemctl daemon-reload
systemctl enable kafka-server
systemctl start kafka-server
0.4 Cài đặt 3 node Kafka - KRaft
URL: https://jainsaket-1994.medium.com/setup-kafka-cluster-with-kraft-561f281b8e2a
https://viblo.asia/p/cai-dat-cluster-kafka-su-dung-kraft-vlZL9AYeLQK
·
if we are configuring controller node, then we need to select /etc/kafka/config/kraft/controller.properties
·
if we are configuring broker node, then we need to select /etc/kafka/config/kraft/broker.properties
·
if we are configuring both broker and controller node, then we need to select
/etc/kafka/config/kraft/server.properties
sudo adduser kafka
mkdir -p /data/kafka
chown -R kafka:kafka /data/kafka
sudo su - kafka
wget https://archive.apache.org/dist/kafka/3.9.0/kafka_2.13-3.9.0.tgz
tar xzf kafka_2.13-3.9.0.tgz
ln -s kafka_2.13-3.9.0 kafka
#vim /home/kafka/kafka/config/kraft/server.properties
node.id=1
process.roles=broker,controller
controller.quorum.voters=1@192.168.22.212:9093,2@192.168.22.213:9093,3@192.168.22.214:9093
listeners=PLAINTEXT://:9092,CONTROLLER://:9093
inter.broker.listener.name=PLAINTEXT
controller.listener.names=CONTROLLER
log.dirs=/data/kafka
#vim /home/kafka/kafka/config/kraft/server.properties
node.id=2
process.roles=broker,controller
controller.quorum.voters=1@192.168.22.212:9093,2@192.168.22.213:9093,3@192.168.22.214:9093
listeners=PLAINTEXT://:9092,CONTROLLER://:9093
inter.broker.listener.name=PLAINTEXT
controller.listener.names=CONTROLLER
log.dirs=/data/kafka
#vim /home/kafka/kafka/config/kraft/server.properties
node.id=3
process.roles=broker,controller
controller.quorum.voters=1@192.168.22.212:9093,2@192.168.22.213:9093,3@192.168.22.214:9093
listeners=PLAINTEXT://:9092,CONTROLLER://:9093
inter.broker.listener.name=PLAINTEXT
controller.listener.names=CONTROLLER
log.dirs=/data/kafka
#Để đảm bảo tính sẵng sàng cho cụm, chỉnh sửa các thông số sau
để dảm bảo rằng tất cả các node trên cụm đều được đồng bộ metadata, tại đây
mình set là 3 vì cụm mình có 3 node và đảm bảo mỗi node đều giữ metadata.
sed -i -e 's/offsets.topic.replication.factor=1/offsets.topic.replication.factor=3/g' /home/kafka/kafka/config/kraft/server.properties
sed -i -e 's/transaction.state.log.replication.factor=1/transaction.state.log.replication.factor=3/g' /home/kafka/kafka/config/kraft/server.properties
sed -i -e 's/transaction.state.log.min.isr=1/transaction.state.log.min.isr=3/g' /home/kafka/kafka/config/kraft/server.properties
# Chỉ gen ID 1 lần
KAFKA_CLUSTER_ID=$(/home/kafka/kafka/bin/kafka-storage.sh random-uuid)
echo $KAFKA_CLUSTER_ID
dNbju0IZTRqaT37XtyTU0Q
# Định dạng (Format) bộ nhớ trên cả 3 node:
/home/kafka/kafka/bin/kafka-storage.sh format -t dNbju0IZTRqaT37XtyTU0Q -c /home/kafka/kafka/config/kraft/server.properties
# Systemd unit for Kafka running in KRaft mode (no ZooKeeper).
cat << EOF > /etc/systemd/system/kafka.service
[Unit]
# BUGFIX: the original required zookeeper.service, but KRaft mode has no
# ZooKeeper — that dependency would prevent Kafka from ever starting.
Requires=network.target
After=network.target
[Service]
Type=simple
User=kafka
# BUGFIX: ExecStart must be one line — the original wrapped mid-command,
# which systemd cannot parse.
ExecStart=/home/kafka/kafka/bin/kafka-server-start.sh /home/kafka/kafka/config/kraft/server.properties
ExecStop=/home/kafka/kafka/bin/kafka-server-stop.sh
Restart=on-abnormal
RestartSec=10
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kafka
service kafka restart
service kafka status
#check kafka cluster
cd /home/kafka/kafka
/home/kafka/kafka/bin/kafka-metadata-quorum.sh --bootstrap-controller 192.168.22.214:9093 describe --status
root@master01:/data/kafka# tree
.
├── __cluster_metadata-0
│ ├── 00000000000000000000.index
│ ├── 00000000000000000000.log
│ ├── 00000000000000000000.timeindex
│ ├── leader-epoch-checkpoint
│ ├── partition.metadata
│ └── quorum-state
├── bootstrap.checkpoint
├── cleaner-offset-checkpoint
├── log-start-offset-checkpoint
├── meta.properties
├── recovery-point-offset-checkpoint
└── replication-offset-checkpoint
0.5 Tuning cho Kafka
#vim /etc/sysctl.conf
net.ipv6.conf.all.disable_ipv6 = 0
net.ipv6.conf.default.disable_ipv6 = 0
net.ipv6.conf.default.accept_redirects = 0
net.ipv6.conf.all.accept_redirects = 0
net.ipv6.conf.default.accept_source_route = 0
net.ipv6.conf.all.accept_ra = 0
net.ipv6.conf.default.accept_ra = 0
net.ipv6.conf.all.accept_source_route = 0
net.ipv6.conf.all.forwarding = 0
net.ipv4.conf.all.secure_redirects = 0
net.ipv4.conf.default.secure_redirects = 0
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.icmp_ignore_bogus_error_responses = 1
net.ipv4.conf.default.accept_redirects = 0
kernel.randomize_va_space=2
net.ipv4.tcp_syncookies=1
net.ipv4.conf.all.rp_filter=1
net.ipv4.conf.default.rp_filter=1
net.ipv4.conf.default.accept_source_route=0
net.ipv4.icmp_echo_ignore_broadcasts=1
net.ipv4.ip_forward=0
net.ipv4.conf.all.send_redirects=0
net.ipv4.conf.default.send_redirects=0
net.ipv4.tcp_max_orphans=256
net.ipv4.conf.all.log_martians=1
net.ipv4.tcp_keepalive_intvl=25
net.ipv4.tcp_keepalive_probes=5
net.ipv4.tcp_fin_timeout=20
net.ipv4.tcp_max_syn_backlog=8192
net.core.somaxconn=8192
net.core.netdev_max_backlog=8192
net.ipv4.ip_local_port_range=10000 65000
fs.file-max=1000000
net.core.wmem_max=12582912
net.core.rmem_max=12582912
net.ipv4.tcp_rmem=10240 87380 12582912
net.ipv4.tcp_wmem=10240 87380 12582912
net.ipv4.tcp_mem=578522 704696 957044
net.ipv4.tcp_tw_reuse=1
net.ipv4.tcp_max_tw_buckets=0
fs.suid_dumpable=0
vm.swappiness=1
# vim /etc/security/limits.conf
# NOTE: 'unlimited' is not a valid value for nofile in limits.conf
# (the effective ceiling is fs.nr_open); use a concrete number instead.
kafka soft nofile 1048576
kafka hard nofile 1048576
kafka soft nproc unlimited
kafka hard nproc unlimited
root soft nofile 95000
root hard nofile 95000
root soft nproc 95000
root hard nproc 95000
#Disable Transparent Huge Pages
echo never > /sys/kernel/mm/transparent_hugepage/defrag
echo never > /sys/kernel/mm/transparent_hugepage/enabled
#These steps for CentOS/RHEL
1. Sử dụng cơ bản
bin/kafka-topics.sh --bootstrap-server localhost:9092 --list
bin/kafka-topics.sh --bootstrap-server localhost:9092 --create --topic <tên_topic> --partitions 3 --replication-factor 1
bin/kafka-topics.sh --bootstrap-server localhost:9092 --describe --topic <tên_topic>
bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic <tên_topic> --from-beginning
bin/kafka-topics.sh --bootstrap-server localhost:9092 --alter --topic <tên_topic> --partitions 5
bin/kafka-configs.sh --bootstrap-server localhost:9092 --alter --entity-type topics --entity-name <tên_topic> --add-config retention.ms=86400000
bin/kafka-topics.sh --delete --topic <tên_topic> --bootstrap-server localhost:9092
bin/kafka-console-producer.sh --topic quickstart-events --bootstrap-server localhost:9092
#This is my first event
#This is my second event
bin/kafka-console-consumer.sh --topic quickstart-events --from-beginning --bootstrap-server localhost:9092
#This is my first event
#This is my second event
2. Security Cho Kafka
2.3 ACL:
Không có nhận xét nào:
Đăng nhận xét