欢迎来到尧图网

客户服务 关于我们

您的位置:首页 > 健康 > 养生 > Ubuntu2204搭建ceph17

Ubuntu2204搭建ceph17

2024/10/24 16:31:40 来源:https://blog.csdn.net/m0_56363537/article/details/137840425  浏览:    关键词:Ubuntu2204搭建ceph17

Ceph

    • 环境初始化
    • 搭建Ceph

本次实验基于VMware17

节点 | IP
storage01 | 192.168.200.161
storage02 | 192.168.200.162
storage03 | 192.168.200.163

环境初始化

初始化基础环境,三节点执行

#!/bin/bash
# Cluster bootstrap script — run on all three nodes.
# Sets the hostname, fills /etc/hosts, distributes SSH keys for
# passwordless login, and configures chrony time synchronisation.

# Node list: "IP hostname ssh-user"
NODES=("192.168.200.161 storage01 root" "192.168.200.162 storage02 root" "192.168.200.163 storage03 root")
# Shared root password for every node (used by sshpass below)
HOST_PASS="000000"
# Node that acts as the time-sync server
TIME_SERVER=storage01
# Subnet allowed to sync from the time server
# FIX: was 192.160.200.0/24 — typo; the cluster subnet is 192.168.200.0/24
TIME_SERVER_IP=192.168.200.0/24

# Login banner
cat > /etc/motd <<EOF
################################
#    Welcome  to  openstack    #
################################
EOF

# Set this node's hostname from the NODES table (match by primary IP)
for node in "${NODES[@]}"; do
  ip=$(echo "$node" | awk '{print $1}')
  hostname=$(echo "$node" | awk '{print $2}')
  current_ip=$(hostname -I | awk '{print $1}')
  current_hostname=$(hostname)
  if [[ "$current_ip" == "$ip" && "$current_hostname" != "$hostname" ]]; then
    echo "Updating hostname to $hostname on $current_ip..."
    if hostnamectl set-hostname "$hostname"; then
      echo "Hostname updated successfully."
    else
      echo "Failed to update hostname."
    fi
    break
  fi
done

# Add every node to /etc/hosts (idempotent: skip existing entries)
for node in "${NODES[@]}"; do
  ip=$(echo "$node" | awk '{print $1}')
  hostname=$(echo "$node" | awk '{print $2}')
  if grep -q "$ip $hostname" /etc/hosts; then
    echo "Host entry for $hostname already exists in /etc/hosts."
  else
    sudo sh -c "echo '$ip $hostname' >> /etc/hosts"
    echo "Added host entry for $hostname in /etc/hosts."
  fi
done

# Generate an SSH key pair if this node does not have one yet
if [[ ! -s ~/.ssh/id_rsa.pub ]]; then
  ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa -q -b 2048
fi

# Install sshpass if missing ('command -v' instead of 'which' — portable)
if ! command -v sshpass &> /dev/null; then
  echo "sshpass 工具未安装,正在安装 sshpass..."
  sudo apt-get install -y sshpass
fi

# Distribute this node's key to every node for passwordless SSH
for node in "${NODES[@]}"; do
  ip=$(echo "$node" | awk '{print $1}')
  hostname=$(echo "$node" | awk '{print $2}')
  user=$(echo "$node" | awk '{print $3}')
  sshpass -p "$HOST_PASS" ssh-copy-id -o StrictHostKeyChecking=no -i /root/.ssh/id_rsa.pub "$user@$hostname"
done

# Time synchronisation
apt install -y chrony
# FIX: the original test '[[ $TIME_SERVER_IP == *$(hostname -I)* ]]' asked
# whether the SUBNET string contains the host IP, which is never true, so the
# server branch could never run. Compare the hostname with TIME_SERVER instead.
if [[ "$(hostname)" == "$TIME_SERVER" ]]; then
  # This node is the time source: disable the default pools, serve the subnet.
  # NOTE(review): the '20,23' line range assumes the stock Ubuntu 22.04
  # /etc/chrony/chrony.conf layout — confirm before reuse on other releases.
  sed -i '20,23s/^/#/g' /etc/chrony/chrony.conf
  echo "server $TIME_SERVER iburst maxsources 2" >> /etc/chrony/chrony.conf
  echo "allow $TIME_SERVER_IP" >> /etc/chrony/chrony.conf
  echo "local stratum 10" >> /etc/chrony/chrony.conf
else
  # Client node: sync from the time server
  sed -i '20,23s/^/#/g' /etc/chrony/chrony.conf
  echo "pool $TIME_SERVER iburst maxsources 2" >> /etc/chrony/chrony.conf
fi

# Restart and enable chrony
systemctl restart chronyd
systemctl enable chronyd
echo "###############################################################"
echo "#################      集群初始化成功     #####################"
echo "###############################################################"

搭建Ceph

配置离线源

# Unpack the offline Quincy package mirror and point apt at it.
tar zxvf ceph_quincy.tar.gz -C /opt/
# Keep a backup of the original source list before replacing it.
cp /etc/apt/sources.list{,.bak}
# NOTE(review): this is the flat-repo form; verify apt resolves
# 'file:// /opt/ceph_quincy/debs/' on your host (some setups want
# 'deb [trusted=yes] file:/opt/ceph_quincy/debs ./').
cat > /etc/apt/sources.list << EOF
deb [trusted=yes] file:// /opt/ceph_quincy/debs/
EOF
apt-get clean all
apt-get update

配置时间同步

# (Optional) enable NTP synchronisation
timedatectl set-ntp true
# Set the Shanghai timezone
timedatectl set-timezone Asia/Shanghai
# Write the (now correct) system clock to the hardware clock
hwclock --systohc

所有节点安装docker

# Install Docker CE on every node — cephadm runs all Ceph daemons in containers.
apt -y install docker-ce

01节点安装cephadm和ceph工具

# Only on storage01: cephadm orchestrates the cluster, ceph-common provides the CLI.
apt install -y cephadm ceph-common

所有节点导入镜像

# On every node: preload the Ceph v17 container images from the offline tarball.
docker load -i cephadm_images_v17.tar

01节点配置仓库

# Load the registry image from the offline tarball
docker load -i registry.tar
# Start a private registry on port 5000; 3a0f7b0a13ef is the image ID of
# the registry image that was just loaded.
docker run -d --name registry -p 5000:5000 --restart always 3a0f7b0a13ef

所有节点配置地址

# Allow docker to pull from the plain-HTTP registry on storage01.
# FIX: use '>' (overwrite), not '>>' — appending a second JSON object on a
# re-run would make daemon.json invalid JSON and the docker daemon would
# refuse to start. NOTE(review): this replaces any pre-existing daemon.json
# settings; merge manually if the file already has other keys.
cat > /etc/docker/daemon.json << EOF
{
"insecure-registries":["192.168.200.161:5000"]
}
EOF
systemctl daemon-reload
systemctl restart docker

01节点推送

# Tag the loaded ceph image (0912465dcea5 is its local image ID) for the
# private registry and push it so every node can pull it over the LAN.
docker tag 0912465dcea5 192.168.200.161:5000/ceph:v17
docker push 192.168.200.161:5000/ceph:v17
cd /etc/ceph

01节点初始化集群

# Bootstrap the cluster from the private-registry image; --skip-pull because
# the image is already available locally. Dashboard login: admin / 000000.
cephadm --image 192.168.200.161:5000/ceph:v17 bootstrap --mon-ip 192.168.200.161 --initial-dashboard-user admin --initial-dashboard-password 000000 --skip-pull

修改HTTPS端口号(往下选做)

# Move the dashboard HTTPS port from the default 8443 to 5050 (optional section).
ceph config set mgr mgr/dashboard/ssl_server_port 5050

关闭dashboard证书认证

# Disable TLS on the dashboard (plain HTTP — only for trusted lab networks).
ceph config set mgr mgr/dashboard/ssl false 

指定 dashboard 监听地址

# Bind the dashboard to all interfaces.
ceph config set mgr mgr/dashboard/server_addr 0.0.0.0

指定 dashboard 监听端口

# HTTP listen port used when SSL is disabled.
ceph config set mgr mgr/dashboard/server_port 5050

重启dashboard模块生效(往上选做)

# Restart the dashboard module so the settings above take effect.
ceph mgr module disable dashboard
ceph mgr module enable dashboard

加入集群

# Push the cluster's SSH public key to the other nodes, then register them
# with the orchestrator so cephadm can deploy daemons on them.
ssh-copy-id -f -i /etc/ceph/ceph.pub storage02
ssh-copy-id -f -i /etc/ceph/ceph.pub storage03
ceph orch host add storage02
ceph orch host add storage03

查看集群

root@storage01:/etc/ceph# ceph -s
  cluster:
    id:     4569c748-fc81-11ee-872a-7f1819cf2453
    health: HEALTH_WARN
            1 stray daemon(s) not managed by cephadm
            OSD count 0 < osd_pool_default_size 3
  services:
    mon: 2 daemons, quorum storage02,storage03 (age 10m)
    mgr: storage03.lnyuay(active, since 68s)
    osd: 0 osds: 0 up, 0 in
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:
root@storage01:/etc/ceph#

界面访问:IP:8443

有问题可删除集群

# DESTRUCTIVE: wipes the whole cluster. Replace the fsid with YOUR cluster's
# id (shown by 'ceph -s') before running.
cephadm rm-cluster --fsid d92b85c0-3ecd-11ed-a617-3f7cf3e2d6d8 --force

查看可用磁盘设备

# List block devices on all hosts; AVAILABLE=Yes means usable for an OSD.
ceph orch device ls
root@storage01:/etc/ceph# ceph orch device ls
HOST       PATH      TYPE  DEVICE ID   SIZE  AVAILABLE  REFRESHED  REJECT REASONS
storage01  /dev/sdb  hdd               107G  Yes        19m ago
storage02  /dev/sdb  hdd               107G  Yes        12m ago
storage03  /dev/sdb  hdd               107G  Yes        12m ago

创建OSD

# Create one OSD on the spare /dev/sdb disk of each node (3 OSDs total,
# matching osd_pool_default_size so the cluster can reach HEALTH_OK).
ceph orch daemon add osd storage01:/dev/sdb
ceph orch daemon add osd storage02:/dev/sdb
ceph orch daemon add osd storage03:/dev/sdb

查看验证

root@storage01:/etc/ceph# ceph -s
  cluster:
    id:     4569c748-fc81-11ee-872a-7f1819cf2453
    health: HEALTH_OK
  services:
    mon: 3 daemons, quorum storage03,storage01,storage02 (age 54s)
    mgr: storage01.gitwte(active, since 110s)
    osd: 3 osds: 3 up (since 5m), 3 in (since 5m)
  data:
    pools:   1 pools, 1 pgs
    objects: 2 objects, 449 KiB
    usage:   62 MiB used, 300 GiB / 300 GiB avail
    pgs:     1 active+clean
root@storage01:/etc/ceph# ceph df
--- RAW STORAGE ---
CLASS     SIZE    AVAIL    USED  RAW USED  %RAW USED
hdd    300 GiB  300 GiB  62 MiB    62 MiB       0.02
TOTAL  300 GiB  300 GiB  62 MiB    62 MiB       0.02--- POOLS ---
POOL  ID  PGS   STORED  OBJECTS     USED  %USED  MAX AVAIL
.mgr   1    1  449 KiB        2  1.3 MiB      0     95 GiB
root@storage01:/etc/ceph#

CephFS 需要两个 Pools,cephfs-data 和 cephfs-metadata,分别存储文件数据和文件元数据

# Create the two pools CephFS needs (16 PGs for metadata, 32 for data),
# create the filesystem, then deploy 3 MDS daemons (1 active + 2 standby).
ceph osd pool create cephfs-metadata 16 16
ceph osd pool create cephfs-data 32 32
ceph fs new cephfs cephfs-metadata cephfs-data
ceph orch apply mds cephfs --placement="3 storage01 storage02 storage03"
# Expected status afterwards:
# root@storage01:/etc/ceph# ceph -s
#   cluster:
#     id:     4569c748-fc81-11ee-872a-7f1819cf2453
#     health: HEALTH_OK
#   services:
#     mon: 3 daemons, quorum storage03,storage02,storage01 (age 10s)
#     mgr: storage01.gitwte(active, since 10m)
#     mds: 1/1 daemons up, 2 standby
#     osd: 3 osds: 3 up (since 14m), 3 in (since 14m)
#     rgw: 3 daemons active (3 hosts, 1 zones)
#   data:
#     volumes: 1/1 healthy
#     pools:   7 pools, 177 pgs
#     objects: 216 objects, 457 KiB
#     usage:   104 MiB used, 300 GiB / 300 GiB avail
#     pgs:     177 active+clean

存储对象存储

# Deploy the RADOS Gateway (S3/Swift object storage): realm "myorg",
# zone "cn-east-1", one rgw daemon on each of the three nodes.
ceph orch apply rgw myorg cn-east-1 --placement="3 storage01 storage02 storage03"
root@storage01:/etc/ceph# ceph orch ls
NAME                       PORTS        RUNNING  REFRESHED  AGE  PLACEMENT
alertmanager               ?:9093,9094      1/1  47s ago    36m  count:1
crash                                       3/3  3m ago     36m  *
grafana                    ?:3000           1/1  47s ago    36m  count:1
mds.cephfs                                  3/3  3m ago     5m   storage01;storage02;storage03;count:3
mgr                                         1/1  47s ago    11m  storage01
mon                                         3/1  3m ago     55s  storage01
node-exporter              ?:9100           3/3  3m ago     36m  *
osd.all-available-devices                     3  3m ago     15m  *
prometheus                 ?:9095           1/1  47s ago    36m  count:1
rgw.myorg                  ?:80             3/3  3m ago     4m   storage01;storage02;storage03;count:3
root@storage01:/etc/ceph#

版权声明:

本网仅为发布的内容提供存储空间,不对发表、转载的内容提供任何形式的保证。凡本网注明“来源:XXX网络”的作品,均转载自其它媒体,著作权归作者所有,商业转载请联系作者获得授权,非商业转载请注明出处。

我们尊重并感谢每一位作者,均已注明文章来源和作者。如因作品内容、版权或其它问题,请及时与我们联系,联系邮箱:809451989@qq.com,投稿邮箱:809451989@qq.com