CEPH Environment Setup 04

This section mainly tests object storage, one of Ceph's three storage types.

1. Create an S3 user

radosgw-admin user create --uid=s3user --display-name=s3user  --system
{
    "user_id": "s3user",
    "display_name": "s3user",
    "email": "",
    "suspended": 0,
    "max_buckets": 1000,
    "auid": 0,
    "subusers": [],
    "keys": [
        {
            "user": "s3user",
            "access_key": "6IUA1DMFDTP5BG9ZMIR8",
            "secret_key": "zdoRS2yWL6EsNEBa4xuOSFMPn0lMvPJVMIYZJirP"
        }
    ],
    "swift_keys": [],
    "caps": [],
    "op_mask": "read, write, delete",
    "system": "true",
    "default_placement": "",
    "placement_tags": [],
    "bucket_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "user_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "temp_url_keys": [],
    "type": "rgw"
}
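
The access_key and secret_key above are what the S3 client will need later. If they scroll out of view, they can be re-displayed at any time with the same tooling; a quick check:

# Re-display the user's keys later if needed
sudo radosgw-admin user info --uid=s3user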

2. Create a swift subuser

sudo radosgw-admin subuser create --uid=s3user --subuser=s3user:swift --access=full
{
    "user_id": "s3user",
    "display_name": "s3user",
    "email": "",
    "suspended": 0,
    "max_buckets": 1000,
    "auid": 0,
    "subusers": [
        {
            "id": "s3user:swift",
            "permissions": "full-control"
        }
    ],
    "keys": [
        {
            "user": "s3user",
            "access_key": "6IUA1DMFDTP5BG9ZMIR8",
            "secret_key": "zdoRS2yWL6EsNEBa4xuOSFMPn0lMvPJVMIYZJirP"
        }
    ],
    "swift_keys": [
        {
            "user": "s3user:swift",
            "secret_key": "2wou5DxQ6WiBYyHf8qb3QIMX9BnhhBd5Njlj6LJX"
        }
    ],
    "caps": [],
    "op_mask": "read, write, delete",
    "system": "true",
    "default_placement": "",
    "placement_tags": [],
    "bucket_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "user_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "temp_url_keys": [],
    "type": "rgw"
}

3. Generate a key for the swift subuser

sudo radosgw-admin key create --subuser=s3user:swift --key-type=swift --gen-secret
{
    "user_id": "s3user",
    "display_name": "s3user",
    "email": "",
    "suspended": 0,
    "max_buckets": 1000,
    "auid": 0,
    "subusers": [
        {
            "id": "s3user:swift",
            "permissions": "full-control"
        }
    ],
    "keys": [
        {
            "user": "s3user",
            "access_key": "6IUA1DMFDTP5BG9ZMIR8",
            "secret_key": "zdoRS2yWL6EsNEBa4xuOSFMPn0lMvPJVMIYZJirP"
        }
    ],
    "swift_keys": [
        {
            "user": "s3user:swift",
            "secret_key": "itNAiqvaLSvixsd7aPMb1fs7F3M4zxuPjt3C7dd4"
        }
    ],
    "caps": [],
    "op_mask": "read, write, delete",
    "system": "true",
    "default_placement": "",
    "placement_tags": [],
    "bucket_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "user_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "temp_url_keys": [],
    "type": "rgw"
}
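
Note that --gen-secret replaces the subuser's previous swift secret, so the key shown in step 2 is no longer valid. As a hedged sketch of the reverse operation, a swift key can also be removed from the subuser:

# Remove the subuser's swift key (sketch; this destroys the credential)
sudo radosgw-admin key rm --subuser=s3user:swift --key-type=swift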

4. Create a bucket through the S3 API

sudo apt-get install python-boto

vi s3test.py

import boto.s3.connection

access_key = '6IUA1DMFDTP5BG9ZMIR8'
secret_key = 'zdoRS2yWL6EsNEBa4xuOSFMPn0lMvPJVMIYZJirP'

# connect to the RGW S3 endpoint over plain HTTP on port 7480
conn = boto.connect_s3(
    aws_access_key_id=access_key,
    aws_secret_access_key=secret_key,
    host='ceph01', port=7480,
    is_secure=False, calling_format=boto.s3.connection.OrdinaryCallingFormat(),
)

# create a bucket, then list every bucket owned by this user
bucket = conn.create_bucket('my-new-bucket')
for bucket in conn.get_all_buckets():
    print "{name} {created}".format(
        name=bucket.name,
        created=bucket.creation_date,
    )
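
Run the script with the Python 2 interpreter that python-boto targets; it should create the bucket and print one line per bucket (the output shown here is only what is expected, not captured from this cluster):

python s3test.py
# my-new-bucket <creation timestamp>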

5. List buckets through the Swift API

sudo apt-get install python-pip
sudo pip install --upgrade setuptools
sudo pip install --upgrade python-swiftclient

swift -V 1 -A http://172.16.172.101:7480/auth -U s3user:swift -K 'itNAiqvaLSvixsd7aPMb1fs7F3M4zxuPjt3C7dd4' list
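
The same credentials can also upload and fetch objects. A minimal sketch with the standard swift CLI, assuming a local file named hi.txt exists to act as the test object:

# Upload a local file into the bucket created through S3, then inspect and download it
swift -V 1 -A http://172.16.172.101:7480/auth -U s3user:swift -K 'itNAiqvaLSvixsd7aPMb1fs7F3M4zxuPjt3C7dd4' upload my-new-bucket hi.txt
swift -V 1 -A http://172.16.172.101:7480/auth -U s3user:swift -K 'itNAiqvaLSvixsd7aPMb1fs7F3M4zxuPjt3C7dd4' stat my-new-bucket
swift -V 1 -A http://172.16.172.101:7480/auth -U s3user:swift -K 'itNAiqvaLSvixsd7aPMb1fs7F3M4zxuPjt3C7dd4' download my-new-bucket hi.txt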

CEPH Environment Setup 03

This section mainly tests block storage (RBD), one of Ceph's three storage types.

1. Create a storage pool and an RBD image

sudo ceph osd pool ls
sudo ceph osd pool create rbd

sudo rados df

sudo rbd ls
sudo rbd create --size 1024 rbd/r1
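
Before mapping, it is worth checking the image's size and feature flags, since the enabled features are what cause the mapping error in the next step; a quick sketch:

# Optionally initialize the new pool for RBD use, then inspect the image
sudo rbd pool init rbd
sudo rbd info rbd/r1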

2. Map the image to a block device

# Mapping the image directly reports an error
#sudo rbd map r1
#rbd: sysfs write failed
#RBD image feature set mismatch. You can disable features unsupported by the kernel with "rbd feature disable r1 object-map fast-diff deep-flatten".
#In some cases useful info is found in syslog - try "dmesg | tail".
#rbd: map failed: (6) No such device or address

# Fix the error by disabling the unsupported features, then map again
sudo rbd feature disable r1 object-map fast-diff deep-flatten
sudo rbd map r1
/dev/rbd0

# Check the mapping status
sudo rbd showmapped
id pool image snap device
0  rbd  r1    -    /dev/rbd0

3. Initialize the block device

# Inspect the device with fdisk
sudo fdisk -l /dev/rbd0

# Format the device as ext4
sudo mkfs.ext4 -m0 /dev/rbd0

# Mount the block device
sudo mkdir -p /mnt/rbd/r1
sudo mount -t ext4 /dev/rbd0 /mnt/rbd/r1
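
A quick sanity check that the filesystem is mounted and shows roughly the 1 GiB image size:

# Verify the mount and its capacity
df -h /mnt/rbd/r1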

4. Perform some basic operations

sudo ls /mnt/rbd/r1

sudo vi /mnt/rbd/r1/hi.txt

sudo cat /mnt/rbd/r1/hi.txt
hello rbd
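
When finished, the device can be cleaned up in reverse order; a minimal sketch (the last command deletes the image and its data, so it is left commented out):

# Unmount, unmap, and optionally delete the image
sudo umount /mnt/rbd/r1
sudo rbd unmap /dev/rbd0
#sudo rbd rm rbd/r1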

CEPH Environment Setup 02

This section mainly adds the storage devices to Ceph for management and tests CephFS, one of Ceph's three storage types.

1. Check device status

sudo ceph osd status
ID  HOST     USED  AVAIL  WR OPS  WR DATA  RD OPS  RD DATA  STATE
0  ceph01  1027M   298G      0        0       0        0   exists,up
1  ceph02  1027M   298G      0        0       0        0   exists,up
2  ceph03  1027M   298G      0        0       0        0   exists,up
3  ceph04  1027M   298G      0        0       0        0   exists,up

sudo ceph orch device ls
HOST    PATH      TYPE   SIZE  DEVICE                             AVAIL  REJECT REASONS
ceph01  /dev/sdb  hdd    300G  VBOX_HARDDISK_VB434b1565-528a303a  True
ceph01  /dev/sda  hdd    300G  VBOX_HARDDISK_VB3eec2162-9aed4ffc  False  locked
ceph02  /dev/sdb  hdd    300G  VBOX_HARDDISK_VBa6445865-c497aa8e  True
ceph02  /dev/sda  hdd    300G  VBOX_HARDDISK_VB64e04201-60c7209f  False  locked
ceph03  /dev/sdb  hdd    300G  VBOX_HARDDISK_VB20fd0c04-b14ef3fa  True
ceph03  /dev/sda  hdd    300G  VBOX_HARDDISK_VB6f4439ab-85f80c78  False  locked
ceph04  /dev/sdb  hdd    300G  VBOX_HARDDISK_VB2c293541-3183e992  True
ceph04  /dev/sda  hdd    300G  VBOX_HARDDISK_VBd81d45d4-a88d6ff3  False  locked

2. Add the storage devices as OSDs

sudo ceph orch apply osd --all-available-devices
Scheduled osd update...

sudo ceph orch device ls --refresh
HOST    PATH      TYPE   SIZE  DEVICE                             AVAIL  REJECT REASONS
ceph01  /dev/sda  hdd    300G  VBOX_HARDDISK_VB3eec2162-9aed4ffc  False  locked
ceph01  /dev/sdb  hdd    300G  VBOX_HARDDISK_VB434b1565-528a303a  False  LVM detected, Insufficient space (<5GB) on vgs, locked
ceph02  /dev/sda  hdd    300G  VBOX_HARDDISK_VB64e04201-60c7209f  False  locked
ceph02  /dev/sdb  hdd    300G  VBOX_HARDDISK_VBa6445865-c497aa8e  False  LVM detected, locked, Insufficient space (<5GB) on vgs
ceph03  /dev/sda  hdd    300G  VBOX_HARDDISK_VB6f4439ab-85f80c78  False  locked
ceph03  /dev/sdb  hdd    300G  VBOX_HARDDISK_VB20fd0c04-b14ef3fa  False  locked, Insufficient space (<5GB) on vgs, LVM detected
ceph04  /dev/sda  hdd    300G  VBOX_HARDDISK_VBd81d45d4-a88d6ff3  False  locked
ceph04  /dev/sdb  hdd    300G  VBOX_HARDDISK_VB2c293541-3183e992  False  LVM detected, locked, Insufficient space (<5GB) on vgs
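
The OSD daemons that the orchestrator created can also be listed directly; a small check (assuming Octopus's ceph orch ps daemon-type filter):

# List the deployed OSD daemons per host
sudo ceph orch ps --daemon-type osd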

3. Check OSD status

sudo ceph osd df
ID  CLASS  WEIGHT   REWEIGHT  SIZE     RAW USE  DATA     OMAP  META   AVAIL    %USE  VAR   PGS  STATUS
0    hdd  0.29300   1.00000  300 GiB  1.0 GiB  3.4 MiB   0 B  1 GiB  299 GiB  0.33  1.00    0      up
1    hdd  0.29300   1.00000  300 GiB  1.0 GiB  3.4 MiB   0 B  1 GiB  299 GiB  0.33  1.00    1      up
2    hdd  0.29300   1.00000  300 GiB  1.0 GiB  3.4 MiB   0 B  1 GiB  299 GiB  0.33  1.00    1      up
3    hdd  0.29300   1.00000  300 GiB  1.0 GiB  3.4 MiB   0 B  1 GiB  299 GiB  0.33  1.00    1      up
TOTAL  1.2 TiB  4.0 GiB   14 MiB   0 B  4 GiB  1.2 TiB  0.33
MIN/MAX VAR: 1.00/1.00  STDDEV: 0
sudo ceph osd utilization
avg 0.75
stddev 0.433013 (expected baseline 0.75)
min osd.0 with 0 pgs (0 * mean)
max osd.1 with 1 pgs (1.33333 * mean)
sudo ceph osd pool stats
pool device_health_metrics id 1
nothing is going on

sudo ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME        STATUS  REWEIGHT  PRI-AFF
-1         1.17200  root default
-3         0.29300      host ceph01
0    hdd  0.29300          osd.0        up   1.00000  1.00000
-5         0.29300      host ceph02
1    hdd  0.29300          osd.1        up   1.00000  1.00000
-7         0.29300      host ceph03
2    hdd  0.29300          osd.2        up   1.00000  1.00000
-9         0.29300      host ceph04
3    hdd  0.29300          osd.3        up   1.00000  1.00000

sudo ceph pg stat
1 pgs: 1 active+clean; 0 B data, 14 MiB used, 1.2 TiB / 1.2 TiB avail

4. Create a CephFS file system

sudo ceph fs volume ls
[]
sudo ceph fs volume create  v1
sudo ceph fs volume ls
[
{
"name": "v1"
}
]

sudo ceph fs subvolumegroup create v1 g1
sudo ceph fs subvolumegroup ls v1

sudo ceph fs subvolume create v1 sv1
sudo ceph fs subvolume ls v1

sudo ceph fs ls
name: v1, metadata pool: cephfs.v1.meta, data pools: [cephfs.v1.data ]
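
Creating the volume also deploys MDS daemons behind the scenes; a quick verification:

# Confirm an MDS is active and the new file system is healthy
sudo ceph mds stat
sudo ceph fs status v1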

5. Mount CephFS

sudo apt-get install ceph-fuse

# Mount CephFS with ceph-fuse
sudo mkdir -p /mnt/ceph/ceph_fuse
sudo ceph-fuse /mnt/ceph/ceph_fuse
ceph-fuse[24512]: starting ceph client
2020-05-18 05:57:36.039818 7f7d221a2500 -1 init, newargv = 0x559708e0e2e0 newargc=9
ceph-fuse[24512]: starting fuse

# Check the mount
sudo mount | grep ceph
ceph-fuse on /mnt/ceph/ceph_fuse type fuse.ceph-fuse (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other)

# The mount can now be used like a local disk for everyday operations
sudo ls /mnt/ceph/ceph_fuse/
volumes
sudo ls /mnt/ceph/ceph_fuse/volumes
g1  _nogroup
sudo ls /mnt/ceph/ceph_fuse/volumes/g1

sudo vi /mnt/ceph/ceph_fuse/volumes/g1/hi.txt
sudo cat /mnt/ceph/ceph_fuse/volumes/g1/hi.txt
hello ceph fuse
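
To detach cleanly when done, unmount the FUSE mount:

# Unmount the ceph-fuse mount point
sudo umount /mnt/ceph/ceph_fuse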

CEPH Environment Setup 01

1. Initial environment

Prepare four nodes (update /etc/hosts and the hostname on each node accordingly):

ceph01 172.16.172.101
ceph02 172.16.172.102
ceph03 172.16.172.103
ceph04 172.16.172.104

Run on every node:

sudo apt-get update
sudo apt-get install docker.io

2. Install cephadm on the primary node

# The officially recommended method has a problem
#sudo ./cephadm add-repo --release octopus
#The key(s) in the keyring /etc/apt/trusted.gpg.d/ceph.release.gpg are ignored as the file has an unsupported filetype.

sudo rm /etc/apt/trusted.gpg.d/ceph.release.gpg
wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
echo deb http://download.ceph.com/debian-octopus/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
sudo apt-get update

sudo ./cephadm install

sudo cephadm install ceph-common
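
Before bootstrapping, it is worth confirming that both the bootstrap tool and the client CLI are usable:

# Verify cephadm and the ceph CLI are installed
sudo cephadm version
ceph -v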

3. Bootstrap the cluster

sudo mkdir -p /etc/ceph

sudo cephadm bootstrap --mon-ip 172.16.172.101 --allow-overwrite
INFO:cephadm:Verifying podman|docker is present...
INFO:cephadm:Verifying lvm2 is present...
INFO:cephadm:Verifying time synchronization is in place...
INFO:cephadm:Unit systemd-timesyncd.service is enabled and running
INFO:cephadm:Repeating the final host check...
INFO:cephadm:podman|docker (/usr/bin/docker) is present
INFO:cephadm:systemctl is present
INFO:cephadm:lvcreate is present
INFO:cephadm:Unit systemd-timesyncd.service is enabled and running
INFO:cephadm:Host looks OK
INFO:root:Cluster fsid: 7bffaaf6-9688-11ea-ac24-080027b4217f
INFO:cephadm:Verifying IP 172.16.172.101 port 3300 ...
INFO:cephadm:Verifying IP 172.16.172.101 port 6789 ...
INFO:cephadm:Mon IP 172.16.172.101 is in CIDR network 172.16.172.0/24
INFO:cephadm:Pulling latest docker.io/ceph/ceph:v15 container...
INFO:cephadm:Extracting ceph user uid/gid from container image...
INFO:cephadm:Creating initial keys...
INFO:cephadm:Creating initial monmap...
INFO:cephadm:Creating mon...
INFO:cephadm:Waiting for mon to start...
INFO:cephadm:Waiting for mon...
INFO:cephadm:Assimilating anything we can from ceph.conf...
INFO:cephadm:Generating new minimal ceph.conf...
INFO:cephadm:Restarting the monitor...
INFO:cephadm:Setting mon public_network...
INFO:cephadm:Creating mgr...
INFO:cephadm:Wrote keyring to /etc/ceph/ceph.client.admin.keyring
INFO:cephadm:Wrote config to /etc/ceph/ceph.conf
INFO:cephadm:Waiting for mgr to start...
INFO:cephadm:Waiting for mgr...
INFO:cephadm:mgr not available, waiting (1/10)...
INFO:cephadm:mgr not available, waiting (2/10)...
INFO:cephadm:mgr not available, waiting (3/10)...
INFO:cephadm:Enabling cephadm module...
INFO:cephadm:Waiting for the mgr to restart...
INFO:cephadm:Waiting for Mgr epoch 5...
INFO:cephadm:Setting orchestrator backend to cephadm...
INFO:cephadm:Generating ssh key...
INFO:cephadm:Wrote public SSH key to to /etc/ceph/ceph.pub
INFO:cephadm:Adding key to root@localhost's authorized_keys...
INFO:cephadm:Adding host ceph01...
INFO:cephadm:Deploying mon service with default placement...
INFO:cephadm:Deploying mgr service with default placement...
INFO:cephadm:Deploying crash service with default placement...
INFO:cephadm:Enabling mgr prometheus module...
INFO:cephadm:Deploying prometheus service with default placement...
INFO:cephadm:Deploying grafana service with default placement...
INFO:cephadm:Deploying node-exporter service with default placement...
INFO:cephadm:Deploying alertmanager service with default placement...
INFO:cephadm:Enabling the dashboard module...
INFO:cephadm:Waiting for the mgr to restart...
INFO:cephadm:Waiting for Mgr epoch 13...
INFO:cephadm:Generating a dashboard self-signed certificate...
INFO:cephadm:Creating initial admin user...
INFO:cephadm:Fetching dashboard port number...
INFO:cephadm:Ceph Dashboard is now available at:

URL: https://localhost:8443/
User: admin
Password: mdbewc14gq

INFO:cephadm:You can access the Ceph CLI with:

sudo /usr/sbin/cephadm shell --fsid 7bffaaf6-9688-11ea-ac24-080027b4217f -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring

INFO:cephadm:Please consider enabling telemetry to help improve Ceph:

ceph telemetry on

For more information see:

https://docs.ceph.com/docs/master/mgr/telemetry/

INFO:cephadm:Bootstrap complete.

4. You can now log in to the dashboard using the URL, user, and password printed at the end of the bootstrap output.

5. Modify the configuration file

sudo vi /etc/ceph/ceph.conf

[global]
fsid = a4547d9d-f1a1-4753-b5cc-df0e043ebc65
mon_initial_members = ceph01
# The originally generated mon_host entry seems to have problems
#mon_host = [v2:ceph01:3300/0,v1:ceph:6789/0]
mon_host = 172.16.172.101
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
public_network = 172.16.172.0/24

6. Check the Ceph status

sudo ceph status
cluster:
id:     7bffaaf6-9688-11ea-ac24-080027b4217f
health: HEALTH_WARN
Reduced data availability: 1 pg inactive
OSD count 0 < osd_pool_default_size 3

services:
mon: 1 daemons, quorum ceph01 (age 35m)
mgr: ceph01.lreqdw(active, since 33m)
osd: 0 osds: 0 up, 0 in

data:
pools:   1 pools, 1 pgs
objects: 0 objects, 0 B
usage:   0 B used, 0 B / 0 B avail
pgs:     100.000% pgs unknown
1 unknown

7. Prepare the three additional nodes

# On ceph01
# Copy ceph.pub to the other three nodes
scp /etc/ceph/ceph.pub  neohope@ceph02:~/authorized_keys

# On ceph02
# Enable the root account
sudo passwd -u root
# Set up root's authorized_keys
sudo mkdir -p /root/.ssh
sudo mv authorized_keys /root/.ssh/
sudo chown root:root /root/.ssh/authorized_keys
sudo chmod 0600 /root/.ssh/authorized_keys
# Allow root SSH login
sudo sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config
sudo service ssh restart

# On ceph01
# Retrieve the private key used by cephadm
sudo ceph config-key get mgr/cephadm/ssh_identity_key > ceph.pem
chmod 0600 ceph.pem
# Test root login
ssh -i ceph.pem root@ceph02
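
The same preparation is repeated for ceph03 and ceph04 (the scp step above targets all three nodes); for example:

# On ceph01: copy the key and test root login for the remaining nodes as well
scp /etc/ceph/ceph.pub  neohope@ceph03:~/authorized_keys
scp /etc/ceph/ceph.pub  neohope@ceph04:~/authorized_keys
ssh -i ceph.pem root@ceph03
ssh -i ceph.pem root@ceph04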

8. Add the three nodes to the cluster

sudo ceph orch host add ceph02
Added host 'ceph02'

sudo ceph orch host add ceph03
Added host 'ceph03'

sudo ceph orch host add ceph04
Added host 'ceph04'

sudo ceph orch host ls
HOST    ADDR    LABELS  STATUS
ceph01  ceph01
ceph02  ceph02
ceph03  ceph03
ceph04  ceph04

9. Configure the mon daemons

sudo ceph orch apply mon 4
sudo ceph orch apply mon ceph01,ceph02,ceph03,ceph04
sudo ceph status
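
After applying the mon placement, the result can be verified; a small hedged check:

# Confirm the monitors are deployed and in quorum
sudo ceph orch ps --daemon-type mon
sudo ceph mon stat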