Ceph RBD block storage: general operations

Keywords: Ceph Kubernetes

Create and view pools
Create a pool with 64 PGs and 64 PGPs

[root@node-1 ~]# ceph osd pool create ceph-demo 64  64

pool 'ceph-demo' created

[root@node-1 ~]# ceph osd lspools

1 ceph-demo

View pg and pgp

[root@node-1 ~]# ceph osd pool get ceph-demo pg_num
pg_num: 64
[root@node-1 ~]# ceph osd pool get ceph-demo pgp_num
pgp_num: 64
# The number of replicas is 3 by default
[root@node-1 ~]# ceph osd pool get ceph-demo size
size: 3
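
The replica count can be changed with the same "pool set" subcommand that appears later in this article; a quick sketch against the ceph-demo pool created above:

# change the number of replicas for the pool (3 is already the default here)
ceph osd pool set ceph-demo size 3
# min_size is the smallest number of replicas that still accepts writes
ceph osd pool set ceph-demo min_size 2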

Adjust pg and pgp

[root@node-1 ~]# ceph osd pool set ceph-demo pg_num 128
set pool 1 pg_num to 128
[root@node-1 ~]# ceph osd pool set ceph-demo pgp_num 128
set pool 1 pgp_num to 128
View:
[root@node-1 ~]# ceph -s
  cluster:
    id:     c16b9145-7216-4058-8bfb-c9b7b2b702de
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum node-1,node-2,node-3 (age 2w)
    mgr: node-1(active, since 2w), standbys: node-2, node-3
    osd: 3 osds: 3 up (since 2w), 3 in (since 2w)
 
  data:
    pools:   1 pools, 128 pgs
    objects: 0 objects, 0 B
    usage:   3.0 GiB used, 147 GiB / 150 GiB avail
    pgs:     128 active+clean
 

Create an RBD image

 rbd create -p ceph-demo --image rbd-demo.img --size 10G
View:
[root@node-1 ~]# rbd -p ceph-demo ls
rbd-demo.img

 
[root@node-1 ~]# rbd info ceph-demo/rbd-demo.img
rbd image 'rbd-demo.img':
        size 10 GiB in 2560 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 1143ee2e8a3a
        block_name_prefix: rbd_data.1143ee2e8a3a
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        op_features: 
        flags: 
        create_timestamp: Tue Nov 17 14:57:35 2020
        access_timestamp: Tue Nov 17 14:57:35 2020
        modify_timestamp: Tue Nov 17 14:57:35 2020


Delete an RBD image
rbd rm -p ceph-demo --image rbd-demo.img

Mount the block device
Disable the newer image features that the 3.10 kernel does not support:
rbd feature disable ceph-demo/rbd-demo.img deep-flatten
rbd feature disable ceph-demo/rbd-demo.img fast-diff
rbd feature disable ceph-demo/rbd-demo.img object-map
rbd feature disable ceph-demo/rbd-demo.img exclusive-lock    
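
As a shorthand (a sketch, not part of the original steps), the same features can be passed to a single rbd feature disable call, and new images can default to layering only via rbd_default_features in ceph.conf:

# disable the four features in one command (same effect as the four lines above)
rbd feature disable ceph-demo/rbd-demo.img deep-flatten fast-diff object-map exclusive-lock
# optionally, in the [client] section of ceph.conf, so new images only get layering:
# rbd_default_features = 1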

View:
[root@node-1 ~]# rbd -p ceph-demo info rbd-demo.img
rbd image 'rbd-demo.img':
        size 10 GiB in 2560 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 1143ee2e8a3a
        block_name_prefix: rbd_data.1143ee2e8a3a
        format: 2
        features: layering
        op_features: 
        flags: 
        create_timestamp: Tue Nov 17 14:57:35 2020
        access_timestamp: Tue Nov 17 14:57:35 2020
        modify_timestamp: Tue Nov 17 14:57:35 2020
        
Map the image
[root@node-1 ~]# rbd map ceph-demo/rbd-demo.img
/dev/rbd0
View the mapped devices:
[root@node-1 ~]# rbd device list
id pool      namespace image        snap device    
0  ceph-demo           rbd-demo.img -    /dev/rbd0 

[root@node-1 ~]# fdisk -l
Disk /dev/rbd0: 10.7 GB, 10737418240 bytes, 20971520 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes

Then you can format and mount it:
mkfs.ext4 /dev/rbd0
lsblk
mkdir /mnt/rbd-demo
mount /dev/rbd0 /mnt/rbd-demo
df -h

Capacity expansion

Currently 10 GiB:
[root@node-1 ~]# rbd -p ceph-demo info rbd-demo.img
rbd image 'rbd-demo.img':
        size 10 GiB in 2560 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 1143ee2e8a3a
        block_name_prefix: rbd_data.1143ee2e8a3a
        format: 2
        features: layering
        op_features: 
        flags: 
        create_timestamp: Tue Nov 17 14:57:35 2020
        access_timestamp: Tue Nov 17 14:57:35 2020
        modify_timestamp: Tue Nov 17 14:57:35 2020
        
[root@node-1 ~]# rbd resize ceph-demo/rbd-demo.img --size 20G
Resizing image: 100% complete...done.

After expansion it is 20 GiB:
[root@node-1 ~]# rbd -p ceph-demo info rbd-demo.img
rbd image 'rbd-demo.img':
        size 20 GiB in 5120 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 1143ee2e8a3a
        block_name_prefix: rbd_data.1143ee2e8a3a
        format: 2
        features: layering
        op_features: 
        flags: 
        create_timestamp: Tue Nov 17 14:57:35 2020
        access_timestamp: Tue Nov 17 14:57:35 2020
        modify_timestamp: Tue Nov 17 14:57:35 2020
However, this only expands the underlying RBD image; the filesystem on it must also be grown:
resize2fs /dev/rbd0
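
If the image had been formatted with XFS instead of ext4 (hypothetical here), the filesystem would be grown against its mount point with xfs_growfs rather than resize2fs:

# assuming the XFS filesystem is mounted at /mnt/rbd-demo
xfs_growfs /mnt/rbd-demo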

Note: shrinking an image is not recommended, as it can easily cause data loss.

Health warning troubleshooting

Health status details
[root@node-1 ~]# ceph health detail
HEALTH_WARN application not enabled on 1 pool(s)
POOL_APP_NOT_ENABLED application not enabled on 1 pool(s)
    application not enabled on pool 'ceph-demo'
   
    use 'ceph osd pool application enable <pool-name> <app-name>', where <app-name> is 'cephfs', 'rbd', 'rgw', or freeform for custom applications.

The pool does not have an application enabled; the fix is to enable one and specify the application type.
Fix:
[root@node-1 ~]# ceph osd pool application enable ceph-demo rbd
enabled application 'rbd' on pool 'ceph-demo'

[root@node-1 ~]# ceph osd pool application get ceph-demo
{
    "rbd": {}
}

View status

[root@node-1 ~]# ceph -s
  cluster:
    id:     c16b9145-7216-4058-8bfb-c9b7b2b702de
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum node-1,node-2,node-3 (age 2w)
    mgr: node-1(active, since 2w), standbys: node-2, node-3
    osd: 3 osds: 3 up (since 2w), 3 in (since 2w)
 
  data:
    pools:   1 pools, 128 pgs
    objects: 4 objects, 35 B
    usage:   3.0 GiB used, 147 GiB / 150 GiB avail
    pgs:     128 active+clean

Ceph cluster maintenance

1. Physical machine shutdown maintenance

  • Migrate the relevant virtual machines / services to other nodes (OpenStack and similar platforms need their VMs migrated first);

Perform the following on a MON node or on a node with admin permissions:

#   Set OSD flags so data does not rebalance during maintenance
for i in noout nobackfill norecovery;do ceph osd set $i;done
#   After maintenance is complete, power the machine back on and unset the flags so data resynchronizes
for i in noout nobackfill norecovery;do ceph osd unset $i;done
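
Before powering the machine off it is worth confirming the flags took effect (a small check, not part of the original procedure):

# the health summary typically reports "noout,nobackfill,norecovery flag(s) set"
ceph -s
# the flags also appear on the flags line of the OSD map
ceph osd dump | grep flags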

2. Control Ceph with systemctl

# start/stop all ceph-mds@.service instances at once
systemctl start/stop/restart ceph-mds.target
# start/stop all ceph-mgr@.service instances at once
systemctl start/stop/restart ceph-mgr.target
# start/stop all ceph-mon@.service instances at once
systemctl start/stop/restart ceph-mon.target
# start/stop all ceph-osd@.service instances at once
systemctl start/stop/restart ceph-osd.target
# start/stop all ceph-radosgw@.service instances at once
systemctl start/stop/restart ceph-radosgw.target
# start/stop all ceph*@.service instances at once
systemctl start/stop/restart ceph.target
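
The targets above act on every instance of a daemon type on the host; an individual daemon can also be managed through its instance unit (a sketch using osd id 0 and mon node-1 as example names from this cluster):

# restart a single OSD by id
systemctl restart ceph-osd@0
# restart a single monitor by hostname
systemctl restart ceph-mon@node-1
# check the status of one daemon
systemctl status ceph-osd@0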
 

3. Ceph common monitoring commands

#  Check cluster status
ceph health
ceph status
ceph quorum_status
ceph mon_status
#  Observe what is happening in the cluster
ceph -w/-s
 
#  Check cluster usage
ceph df
#  Check OSD state
ceph osd stat
ceph osd dump
#  Print the CRUSH tree
ceph osd tree
 
#  Check MON state
ceph mon stat
ceph mon dump
#  Check MON quorum status
ceph quorum_status -f json-pretty
 
#  Check MDS state
ceph mds stat
ceph mds dump
-----------------------------------

# Monitoring PG
ceph pg dump
# View the OSD contained in the Acting Set or Up Set of the specified PG
ceph pg map {pg-num}

View the cluster health status

ceph health

View the real-time running status of ceph

ceph -w

Check status information

ceph -s

View ceph storage usage

ceph df

Remove all Ceph packages and data from a node

ceph-deploy purge node1
ceph-deploy purgedata node1

Create an administrative user
Create an admin user for ceph together with its key, and save the key to the /etc/ceph directory:

ceph auth get-or-create client.admin mds 'allow' osd 'allow *' mon 'allow *' > /etc/ceph/ceph.client.admin.keyring
or
ceph auth get-or-create client.admin mds 'allow' osd 'allow *' mon 'allow *' -o /etc/ceph/ceph.client.admin.keyring

Create a user and a key for osd.0

ceph auth get-or-create osd.0 mon 'allow rwx' osd 'allow *' -o /var/lib/ceph/osd/ceph-0/keyring
 

Create a user and a key for mds.node1

ceph auth get-or-create mds.node1 mon 'allow rwx' osd 'allow *' mds 'allow *' -o /var/lib/ceph/mds/ceph-node1/keyring

View the authenticated users and their keys in the ceph cluster

ceph auth list
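
To inspect a single user instead of the whole list (client.admin is the user created above):

ceph auth get client.admin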

Delete an authenticated user from the cluster

ceph auth del osd.0

View the detailed configuration of the cluster
ceph daemon mon.node1 config show | more

View cluster health status details

ceph health detail

View the directory where the ceph log is located

ceph-conf --name mon.node1 --show-config-value log_file

mon command

ceph mon stat  #View mon status information

ceph mon dump  #View the mon map information

ceph mon remove node1  #Delete a mon node; with ceph-deploy: ceph-deploy mon destroy {host-name [host-name]...}

ceph mon add node1 node1_ip  #Add a mon node; with ceph-deploy: ceph-deploy mon create {host-name [host-name]...}

The contents of /var/lib/ceph/mon/ceph-node2/store.db must stay consistent across mon nodes. When adding a mon, modify the configuration file in the config directory first, then push it to all nodes:

ceph-deploy --overwrite-conf config push  node1 node2 node3

mds command

ceph mds stat  #View mds status

ceph mds dump  #View the mds map information

ceph mds rm 0 mds.node1  #Delete an mds node

ceph-deploy mds create {host-name}[:{daemon-name}] [{host-name}[:{daemon-name}] ...]

osd command

ceph osd stat  #View osd status

ceph osd dump  #View the osd map information

ceph osd tree  #View the osd tree

ceph osd down 0  #Mark osd.0 down

ceph osd rm 0  #Delete an osd from the cluster

ceph osd crush remove osd.4  #Remove osd.4 from the CRUSH map

ceph osd getmaxosd  #View the maximum number of OSDs

ceph osd setmaxosd 10  #Set the maximum number of OSDs

ceph osd out osd.3  #Mark an osd out of the cluster (its data will be rebalanced away)

ceph osd in osd.3  #Bring the osd back into the cluster

ceph osd pause  #Pause osd I/O (the whole cluster stops receiving data while paused)

ceph osd unpause  #Resume osd I/O (data is received again)

pg command

ceph pg stat  #View pg status

ceph pg dump  #View the pg map information

ceph pg map 0.3f  #View the map of a single pg

ceph pg 0.26 query  #View pg details

ceph pg dump --format plain  #Display all pg statistics in the cluster

pool command

ceph osd lspools  #List the pools in the ceph cluster

ceph osd pool create jiayuan 100  #Create a pool; the 100 is the number of PGs

ceph osd pool delete jiayuan jiayuan --yes-i-really-really-mean-it  #The pool name must be given twice

rados df  #Display details of the pools in the cluster

ceph osd pool get data pg_num  #View the pg number of the data pool

ceph osd pool set data target_max_bytes 100000000000000  #Set the maximum storage space of the data pool to 100 TB

ceph osd pool set data size 3  #Set the number of replicas of the data pool to 3

ceph osd pool set data min_size 2  #Set the minimum number of replicas required for the data pool to accept writes to 2

ceph osd pool set data pg_num 100  #Set the pg number of a pool

ceph osd pool set data pgp_num 100  #Set the pgp number of a pool

rados and rbd commands

rados lspools  #List the pools in the ceph cluster (pools only)

rados df  #Check the number of pools in the ceph cluster and the capacity and utilization of each pool

rados mkpool test  #Create a pool

rados create test-object -p test  #Create an object

rados rm test-object-1 -p test  #Delete an object

rados -p test ls  #List the objects in a pool

rbd ls pool_name  #View all images in a pool

rbd info -p pool_name --image 74cb427c-cee9-47d0-b467-af217a67e60a  #View the information of an image in a pool

rbd create -p test --size 10000 zhanguo  #Create a 10000 MB image named zhanguo in the test pool

rbd rm -p test lizhanguo  #Delete an image

rbd resize -p test --size 20000 zhanguo  #Resize an image

Create an osd

ceph-deploy disk zap {osd-server-name}:{disk-name}   #Wipe the disk

ceph-deploy osd prepare {node-name}:{disk}[:{path/to/journal}]

ceph-deploy osd prepare osdserver1:sdb:/dev/ssd1

ceph-deploy osd activate {node-name}:{path/to/disk}[:{path/to/journal}]

ceph-deploy osd activate osdserver1:/dev/sdb1:/dev/ssd1

Distribute the modified configuration file to the hosts in the cluster

ceph-deploy config push {host-name [host-name]...}

CRUSH mapping

ceph osd getcrushmap -o MAP   #Get a CRUSH map

crushtool -d MAP -o MAP.TXT   #Decompile a CRUSH map

crushtool -c MAP.TXT -o MAP   #Compile a CRUSH map

ceph osd setcrushmap -i MAP    #Set a CRUSH map
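
A concrete round trip with example file names (crushmap.bin / crushmap.txt / crushmap.new are arbitrary names chosen for this sketch):

ceph osd getcrushmap -o crushmap.bin        #Dump the binary CRUSH map
crushtool -d crushmap.bin -o crushmap.txt   #Decompile it to editable text
#...edit crushmap.txt...
crushtool -c crushmap.txt -o crushmap.new   #Recompile it
ceph osd setcrushmap -i crushmap.new        #Inject the new map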

Some commands for block devices

Sizes are given in MB; the pool defaults to rbd unless --pool is specified

To create a block device: rbd create {image-name}  --size {megabytes}  --pool {pool-name}

List block devices: rbd ls {poolname} -l

Retrieve block information: rbd --image {image-name} info

Change block size: rbd resize --image {image-name} --size {megabytes}

Delete block device: rbd rm {image-name}

Mapping block device: rbd map {image-name} --pool {pool-name} --id {user-name}

To view mapped block devices: rbd showmapped

Unmap: rbd unmap /dev/rbd/{poolname}/{imagename}
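
Filling the templates in with the pool and image names used earlier in this article (and assuming the admin user for the map step) gives, for example:

rbd create rbd-demo.img --size 10240 --pool ceph-demo   #10240 MB = 10 GB
rbd ls ceph-demo -l
rbd info ceph-demo/rbd-demo.img
rbd map rbd-demo.img --pool ceph-demo --id admin
rbd showmapped
rbd unmap /dev/rbd/ceph-demo/rbd-demo.img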

Snapshot and clone related commands

To create a snapshot:

rbd --pool {pool-name} snap create --snap {snap-name} {image-name}

rbd snap create {pool-name}/{image-name}@{snap-name}

Snapshot rollback:

rbd --pool {pool-name} snap rollback --snap {snap-name} {image-name}

rbd snap rollback {pool-name}/{image-name}@{snap-name}



Clear snapshot:

rbd --pool {pool-name} snap purge {image-name}

rbd snap purge {pool-name}/{image-name}

To delete a snapshot:

rbd --pool {pool-name} snap rm --snap {snap-name} {image-name}

rbd snap rm {pool-name}/{image-name}@{snap-name}

List snapshots:

rbd --pool {pool-name} snap ls {image-name}

rbd snap ls {pool-name}/{image-name}

Protect snapshots:

rbd --pool {pool-name} snap protect --image {image-name} --snap {snapshot-name}

rbd snap protect {pool-name}/{image-name}@{snapshot-name}

Unprotect snapshot:

rbd --pool {pool-name} snap unprotect --image {image-name} --snap {snapshot-name}

rbd snap unprotect {pool-name}/{image-name}@{snapshot-name}

Snapshot clone

rbd clone {pool-name}/{parent-image}@{snap-name} {pool-name}/{child-image-name}

Viewing clones of snapshots

rbd --pool {pool-name} children --image {image-name} --snap {snap-name}

rbd children {pool-name}/{image-name}@{snapshot-name}

RGW object storage
Create

ceph-deploy rgw create node-1
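
After creation, ceph -s should list an active rgw daemon (as it does in the cluster status later in this article). A quick connectivity check against the gateway's frontend port can confirm it answers (the default port 7480 is assumed here):

curl http://node-1:7480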

CephFS file store
Create mds service

ceph-deploy mds create node-1 node-2 node-3   

[root@node-1 ceph-deploy]# ceph osd pool create cephfs_metadata 16 16
pool 'cephfs_metadata' created
[root@node-1 ceph-deploy]# ceph osd pool create cephfs_data 16 16
pool 'cephfs_data' created

[root@node-1 ceph-deploy]# ceph fs new cephfs-demo cephfs_metadata cephfs_data
new fs with metadata pool 6 and data pool 7
[root@node-1 ceph-deploy]# 

[root@node-1 ceph-deploy]# ceph fs ls
name: cephfs-demo, metadata pool: cephfs_metadata, data pools: [cephfs_data ]

[root@node-1 ceph-deploy]# ceph -s
  cluster:
    id:     c16b9145-7216-4058-8bfb-c9b7b2b702de
    health: HEALTH_OK

 services:
    mon: 3 daemons, quorum node-1,node-2,node-3 (age 46m)
    mgr: node-1(active, since 46m), standbys: node-2, node-3
    mds: cephfs-demo:1 {0=node-2=up:active} 2 up:standby
    osd: 3 osds: 3 up (since 2w), 3 in (since 2w)
    rgw: 1 daemon active (node-1)
 
  task status:
    scrub status:
        mds.node-2: idle
 
  data:
    pools:   7 pools, 288 pgs
    objects: 213 objects, 3.5 KiB
    usage:   3.0 GiB used, 147 GiB / 150 GiB avail
    pgs:     288 active+clean

Kernel level mount, higher performance

mkdir /mnt/cephfs
[root@node-1 ceph-deploy]# mount -t ceph 172.16.10.224:6789:/ /mnt/cephfs/ -o name=admin
[root@node-1 ceph-deploy]# df -h
Filesystem            Size  Used Avail Use% Mounted on
devtmpfs              3.9G     0  3.9G   0% /dev
tmpfs                 3.9G     0  3.9G   0% /dev/shm
tmpfs                 3.9G  8.9M  3.9G   1% /run
tmpfs                 3.9G     0  3.9G   0% /sys/fs/cgroup
/dev/sda3             211G  1.8G  210G   1% /
/dev/sda1            1014M  141M  874M  14% /boot
tmpfs                 783M     0  783M   0% /run/user/0
tmpfs                 3.9G   52K  3.9G   1% /var/lib/ceph/osd/ceph-0
172.16.10.224:6789:/   47G     0   47G   0% /mnt/cephfs
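
To make this kernel mount persistent across reboots (not covered in the original; the secret file path and the options below are assumptions), an /etc/fstab entry along these lines is commonly used:

# /etc/fstab
172.16.10.224:6789:/  /mnt/cephfs  ceph  name=admin,secretfile=/etc/ceph/admin.secret,_netdev,noatime  0 0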

User level mount

Install client
yum -y install ceph-fuse
mkdir /mnt/ceph-fuse
[root@node-1 ceph-deploy]# ceph-fuse -n client.admin -m 172.16.10.224:6789,172.16.10.225:6789,172.16.10.226:6789 /mnt/ceph-fuse
2020-11-17 17:54:20.511 7eff93888f80 -1 init, newargv = 0x5571a711e340 newargc=9ceph-fuse[29325]: starting ceph client

ceph-fuse[29325]: starting fuse
[root@node-1 ceph-deploy]# df -h
Filesystem            Size  Used Avail Use% Mounted on
devtmpfs              3.9G     0  3.9G   0% /dev
tmpfs                 3.9G     0  3.9G   0% /dev/shm
tmpfs                 3.9G  9.0M  3.9G   1% /run
tmpfs                 3.9G     0  3.9G   0% /sys/fs/cgroup
/dev/sda3             211G  1.9G  210G   1% /
/dev/sda1            1014M  141M  874M  14% /boot
tmpfs                 783M     0  783M   0% /run/user/0
tmpfs                 3.9G   52K  3.9G   1% /var/lib/ceph/osd/ceph-0
172.16.10.224:6789:/   47G     0   47G   0% /mnt/cephfs
ceph-fuse              47G     0   47G   0% /mnt/ceph-fuse

OSD expansion and disk replacement
Horizontal expansion: add more nodes
Vertical expansion: add more capacity (disks) to existing nodes

[root@node-1 ceph-deploy]# ceph osd tree
ID CLASS WEIGHT  TYPE NAME       STATUS REWEIGHT PRI-AFF 
-1       0.14639 root default                            
-3       0.04880     host node-1                         
 0   hdd 0.04880         osd.0       up  1.00000 1.00000 
-5       0.04880     host node-2                         
 1   hdd 0.04880         osd.1       up  1.00000 1.00000 
-7       0.04880     host node-3                         
 2   hdd 0.04880         osd.2       up  1.00000 1.00000
-----------------------------------

Clean up a disk that already has partitions

ceph-deploy disk zap node-1 /dev/sdc
Under the hood this wipes the partition data with dd.

Add disk

ceph-deploy osd  create node-1 --data /dev/sdc

Adding OSDs causes PGs to move, which costs performance, so expand OSDs one at a time to reduce the impact on the business (see the sketch below).
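
A minimal sketch of that one-at-a-time approach (the disk list and the HEALTH_OK check are assumptions; the individual commands are the ones used in this section):

for disk in /dev/sdc /dev/sdd; do
    ceph-deploy disk zap node-1 $disk
    ceph-deploy osd create node-1 --data $disk
    # wait for backfill/recovery to settle before adding the next OSD
    until ceph health | grep -q HEALTH_OK; do
        sleep 60
    done
done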

View OSD latency
ceph osd perf

Delete an OSD
ceph osd out osd.5
Delete it after the data has finished rebalancing:
ceph osd crush rm osd.5
ceph osd rm osd.5
ceph auth rm osd.5

RBD advanced functions

Create

[root@node-1 ~]# rbd create ceph-demo/ceph-trash.img --size 10G
[root@node-1 ~]# rbd info ceph-demo/ceph-trash.img
rbd image 'ceph-trash.img':
        size 10 GiB in 2560 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 861f92bbad7f
        block_name_prefix: rbd_data.861f92bbad7f
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        op_features: 
        flags: 
        create_timestamp: Wed Nov 18 10:57:52 2020
        access_timestamp: Wed Nov 18 10:57:52 2020
        modify_timestamp: Wed Nov 18 10:57:52 2020

Delete

[root@node-1 ~]# rbd rm ceph-demo/ceph-trash.img
Removing image: 100% complete...done.

Use the trash (recycle) mechanism

[root@node-1 ~]# rbd create ceph-demo/ceph-trash.img --size 10G
[root@node-1 ~]# rbd trash move ceph-demo/ceph-trash.img --expires-at 20201119
[root@node-1 ~]# rbd trash -p ceph-demo ls
8640e50e08fb ceph-trash.img

Restore from the trash to recover from accidental deletion

[root@node-1 ~]# rbd trash restore -p ceph-demo 8640e50e08fb
[root@node-1 ~]# rbd -p ceph-demo ls
ceph-trash.img
rbd-demo.img
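
If an image in the trash should instead be removed permanently (a complementary note; the id is the one shown by rbd trash ls above):

rbd trash rm -p ceph-demo 8640e50e08fb
# or remove everything in the pool's trash whose expiry has passed
rbd trash purge -p ceph-demo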
 

Map to the local file system

rbd create ceph-demo/rbd-test.img --size 10G
[root@node-1 ~]# rbd device map ceph-demo/rbd-test.img
/dev/rbd1

Format and mount
mkfs.ext4 /dev/rbd1
mount /dev/rbd1 /media
touch /media/test.txt

Create Snapshot

[root@node-1 ~]# rbd snap create ceph-demo/rbd-test.img@snap_20201118
[root@node-1 ~]# rbd snap ls ceph-demo/rbd-test.img
SNAPID NAME          SIZE   PROTECTED TIMESTAMP                
     4 snap_20201118 10 GiB           Wed Nov 18 11:15:23 2020

Data recovery
Delete test.txt, then roll back the snapshot

rbd snap rollback ceph-demo/rbd-test.img@snap_20201118
rbd snap ls ceph-demo/rbd-test.img

Unmount and then mount again to see the restored data
umount /media
mount /dev/rbd1 /media

Create Snapshot 
[root@node-1 ~]# rbd snap create ceph-demo/rbd-test.img@template
[root@node-1 ~]# rbd snap ls ceph-demo/rbd-test.img
SNAPID NAME          SIZE   PROTECTED TIMESTAMP                
     4 snap_20201118 10 GiB           Wed Nov 18 11:15:23 2020 
     5 template      10 GiB           Wed Nov 18 11:29:21 2020 

Protect snapshots
[root@node-1 ~]# rbd snap protect ceph-demo/rbd-test.img@template
You will find that it can no longer be deleted:
[root@node-1 ~]# rbd snap rm ceph-demo/rbd-test.img@template
Removing snap: 0% complete...failed.2020-11-18 11:32:20.904 7f2cef31ec80 -1 librbd::Operations: snapshot is protected

rbd: snapshot 'template' is protected from removal.



Snapshot clone
[root@node-1 ~]# rbd clone ceph-demo/rbd-test.img@template ceph-demo/vm1-clone.img
[root@node-1 ~]# rbd -p ceph-demo ls
ceph-trash.img
rbd-demo.img
rbd-test.img
vm1-clone.img

Using clone snapshots
rbd device map ceph-demo/vm1-clone.img
mount /dev/rbd2 /mnt 

View the clones created from this snapshot
[root@node-1 ~]# rbd children ceph-demo/rbd-test.img@template
ceph-demo/vm1-clone.img

Detach the clone from its parent (flatten)


rbd flatten ceph-demo/vm1-clone.img
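
After flattening, the clone no longer depends on the parent snapshot: rbd children returns nothing for it, and the template snapshot can be unprotected and removed if it is no longer needed (using the commands already shown above):

rbd children ceph-demo/rbd-test.img@template
rbd snap unprotect ceph-demo/rbd-test.img@template
rbd snap rm ceph-demo/rbd-test.img@template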

RBD import and export

Create Snapshot 
[root@node-1 ~]# rbd snap create ceph-demo/rbd-test.img@snap-demo
[root@node-1 ~]# rbd snap ls ceph-demo/rbd-test.img
SNAPID NAME          SIZE   PROTECTED TIMESTAMP                
     4 snap_20201118 10 GiB           Wed Nov 18 11:15:23 2020 
     5 template      10 GiB yes       Wed Nov 18 11:29:21 2020 
     6 snap-demo     10 GiB           Wed Nov 18 15:17:24 2020 
[root@node-1 ~]# 

Export backup
[root@node-1 ~]# rbd export ceph-demo/rbd-test.img@snap-demo /root/rbd-test.img
Exporting image: 100% complete...done.

Import
[root@node-1 ~]# rbd import rbd-test.img ceph-demo/rbd-test-new.img
Importing image: 100% complete...done.


[root@node-1 ~]# rbd -p ceph-demo ls
ceph-trash.img
rbd-demo.img
rbd-test-new.img
rbd-test.img
vm1-clone.img
