Setting up petalink to work with Ceph
== Verify Ceph Environment has RGW Setup ==

RGW is not always deployed on a Ceph cluster, so check the output of <code>ceph status</code> and make sure the rgw service is up and running.

<pre>
[root@ceph-ansible ceph-ansible]# ceph status
  cluster:
    id:     a5098828-f84e-4460-aa49-14a4a49705f6
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph-mon3,ceph-mon1,ceph-mon2 (age 79m)
    mgr: ceph-mon2(active, since 75m), standbys: ceph-mon1, ceph-mon3
    osd: 9 osds: 9 up (since 81m), 9 in (since 2M)
    rgw: 3 daemons active (ceph-mon1.rgw0, ceph-mon2.rgw0, ceph-mon3.rgw0) # <-- This is the RGW / S3 service

  task status:

  data:
    pools:   4 pools, 128 pgs
    objects: 189 objects, 2.3 KiB
    usage:   9.1 GiB used, 81 GiB / 90 GiB avail
    pgs:     128 active+clean
</pre>
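To confirm the gateway actually answers requests, an unauthenticated GET against the RGW port should return an anonymous, empty bucket listing. A quick sketch, assuming RGW is listening on ceph-mon1 port 8080 (adjust host and port to match your rgw frontends setting):

<pre>
# An anonymous request should return an XML ListAllMyBucketsResult
# with Owner ID "anonymous" and no buckets
curl http://ceph-mon1:8080
</pre>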

== Create a user for Ceph RGW access ==

Assuming there are no existing RGW users on the cluster, perform the following:

<pre>
# User Create
[root@ceph-mon1 ~]# radosgw-admin user create --uid=cephs3 --display-name="Ceph S3 User" --email="whatever@you.want" -k /var/lib/ceph/radosgw/ceph-rgw.ceph-mon1.rgw0/keyring --name client.rgw.ceph-mon1.rgw0 
{   
    "user_id": "cephs3",
    "display_name": "Ceph S3 User",
    "email": "whatever@you.want",
    "suspended": 0,
    "max_buckets": 1000,
    "subusers": [],
    "keys": [
        {   
            "user": "cephs3",
            "access_key": "[blanked]",
            "secret_key": "[blanked]"
        }
    ],
    "swift_keys": [], 
    "caps": [],
    "op_mask": "read, write, delete",
    "default_placement": "",
    "default_storage_class": "",
    "placement_tags": [],
    "bucket_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },  
    "user_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "temp_url_keys": [],
    "type": "rgw",
    "mfa_ids": []
}
</pre>
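If the keys above get lost before they make it into the credentials file, they can be re-displayed at any time. A sketch reusing the same keyring and client name arguments as the create command above:

<pre>
# Re-print the user record, including access_key and secret_key
[root@ceph-mon1 ~]# radosgw-admin user info --uid=cephs3 \
    -k /var/lib/ceph/radosgw/ceph-rgw.ceph-mon1.rgw0/keyring \
    --name client.rgw.ceph-mon1.rgw0
</pre>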

<pre>
# Create subuser
[root@ceph-mon1 ~]# radosgw-admin subuser create --uid=cephs3 --subuser=cephs3:swift --access=full
{
    "user_id": "cephs3",
    "display_name": "Ceph S3 User",
    "email": "whatever@you.want",
    "suspended": 0,
    "max_buckets": 1000,
    "subusers": [
        {
            "id": "cephs3:swift",
            "permissions": "full-control"
        }
    ],
    "keys": [
        {
            "user": "cephs3",
            "access_key": "[blanked]",
            "secret_key": "[blanked]"
        }
    ],
    "swift_keys": [
        {
            "user": "cephs3:swift",
            "secret_key": "[blanked]"
        }
    ],
    "caps": [],
    "op_mask": "read, write, delete",
    "default_placement": "",
    "default_storage_class": "",
    "placement_tags": [],
    "bucket_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "user_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "temp_url_keys": [],
    "type": "rgw",
    "mfa_ids": []
}
</pre>
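The Swift subuser can be sanity-checked through the Swift-compatible auth endpoint that RGW exposes. A sketch, assuming python-swiftclient is installed and the gateway answers on ceph-mon1:8080; substitute the swift secret_key from the output above:

<pre>
# An empty container list with exit code 0 means Swift auth works
swift -A http://ceph-mon1:8080/auth/1.0 -U cephs3:swift -K '<swift secret_key>' list
</pre>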

Make a note of the access keys and secret keys above; they are needed for the credentials input file.

== Credentials File for Ceph ==

<pre>
[root@deploy-ext scratch]# cat s3creds.txt
aws_access_key_id = [none of your business]
aws_secret_access_key = [refer to above]
region = US
endpoint = http://rgw.deploy-ext:8080
skipssl
pathstyle
</pre>
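Before importing, it is worth proving the keys and endpoint with a plain S3 client. A minimal sketch using the AWS CLI, assuming it is installed and rgw.deploy-ext resolves from this host; the skipssl and pathstyle options in the file above correspond here to the plain-http endpoint URL and path-style addressing:

<pre>
# Export the same keys as in s3creds.txt
export AWS_ACCESS_KEY_ID='<access_key>'
export AWS_SECRET_ACCESS_KEY='<secret_key>'

# Match the pathstyle option by forcing path-style addressing
aws configure set default.s3.addressing_style path

# Create a bucket and list buckets against the RGW endpoint
aws --endpoint-url http://rgw.deploy-ext:8080 s3 mb s3://bucket1
aws --endpoint-url http://rgw.deploy-ext:8080 s3 ls
</pre>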

== Import and show ==

Import the credentials file with pgman and list the resulting credentials and bucket pairings:

<pre>
pgman import /root/scratch/s3creds.txt
pgman list

+-------------+
| credentials |
+-------------+
 [*] s3creds.s3c

+----------+
| pairings |
+----------+
 [*] s3://bucket1 -> s3creds.s3c
 [*] s3://bucket2 -> s3creds.s3c
 [*] s3://bucket3 -> s3creds.s3c
</pre>