Install packages

We need to use buster-backports to get an nfs-ganesha version that supports the rados_* backends

echo deb http://http.us.debian.org/debian buster-backports main > /etc/apt/sources.list.d/backports.list
apt update
apt install nfs-ganesha/buster-backports nfs-ganesha-ceph/buster-backports nfs-ganesha-vfs/buster-backports
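
Double-check that the backports versions actually got installed:

apt-cache policy nfs-ganesha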

Find out which pools are available

root@pvetest2:~# rados lspools
device_health_metrics
gold
ec21pool
cephfs_meta

In this case I am going to use cephfs_meta as my pool. Within it, I choose the ganesha-config namespace for storing the configuration file and the ganesha-kv namespace for storing the KV data used for recovery.
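
RADOS namespaces do not need to be created up front; they come into existence when the first object is written into them. To confirm nothing else is using the chosen namespaces yet, list them (both should come back empty):

rados ls -p cephfs_meta -N ganesha-config
rados ls -p cephfs_meta -N ganesha-kv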

Edit the configuration, starting from the sample in /etc/ganesha/ceph.conf, and save the result as ganesha.conf (we will upload it to RADOS below):

#
# It is possible to use FSAL_CEPH to provide an NFS gateway to CephFS. The
# following sample config should be useful as a starting point for
# configuration. This basic configuration is suitable for a standalone NFS
# server, or an active/passive configuration managed by some sort of clustering
# software (e.g. pacemaker, docker, etc.).
#
# Note too that it is also possible to put a config file in RADOS, and give
# ganesha a rados URL from which to fetch it. For instance, if the config
# file is stored in a RADOS pool called "nfs-ganesha" with an object name of
# "ganesha-config":
#
# %url  rados://nfs-ganesha/ganesha-config
#
# If we only export cephfs (or RGW), store the configs and recovery data in
# RADOS, and mandate NFSv4.1+ for access, we can avoid any sort of local
# storage, and ganesha can run as an unprivileged user (even inside a
# locked-down container).
#

NFS_CORE_PARAM
{
        # Ganesha can lift the NFS grace period early if NLM is disabled.
        Enable_NLM = false;

        # rquotad doesn't add any value here. CephFS doesn't support per-uid
        # quotas anyway.
        Enable_RQUOTA = false;

        # In this configuration, we're just exporting NFSv4. In practice, it's
        # best to use NFSv4.1+ to get the benefit of sessions.
        Protocols = 4;
}

NFSv4
{
        # Modern versions of libcephfs have delegation support, though they
        # are not currently recommended in clustered configurations. They are
        # disabled by default but can be reenabled for singleton or
        # active/passive configurations.
        # Delegations = false;

        # One can use any recovery backend with this configuration, but being
        # able to store it in RADOS is a nice feature that makes it easy to
        # migrate the daemon to another host.
        #
        # For a single-node or active/passive configuration, rados_ng driver
        # is preferred. For active/active clustered configurations, the
        # rados_cluster backend can be used instead. See the
        # ganesha-rados-grace manpage for more information.
        RecoveryBackend = rados_cluster;

        # NFSv4.0 clients do not send a RECLAIM_COMPLETE, so we end up having
        # to wait out the entire grace period if there are any. Avoid them.
        Minor_Versions = 1,2;
}

# The libcephfs client will aggressively cache information while it
# can, so there is little benefit to ganesha actively caching the same
# objects. Doing so can also hurt cache coherency. Here, we disable
# as much attribute and directory caching as we can.
CACHEINODE {
        # Size the dirent cache down as small as possible.
        Dir_Chunk = 0;

        # size the inode cache as small as possible
        NParts = 1;
        Cache_Size = 1;
}

EXPORT
{
        # Unique export ID number for this export
        Export_ID=100;

        # We're only interested in NFSv4 in this configuration
        Protocols = 4;

        # NFSv4 does not allow UDP transport
        Transports = TCP;

        #
        # Path into the cephfs tree. For now, FSAL_CEPH doesn't support
        # having more than one filesystem per running ganesha daemon.
        #
        # Note that FSAL_CEPH does not support subtree checking, so there is
        # no way to validate that a filehandle presented by a client is
        # reachable via an exported subtree.
        #
        # For that reason, we just export "/" here.
        Path = /;

        #
        # The pseudoroot path. This is where the export will appear in the
        # NFS pseudoroot namespace.
        #
        Pseudo = /ceph/;

        # We want to be able to read and write
        Access_Type = RW;

        # Time out attribute cache entries immediately
        Attr_Expiration_Time = 0;

        # Enable read delegations? libcephfs v13.0.1 and later allow the
        # ceph client to set a delegation. While it's possible to allow RW
        # delegations it's not recommended to enable them until ganesha
        # acquires CB_GETATTR support.
        #
        # Note too that delegations may not be safe in clustered
        # configurations, so it's probably best to just disable them until
        # this problem is resolved:
        #
        # http://tracker.ceph.com/issues/24802
        #
        # Delegations = R;

        # NFS servers usually decide to "squash" incoming requests from the
        # root user to a "nobody" user. It's possible to disable that, but for
        # now, we leave it enabled.
        # Squash = root;
        Squash = No_Root_Squash;

        FSAL {
                # FSAL_CEPH export
                Name = CEPH;

                # Ceph clusters have their own authentication scheme (cephx).
                # Ganesha acts as a cephfs client. This is the client username
                # to use. Note that this user will need to be created before
                # running ganesha. See:
                #
                # http://docs.ceph.com/docs/jewel/rados/operations/user-management/
                #
                # The default is to send a NULL here, which means that the
                # userid is auto-generated by libcephfs.
                #
                # User_Id = "ganesha";
                User_Id = "admin";
                #
                # Key to use for the session (if any). If not set, it uses the
                # normal search path for cephx keyring files to find a key:
                # Secret_Access_Key = "YOUR SECRET KEY HERE";
                # Secret_Access_Key = "AQB1cfZf43y6JhAAKbojCXvNOANEzlTvX9A4Xw==";
        }
}

# Config block for FSAL_CEPH
CEPH
{
        # Path to a ceph.conf file for this cluster.
        # Ceph_Conf = /etc/ceph/ceph.conf;
        Ceph_Conf = /etc/ceph/ceph.conf;

        # User file-creation mask. These bits will be masked off from the unix
        # permissions on newly-created inodes.
        # umask = 0;
}

#
# This is the config block for the RADOS RecoveryBackend. This is only
# used if you're storing the client recovery records in a RADOS object.
#
RADOS_KV
{
        # Path to a ceph.conf file for this cluster.
        # Ceph_Conf = /etc/ceph/ceph.conf;
        Ceph_Conf = /etc/ceph/ceph.conf;

        # The recoverybackend has its own ceph client. The default is to
        # let libcephfs autogenerate the userid. Note that RADOS_KV block does
        # not have a setting for Secret_Access_Key. A cephx keyring file must
        # be used for authenticated access.
        # UserId = "ganesharecov";
        UserId = "admin";

        # Pool ID of the ceph storage pool that contains the recovery objects.
        # The default is "nfs-ganesha".
        # pool = "nfs-ganesha";
        pool = "cephfs_meta";
        namespace = "ganesha-kv"

        # If using the rados_cluster backend, then consider setting a unique
        # nodeid for each running daemon here, particularly if this daemon
        # could end up migrating to a host with a different hostname. The
        # default is to use the hostname of the node where ganesha is running.
        # nodeid = hostname.example.com
}
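
The config above simply reuses the admin client everywhere. If you would rather follow the sample's advice and use dedicated cephx users, the RADOS_KV and RADOS_URLS clients only need access to the two namespaces; a rough sketch (the client name "ganesha" and the caps are mine, adjust to your setup):

ceph auth get-or-create client.ganesha \
    mon 'allow r' \
    osd 'allow rw pool=cephfs_meta namespace=ganesha-config, allow rw pool=cephfs_meta namespace=ganesha-kv'

The FSAL_CEPH client additionally needs regular CephFS client caps; ceph fs authorize is the usual way to generate those.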

Upload this config to the rados pool

root@pvetest2:~# rados put -p cephfs_meta -N ganesha-config ganesha.conf ganesha.conf
root@pvetest2:~# rados ls -p cephfs_meta -N ganesha-config
ganesha.conf
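
To double-check the uploaded object, it can be read back ("-" sends the output to stdout):

rados get -p cephfs_meta -N ganesha-config ganesha.conf -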

Use a %url line within the local /etc/ganesha/ganesha.conf to include the configuration file from the pool. The local file only needs the RADOS_URLS block and the %url line:

# Config block for rados:// URL access. It too uses its own client to access
# the object, separate from the FSAL_CEPH and RADOS_KV client.
RADOS_URLS
{
        # Path to a ceph.conf file for this cluster.
        # Ceph_Conf = /etc/ceph/ceph.conf;

        # RADOS_URLS use their own ceph client too. Authenticated access
        # requires a cephx keyring file.
        # UserId = "ganeshaurls";
        UserId = "admin";

        # We can also have ganesha watch a RADOS object for notifications, and
        # have it force a configuration reload when one comes in. Set this to
        # a valid rados:// URL to enable this feature.
        # watch_url = "rados://pool/namespace/object";
}

%url rados://cephfs_meta/ganesha-config/ganesha.conf
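
The RADOS_URLS block above also mentions watch_url. If it is set to the same rados:// URL as the %url line, ganesha should force a config reload whenever the object is notified; as far as I understand the feature, that can be triggered from any node (the message text is arbitrary):

rados -p cephfs_meta -N ganesha-config notify ganesha.conf reload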

For the rados_cluster backend to work, we need to create the initial node records in the ganesha-kv namespace. For that, install the nfs-ganesha-rados-grace package. Below we add host records for pve1.mife.ca, pve2.mife.ca and pve3.mife.ca:

apt install nfs-ganesha-rados-grace/buster-backports
ganesha-rados-grace -p cephfs_meta -n ganesha-kv add pve1.mife.ca
ganesha-rados-grace -p cephfs_meta -n ganesha-kv add pve2.mife.ca
ganesha-rados-grace -p cephfs_meta -n ganesha-kv add pve3.mife.ca
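
Should a node ever be decommissioned, it can be taken back out of the grace database with the matching remove subcommand:

ganesha-rados-grace -p cephfs_meta -n ganesha-kv remove pve3.mife.ca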

Check the status before starting

root@pve3:~# ganesha-rados-grace -p cephfs_meta -n ganesha-kv
cur=1 rec=0
======================================================
pve1.mife.ca     E
pve2.mife.ca     E
pve3.mife.ca     E

Restart ganesha so it picks up the configuration from RADOS

systemctl restart nfs-ganesha
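
Check that the daemon came up cleanly; with systemd, the unit's output lands in the journal:

systemctl status nfs-ganesha
journalctl -u nfs-ganesha -e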

Installation on the additional nodes

echo deb http://http.us.debian.org/debian buster-backports main > /etc/apt/sources.list.d/backports.list
apt update
apt install nfs-ganesha/buster-backports nfs-ganesha-ceph/buster-backports nfs-ganesha-vfs/buster-backports

scp pve3:/etc/ganesha/ganesha.conf /etc/ganesha/
systemctl restart nfs-ganesha
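
Finally, a quick smoke test from an NFS client. With Pseudo = /ceph/ and NFSv4.1 as configured above, a mount along these lines should work (the client-side mountpoint is illustrative):

mount -t nfs -o vers=4.1 pve1.mife.ca:/ceph /mnt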