ceph: MDS client

The MDS (metadata server) client is responsible for submitting
requests to the MDS cluster and parsing the response.  We decide which
MDS to submit each request to based on cached information about the
current partition of the directory hierarchy across the cluster.  A
stateful session is opened with each MDS before we submit requests to
it, and a mutex is used to control the ordering of messages within
each session.

An MDS request may generate two responses.  The first indicates the
operation was a success and returns any result.  A second reply is
sent when the operation commits to disk.  Note that locking on the MDS
ensures that the results of updates are visible only to the updating
client before the operation commits.  Requests are linked to the
containing directory so that an fsync will wait for them to commit.
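For illustration, here is a minimal sketch (not code from this commit;
the real logic lives in the suppressed mds_client.c) of how a caller
could block until a request's commit reply arrives, using the
r_safe_completion field and the refcounting helpers declared in
mds_client.h below:

	/* hypothetical helper: wait for one in-flight request to commit */
	static void wait_for_safe(struct ceph_mds_request *req)
	{
		ceph_mdsc_get_request(req);	/* hold a ref while sleeping */
		wait_for_completion(&req->r_safe_completion);
		ceph_mdsc_put_request(req);
	}

An fsync on a directory would then walk the requests linked via
r_unsafe_dir_item and wait on each one in turn.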

If an MDS fails and/or recovers, we resubmit requests as needed.  We
also reconnect existing capabilities to a recovering MDS to
reestablish that shared session state.  Old dentry leases are
invalidated.

Signed-off-by: Sage Weil <sage@newdream.net>
Author: Sage Weil <sage@newdream.net>
Date:   2009-10-06 11:31:09 -07:00
parent 1d3576fd10
commit 2f2dc05340

4 changed files with 3452 additions and 0 deletions

fs/ceph/mds_client.c (new file, 2912 lines)
File diff suppressed because it is too large.

fs/ceph/mds_client.h (new file, 321 lines)

@@ -0,0 +1,321 @@
#ifndef _FS_CEPH_MDS_CLIENT_H
#define _FS_CEPH_MDS_CLIENT_H

#include <linux/completion.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>

#include "types.h"
#include "messenger.h"
#include "mdsmap.h"

/*
 * Some lock dependencies:
 *
 * session->s_mutex
 *         mdsc->mutex
 *
 *         mdsc->snap_rwsem
 *
 *         inode->i_lock
 *                 mdsc->snap_flush_lock
 *                 mdsc->cap_delay_lock
 *
 */

struct ceph_client;
struct ceph_cap;

/*
 * parsed info about a single inode.  pointers are into the encoded
 * on-wire structures within the mds reply message payload.
 */
struct ceph_mds_reply_info_in {
	struct ceph_mds_reply_inode *in;
	u32 symlink_len;
	char *symlink;
	u32 xattr_len;
	char *xattr_data;
};

/*
 * parsed info about an mds reply, including information about the
 * target inode and/or its parent directory and dentry, and directory
 * contents (for readdir results).
 */
struct ceph_mds_reply_info_parsed {
	struct ceph_mds_reply_head    *head;

	struct ceph_mds_reply_info_in diri, targeti;
	struct ceph_mds_reply_dirfrag *dirfrag;
	char                          *dname;
	u32                           dname_len;
	struct ceph_mds_reply_lease   *dlease;

	struct ceph_mds_reply_dirfrag *dir_dir;
	int                           dir_nr;
	char                          **dir_dname;
	u32                           *dir_dname_len;
	struct ceph_mds_reply_lease   **dir_dlease;
	struct ceph_mds_reply_info_in *dir_in;
	u8                            dir_complete, dir_end;

	/* encoded blob describing snapshot contexts for certain
	   operations (e.g., open) */
	void *snapblob;
	int snapblob_len;
};

/*
 * cap releases are batched and sent to the MDS en masse.
 */
#define CEPH_CAPS_PER_RELEASE ((PAGE_CACHE_SIZE -			\
				sizeof(struct ceph_mds_cap_release)) /	\
			       sizeof(struct ceph_mds_cap_item))
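/*
 * For a sense of scale (illustrative, assuming 4 KB pages): this works
 * out to (4096 - sizeof(struct ceph_mds_cap_release)) /
 * sizeof(struct ceph_mds_cap_item) items, i.e. on the order of a
 * hundred or more cap releases batched into a single page, depending
 * on the wire structure sizes in the shared ceph headers.
 */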
/*
 * state associated with each MDS<->client session
 */
enum {
	CEPH_MDS_SESSION_NEW = 1,
	CEPH_MDS_SESSION_OPENING = 2,
	CEPH_MDS_SESSION_OPEN = 3,
	CEPH_MDS_SESSION_HUNG = 4,
	CEPH_MDS_SESSION_CLOSING = 5,
	CEPH_MDS_SESSION_RESTARTING = 6,
	CEPH_MDS_SESSION_RECONNECTING = 7,
};

struct ceph_mds_session {
	struct ceph_mds_client *s_mdsc;
	int               s_mds;
	int               s_state;
	unsigned long     s_ttl;      /* time until mds kills us */
	u64               s_seq;      /* incoming msg seq # */
	struct mutex      s_mutex;    /* serialize session messages */
	struct ceph_connection s_con;

	/* protected by s_cap_lock */
	spinlock_t        s_cap_lock;
	u32               s_cap_gen;  /* inc each time we get mds stale msg */
	unsigned long     s_cap_ttl;  /* when session caps expire */
	struct list_head  s_caps;     /* all caps issued by this session */
	int               s_nr_caps, s_trim_caps;
	int               s_num_cap_releases;
	struct list_head  s_cap_releases; /* waiting cap_release messages */
	struct list_head  s_cap_releases_done; /* ready to send */

	/* protected by mutex */
	struct list_head  s_cap_flushing;     /* inodes w/ flushing caps */
	struct list_head  s_cap_snaps_flushing;
	unsigned long     s_renew_requested; /* last time we sent a renew req */
	u64               s_renew_seq;

	atomic_t          s_ref;
	struct list_head  s_waiting;  /* waiting requests */
	struct list_head  s_unsafe;   /* unsafe requests */
};

/*
 * modes of choosing which MDS to send a request to
 */
enum {
	USE_ANY_MDS,
	USE_RANDOM_MDS,
	USE_AUTH_MDS,   /* prefer authoritative mds for this metadata item */
};

struct ceph_mds_request;
struct ceph_mds_client;

/*
 * request completion callback
 */
typedef void (*ceph_mds_request_callback_t) (struct ceph_mds_client *mdsc,
					     struct ceph_mds_request *req);

/*
 * an in-flight mds request
 */
struct ceph_mds_request {
	u64 r_tid;                   /* transaction id */

	int r_op;                    /* mds op code */
	int r_mds;

	/* operation on what? */
	struct inode *r_inode;              /* arg1 */
	struct dentry *r_dentry;            /* arg1 */
	struct dentry *r_old_dentry;        /* arg2: rename from or link from */
	char *r_path1, *r_path2;
	struct ceph_vino r_ino1, r_ino2;

	struct inode *r_locked_dir;  /* dir (if any) i_mutex locked by vfs */
	struct inode *r_target_inode;       /* resulting inode */

	union ceph_mds_request_args r_args;
	int r_fmode;        /* file mode, if expecting cap */

	/* for choosing which mds to send this request to */
	int r_direct_mode;
	u32 r_direct_hash;      /* choose dir frag based on this dentry hash */
	bool r_direct_is_hash;  /* true if r_direct_hash is valid */

	/* data payload is used for xattr ops */
	struct page **r_pages;
	int r_num_pages;
	int r_data_len;

	/* what caps shall we drop? */
	int r_inode_drop, r_inode_unless;
	int r_dentry_drop, r_dentry_unless;
	int r_old_dentry_drop, r_old_dentry_unless;
	struct inode *r_old_inode;
	int r_old_inode_drop, r_old_inode_unless;

	struct ceph_msg  *r_request;  /* original request */
	struct ceph_msg  *r_reply;
	struct ceph_mds_reply_info_parsed r_reply_info;
	int r_err;

	unsigned long r_timeout;  /* optional.  jiffies */
	unsigned long r_started;  /* start time to measure timeout against */
	unsigned long r_request_started; /* start time for mds request only,
					    used to measure lease durations */

	/* link unsafe requests to parent directory, for fsync */
	struct inode     *r_unsafe_dir;
	struct list_head r_unsafe_dir_item;

	struct ceph_mds_session *r_session;

	int               r_attempts;   /* resend attempts */
	int               r_num_fwd;    /* number of forward attempts */
	int               r_num_stale;
	int               r_resend_mds; /* mds to resend to next, if any */

	atomic_t          r_ref;
	struct list_head  r_wait;
	struct completion r_completion;
	struct completion r_safe_completion;
	ceph_mds_request_callback_t r_callback;
	struct list_head  r_unsafe_item;  /* per-session unsafe list item */
	bool              r_got_unsafe, r_got_safe;

	bool              r_did_prepopulate;
	u32               r_readdir_offset;

	struct ceph_cap_reservation r_caps_reservation;
	int r_num_caps;
};
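/*
 * Request lifecycle, as described in the commit message: a request is
 * created and submitted to an mds; the first (unsafe) reply presumably
 * completes r_completion and sets r_got_unsafe, and the request stays
 * linked via r_unsafe_item and r_unsafe_dir_item until the commit
 * reply completes r_safe_completion and sets r_got_safe.
 */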
/*
 * mds client state
 */
struct ceph_mds_client {
	struct ceph_client      *client;
	struct mutex            mutex;         /* all nested structures */

	struct ceph_mdsmap      *mdsmap;
	struct completion       safe_umount_waiters, session_close_waiters;
	struct list_head        waiting_for_map;

	struct ceph_mds_session **sessions;    /* NULL for mds if no session */
	int                     max_sessions;  /* len of sessions array */
	int                     stopping;      /* true if shutting down */

	/*
	 * snap_rwsem will cover cap linkage into snaprealms, and
	 * realm snap contexts.  (later, we can do per-realm snap
	 * contexts locks..)  the empty list contains realms with no
	 * references (implying they contain no inodes with caps) that
	 * should be destroyed.
	 */
	struct rw_semaphore     snap_rwsem;
	struct radix_tree_root  snap_realms;
	struct list_head        snap_empty;
	spinlock_t              snap_empty_lock;  /* protect snap_empty */

	u64                    last_tid;      /* most recent mds request */
	struct radix_tree_root request_tree;  /* pending mds requests */
	struct delayed_work    delayed_work;  /* delayed work */
	unsigned long    last_renew_caps;  /* last time we renewed our caps */
	struct list_head cap_delay_list;   /* caps with delayed release */
	spinlock_t       cap_delay_lock;   /* protects cap_delay_list */
	struct list_head snap_flush_list;  /* cap_snaps ready to flush */
	spinlock_t       snap_flush_lock;

	u64               cap_flush_seq;
	struct list_head  cap_dirty;        /* inodes with dirty caps */
	int               num_cap_flushing; /* # caps we are flushing */
	spinlock_t        cap_dirty_lock;   /* protects above items */
	wait_queue_head_t cap_flushing_wq;

	struct dentry     *debugfs_file;

	spinlock_t        dentry_lru_lock;
	struct list_head  dentry_lru;
	int               num_dentry;
};
extern const char *ceph_mds_op_name(int op);

extern struct ceph_mds_session *
__ceph_lookup_mds_session(struct ceph_mds_client *, int mds);

static inline struct ceph_mds_session *
ceph_get_mds_session(struct ceph_mds_session *s)
{
	atomic_inc(&s->s_ref);
	return s;
}

extern void ceph_put_mds_session(struct ceph_mds_session *s);

extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc,
			     struct ceph_msg *msg, int mds);

extern void ceph_mdsc_init(struct ceph_mds_client *mdsc,
			   struct ceph_client *client);
extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc);
extern void ceph_mdsc_stop(struct ceph_mds_client *mdsc);

extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc);

extern void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc,
				    struct inode *inode,
				    struct dentry *dn, int mask);

extern struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode);
extern void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
				     struct ceph_mds_request *req);
extern int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
				struct inode *dir,
				struct ceph_mds_request *req);
static inline void ceph_mdsc_get_request(struct ceph_mds_request *req)
{
	atomic_inc(&req->r_ref);
}
extern void ceph_mdsc_put_request(struct ceph_mds_request *req);

extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);

extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
				  int stop_on_nosnap);

extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry);
extern void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
				     struct inode *inode,
				     struct dentry *dentry, char action,
				     u32 seq);

extern void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc,
				 struct ceph_msg *msg);

#endif
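As a usage sketch of the request API above (assuming an op code such as
CEPH_MDS_OP_GETATTR from the shared ceph headers, an mdsc and inode held
by the caller, and that ceph_mdsc_create_request follows the ERR_PTR
convention ceph_mdsmap_decode uses below; the actual callers are in the
suppressed mds_client.c):

	struct ceph_mds_request *req;
	int err;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;		/* arg1: the inode to operate on */
	err = ceph_mdsc_do_request(mdsc, NULL, req);	/* blocks for reply */
	/* on success, parsed results are in req->r_reply_info */
	ceph_mdsc_put_request(req);	/* drop our reference */
	return err;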

fs/ceph/mdsmap.c (new file, 166 lines)

@@ -0,0 +1,166 @@
#include "ceph_debug.h"
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "mdsmap.h"
#include "messenger.h"
#include "decode.h"
#include "super.h"
/*
* choose a random mds that is "up" (i.e. has a state > 0), or -1.
*/
int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m)
{
int n = 0;
int i;
char r;
/* count */
for (i = 0; i < m->m_max_mds; i++)
if (m->m_info[i].state > 0)
n++;
if (n == 0)
return -1;
/* pick */
get_random_bytes(&r, 1);
n = r % n;
i = 0;
for (i = 0; n > 0; i++, n--)
while (m->m_info[i].state <= 0)
i++;
return i;
}
/*
 * Decode an MDS map
 *
 * Ignore any fields we don't care about (there are quite a few of
 * them).
 */
struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
{
	struct ceph_mdsmap *m;
	int i, j, n;
	int err = -EINVAL;
	u16 version;

	m = kzalloc(sizeof(*m), GFP_NOFS);
	if (m == NULL)
		return ERR_PTR(-ENOMEM);

	ceph_decode_16_safe(p, end, version, bad);

	ceph_decode_need(p, end, 8*sizeof(u32) + sizeof(u64), bad);
	ceph_decode_32(p, m->m_epoch);
	ceph_decode_32(p, m->m_client_epoch);
	ceph_decode_32(p, m->m_last_failure);
	ceph_decode_32(p, m->m_root);
	ceph_decode_32(p, m->m_session_timeout);
	ceph_decode_32(p, m->m_session_autoclose);
	ceph_decode_64(p, m->m_max_file_size);
	ceph_decode_32(p, m->m_max_mds);

	m->m_info = kcalloc(m->m_max_mds, sizeof(*m->m_info), GFP_NOFS);
	if (m->m_info == NULL)
		goto badmem;

	/* pick out active nodes from mds_info (state > 0) */
	ceph_decode_32(p, n);
	for (i = 0; i < n; i++) {
		u32 namelen;
		s32 mds, inc, state;
		u64 state_seq;
		u8 infoversion;
		struct ceph_entity_addr addr;
		u32 num_export_targets;
		void *pexport_targets = NULL;

		ceph_decode_need(p, end, sizeof(addr) + 1 + sizeof(u32), bad);
		*p += sizeof(addr);          /* skip addr key */
		ceph_decode_8(p, infoversion);
		ceph_decode_32(p, namelen);  /* skip mds name */
		*p += namelen;

		ceph_decode_need(p, end,
				 4*sizeof(u32) + sizeof(u64) +
				 sizeof(addr) + sizeof(struct ceph_timespec),
				 bad);
		ceph_decode_32(p, mds);
		ceph_decode_32(p, inc);
		ceph_decode_32(p, state);
		ceph_decode_64(p, state_seq);
		ceph_decode_copy(p, &addr, sizeof(addr));
		*p += sizeof(struct ceph_timespec);
		*p += sizeof(u32);
		ceph_decode_32_safe(p, end, namelen, bad);
		*p += namelen;
		if (infoversion >= 2) {
			ceph_decode_32_safe(p, end, num_export_targets, bad);
			pexport_targets = *p;
			*p += num_export_targets * sizeof(u32);
		} else {
			num_export_targets = 0;
		}

		dout("mdsmap_decode %d/%d mds%d.%d %s %s\n",
		     i+1, n, mds, inc, pr_addr(&addr.in_addr),
		     ceph_mds_state_name(state));
		if (mds >= 0 && mds < m->m_max_mds && state > 0) {
			m->m_info[mds].state = state;
			m->m_info[mds].addr = addr;
			m->m_info[mds].num_export_targets = num_export_targets;
			if (num_export_targets) {
				m->m_info[mds].export_targets =
					kcalloc(num_export_targets,
						sizeof(u32), GFP_NOFS);
				if (m->m_info[mds].export_targets == NULL)
					goto badmem;
				for (j = 0; j < num_export_targets; j++)
					ceph_decode_32(&pexport_targets,
					       m->m_info[mds].export_targets[j]);
			} else {
				m->m_info[mds].export_targets = NULL;
			}
		}
	}
	/* pg_pools */
	ceph_decode_32_safe(p, end, n, bad);
	m->m_num_data_pg_pools = n;
	m->m_data_pg_pools = kcalloc(n, sizeof(u32), GFP_NOFS);
	if (!m->m_data_pg_pools)
		goto badmem;
	ceph_decode_need(p, end, sizeof(u32)*(n+1), bad);
	for (i = 0; i < n; i++)
		ceph_decode_32(p, m->m_data_pg_pools[i]);
	ceph_decode_32(p, m->m_cas_pg_pool);

	/* ok, we don't care about the rest. */
	dout("mdsmap_decode success epoch %u\n", m->m_epoch);
	return m;

badmem:
	err = -ENOMEM;
bad:
	pr_err("corrupt mdsmap\n");
	ceph_mdsmap_destroy(m);
	return ERR_PTR(err);
}

void ceph_mdsmap_destroy(struct ceph_mdsmap *m)
{
	int i;

	if (m->m_info) {
		for (i = 0; i < m->m_max_mds; i++)
			kfree(m->m_info[i].export_targets);
		kfree(m->m_info);
	}
	kfree(m->m_data_pg_pools);
	kfree(m);
}
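For context, a map-update handler (such as ceph_mdsc_handle_map
declared above, whose body is in the suppressed mds_client.c) would
drive the decoder roughly like this; the msg->front kvec layout is
assumed from messenger.h, and this is a sketch rather than the actual
suppressed code:

	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;
	struct ceph_mdsmap *newmap;

	newmap = ceph_mdsmap_decode(&p, end);
	if (IS_ERR(newmap))
		return;			/* corrupt map; already logged */
	if (mdsc->mdsmap)
		ceph_mdsmap_destroy(mdsc->mdsmap);
	mdsc->mdsmap = newmap;		/* install the new map */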

fs/ceph/mdsmap.h (new file, 53 lines)

@@ -0,0 +1,53 @@
#ifndef _FS_CEPH_MDSMAP_H
#define _FS_CEPH_MDSMAP_H

#include "types.h"

/*
 * mds map - describe servers in the mds cluster.
 *
 * we limit fields to those the client actually cares about
 */
struct ceph_mds_info {
	struct ceph_entity_addr addr;
	s32 state;
	int num_export_targets;
	u32 *export_targets;
};

struct ceph_mdsmap {
	u32 m_epoch, m_client_epoch, m_last_failure;
	u32 m_root;
	u32 m_session_timeout;          /* seconds */
	u32 m_session_autoclose;        /* seconds */
	u64 m_max_file_size;
	u32 m_max_mds;                  /* size of m_info array */
	struct ceph_mds_info *m_info;

	/* which object pools file data can be stored in */
	int m_num_data_pg_pools;
	u32 *m_data_pg_pools;
	u32 m_cas_pg_pool;
};

static inline struct ceph_entity_addr *
ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
{
	if (w >= m->m_max_mds)
		return NULL;
	return &m->m_info[w].addr;
}

static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w)
{
	BUG_ON(w < 0);
	if (w >= m->m_max_mds)
		return CEPH_MDS_STATE_DNE;
	return m->m_info[w].state;
}

extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end);
extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);

#endif
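As a final usage note, the helpers above can be combined to test
whether a given rank is usable before opening a session; a minimal
sketch (the mds_is_up name is hypothetical):

	static inline bool mds_is_up(struct ceph_mdsmap *m, int mds)
	{
		/* "up" means state > 0, as in ceph_mdsmap_get_random_mds() */
		return mds >= 0 && ceph_mdsmap_get_state(m, mds) > 0;
	}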