Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client

Pull Ceph updates from Sage Weil:
 "A few groups of patches here.  Alex has been hard at work improving
  the RBD code, laying groundwork for understanding the new formats and
  doing layering.  Most of the infrastructure is now in place for the
  final bits that will come with the next window.

  There are a few changes to the data layout.  Jim Schutt's patch fixes
  some non-ideal CRUSH behavior, and a set of patches from me updates
  the client to speak a newer version of the protocol and implement an
  improved hashing strategy across storage nodes (when the server side
  supports it too).

  A pair of patches from Sam Lang fix the atomicity of open+create
  operations.  Several patches from Yan, Zheng fix various mds/client
  issues that turned up during multi-mds torture tests.

  A final set of patches exposes file layouts via virtual xattrs, and
  allows the policies to be set on directories via xattrs as well
  (avoiding the awkward ioctl interface and providing a consistent
  interface for both kernel mount and ceph-fuse users)."
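
As a rough illustration of that xattr interface, here is a hedged user-space sketch (not part of the series itself; it assumes only the virtual xattr names added here, such as "ceph.file.layout" and "ceph.file.layout.pool", plus the standard getxattr(2) call):

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/xattr.h>

    int main(int argc, char **argv)
    {
        char buf[256];
        ssize_t len;

        if (argc < 2) {
            fprintf(stderr, "usage: %s <file-on-cephfs>\n", argv[0]);
            return 1;
        }

        /* Composite layout vxattr; the kernel formats it as
         * "stripe_unit=N stripe_count=N object_size=N pool=..." */
        len = getxattr(argv[1], "ceph.file.layout", buf, sizeof(buf) - 1);
        if (len < 0) {
            perror("getxattr ceph.file.layout");
            return 1;
        }
        buf[len] = '\0';
        printf("layout: %s\n", buf);

        /* Per-field vxattrs are exposed as well, e.g. the pool. */
        len = getxattr(argv[1], "ceph.file.layout.pool", buf, sizeof(buf) - 1);
        if (len >= 0) {
            buf[len] = '\0';
            printf("pool:   %s\n", buf);
        }
        return 0;
    }

Directory layout policies would presumably be set the same way, via setxattr(2) on "ceph.dir.layout" or its per-field names, which is what replaces the old ioctl path for both kernel-mount and ceph-fuse users.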

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client: (143 commits)
  libceph: add support for HASHPSPOOL pool flag
  libceph: update osd request/reply encoding
  libceph: calculate placement based on the internal data types
  ceph: update support for PGID64, PGPOOL3, OSDENC protocol features
  ceph: update "ceph_features.h"
  libceph: decode into cpu-native ceph_pg type
  libceph: rename ceph_pg -> ceph_pg_v1
  rbd: pass length, not op for osd completions
  rbd: move rbd_osd_trivial_callback()
  libceph: use a do..while loop in con_work()
  libceph: use a flag to indicate a fault has occurred
  libceph: separate non-locked fault handling
  libceph: encapsulate connection backoff
  libceph: eliminate sparse warnings
  ceph: eliminate sparse warnings in fs code
  rbd: eliminate sparse warnings
  libceph: define connection flag helpers
  rbd: normalize dout() calls
  rbd: barriers are hard
  rbd: ignore zero-length requests
  ...
commit 1cf0209c43
Linus Torvalds, 2013-02-28 17:43:09 -08:00
32 changed files with 2450 additions and 1576 deletions

File diff suppressed because it is too large.


@ -236,16 +236,10 @@ static int ceph_readpage(struct file *filp, struct page *page)
static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
{
struct inode *inode = req->r_inode;
struct ceph_osd_reply_head *replyhead;
int rc, bytes;
int rc = req->r_result;
int bytes = le32_to_cpu(msg->hdr.data_len);
int i;
/* parse reply */
replyhead = msg->front.iov_base;
WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
rc = le32_to_cpu(replyhead->result);
bytes = le32_to_cpu(msg->hdr.data_len);
dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes);
/* unlock all pages, zeroing any data we didn't read */
@ -315,7 +309,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
NULL, 0,
ci->i_truncate_seq, ci->i_truncate_size,
NULL, false, 1, 0);
NULL, false, 0);
if (IS_ERR(req))
return PTR_ERR(req);
@ -492,8 +486,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
&ci->i_layout, snapc,
page_off, len,
ci->i_truncate_seq, ci->i_truncate_size,
&inode->i_mtime,
&page, 1, 0, 0, true);
&inode->i_mtime, &page, 1);
if (err < 0) {
dout("writepage setting page/mapping error %d %p\n", err, page);
SetPageError(page);
@ -554,27 +547,18 @@ static void writepages_finish(struct ceph_osd_request *req,
struct ceph_msg *msg)
{
struct inode *inode = req->r_inode;
struct ceph_osd_reply_head *replyhead;
struct ceph_osd_op *op;
struct ceph_inode_info *ci = ceph_inode(inode);
unsigned wrote;
struct page *page;
int i;
struct ceph_snap_context *snapc = req->r_snapc;
struct address_space *mapping = inode->i_mapping;
__s32 rc = -EIO;
u64 bytes = 0;
int rc = req->r_result;
u64 bytes = le64_to_cpu(req->r_request_ops[0].extent.length);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
long writeback_stat;
unsigned issued = ceph_caps_issued(ci);
/* parse reply */
replyhead = msg->front.iov_base;
WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
op = (void *)(replyhead + 1);
rc = le32_to_cpu(replyhead->result);
bytes = le64_to_cpu(op->extent.length);
if (rc >= 0) {
/*
* Assume we wrote the pages we originally sent. The
@ -741,8 +725,6 @@ retry:
struct page *page;
int want;
u64 offset, len;
struct ceph_osd_request_head *reqhead;
struct ceph_osd_op *op;
long writeback_stat;
next = 0;
@ -838,7 +820,7 @@ get_more_pages:
snapc, do_sync,
ci->i_truncate_seq,
ci->i_truncate_size,
&inode->i_mtime, true, 1, 0);
&inode->i_mtime, true, 0);
if (IS_ERR(req)) {
rc = PTR_ERR(req);
@ -906,10 +888,8 @@ get_more_pages:
/* revise final length, page count */
req->r_num_pages = locked_pages;
reqhead = req->r_request->front.iov_base;
op = (void *)(reqhead + 1);
op->extent.length = cpu_to_le64(len);
op->payload_len = cpu_to_le32(len);
req->r_request_ops[0].extent.length = cpu_to_le64(len);
req->r_request_ops[0].payload_len = cpu_to_le32(len);
req->r_request->hdr.data_len = cpu_to_le32(len);
rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);


@ -611,8 +611,16 @@ retry:
if (flags & CEPH_CAP_FLAG_AUTH)
ci->i_auth_cap = cap;
else if (ci->i_auth_cap == cap)
else if (ci->i_auth_cap == cap) {
ci->i_auth_cap = NULL;
spin_lock(&mdsc->cap_dirty_lock);
if (!list_empty(&ci->i_dirty_item)) {
dout(" moving %p to cap_dirty_migrating\n", inode);
list_move(&ci->i_dirty_item,
&mdsc->cap_dirty_migrating);
}
spin_unlock(&mdsc->cap_dirty_lock);
}
dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
@ -1460,7 +1468,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
struct ceph_mds_client *mdsc = fsc->mdsc;
struct inode *inode = &ci->vfs_inode;
struct ceph_cap *cap;
int file_wanted, used;
int file_wanted, used, cap_used;
int took_snap_rwsem = 0; /* true if mdsc->snap_rwsem held */
int issued, implemented, want, retain, revoking, flushing = 0;
int mds = -1; /* keep track of how far we've gone through i_caps list
@ -1563,9 +1571,14 @@ retry_locked:
/* NOTE: no side-effects allowed, until we take s_mutex */
cap_used = used;
if (ci->i_auth_cap && cap != ci->i_auth_cap)
cap_used &= ~ci->i_auth_cap->issued;
revoking = cap->implemented & ~cap->issued;
dout(" mds%d cap %p issued %s implemented %s revoking %s\n",
dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n",
cap->mds, cap, ceph_cap_string(cap->issued),
ceph_cap_string(cap_used),
ceph_cap_string(cap->implemented),
ceph_cap_string(revoking));
@ -1593,7 +1606,7 @@ retry_locked:
}
/* completed revocation? going down and there are no caps? */
if (revoking && (revoking & used) == 0) {
if (revoking && (revoking & cap_used) == 0) {
dout("completed revocation of %s\n",
ceph_cap_string(cap->implemented & ~cap->issued));
goto ack;
@ -1670,8 +1683,8 @@ ack:
sent++;
/* __send_cap drops i_ceph_lock */
delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
retain, flushing, NULL);
delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, cap_used,
want, retain, flushing, NULL);
goto retry; /* retake i_ceph_lock and restart our cap scan. */
}
@ -2417,7 +2430,9 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
dout("mds wanted %s -> %s\n",
ceph_cap_string(le32_to_cpu(grant->wanted)),
ceph_cap_string(wanted));
grant->wanted = cpu_to_le32(wanted);
/* imported cap may not have correct mds_wanted */
if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT)
check_caps = 1;
}
cap->seq = seq;
@ -2821,6 +2836,9 @@ void ceph_handle_caps(struct ceph_mds_session *session,
dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
(unsigned)seq);
if (op == CEPH_CAP_OP_IMPORT)
ceph_add_cap_releases(mdsc, session);
/* lookup ino */
inode = ceph_find_inode(sb, vino);
ci = ceph_inode(inode);


@ -243,6 +243,9 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
err = ceph_mdsc_do_request(mdsc,
(flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
req);
if (err)
goto out_err;
err = ceph_handle_snapdir(req, dentry, err);
if (err == 0 && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
@ -263,6 +266,9 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
err = finish_no_open(file, dn);
} else {
dout("atomic_open finish_open on dn %p\n", dn);
if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
*opened |= FILE_CREATED;
}
err = finish_open(file, dentry, ceph_open, opened);
}
@ -535,7 +541,7 @@ more:
ci->i_snap_realm->cached_context,
do_sync,
ci->i_truncate_seq, ci->i_truncate_size,
&mtime, false, 2, page_align);
&mtime, false, page_align);
if (IS_ERR(req))
return PTR_ERR(req);


@ -185,7 +185,6 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
&ceph_sb_to_client(inode->i_sb)->client->osdc;
u64 len = 1, olen;
u64 tmp;
struct ceph_object_layout ol;
struct ceph_pg pgid;
int r;
@ -194,7 +193,7 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
return -EFAULT;
down_read(&osdc->map_sem);
r = ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, &len,
r = ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, len,
&dl.object_no, &dl.object_offset,
&olen);
if (r < 0)
@ -209,10 +208,9 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
snprintf(dl.object_name, sizeof(dl.object_name), "%llx.%08llx",
ceph_ino(inode), dl.object_no);
ceph_calc_object_layout(&ol, dl.object_name, &ci->i_layout,
ceph_calc_object_layout(&pgid, dl.object_name, &ci->i_layout,
osdc->osdmap);
pgid = ol.ol_pgid;
dl.osd = ceph_calc_pg_primary(osdc->osdmap, pgid);
if (dl.osd >= 0) {
struct ceph_entity_addr *a =


@ -232,6 +232,30 @@ bad:
return -EIO;
}
/*
* parse create results
*/
static int parse_reply_info_create(void **p, void *end,
struct ceph_mds_reply_info_parsed *info,
int features)
{
if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
if (*p == end) {
info->has_create_ino = false;
} else {
info->has_create_ino = true;
info->ino = ceph_decode_64(p);
}
}
if (unlikely(*p != end))
goto bad;
return 0;
bad:
return -EIO;
}
/*
* parse extra results
*/
@ -241,8 +265,12 @@ static int parse_reply_info_extra(void **p, void *end,
{
if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
return parse_reply_info_filelock(p, end, info, features);
else
else if (info->head->op == CEPH_MDS_OP_READDIR)
return parse_reply_info_dir(p, end, info, features);
else if (info->head->op == CEPH_MDS_OP_CREATE)
return parse_reply_info_create(p, end, info, features);
else
return -EIO;
}
/*
@ -2170,7 +2198,8 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
mutex_lock(&req->r_fill_mutex);
err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
if (err == 0) {
if (result == 0 && req->r_op != CEPH_MDS_OP_GETFILELOCK &&
if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
req->r_op == CEPH_MDS_OP_LSSNAP) &&
rinfo->dir_nr)
ceph_readdir_prepopulate(req, req->r_session);
ceph_unreserve_caps(mdsc, &req->r_caps_reservation);


@ -74,6 +74,12 @@ struct ceph_mds_reply_info_parsed {
struct ceph_mds_reply_info_in *dir_in;
u8 dir_complete, dir_end;
};
/* for create results */
struct {
bool has_create_ino;
u64 ino;
};
};
/* encoded blob describing snapshot contexts for certain


@ -59,6 +59,10 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
return ERR_PTR(-ENOMEM);
ceph_decode_16_safe(p, end, version, bad);
if (version > 3) {
pr_warning("got mdsmap version %d > 3, failing", version);
goto bad;
}
ceph_decode_need(p, end, 8*sizeof(u32) + sizeof(u64), bad);
m->m_epoch = ceph_decode_32(p);
@ -144,13 +148,13 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
/* pg_pools */
ceph_decode_32_safe(p, end, n, bad);
m->m_num_data_pg_pools = n;
m->m_data_pg_pools = kcalloc(n, sizeof(u32), GFP_NOFS);
m->m_data_pg_pools = kcalloc(n, sizeof(u64), GFP_NOFS);
if (!m->m_data_pg_pools)
goto badmem;
ceph_decode_need(p, end, sizeof(u32)*(n+1), bad);
ceph_decode_need(p, end, sizeof(u64)*(n+1), bad);
for (i = 0; i < n; i++)
m->m_data_pg_pools[i] = ceph_decode_32(p);
m->m_cas_pg_pool = ceph_decode_32(p);
m->m_data_pg_pools[i] = ceph_decode_64(p);
m->m_cas_pg_pool = ceph_decode_64(p);
/* ok, we don't care about the rest. */
dout("mdsmap_decode success epoch %u\n", m->m_epoch);


@ -15,6 +15,7 @@ const char *ceph_mds_state_name(int s)
case CEPH_MDS_STATE_BOOT: return "up:boot";
case CEPH_MDS_STATE_STANDBY: return "up:standby";
case CEPH_MDS_STATE_STANDBY_REPLAY: return "up:standby-replay";
case CEPH_MDS_STATE_REPLAYONCE: return "up:oneshot-replay";
case CEPH_MDS_STATE_CREATING: return "up:creating";
case CEPH_MDS_STATE_STARTING: return "up:starting";
/* up and in */
@ -50,10 +51,13 @@ const char *ceph_mds_op_name(int op)
case CEPH_MDS_OP_LOOKUP: return "lookup";
case CEPH_MDS_OP_LOOKUPHASH: return "lookuphash";
case CEPH_MDS_OP_LOOKUPPARENT: return "lookupparent";
case CEPH_MDS_OP_LOOKUPINO: return "lookupino";
case CEPH_MDS_OP_GETATTR: return "getattr";
case CEPH_MDS_OP_SETXATTR: return "setxattr";
case CEPH_MDS_OP_SETATTR: return "setattr";
case CEPH_MDS_OP_RMXATTR: return "rmxattr";
case CEPH_MDS_OP_SETLAYOUT: return "setlayout";
case CEPH_MDS_OP_SETDIRLAYOUT: return "setdirlayout";
case CEPH_MDS_OP_READDIR: return "readdir";
case CEPH_MDS_OP_MKNOD: return "mknod";
case CEPH_MDS_OP_LINK: return "link";


@ -71,8 +71,14 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
/*
* express utilization in terms of large blocks to avoid
* overflow on 32-bit machines.
*
* NOTE: for the time being, we make bsize == frsize to humor
* not-yet-ancient versions of glibc that are broken.
* Someday, we will probably want to report a real block
* size... whatever that may mean for a network file system!
*/
buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;
buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
@ -80,7 +86,6 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_files = le64_to_cpu(st.num_objects);
buf->f_ffree = -1;
buf->f_namelen = NAME_MAX;
buf->f_frsize = PAGE_CACHE_SIZE;
/* leave fsid little-endian, regardless of host endianness */
fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1);


@ -21,7 +21,7 @@
/* large granularity for statfs utilization stats to facilitate
* large volume sizes on 32-bit machines. */
#define CEPH_BLOCK_SHIFT 20 /* 1 MB */
#define CEPH_BLOCK_SHIFT 22 /* 4 MB */
#define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT)
#define CEPH_MOUNT_OPT_DIRSTAT (1<<4) /* `cat dirname` for stats */
@ -798,13 +798,7 @@ extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
/* file.c */
extern const struct file_operations ceph_file_fops;
extern const struct address_space_operations ceph_aops;
extern int ceph_copy_to_page_vector(struct page **pages,
const char *data,
loff_t off, size_t len);
extern int ceph_copy_from_page_vector(struct page **pages,
char *data,
loff_t off, size_t len);
extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
extern int ceph_open(struct inode *inode, struct file *file);
extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned flags, umode_t mode,


@ -29,9 +29,94 @@ struct ceph_vxattr {
size_t name_size; /* strlen(name) + 1 (for '\0') */
size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
size_t size);
bool readonly;
bool readonly, hidden;
bool (*exists_cb)(struct ceph_inode_info *ci);
};
/* layouts */
static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
{
size_t s;
char *p = (char *)&ci->i_layout;
for (s = 0; s < sizeof(ci->i_layout); s++, p++)
if (*p)
return true;
return false;
}
static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
size_t size)
{
int ret;
struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
struct ceph_osd_client *osdc = &fsc->client->osdc;
s64 pool = ceph_file_layout_pg_pool(ci->i_layout);
const char *pool_name;
dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode);
down_read(&osdc->map_sem);
pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
if (pool_name)
ret = snprintf(val, size,
"stripe_unit=%lld stripe_count=%lld object_size=%lld pool=%s",
(unsigned long long)ceph_file_layout_su(ci->i_layout),
(unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
(unsigned long long)ceph_file_layout_object_size(ci->i_layout),
pool_name);
else
ret = snprintf(val, size,
"stripe_unit=%lld stripe_count=%lld object_size=%lld pool=%lld",
(unsigned long long)ceph_file_layout_su(ci->i_layout),
(unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
(unsigned long long)ceph_file_layout_object_size(ci->i_layout),
(unsigned long long)pool);
up_read(&osdc->map_sem);
return ret;
}
static size_t ceph_vxattrcb_layout_stripe_unit(struct ceph_inode_info *ci,
char *val, size_t size)
{
return snprintf(val, size, "%lld",
(unsigned long long)ceph_file_layout_su(ci->i_layout));
}
static size_t ceph_vxattrcb_layout_stripe_count(struct ceph_inode_info *ci,
char *val, size_t size)
{
return snprintf(val, size, "%lld",
(unsigned long long)ceph_file_layout_stripe_count(ci->i_layout));
}
static size_t ceph_vxattrcb_layout_object_size(struct ceph_inode_info *ci,
char *val, size_t size)
{
return snprintf(val, size, "%lld",
(unsigned long long)ceph_file_layout_object_size(ci->i_layout));
}
static size_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
char *val, size_t size)
{
int ret;
struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
struct ceph_osd_client *osdc = &fsc->client->osdc;
s64 pool = ceph_file_layout_pg_pool(ci->i_layout);
const char *pool_name;
down_read(&osdc->map_sem);
pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
if (pool_name)
ret = snprintf(val, size, "%s", pool_name);
else
ret = snprintf(val, size, "%lld", (unsigned long long)pool);
up_read(&osdc->map_sem);
return ret;
}
/* directories */
static size_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
@ -83,17 +168,43 @@ static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
(long)ci->i_rctime.tv_nsec);
}
#define CEPH_XATTR_NAME(_type, _name) XATTR_CEPH_PREFIX #_type "." #_name
#define XATTR_NAME_CEPH(_type, _name) \
{ \
.name = CEPH_XATTR_NAME(_type, _name), \
.name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \
.getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
.readonly = true, \
}
#define CEPH_XATTR_NAME(_type, _name) XATTR_CEPH_PREFIX #_type "." #_name
#define CEPH_XATTR_NAME2(_type, _name, _name2) \
XATTR_CEPH_PREFIX #_type "." #_name "." #_name2
#define XATTR_NAME_CEPH(_type, _name) \
{ \
.name = CEPH_XATTR_NAME(_type, _name), \
.name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \
.getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
.readonly = true, \
.hidden = false, \
.exists_cb = NULL, \
}
#define XATTR_LAYOUT_FIELD(_type, _name, _field) \
{ \
.name = CEPH_XATTR_NAME2(_type, _name, _field), \
.name_size = sizeof (CEPH_XATTR_NAME2(_type, _name, _field)), \
.getxattr_cb = ceph_vxattrcb_ ## _name ## _ ## _field, \
.readonly = false, \
.hidden = true, \
.exists_cb = ceph_vxattrcb_layout_exists, \
}
static struct ceph_vxattr ceph_dir_vxattrs[] = {
{
.name = "ceph.dir.layout",
.name_size = sizeof("ceph.dir.layout"),
.getxattr_cb = ceph_vxattrcb_layout,
.readonly = false,
.hidden = false,
.exists_cb = ceph_vxattrcb_layout_exists,
},
XATTR_LAYOUT_FIELD(dir, layout, stripe_unit),
XATTR_LAYOUT_FIELD(dir, layout, stripe_count),
XATTR_LAYOUT_FIELD(dir, layout, object_size),
XATTR_LAYOUT_FIELD(dir, layout, pool),
XATTR_NAME_CEPH(dir, entries),
XATTR_NAME_CEPH(dir, files),
XATTR_NAME_CEPH(dir, subdirs),
@ -102,35 +213,26 @@ static struct ceph_vxattr ceph_dir_vxattrs[] = {
XATTR_NAME_CEPH(dir, rsubdirs),
XATTR_NAME_CEPH(dir, rbytes),
XATTR_NAME_CEPH(dir, rctime),
{ 0 } /* Required table terminator */
{ .name = NULL, 0 } /* Required table terminator */
};
static size_t ceph_dir_vxattrs_name_size; /* total size of all names */
/* files */
static size_t ceph_vxattrcb_file_layout(struct ceph_inode_info *ci, char *val,
size_t size)
{
int ret;
ret = snprintf(val, size,
"chunk_bytes=%lld\nstripe_count=%lld\nobject_size=%lld\n",
(unsigned long long)ceph_file_layout_su(ci->i_layout),
(unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
(unsigned long long)ceph_file_layout_object_size(ci->i_layout));
return ret;
}
static struct ceph_vxattr ceph_file_vxattrs[] = {
XATTR_NAME_CEPH(file, layout),
/* The following extended attribute name is deprecated */
{
.name = XATTR_CEPH_PREFIX "layout",
.name_size = sizeof (XATTR_CEPH_PREFIX "layout"),
.getxattr_cb = ceph_vxattrcb_file_layout,
.readonly = true,
.name = "ceph.file.layout",
.name_size = sizeof("ceph.file.layout"),
.getxattr_cb = ceph_vxattrcb_layout,
.readonly = false,
.hidden = false,
.exists_cb = ceph_vxattrcb_layout_exists,
},
{ 0 } /* Required table terminator */
XATTR_LAYOUT_FIELD(file, layout, stripe_unit),
XATTR_LAYOUT_FIELD(file, layout, stripe_count),
XATTR_LAYOUT_FIELD(file, layout, object_size),
XATTR_LAYOUT_FIELD(file, layout, pool),
{ .name = NULL, 0 } /* Required table terminator */
};
static size_t ceph_file_vxattrs_name_size; /* total size of all names */
@ -164,7 +266,8 @@ static size_t __init vxattrs_name_size(struct ceph_vxattr *vxattrs)
size_t size = 0;
for (vxattr = vxattrs; vxattr->name; vxattr++)
size += vxattr->name_size;
if (!vxattr->hidden)
size += vxattr->name_size;
return size;
}
@ -572,13 +675,17 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
if (!ceph_is_valid_xattr(name))
return -ENODATA;
/* let's see if a virtual xattr was requested */
vxattr = ceph_match_vxattr(inode, name);
spin_lock(&ci->i_ceph_lock);
dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
ci->i_xattrs.version, ci->i_xattrs.index_version);
/* let's see if a virtual xattr was requested */
vxattr = ceph_match_vxattr(inode, name);
if (vxattr && !(vxattr->exists_cb && !vxattr->exists_cb(ci))) {
err = vxattr->getxattr_cb(ci, value, size);
goto out;
}
if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
(ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
goto get_xattr;
@ -592,11 +699,6 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
spin_lock(&ci->i_ceph_lock);
if (vxattr && vxattr->readonly) {
err = vxattr->getxattr_cb(ci, value, size);
goto out;
}
err = __build_xattrs(inode);
if (err < 0)
goto out;
@ -604,11 +706,8 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
get_xattr:
err = -ENODATA; /* == ENOATTR */
xattr = __get_xattr(ci, name);
if (!xattr) {
if (vxattr)
err = vxattr->getxattr_cb(ci, value, size);
if (!xattr)
goto out;
}
err = -ERANGE;
if (size && size < xattr->val_len)
@ -664,23 +763,30 @@ list_xattr:
vir_namelen = ceph_vxattrs_name_size(vxattrs);
/* adding 1 byte per each variable due to the null termination */
namelen = vir_namelen + ci->i_xattrs.names_size + ci->i_xattrs.count;
namelen = ci->i_xattrs.names_size + ci->i_xattrs.count;
err = -ERANGE;
if (size && namelen > size)
if (size && vir_namelen + namelen > size)
goto out;
err = namelen;
err = namelen + vir_namelen;
if (size == 0)
goto out;
names = __copy_xattr_names(ci, names);
/* virtual xattr names, too */
if (vxattrs)
err = namelen;
if (vxattrs) {
for (i = 0; vxattrs[i].name; i++) {
len = sprintf(names, "%s", vxattrs[i].name);
names += len + 1;
if (!vxattrs[i].hidden &&
!(vxattrs[i].exists_cb &&
!vxattrs[i].exists_cb(ci))) {
len = sprintf(names, "%s", vxattrs[i].name);
names += len + 1;
err += len + 1;
}
}
}
out:
spin_unlock(&ci->i_ceph_lock);
@ -782,6 +888,10 @@ int ceph_setxattr(struct dentry *dentry, const char *name,
if (vxattr && vxattr->readonly)
return -EOPNOTSUPP;
/* pass any unhandled ceph.* xattrs through to the MDS */
if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
goto do_sync_unlocked;
/* preallocate memory for xattr name, value, index node */
err = -ENOMEM;
newname = kmemdup(name, name_len + 1, GFP_NOFS);
@ -838,6 +948,7 @@ retry:
do_sync:
spin_unlock(&ci->i_ceph_lock);
do_sync_unlocked:
err = ceph_sync_setxattr(dentry, name, value, size, flags);
out:
kfree(newname);
@ -892,6 +1003,10 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
if (vxattr && vxattr->readonly)
return -EOPNOTSUPP;
/* pass any unhandled ceph.* xattrs through to the MDS */
if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
goto do_sync_unlocked;
err = -ENOMEM;
spin_lock(&ci->i_ceph_lock);
retry:
@ -931,6 +1046,7 @@ retry:
return err;
do_sync:
spin_unlock(&ci->i_ceph_lock);
do_sync_unlocked:
err = ceph_send_removexattr(dentry, name);
out:
return err;


@ -12,16 +12,46 @@
#define CEPH_FEATURE_MONNAMES (1<<5)
#define CEPH_FEATURE_RECONNECT_SEQ (1<<6)
#define CEPH_FEATURE_DIRLAYOUTHASH (1<<7)
/* bits 8-17 defined by user-space; not supported yet here */
#define CEPH_FEATURE_OBJECTLOCATOR (1<<8)
#define CEPH_FEATURE_PGID64 (1<<9)
#define CEPH_FEATURE_INCSUBOSDMAP (1<<10)
#define CEPH_FEATURE_PGPOOL3 (1<<11)
#define CEPH_FEATURE_OSDREPLYMUX (1<<12)
#define CEPH_FEATURE_OSDENC (1<<13)
#define CEPH_FEATURE_OMAP (1<<14)
#define CEPH_FEATURE_MONENC (1<<15)
#define CEPH_FEATURE_QUERY_T (1<<16)
#define CEPH_FEATURE_INDEP_PG_MAP (1<<17)
#define CEPH_FEATURE_CRUSH_TUNABLES (1<<18)
#define CEPH_FEATURE_CHUNKY_SCRUB (1<<19)
#define CEPH_FEATURE_MON_NULLROUTE (1<<20)
#define CEPH_FEATURE_MON_GV (1<<21)
#define CEPH_FEATURE_BACKFILL_RESERVATION (1<<22)
#define CEPH_FEATURE_MSG_AUTH (1<<23)
#define CEPH_FEATURE_RECOVERY_RESERVATION (1<<24)
#define CEPH_FEATURE_CRUSH_TUNABLES2 (1<<25)
#define CEPH_FEATURE_CREATEPOOLID (1<<26)
#define CEPH_FEATURE_REPLY_CREATE_INODE (1<<27)
#define CEPH_FEATURE_OSD_HBMSGS (1<<28)
#define CEPH_FEATURE_MDSENC (1<<29)
#define CEPH_FEATURE_OSDHASHPSPOOL (1<<30)
/*
* Features supported.
*/
#define CEPH_FEATURES_SUPPORTED_DEFAULT \
(CEPH_FEATURE_NOSRCADDR | \
CEPH_FEATURE_CRUSH_TUNABLES)
(CEPH_FEATURE_NOSRCADDR | \
CEPH_FEATURE_PGID64 | \
CEPH_FEATURE_PGPOOL3 | \
CEPH_FEATURE_OSDENC | \
CEPH_FEATURE_CRUSH_TUNABLES | \
CEPH_FEATURE_CRUSH_TUNABLES2 | \
CEPH_FEATURE_REPLY_CREATE_INODE | \
CEPH_FEATURE_OSDHASHPSPOOL)
#define CEPH_FEATURES_REQUIRED_DEFAULT \
(CEPH_FEATURE_NOSRCADDR)
(CEPH_FEATURE_NOSRCADDR | \
CEPH_FEATURE_PGID64 | \
CEPH_FEATURE_PGPOOL3 | \
CEPH_FEATURE_OSDENC)
#endif


@ -21,16 +21,14 @@
* internal cluster protocols separately from the public,
* client-facing protocol.
*/
#define CEPH_OSD_PROTOCOL 8 /* cluster internal */
#define CEPH_MDS_PROTOCOL 12 /* cluster internal */
#define CEPH_MON_PROTOCOL 5 /* cluster internal */
#define CEPH_OSDC_PROTOCOL 24 /* server/client */
#define CEPH_MDSC_PROTOCOL 32 /* server/client */
#define CEPH_MONC_PROTOCOL 15 /* server/client */
#define CEPH_INO_ROOT 1
#define CEPH_INO_CEPH 2 /* hidden .ceph dir */
#define CEPH_INO_ROOT 1
#define CEPH_INO_CEPH 2 /* hidden .ceph dir */
#define CEPH_INO_DOTDOT 3 /* used by ceph fuse for parent (..) */
/* arbitrary limit on max # of monitors (cluster of 3 is typical) */
#define CEPH_MAX_MON 31
@ -51,7 +49,7 @@ struct ceph_file_layout {
__le32 fl_object_stripe_unit; /* UNUSED. for per-object parity, if any */
/* object -> pg layout */
__le32 fl_unused; /* unused; used to be preferred primary (-1) */
__le32 fl_unused; /* unused; used to be preferred primary for pg (-1 for none) */
__le32 fl_pg_pool; /* namespace, crush ruleset, rep level */
} __attribute__ ((packed));
@ -101,6 +99,8 @@ struct ceph_dir_layout {
#define CEPH_MSG_MON_SUBSCRIBE_ACK 16
#define CEPH_MSG_AUTH 17
#define CEPH_MSG_AUTH_REPLY 18
#define CEPH_MSG_MON_GET_VERSION 19
#define CEPH_MSG_MON_GET_VERSION_REPLY 20
/* client <-> mds */
#define CEPH_MSG_MDS_MAP 21
@ -220,6 +220,11 @@ struct ceph_mon_subscribe_ack {
struct ceph_fsid fsid;
} __attribute__ ((packed));
/*
* mdsmap flags
*/
#define CEPH_MDSMAP_DOWN (1<<0) /* cluster deliberately down */
/*
* mds states
* > 0 -> in
@ -233,6 +238,7 @@ struct ceph_mon_subscribe_ack {
#define CEPH_MDS_STATE_CREATING -6 /* up, creating MDS instance. */
#define CEPH_MDS_STATE_STARTING -7 /* up, starting previously stopped mds */
#define CEPH_MDS_STATE_STANDBY_REPLAY -8 /* up, tailing active node's journal */
#define CEPH_MDS_STATE_REPLAYONCE -9 /* up, replaying an active node's journal */
#define CEPH_MDS_STATE_REPLAY 8 /* up, replaying journal. */
#define CEPH_MDS_STATE_RESOLVE 9 /* up, disambiguating distributed
@ -264,6 +270,7 @@ extern const char *ceph_mds_state_name(int s);
#define CEPH_LOCK_IXATTR 2048
#define CEPH_LOCK_IFLOCK 4096 /* advisory file locks */
#define CEPH_LOCK_INO 8192 /* immutable inode bits; not a lock */
#define CEPH_LOCK_IPOLICY 16384 /* policy lock on dirs. MDS internal */
/* client_session ops */
enum {
@ -338,6 +345,12 @@ extern const char *ceph_mds_op_name(int op);
#define CEPH_SETATTR_SIZE 32
#define CEPH_SETATTR_CTIME 64
/*
* Ceph setxattr request flags.
*/
#define CEPH_XATTR_CREATE 1
#define CEPH_XATTR_REPLACE 2
union ceph_mds_request_args {
struct {
__le32 mask; /* CEPH_CAP_* */
@ -522,14 +535,17 @@ int ceph_flags_to_mode(int flags);
#define CEPH_CAP_GWREXTEND 64 /* (file) client can extend EOF */
#define CEPH_CAP_GLAZYIO 128 /* (file) client can perform lazy io */
#define CEPH_CAP_SIMPLE_BITS 2
#define CEPH_CAP_FILE_BITS 8
/* per-lock shift */
#define CEPH_CAP_SAUTH 2
#define CEPH_CAP_SLINK 4
#define CEPH_CAP_SXATTR 6
#define CEPH_CAP_SFILE 8
#define CEPH_CAP_SFLOCK 20
#define CEPH_CAP_SFLOCK 20
#define CEPH_CAP_BITS 22
#define CEPH_CAP_BITS 22
/* composed values */
#define CEPH_CAP_AUTH_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SAUTH)

View File

@ -52,10 +52,10 @@ static inline int ceph_has_room(void **p, void *end, size_t n)
return end >= *p && n <= end - *p;
}
#define ceph_decode_need(p, end, n, bad) \
do { \
if (!likely(ceph_has_room(p, end, n))) \
goto bad; \
#define ceph_decode_need(p, end, n, bad) \
do { \
if (!likely(ceph_has_room(p, end, n))) \
goto bad; \
} while (0)
#define ceph_decode_64_safe(p, end, v, bad) \
@ -99,8 +99,8 @@ static inline int ceph_has_room(void **p, void *end, size_t n)
*
* There are two possible failures:
* - converting the string would require accessing memory at or
* beyond the "end" pointer provided (-E
* - memory could not be allocated for the result
* beyond the "end" pointer provided (-ERANGE)
* - memory could not be allocated for the result (-ENOMEM)
*/
static inline char *ceph_extract_encoded_string(void **p, void *end,
size_t *lenp, gfp_t gfp)
@ -217,10 +217,10 @@ static inline void ceph_encode_string(void **p, void *end,
*p += len;
}
#define ceph_encode_need(p, end, n, bad) \
do { \
if (!likely(ceph_has_room(p, end, n))) \
goto bad; \
#define ceph_encode_need(p, end, n, bad) \
do { \
if (!likely(ceph_has_room(p, end, n))) \
goto bad; \
} while (0)
#define ceph_encode_64_safe(p, end, v, bad) \
@ -231,12 +231,17 @@ static inline void ceph_encode_string(void **p, void *end,
#define ceph_encode_32_safe(p, end, v, bad) \
do { \
ceph_encode_need(p, end, sizeof(u32), bad); \
ceph_encode_32(p, v); \
ceph_encode_32(p, v); \
} while (0)
#define ceph_encode_16_safe(p, end, v, bad) \
do { \
ceph_encode_need(p, end, sizeof(u16), bad); \
ceph_encode_16(p, v); \
ceph_encode_16(p, v); \
} while (0)
#define ceph_encode_8_safe(p, end, v, bad) \
do { \
ceph_encode_need(p, end, sizeof(u8), bad); \
ceph_encode_8(p, v); \
} while (0)
#define ceph_encode_copy_safe(p, end, pv, n, bad) \


@ -193,6 +193,8 @@ static inline int calc_pages_for(u64 off, u64 len)
}
/* ceph_common.c */
extern bool libceph_compatible(void *data);
extern const char *ceph_msg_type_name(int type);
extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
extern struct kmem_cache *ceph_inode_cachep;
@ -220,7 +222,7 @@ extern int ceph_open_session(struct ceph_client *client);
/* pagevec.c */
extern void ceph_release_page_vector(struct page **pages, int num_pages);
extern struct page **ceph_get_direct_page_vector(const char __user *data,
extern struct page **ceph_get_direct_page_vector(const void __user *data,
int num_pages,
bool write_page);
extern void ceph_put_page_vector(struct page **pages, int num_pages,
@ -228,15 +230,15 @@ extern void ceph_put_page_vector(struct page **pages, int num_pages,
extern void ceph_release_page_vector(struct page **pages, int num_pages);
extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
extern int ceph_copy_user_to_page_vector(struct page **pages,
const char __user *data,
const void __user *data,
loff_t off, size_t len);
extern int ceph_copy_to_page_vector(struct page **pages,
const char *data,
extern void ceph_copy_to_page_vector(struct page **pages,
const void *data,
loff_t off, size_t len);
extern int ceph_copy_from_page_vector(struct page **pages,
char *data,
extern void ceph_copy_from_page_vector(struct page **pages,
void *data,
loff_t off, size_t len);
extern int ceph_copy_page_vector_to_user(struct page **pages, char __user *data,
extern int ceph_copy_page_vector_to_user(struct page **pages, void __user *data,
loff_t off, size_t len);
extern void ceph_zero_page_vector_range(int off, int len, struct page **pages);


@ -29,8 +29,8 @@ struct ceph_mdsmap {
/* which object pools file data can be stored in */
int m_num_data_pg_pools;
u32 *m_data_pg_pools;
u32 m_cas_pg_pool;
u64 *m_data_pg_pools;
u64 m_cas_pg_pool;
};
static inline struct ceph_entity_addr *


@ -83,9 +83,11 @@ struct ceph_msg {
struct list_head list_head;
struct kref kref;
#ifdef CONFIG_BLOCK
struct bio *bio; /* instead of pages/pagelist */
struct bio *bio_iter; /* bio iterator */
int bio_seg; /* current bio segment */
#endif /* CONFIG_BLOCK */
struct ceph_pagelist *trail; /* the trailing part of the data */
bool front_is_vmalloc;
bool more_to_follow;


@ -10,6 +10,7 @@
#include <linux/ceph/osdmap.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>
/*
* Maximum object name size
@ -22,7 +23,6 @@ struct ceph_snap_context;
struct ceph_osd_request;
struct ceph_osd_client;
struct ceph_authorizer;
struct ceph_pagelist;
/*
* completion callback for async writepages
@ -47,6 +47,9 @@ struct ceph_osd {
struct list_head o_keepalive_item;
};
#define CEPH_OSD_MAX_OP 10
/* an in-flight request */
struct ceph_osd_request {
u64 r_tid; /* unique for this client */
@ -63,9 +66,23 @@ struct ceph_osd_request {
struct ceph_connection *r_con_filling_msg;
struct ceph_msg *r_request, *r_reply;
int r_result;
int r_flags; /* any additional flags for the osd */
u32 r_sent; /* >0 if r_request is sending/sent */
int r_num_ops;
/* encoded message content */
struct ceph_osd_op *r_request_ops;
/* these are updated on each send */
__le32 *r_request_osdmap_epoch;
__le32 *r_request_flags;
__le64 *r_request_pool;
void *r_request_pgid;
__le32 *r_request_attempts;
struct ceph_eversion *r_request_reassert_version;
int r_result;
int r_reply_op_len[CEPH_OSD_MAX_OP];
s32 r_reply_op_result[CEPH_OSD_MAX_OP];
int r_got_reply;
int r_linger;
@ -82,6 +99,7 @@ struct ceph_osd_request {
char r_oid[MAX_OBJ_NAME_SIZE]; /* object name */
int r_oid_len;
u64 r_snapid;
unsigned long r_stamp; /* send OR check time */
struct ceph_file_layout r_file_layout;
@ -95,7 +113,7 @@ struct ceph_osd_request {
struct bio *r_bio; /* instead of pages */
#endif
struct ceph_pagelist *r_trail; /* trailing part of the data */
struct ceph_pagelist r_trail; /* trailing part of the data */
};
struct ceph_osd_event {
@ -107,7 +125,6 @@ struct ceph_osd_event {
struct rb_node node;
struct list_head osd_node;
struct kref kref;
struct completion completion;
};
struct ceph_osd_event_work {
@ -157,7 +174,7 @@ struct ceph_osd_client {
struct ceph_osd_req_op {
u16 op; /* CEPH_OSD_OP_* */
u32 flags; /* CEPH_OSD_FLAG_* */
u32 payload_len;
union {
struct {
u64 offset, length;
@ -166,23 +183,24 @@ struct ceph_osd_req_op {
} extent;
struct {
const char *name;
u32 name_len;
const char *val;
u32 name_len;
u32 value_len;
__u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */
__u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */
} xattr;
struct {
const char *class_name;
__u8 class_len;
const char *method_name;
__u8 method_len;
__u8 argc;
const char *indata;
u32 indata_len;
__u8 class_len;
__u8 method_len;
__u8 argc;
} cls;
struct {
u64 cookie, count;
u64 cookie;
u64 count;
} pgls;
struct {
u64 snapid;
@ -190,12 +208,11 @@ struct ceph_osd_req_op {
struct {
u64 cookie;
u64 ver;
__u8 flag;
u32 prot_ver;
u32 timeout;
__u8 flag;
} watch;
};
u32 payload_len;
};
extern int ceph_osdc_init(struct ceph_osd_client *osdc,
@ -207,29 +224,19 @@ extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
extern int ceph_calc_raw_layout(struct ceph_osd_client *osdc,
struct ceph_file_layout *layout,
u64 snapid,
u64 off, u64 *plen, u64 *bno,
struct ceph_osd_request *req,
struct ceph_osd_req_op *op);
extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
int flags,
struct ceph_snap_context *snapc,
struct ceph_osd_req_op *ops,
unsigned int num_op,
bool use_mempool,
gfp_t gfp_flags,
struct page **pages,
struct bio *bio);
gfp_t gfp_flags);
extern void ceph_osdc_build_request(struct ceph_osd_request *req,
u64 off, u64 *plen,
u64 off, u64 len,
unsigned int num_op,
struct ceph_osd_req_op *src_ops,
struct ceph_snap_context *snapc,
struct timespec *mtime,
const char *oid,
int oid_len);
u64 snap_id,
struct timespec *mtime);
extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
struct ceph_file_layout *layout,
@ -239,8 +246,7 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
int do_sync, u32 truncate_seq,
u64 truncate_size,
struct timespec *mtime,
bool use_mempool, int num_reply,
int page_align);
bool use_mempool, int page_align);
extern void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
struct ceph_osd_request *req);
@ -279,17 +285,13 @@ extern int ceph_osdc_writepages(struct ceph_osd_client *osdc,
u64 off, u64 len,
u32 truncate_seq, u64 truncate_size,
struct timespec *mtime,
struct page **pages, int nr_pages,
int flags, int do_sync, bool nofail);
struct page **pages, int nr_pages);
/* watch/notify events */
extern int ceph_osdc_create_event(struct ceph_osd_client *osdc,
void (*event_cb)(u64, u64, u8, void *),
int one_shot, void *data,
struct ceph_osd_event **pevent);
void *data, struct ceph_osd_event **pevent);
extern void ceph_osdc_cancel_event(struct ceph_osd_event *event);
extern int ceph_osdc_wait_event(struct ceph_osd_event *event,
unsigned long timeout);
extern void ceph_osdc_put_event(struct ceph_osd_event *event);
#endif


@ -18,14 +18,31 @@
* The map can be updated either via an incremental map (diff) describing
* the change between two successive epochs, or as a fully encoded map.
*/
struct ceph_pg {
uint64_t pool;
uint32_t seed;
};
#define CEPH_POOL_FLAG_HASHPSPOOL 1
struct ceph_pg_pool_info {
struct rb_node node;
int id;
struct ceph_pg_pool v;
int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask;
s64 id;
u8 type;
u8 size;
u8 crush_ruleset;
u8 object_hash;
u32 pg_num, pgp_num;
int pg_num_mask, pgp_num_mask;
u64 flags;
char *name;
};
struct ceph_object_locator {
uint64_t pool;
char *key;
};
struct ceph_pg_mapping {
struct rb_node node;
struct ceph_pg pgid;
@ -110,15 +127,16 @@ extern void ceph_osdmap_destroy(struct ceph_osdmap *map);
/* calculate mapping of a file extent to an object */
extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
u64 off, u64 *plen,
u64 off, u64 len,
u64 *bno, u64 *oxoff, u64 *oxlen);
/* calculate mapping of object to a placement group */
extern int ceph_calc_object_layout(struct ceph_object_layout *ol,
extern int ceph_calc_object_layout(struct ceph_pg *pg,
const char *oid,
struct ceph_file_layout *fl,
struct ceph_osdmap *osdmap);
extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap,
struct ceph_pg pgid,
int *acting);
extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap,
struct ceph_pg pgid);


@ -8,14 +8,6 @@
#include <linux/ceph/msgr.h>
/*
* osdmap encoding versions
*/
#define CEPH_OSDMAP_INC_VERSION 5
#define CEPH_OSDMAP_INC_VERSION_EXT 6
#define CEPH_OSDMAP_VERSION 5
#define CEPH_OSDMAP_VERSION_EXT 6
/*
* fs id
*/
@ -64,7 +56,7 @@ struct ceph_timespec {
* placement group.
* we encode this into one __le64.
*/
struct ceph_pg {
struct ceph_pg_v1 {
__le16 preferred; /* preferred primary osd */
__le16 ps; /* placement seed */
__le32 pool; /* object pool */
@ -91,21 +83,6 @@ struct ceph_pg {
#define CEPH_PG_TYPE_REP 1
#define CEPH_PG_TYPE_RAID4 2
#define CEPH_PG_POOL_VERSION 2
struct ceph_pg_pool {
__u8 type; /* CEPH_PG_TYPE_* */
__u8 size; /* number of osds in each pg */
__u8 crush_ruleset; /* crush placement rule */
__u8 object_hash; /* hash mapping object name to ps */
__le32 pg_num, pgp_num; /* number of pg's */
__le32 lpg_num, lpgp_num; /* number of localized pg's */
__le32 last_change; /* most recent epoch changed */
__le64 snap_seq; /* seq for per-pool snapshot */
__le32 snap_epoch; /* epoch of last snap */
__le32 num_snaps;
__le32 num_removed_snap_intervals; /* if non-empty, NO per-pool snaps */
__le64 auid; /* who owns the pg */
} __attribute__ ((packed));
/*
* stable_mod func is used to control number of placement groups.
@ -128,7 +105,7 @@ static inline int ceph_stable_mod(int x, int b, int bmask)
* object layout - how a given object should be stored.
*/
struct ceph_object_layout {
struct ceph_pg ol_pgid; /* raw pg, with _full_ ps precision. */
struct ceph_pg_v1 ol_pgid; /* raw pg, with _full_ ps precision. */
__le32 ol_stripe_unit; /* for per-object parity, if any */
} __attribute__ ((packed));
@ -145,8 +122,12 @@ struct ceph_eversion {
*/
/* status bits */
#define CEPH_OSD_EXISTS 1
#define CEPH_OSD_UP 2
#define CEPH_OSD_EXISTS (1<<0)
#define CEPH_OSD_UP (1<<1)
#define CEPH_OSD_AUTOOUT (1<<2) /* osd was automatically marked out */
#define CEPH_OSD_NEW (1<<3) /* osd is new, never marked in */
extern const char *ceph_osd_state_name(int s);
/* osd weights. fixed point value: 0x10000 == 1.0 ("in"), 0 == "out" */
#define CEPH_OSD_IN 0x10000
@ -161,9 +142,25 @@ struct ceph_eversion {
#define CEPH_OSDMAP_PAUSERD (1<<2) /* pause all reads */
#define CEPH_OSDMAP_PAUSEWR (1<<3) /* pause all writes */
#define CEPH_OSDMAP_PAUSEREC (1<<4) /* pause recovery */
#define CEPH_OSDMAP_NOUP (1<<5) /* block osd boot */
#define CEPH_OSDMAP_NODOWN (1<<6) /* block osd mark-down/failure */
#define CEPH_OSDMAP_NOOUT (1<<7) /* block osd auto mark-out */
#define CEPH_OSDMAP_NOIN (1<<8) /* block osd auto mark-in */
#define CEPH_OSDMAP_NOBACKFILL (1<<9) /* block osd backfill */
#define CEPH_OSDMAP_NORECOVER (1<<10) /* block osd recovery and backfill */
/*
* The error code to return when an OSD can't handle a write
* because it is too large.
*/
#define OSD_WRITETOOBIG EMSGSIZE
/*
* osd ops
*
* WARNING: do not use these op codes directly. Use the helpers
* defined below instead. In certain cases, op code behavior was
* redefined, resulting in special-cases in the helpers.
*/
#define CEPH_OSD_OP_MODE 0xf000
#define CEPH_OSD_OP_MODE_RD 0x1000
@ -177,6 +174,7 @@ struct ceph_eversion {
#define CEPH_OSD_OP_TYPE_ATTR 0x0300
#define CEPH_OSD_OP_TYPE_EXEC 0x0400
#define CEPH_OSD_OP_TYPE_PG 0x0500
#define CEPH_OSD_OP_TYPE_MULTI 0x0600 /* multiobject */
enum {
/** data **/
@ -217,6 +215,23 @@ enum {
CEPH_OSD_OP_WATCH = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 15,
/* omap */
CEPH_OSD_OP_OMAPGETKEYS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 17,
CEPH_OSD_OP_OMAPGETVALS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 18,
CEPH_OSD_OP_OMAPGETHEADER = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 19,
CEPH_OSD_OP_OMAPGETVALSBYKEYS =
CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 20,
CEPH_OSD_OP_OMAPSETVALS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 21,
CEPH_OSD_OP_OMAPSETHEADER = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 22,
CEPH_OSD_OP_OMAPCLEAR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 23,
CEPH_OSD_OP_OMAPRMKEYS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 24,
CEPH_OSD_OP_OMAP_CMP = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 25,
/** multi **/
CEPH_OSD_OP_CLONERANGE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_MULTI | 1,
CEPH_OSD_OP_ASSERT_SRC_VERSION = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_MULTI | 2,
CEPH_OSD_OP_SRC_CMPXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_MULTI | 3,
/** attrs **/
/* read */
CEPH_OSD_OP_GETXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 1,
@ -238,6 +253,7 @@ enum {
CEPH_OSD_OP_SCRUB_RESERVE = CEPH_OSD_OP_MODE_SUB | 6,
CEPH_OSD_OP_SCRUB_UNRESERVE = CEPH_OSD_OP_MODE_SUB | 7,
CEPH_OSD_OP_SCRUB_STOP = CEPH_OSD_OP_MODE_SUB | 8,
CEPH_OSD_OP_SCRUB_MAP = CEPH_OSD_OP_MODE_SUB | 9,
/** lock **/
CEPH_OSD_OP_WRLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 1,
@ -248,10 +264,12 @@ enum {
CEPH_OSD_OP_DNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 6,
/** exec **/
/* note: the RD bit here is wrong; see special-case below in helper */
CEPH_OSD_OP_CALL = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_EXEC | 1,
/** pg **/
CEPH_OSD_OP_PGLS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_PG | 1,
CEPH_OSD_OP_PGLS_FILTER = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_PG | 2,
};
static inline int ceph_osd_op_type_lock(int op)
@ -274,6 +292,10 @@ static inline int ceph_osd_op_type_pg(int op)
{
return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_PG;
}
static inline int ceph_osd_op_type_multi(int op)
{
return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_MULTI;
}
static inline int ceph_osd_op_mode_subop(int op)
{
@ -281,11 +303,12 @@ static inline int ceph_osd_op_mode_subop(int op)
}
static inline int ceph_osd_op_mode_read(int op)
{
return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_RD;
return (op & CEPH_OSD_OP_MODE_RD) &&
op != CEPH_OSD_OP_CALL;
}
static inline int ceph_osd_op_mode_modify(int op)
{
return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_WR;
return op & CEPH_OSD_OP_MODE_WR;
}
/*
@ -294,34 +317,38 @@ static inline int ceph_osd_op_mode_modify(int op)
*/
#define CEPH_OSD_TMAP_HDR 'h'
#define CEPH_OSD_TMAP_SET 's'
#define CEPH_OSD_TMAP_CREATE 'c' /* create key */
#define CEPH_OSD_TMAP_RM 'r'
#define CEPH_OSD_TMAP_RMSLOPPY 'R'
extern const char *ceph_osd_op_name(int op);
/*
* osd op flags
*
* An op may be READ, WRITE, or READ|WRITE.
*/
enum {
CEPH_OSD_FLAG_ACK = 1, /* want (or is) "ack" ack */
CEPH_OSD_FLAG_ONNVRAM = 2, /* want (or is) "onnvram" ack */
CEPH_OSD_FLAG_ONDISK = 4, /* want (or is) "ondisk" ack */
CEPH_OSD_FLAG_RETRY = 8, /* resend attempt */
CEPH_OSD_FLAG_READ = 16, /* op may read */
CEPH_OSD_FLAG_WRITE = 32, /* op may write */
CEPH_OSD_FLAG_ORDERSNAP = 64, /* EOLDSNAP if snapc is out of order */
CEPH_OSD_FLAG_PEERSTAT = 128, /* msg includes osd_peer_stat */
CEPH_OSD_FLAG_BALANCE_READS = 256,
CEPH_OSD_FLAG_PARALLELEXEC = 512, /* execute op in parallel */
CEPH_OSD_FLAG_PGOP = 1024, /* pg op, no object */
CEPH_OSD_FLAG_EXEC = 2048, /* op may exec */
CEPH_OSD_FLAG_EXEC_PUBLIC = 4096, /* op may exec (public) */
CEPH_OSD_FLAG_ACK = 0x0001, /* want (or is) "ack" ack */
CEPH_OSD_FLAG_ONNVRAM = 0x0002, /* want (or is) "onnvram" ack */
CEPH_OSD_FLAG_ONDISK = 0x0004, /* want (or is) "ondisk" ack */
CEPH_OSD_FLAG_RETRY = 0x0008, /* resend attempt */
CEPH_OSD_FLAG_READ = 0x0010, /* op may read */
CEPH_OSD_FLAG_WRITE = 0x0020, /* op may write */
CEPH_OSD_FLAG_ORDERSNAP = 0x0040, /* EOLDSNAP if snapc is out of order */
CEPH_OSD_FLAG_PEERSTAT_OLD = 0x0080, /* DEPRECATED msg includes osd_peer_stat */
CEPH_OSD_FLAG_BALANCE_READS = 0x0100,
CEPH_OSD_FLAG_PARALLELEXEC = 0x0200, /* execute op in parallel */
CEPH_OSD_FLAG_PGOP = 0x0400, /* pg op, no object */
CEPH_OSD_FLAG_EXEC = 0x0800, /* op may exec */
CEPH_OSD_FLAG_EXEC_PUBLIC = 0x1000, /* DEPRECATED op may exec (public) */
CEPH_OSD_FLAG_LOCALIZE_READS = 0x2000, /* read from nearby replica, if any */
CEPH_OSD_FLAG_RWORDERED = 0x4000, /* order wrt concurrent reads */
};
enum {
CEPH_OSD_OP_FLAG_EXCL = 1, /* EXCL object create */
CEPH_OSD_OP_FLAG_FAILOK = 2, /* continue despite failure */
};
#define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/
@ -381,48 +408,13 @@ struct ceph_osd_op {
__le64 ver;
__u8 flag; /* 0 = unwatch, 1 = watch */
} __attribute__ ((packed)) watch;
};
struct {
__le64 offset, length;
__le64 src_offset;
} __attribute__ ((packed)) clonerange;
};
__le32 payload_len;
} __attribute__ ((packed));
/*
* osd request message header. each request may include multiple
* ceph_osd_op object operations.
*/
struct ceph_osd_request_head {
__le32 client_inc; /* client incarnation */
struct ceph_object_layout layout; /* pgid */
__le32 osdmap_epoch; /* client's osdmap epoch */
__le32 flags;
struct ceph_timespec mtime; /* for mutations only */
struct ceph_eversion reassert_version; /* if we are replaying op */
__le32 object_len; /* length of object name */
__le64 snapid; /* snapid to read */
__le64 snap_seq; /* writer's snap context */
__le32 num_snaps;
__le16 num_ops;
struct ceph_osd_op ops[]; /* followed by ops[], obj, ticket, snaps */
} __attribute__ ((packed));
struct ceph_osd_reply_head {
__le32 client_inc; /* client incarnation */
__le32 flags;
struct ceph_object_layout layout;
__le32 osdmap_epoch;
struct ceph_eversion reassert_version; /* for replaying uncommitted */
__le32 result; /* result code */
__le32 object_len; /* length of object name */
__le32 num_ops;
struct ceph_osd_op ops[0]; /* ops[], object */
} __attribute__ ((packed));
#endif


@ -162,6 +162,8 @@ struct crush_map {
__u32 choose_local_fallback_tries;
/* choose attempts before giving up */
__u32 choose_total_tries;
/* attempt chooseleaf inner descent once; on failure retry outer descent */
__u32 chooseleaf_descend_once;
};


@ -28,6 +28,22 @@
#include "crypto.h"
/*
* Module compatibility interface. For now it doesn't do anything,
* but its existence signals a certain level of functionality.
*
* The data buffer is used to pass information both to and from
* libceph. The return value indicates whether libceph determines
* it is compatible with the caller (from another kernel module),
* given the provided data.
*
* The data pointer can be null.
*/
bool libceph_compatible(void *data)
{
return true;
}
EXPORT_SYMBOL(libceph_compatible);
/*
* find filename portion of a path (/foo/bar/baz -> baz)
@ -590,10 +606,8 @@ static int __init init_ceph_lib(void)
if (ret < 0)
goto out_crypto;
pr_info("loaded (mon/osd proto %d/%d, osdmap %d/%d %d/%d)\n",
CEPH_MONC_PROTOCOL, CEPH_OSDC_PROTOCOL,
CEPH_OSDMAP_VERSION, CEPH_OSDMAP_VERSION_EXT,
CEPH_OSDMAP_INC_VERSION, CEPH_OSDMAP_INC_VERSION_EXT);
pr_info("loaded (mon/osd proto %d/%d)\n",
CEPH_MONC_PROTOCOL, CEPH_OSDC_PROTOCOL);
return 0;


@ -21,9 +21,15 @@ const char *ceph_osd_op_name(int op)
switch (op) {
case CEPH_OSD_OP_READ: return "read";
case CEPH_OSD_OP_STAT: return "stat";
case CEPH_OSD_OP_MAPEXT: return "mapext";
case CEPH_OSD_OP_SPARSE_READ: return "sparse-read";
case CEPH_OSD_OP_NOTIFY: return "notify";
case CEPH_OSD_OP_NOTIFY_ACK: return "notify-ack";
case CEPH_OSD_OP_ASSERT_VER: return "assert-version";
case CEPH_OSD_OP_MASKTRUNC: return "masktrunc";
case CEPH_OSD_OP_CREATE: return "create";
case CEPH_OSD_OP_WRITE: return "write";
case CEPH_OSD_OP_DELETE: return "delete";
case CEPH_OSD_OP_TRUNCATE: return "truncate";
@ -39,6 +45,11 @@ const char *ceph_osd_op_name(int op)
case CEPH_OSD_OP_TMAPUP: return "tmapup";
case CEPH_OSD_OP_TMAPGET: return "tmapget";
case CEPH_OSD_OP_TMAPPUT: return "tmapput";
case CEPH_OSD_OP_WATCH: return "watch";
case CEPH_OSD_OP_CLONERANGE: return "clonerange";
case CEPH_OSD_OP_ASSERT_SRC_VERSION: return "assert-src-version";
case CEPH_OSD_OP_SRC_CMPXATTR: return "src-cmpxattr";
case CEPH_OSD_OP_GETXATTR: return "getxattr";
case CEPH_OSD_OP_GETXATTRS: return "getxattrs";
@ -53,6 +64,10 @@ const char *ceph_osd_op_name(int op)
case CEPH_OSD_OP_BALANCEREADS: return "balance-reads";
case CEPH_OSD_OP_UNBALANCEREADS: return "unbalance-reads";
case CEPH_OSD_OP_SCRUB: return "scrub";
case CEPH_OSD_OP_SCRUB_RESERVE: return "scrub-reserve";
case CEPH_OSD_OP_SCRUB_UNRESERVE: return "scrub-unreserve";
case CEPH_OSD_OP_SCRUB_STOP: return "scrub-stop";
case CEPH_OSD_OP_SCRUB_MAP: return "scrub-map";
case CEPH_OSD_OP_WRLOCK: return "wrlock";
case CEPH_OSD_OP_WRUNLOCK: return "wrunlock";
@ -64,10 +79,34 @@ const char *ceph_osd_op_name(int op)
case CEPH_OSD_OP_CALL: return "call";
case CEPH_OSD_OP_PGLS: return "pgls";
case CEPH_OSD_OP_PGLS_FILTER: return "pgls-filter";
case CEPH_OSD_OP_OMAPGETKEYS: return "omap-get-keys";
case CEPH_OSD_OP_OMAPGETVALS: return "omap-get-vals";
case CEPH_OSD_OP_OMAPGETHEADER: return "omap-get-header";
case CEPH_OSD_OP_OMAPGETVALSBYKEYS: return "omap-get-vals-by-keys";
case CEPH_OSD_OP_OMAPSETVALS: return "omap-set-vals";
case CEPH_OSD_OP_OMAPSETHEADER: return "omap-set-header";
case CEPH_OSD_OP_OMAPCLEAR: return "omap-clear";
case CEPH_OSD_OP_OMAPRMKEYS: return "omap-rm-keys";
}
return "???";
}
const char *ceph_osd_state_name(int s)
{
switch (s) {
case CEPH_OSD_EXISTS:
return "exists";
case CEPH_OSD_UP:
return "up";
case CEPH_OSD_AUTOOUT:
return "autoout";
case CEPH_OSD_NEW:
return "new";
default:
return "???";
}
}
const char *ceph_pool_op_name(int op)
{


@ -287,6 +287,7 @@ static int is_out(const struct crush_map *map, const __u32 *weight, int item, in
* @outpos: our position in that vector
* @firstn: true if choosing "first n" items, false if choosing "indep"
* @recurse_to_leaf: true if we want one device under each item of given type
* @descend_once: true if we should only try one descent before giving up
* @out2: second output vector for leaf items (if @recurse_to_leaf)
*/
static int crush_choose(const struct crush_map *map,
@ -295,7 +296,7 @@ static int crush_choose(const struct crush_map *map,
int x, int numrep, int type,
int *out, int outpos,
int firstn, int recurse_to_leaf,
int *out2)
int descend_once, int *out2)
{
int rep;
unsigned int ftotal, flocal;
@ -391,7 +392,7 @@ static int crush_choose(const struct crush_map *map,
}
reject = 0;
if (recurse_to_leaf) {
if (!collide && recurse_to_leaf) {
if (item < 0) {
if (crush_choose(map,
map->buckets[-1-item],
@ -399,6 +400,7 @@ static int crush_choose(const struct crush_map *map,
x, outpos+1, 0,
out2, outpos,
firstn, 0,
map->chooseleaf_descend_once,
NULL) <= outpos)
/* didn't get leaf */
reject = 1;
@ -422,7 +424,10 @@ reject:
ftotal++;
flocal++;
if (collide && flocal <= map->choose_local_tries)
if (reject && descend_once)
/* let outer call try again */
skip_rep = 1;
else if (collide && flocal <= map->choose_local_tries)
/* retry locally a few times */
retry_bucket = 1;
else if (map->choose_local_fallback_tries > 0 &&
@ -485,6 +490,7 @@ int crush_do_rule(const struct crush_map *map,
int i, j;
int numrep;
int firstn;
const int descend_once = 0;
if ((__u32)ruleno >= map->max_rules) {
dprintk(" bad ruleno %d\n", ruleno);
@ -544,7 +550,8 @@ int crush_do_rule(const struct crush_map *map,
curstep->arg2,
o+osize, j,
firstn,
recurse_to_leaf, c+osize);
recurse_to_leaf,
descend_once, c+osize);
}
if (recurse_to_leaf)


@ -423,7 +423,8 @@ int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
}
}
int ceph_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
static int ceph_key_instantiate(struct key *key,
struct key_preparsed_payload *prep)
{
struct ceph_crypto_key *ckey;
size_t datalen = prep->datalen;
@ -458,12 +459,12 @@ err:
return ret;
}
int ceph_key_match(const struct key *key, const void *description)
static int ceph_key_match(const struct key *key, const void *description)
{
return strcmp(key->description, description) == 0;
}
void ceph_key_destroy(struct key *key) {
static void ceph_key_destroy(struct key *key) {
struct ceph_crypto_key *ckey = key->payload.data;
ceph_crypto_key_destroy(ckey);


@ -66,9 +66,9 @@ static int osdmap_show(struct seq_file *s, void *p)
for (n = rb_first(&client->osdc.osdmap->pg_pools); n; n = rb_next(n)) {
struct ceph_pg_pool_info *pool =
rb_entry(n, struct ceph_pg_pool_info, node);
seq_printf(s, "pg_pool %d pg_num %d / %d, lpg_num %d / %d\n",
pool->id, pool->v.pg_num, pool->pg_num_mask,
pool->v.lpg_num, pool->lpg_num_mask);
seq_printf(s, "pg_pool %llu pg_num %d / %d\n",
(unsigned long long)pool->id, pool->pg_num,
pool->pg_num_mask);
}
for (i = 0; i < client->osdc.osdmap->max_osd; i++) {
struct ceph_entity_addr *addr =
@ -123,26 +123,16 @@ static int osdc_show(struct seq_file *s, void *pp)
mutex_lock(&osdc->request_mutex);
for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
struct ceph_osd_request *req;
struct ceph_osd_request_head *head;
struct ceph_osd_op *op;
int num_ops;
int opcode, olen;
int opcode;
int i;
req = rb_entry(p, struct ceph_osd_request, r_node);
seq_printf(s, "%lld\tosd%d\t%d.%x\t", req->r_tid,
seq_printf(s, "%lld\tosd%d\t%lld.%x\t", req->r_tid,
req->r_osd ? req->r_osd->o_osd : -1,
le32_to_cpu(req->r_pgid.pool),
le16_to_cpu(req->r_pgid.ps));
req->r_pgid.pool, req->r_pgid.seed);
head = req->r_request->front.iov_base;
op = (void *)(head + 1);
num_ops = le16_to_cpu(head->num_ops);
olen = le32_to_cpu(head->object_len);
seq_printf(s, "%.*s", olen,
(const char *)(head->ops + num_ops));
seq_printf(s, "%.*s", req->r_oid_len, req->r_oid);
if (req->r_reassert_version.epoch)
seq_printf(s, "\t%u'%llu",
@ -151,10 +141,9 @@ static int osdc_show(struct seq_file *s, void *pp)
else
seq_printf(s, "\t");
for (i = 0; i < num_ops; i++) {
opcode = le16_to_cpu(op->op);
for (i = 0; i < req->r_num_ops; i++) {
opcode = le16_to_cpu(req->r_request_ops[i].op);
seq_printf(s, "\t%s", ceph_osd_op_name(opcode));
op++;
}
seq_printf(s, "\n");


@ -9,8 +9,9 @@
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#include <linux/blkdev.h>
#endif /* CONFIG_BLOCK */
#include <linux/dns_resolver.h>
#include <net/tcp.h>
@ -97,6 +98,57 @@
#define CON_FLAG_SOCK_CLOSED 3 /* socket state changed to closed */
#define CON_FLAG_BACKOFF 4 /* need to retry queuing delayed work */
static bool con_flag_valid(unsigned long con_flag)
{
switch (con_flag) {
case CON_FLAG_LOSSYTX:
case CON_FLAG_KEEPALIVE_PENDING:
case CON_FLAG_WRITE_PENDING:
case CON_FLAG_SOCK_CLOSED:
case CON_FLAG_BACKOFF:
return true;
default:
return false;
}
}
static void con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
{
BUG_ON(!con_flag_valid(con_flag));
clear_bit(con_flag, &con->flags);
}
static void con_flag_set(struct ceph_connection *con, unsigned long con_flag)
{
BUG_ON(!con_flag_valid(con_flag));
set_bit(con_flag, &con->flags);
}
static bool con_flag_test(struct ceph_connection *con, unsigned long con_flag)
{
BUG_ON(!con_flag_valid(con_flag));
return test_bit(con_flag, &con->flags);
}
static bool con_flag_test_and_clear(struct ceph_connection *con,
unsigned long con_flag)
{
BUG_ON(!con_flag_valid(con_flag));
return test_and_clear_bit(con_flag, &con->flags);
}
static bool con_flag_test_and_set(struct ceph_connection *con,
unsigned long con_flag)
{
BUG_ON(!con_flag_valid(con_flag));
return test_and_set_bit(con_flag, &con->flags);
}
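A minimal usage sketch for the accessors above; the function name is hypothetical, but the call sites mirror the conversions made throughout the rest of this file.

/* Hypothetical example: the accessors replace open-coded bitops and
 * BUG() on flag values outside the known set. */
static void example_flag_usage(struct ceph_connection *con)
{
	/* was: set_bit(CON_FLAG_WRITE_PENDING, &con->flags) */
	con_flag_set(con, CON_FLAG_WRITE_PENDING);

	/* was: test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags) */
	if (con_flag_test_and_clear(con, CON_FLAG_BACKOFF))
		queue_con(con);
}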
/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
@ -114,7 +166,7 @@ static struct lock_class_key socket_class;
static void queue_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
static void ceph_fault(struct ceph_connection *con);
static void con_fault(struct ceph_connection *con);
/*
* Nicely render a sockaddr as a string. An array of formatted
@ -171,7 +223,7 @@ static void encode_my_addr(struct ceph_messenger *msgr)
*/
static struct workqueue_struct *ceph_msgr_wq;
void _ceph_msgr_exit(void)
static void _ceph_msgr_exit(void)
{
if (ceph_msgr_wq) {
destroy_workqueue(ceph_msgr_wq);
@ -308,7 +360,7 @@ static void ceph_sock_write_space(struct sock *sk)
* buffer. See net/ipv4/tcp_input.c:tcp_check_space()
* and net/core/stream.c:sk_stream_write_space().
*/
if (test_bit(CON_FLAG_WRITE_PENDING, &con->flags)) {
if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
dout("%s %p queueing write work\n", __func__, con);
clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
@ -333,7 +385,7 @@ static void ceph_sock_state_change(struct sock *sk)
case TCP_CLOSE_WAIT:
dout("%s TCP_CLOSE_WAIT\n", __func__);
con_sock_state_closing(con);
set_bit(CON_FLAG_SOCK_CLOSED, &con->flags);
con_flag_set(con, CON_FLAG_SOCK_CLOSED);
queue_con(con);
break;
case TCP_ESTABLISHED:
@ -474,7 +526,7 @@ static int con_close_socket(struct ceph_connection *con)
* received a socket close event before we had the chance to
* shut the socket down.
*/
clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags);
con_flag_clear(con, CON_FLAG_SOCK_CLOSED);
con_sock_state_closed(con);
return rc;
@ -538,11 +590,10 @@ void ceph_con_close(struct ceph_connection *con)
ceph_pr_addr(&con->peer_addr.in_addr));
con->state = CON_STATE_CLOSED;
clear_bit(CON_FLAG_LOSSYTX, &con->flags); /* so we retry next connect */
clear_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags);
clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
clear_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags);
clear_bit(CON_FLAG_BACKOFF, &con->flags);
con_flag_clear(con, CON_FLAG_LOSSYTX); /* so we retry next connect */
con_flag_clear(con, CON_FLAG_KEEPALIVE_PENDING);
con_flag_clear(con, CON_FLAG_WRITE_PENDING);
con_flag_clear(con, CON_FLAG_BACKOFF);
reset_connection(con);
con->peer_global_seq = 0;
@ -798,7 +849,7 @@ static void prepare_write_message(struct ceph_connection *con)
/* no, queue up footer too and be done */
prepare_write_message_footer(con);
set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
con_flag_set(con, CON_FLAG_WRITE_PENDING);
}
/*
@ -819,7 +870,7 @@ static void prepare_write_ack(struct ceph_connection *con)
&con->out_temp_ack);
con->out_more = 1; /* more will follow.. eventually.. */
set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
con_flag_set(con, CON_FLAG_WRITE_PENDING);
}
/*
@ -830,7 +881,7 @@ static void prepare_write_keepalive(struct ceph_connection *con)
dout("prepare_write_keepalive %p\n", con);
con_out_kvec_reset(con);
con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
con_flag_set(con, CON_FLAG_WRITE_PENDING);
}
/*
@ -873,7 +924,7 @@ static void prepare_write_banner(struct ceph_connection *con)
&con->msgr->my_enc_addr);
con->out_more = 0;
set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
con_flag_set(con, CON_FLAG_WRITE_PENDING);
}
static int prepare_write_connect(struct ceph_connection *con)
@ -923,7 +974,7 @@ static int prepare_write_connect(struct ceph_connection *con)
auth->authorizer_buf);
con->out_more = 0;
set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
con_flag_set(con, CON_FLAG_WRITE_PENDING);
return 0;
}
@ -1643,7 +1694,7 @@ static int process_connect(struct ceph_connection *con)
le32_to_cpu(con->in_reply.connect_seq));
if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
set_bit(CON_FLAG_LOSSYTX, &con->flags);
con_flag_set(con, CON_FLAG_LOSSYTX);
con->delay = 0; /* reset backoff memory */
@ -2080,15 +2131,14 @@ do_next:
prepare_write_ack(con);
goto more;
}
if (test_and_clear_bit(CON_FLAG_KEEPALIVE_PENDING,
&con->flags)) {
if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) {
prepare_write_keepalive(con);
goto more;
}
}
/* Nothing to do! */
clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
con_flag_clear(con, CON_FLAG_WRITE_PENDING);
dout("try_write nothing else to write.\n");
ret = 0;
out:
@ -2268,7 +2318,7 @@ static void queue_con(struct ceph_connection *con)
static bool con_sock_closed(struct ceph_connection *con)
{
if (!test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags))
if (!con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED))
return false;
#define CASE(x) \
@ -2295,6 +2345,41 @@ static bool con_sock_closed(struct ceph_connection *con)
return true;
}
static bool con_backoff(struct ceph_connection *con)
{
int ret;
if (!con_flag_test_and_clear(con, CON_FLAG_BACKOFF))
return false;
ret = queue_con_delay(con, round_jiffies_relative(con->delay));
if (ret) {
dout("%s: con %p FAILED to back off %lu\n", __func__,
con, con->delay);
BUG_ON(ret == -ENOENT);
con_flag_set(con, CON_FLAG_BACKOFF);
}
return true;
}
/* Finish fault handling; con->mutex must *not* be held here */
static void con_fault_finish(struct ceph_connection *con)
{
/*
* in case we faulted due to authentication, invalidate our
* current tickets so that we can get new ones.
*/
if (con->auth_retry && con->ops->invalidate_authorizer) {
dout("calling invalidate_authorizer()\n");
con->ops->invalidate_authorizer(con);
}
if (con->ops->fault)
con->ops->fault(con);
}
/*
* Do some work on a connection. Drop a connection ref when we're done.
*/
@ -2302,73 +2387,68 @@ static void con_work(struct work_struct *work)
{
struct ceph_connection *con = container_of(work, struct ceph_connection,
work.work);
int ret;
bool fault;
mutex_lock(&con->mutex);
restart:
if (con_sock_closed(con))
goto fault;
while (true) {
int ret;
if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) {
dout("con_work %p backing off\n", con);
ret = queue_con_delay(con, round_jiffies_relative(con->delay));
if (ret) {
dout("con_work %p FAILED to back off %lu\n", con,
con->delay);
BUG_ON(ret == -ENOENT);
set_bit(CON_FLAG_BACKOFF, &con->flags);
if ((fault = con_sock_closed(con))) {
dout("%s: con %p SOCK_CLOSED\n", __func__, con);
break;
}
if (con_backoff(con)) {
dout("%s: con %p BACKOFF\n", __func__, con);
break;
}
if (con->state == CON_STATE_STANDBY) {
dout("%s: con %p STANDBY\n", __func__, con);
break;
}
if (con->state == CON_STATE_CLOSED) {
dout("%s: con %p CLOSED\n", __func__, con);
BUG_ON(con->sock);
break;
}
if (con->state == CON_STATE_PREOPEN) {
dout("%s: con %p PREOPEN\n", __func__, con);
BUG_ON(con->sock);
}
goto done;
}
if (con->state == CON_STATE_STANDBY) {
dout("con_work %p STANDBY\n", con);
goto done;
}
if (con->state == CON_STATE_CLOSED) {
dout("con_work %p CLOSED\n", con);
BUG_ON(con->sock);
goto done;
}
if (con->state == CON_STATE_PREOPEN) {
dout("con_work OPENING\n");
BUG_ON(con->sock);
}
ret = try_read(con);
if (ret < 0) {
if (ret == -EAGAIN)
continue;
con->error_msg = "socket error on read";
fault = true;
break;
}
ret = try_read(con);
if (ret == -EAGAIN)
goto restart;
if (ret < 0) {
con->error_msg = "socket error on read";
goto fault;
}
ret = try_write(con);
if (ret < 0) {
if (ret == -EAGAIN)
continue;
con->error_msg = "socket error on write";
fault = true;
}
ret = try_write(con);
if (ret == -EAGAIN)
goto restart;
if (ret < 0) {
con->error_msg = "socket error on write";
goto fault;
break; /* If we make it to here, we're done */
}
done:
if (fault)
con_fault(con);
mutex_unlock(&con->mutex);
done_unlocked:
if (fault)
con_fault_finish(con);
con->ops->put(con);
return;
fault:
ceph_fault(con); /* error/fault path */
goto done_unlocked;
}
/*
* Generic error/fault handler. A retry mechanism is used with
* exponential backoff
*/
static void ceph_fault(struct ceph_connection *con)
__releases(con->mutex)
static void con_fault(struct ceph_connection *con)
{
pr_warning("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
@ -2381,10 +2461,10 @@ static void ceph_fault(struct ceph_connection *con)
con_close_socket(con);
if (test_bit(CON_FLAG_LOSSYTX, &con->flags)) {
if (con_flag_test(con, CON_FLAG_LOSSYTX)) {
dout("fault on LOSSYTX channel, marking CLOSED\n");
con->state = CON_STATE_CLOSED;
goto out_unlock;
return;
}
if (con->in_msg) {
@ -2401,9 +2481,9 @@ static void ceph_fault(struct ceph_connection *con)
/* If there are no messages queued or keepalive pending, place
* the connection in a STANDBY state */
if (list_empty(&con->out_queue) &&
!test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags)) {
!con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)) {
dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
con_flag_clear(con, CON_FLAG_WRITE_PENDING);
con->state = CON_STATE_STANDBY;
} else {
/* retry after a delay. */
@ -2412,23 +2492,9 @@ static void ceph_fault(struct ceph_connection *con)
con->delay = BASE_DELAY_INTERVAL;
else if (con->delay < MAX_DELAY_INTERVAL)
con->delay *= 2;
set_bit(CON_FLAG_BACKOFF, &con->flags);
con_flag_set(con, CON_FLAG_BACKOFF);
queue_con(con);
}
out_unlock:
mutex_unlock(&con->mutex);
/*
* in case we faulted due to authentication, invalidate our
* current tickets so that we can get new ones.
*/
if (con->auth_retry && con->ops->invalidate_authorizer) {
dout("calling invalidate_authorizer()\n");
con->ops->invalidate_authorizer(con);
}
if (con->ops->fault)
con->ops->fault(con);
}
@ -2469,8 +2535,8 @@ static void clear_standby(struct ceph_connection *con)
dout("clear_standby %p and ++connect_seq\n", con);
con->state = CON_STATE_PREOPEN;
con->connect_seq++;
WARN_ON(test_bit(CON_FLAG_WRITE_PENDING, &con->flags));
WARN_ON(test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags));
WARN_ON(con_flag_test(con, CON_FLAG_WRITE_PENDING));
WARN_ON(con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING));
}
}
@ -2511,7 +2577,7 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
/* if there wasn't anything waiting to send before, queue
* new work */
if (test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0)
if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
queue_con(con);
}
EXPORT_SYMBOL(ceph_con_send);
@ -2600,8 +2666,8 @@ void ceph_con_keepalive(struct ceph_connection *con)
mutex_lock(&con->mutex);
clear_standby(con);
mutex_unlock(&con->mutex);
if (test_and_set_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags) == 0 &&
test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0)
if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
queue_con(con);
}
EXPORT_SYMBOL(ceph_con_keepalive);
@ -2651,9 +2717,11 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
m->page_alignment = 0;
m->pages = NULL;
m->pagelist = NULL;
#ifdef CONFIG_BLOCK
m->bio = NULL;
m->bio_iter = NULL;
m->bio_seg = 0;
#endif /* CONFIG_BLOCK */
m->trail = NULL;
/* front */

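To summarize the con_work()/con_fault() rework earlier in this file: transient conditions now break out of a single loop, faults are handled while con->mutex is held, and the callbacks that must run unlocked are deferred to con_fault_finish(). The skeleton below is a condensed sketch with a made-up name; the real code is in the hunks above.

static void con_work_skeleton(struct ceph_connection *con)
{
	bool fault = false;

	mutex_lock(&con->mutex);
	while (true) {
		if ((fault = con_sock_closed(con)))
			break;
		if (con_backoff(con))
			break;
		/* STANDBY/CLOSED/PREOPEN checks, then try_read() and
		 * try_write(); -EAGAIN loops, other errors set fault */
		break;
	}
	if (fault)
		con_fault(con);		/* still holding con->mutex */
	mutex_unlock(&con->mutex);

	if (fault)
		con_fault_finish(con);	/* may call back into the client,
					 * so it runs unlocked */
	con->ops->put(con);
}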

@ -697,7 +697,7 @@ int ceph_monc_delete_snapid(struct ceph_mon_client *monc,
u32 pool, u64 snapid)
{
return do_poolop(monc, POOL_OP_CREATE_UNMANAGED_SNAP,
pool, snapid, 0, 0);
pool, snapid, NULL, 0);
}

File diff suppressed because it is too large


@ -13,26 +13,18 @@
char *ceph_osdmap_state_str(char *str, int len, int state)
{
int flag = 0;
if (!len)
goto done;
return str;
*str = '\0';
if (state) {
if (state & CEPH_OSD_EXISTS) {
snprintf(str, len, "exists");
flag = 1;
}
if (state & CEPH_OSD_UP) {
snprintf(str, len, "%s%s%s", str, (flag ? ", " : ""),
"up");
flag = 1;
}
} else {
if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
snprintf(str, len, "exists, up");
else if (state & CEPH_OSD_EXISTS)
snprintf(str, len, "exists");
else if (state & CEPH_OSD_UP)
snprintf(str, len, "up");
else
snprintf(str, len, "doesn't exist");
}
done:
return str;
}
@ -53,13 +45,8 @@ static int calc_bits_of(unsigned int t)
*/
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1;
pi->pgp_num_mask =
(1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1;
pi->lpg_num_mask =
(1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1;
pi->lpgp_num_mask =
(1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1;
pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
}
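A small worked example of the mask computation above, assuming calc_bits_of() returns the number of significant bits (so calc_bits_of(11) == 4); the example function itself is hypothetical.

static void pg_mask_example(void)
{
	unsigned int pg_num = 12;	/* deliberately not a power of two */
	unsigned int mask = (1 << calc_bits_of(pg_num - 1)) - 1;

	/*
	 * mask == 0xf here.  ceph_stable_mod(x, pg_num, mask) later uses
	 * this mask to fold a raw hash into [0, pg_num) even though the
	 * mask itself covers 0..15.
	 */
	(void)mask;
}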
/*
@ -170,6 +157,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
c->choose_local_tries = 2;
c->choose_local_fallback_tries = 5;
c->choose_total_tries = 19;
c->chooseleaf_descend_once = 0;
ceph_decode_need(p, end, 4*sizeof(u32), bad);
magic = ceph_decode_32(p);
@ -336,6 +324,11 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
dout("crush decode tunable choose_total_tries = %d",
c->choose_total_tries);
ceph_decode_need(p, end, sizeof(u32), done);
c->chooseleaf_descend_once = ceph_decode_32(p);
dout("crush decode tunable chooseleaf_descend_once = %d",
c->chooseleaf_descend_once);
done:
dout("crush_decode success\n");
return c;
@ -354,12 +347,13 @@ bad:
*/
static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
{
u64 a = *(u64 *)&l;
u64 b = *(u64 *)&r;
if (a < b)
if (l.pool < r.pool)
return -1;
if (a > b)
if (l.pool > r.pool)
return 1;
if (l.seed < r.seed)
return -1;
if (l.seed > r.seed)
return 1;
return 0;
}
@ -405,8 +399,8 @@ static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
} else if (c > 0) {
n = n->rb_right;
} else {
dout("__lookup_pg_mapping %llx got %p\n",
*(u64 *)&pgid, pg);
dout("__lookup_pg_mapping %lld.%x got %p\n",
pgid.pool, pgid.seed, pg);
return pg;
}
}
@ -418,12 +412,13 @@ static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid)
struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid);
if (pg) {
dout("__remove_pg_mapping %llx %p\n", *(u64 *)&pgid, pg);
dout("__remove_pg_mapping %lld.%x %p\n", pgid.pool, pgid.seed,
pg);
rb_erase(&pg->node, root);
kfree(pg);
return 0;
}
dout("__remove_pg_mapping %llx dne\n", *(u64 *)&pgid);
dout("__remove_pg_mapping %lld.%x dne\n", pgid.pool, pgid.seed);
return -ENOENT;
}
@ -452,7 +447,7 @@ static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
return 0;
}
static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id)
{
struct ceph_pg_pool_info *pi;
struct rb_node *n = root->rb_node;
@ -508,24 +503,57 @@ static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
unsigned int n, m;
u8 ev, cv;
unsigned len, num;
void *pool_end;
ceph_decode_copy(p, &pi->v, sizeof(pi->v));
calc_pg_masks(pi);
ceph_decode_need(p, end, 2 + 4, bad);
ev = ceph_decode_8(p); /* encoding version */
cv = ceph_decode_8(p); /* compat version */
if (ev < 5) {
pr_warning("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
return -EINVAL;
}
if (cv > 7) {
pr_warning("got v %d cv %d > 7 of ceph_pg_pool\n", ev, cv);
return -EINVAL;
}
len = ceph_decode_32(p);
ceph_decode_need(p, end, len, bad);
pool_end = *p + len;
/* num_snaps * snap_info_t */
n = le32_to_cpu(pi->v.num_snaps);
while (n--) {
ceph_decode_need(p, end, sizeof(u64) + 1 + sizeof(u64) +
sizeof(struct ceph_timespec), bad);
*p += sizeof(u64) + /* key */
1 + sizeof(u64) + /* u8, snapid */
sizeof(struct ceph_timespec);
m = ceph_decode_32(p); /* snap name */
*p += m;
pi->type = ceph_decode_8(p);
pi->size = ceph_decode_8(p);
pi->crush_ruleset = ceph_decode_8(p);
pi->object_hash = ceph_decode_8(p);
pi->pg_num = ceph_decode_32(p);
pi->pgp_num = ceph_decode_32(p);
*p += 4 + 4; /* skip lpg* */
*p += 4; /* skip last_change */
*p += 8 + 4; /* skip snap_seq, snap_epoch */
/* skip snaps */
num = ceph_decode_32(p);
while (num--) {
*p += 8; /* snapid key */
*p += 1 + 1; /* versions */
len = ceph_decode_32(p);
*p += len;
}
*p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
/* skip removed snaps */
num = ceph_decode_32(p);
*p += num * (8 + 8);
*p += 8; /* skip auid */
pi->flags = ceph_decode_64(p);
/* ignore the rest */
*p = pool_end;
calc_pg_masks(pi);
return 0;
bad:
@ -535,14 +563,15 @@ bad:
static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
struct ceph_pg_pool_info *pi;
u32 num, len, pool;
u32 num, len;
u64 pool;
ceph_decode_32_safe(p, end, num, bad);
dout(" %d pool names\n", num);
while (num--) {
ceph_decode_32_safe(p, end, pool, bad);
ceph_decode_64_safe(p, end, pool, bad);
ceph_decode_32_safe(p, end, len, bad);
dout(" pool %d len %d\n", pool, len);
dout(" pool %llu len %d\n", pool, len);
ceph_decode_need(p, end, len, bad);
pi = __lookup_pg_pool(&map->pg_pools, pool);
if (pi) {
@ -633,7 +662,6 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
struct ceph_osdmap *map;
u16 version;
u32 len, max, i;
u8 ev;
int err = -EINVAL;
void *start = *p;
struct ceph_pg_pool_info *pi;
@ -646,9 +674,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
map->pg_temp = RB_ROOT;
ceph_decode_16_safe(p, end, version, bad);
if (version > CEPH_OSDMAP_VERSION) {
pr_warning("got unknown v %d > %d of osdmap\n", version,
CEPH_OSDMAP_VERSION);
if (version > 6) {
pr_warning("got unknown v %d > 6 of osdmap\n", version);
goto bad;
}
if (version < 6) {
pr_warning("got old v %d < 6 of osdmap\n", version);
goto bad;
}
@ -660,20 +691,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
ceph_decode_32_safe(p, end, max, bad);
while (max--) {
ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
ceph_decode_need(p, end, 8 + 2, bad);
err = -ENOMEM;
pi = kzalloc(sizeof(*pi), GFP_NOFS);
if (!pi)
goto bad;
pi->id = ceph_decode_32(p);
err = -EINVAL;
ev = ceph_decode_8(p); /* encoding version */
if (ev > CEPH_PG_POOL_VERSION) {
pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
ev, CEPH_PG_POOL_VERSION);
kfree(pi);
goto bad;
}
pi->id = ceph_decode_64(p);
err = __decode_pool(p, end, pi);
if (err < 0) {
kfree(pi);
@ -682,12 +705,10 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
__insert_pg_pool(&map->pg_pools, pi);
}
if (version >= 5) {
err = __decode_pool_names(p, end, map);
if (err < 0) {
dout("fail to decode pool names");
goto bad;
}
err = __decode_pool_names(p, end, map);
if (err < 0) {
dout("fail to decode pool names");
goto bad;
}
ceph_decode_32_safe(p, end, map->pool_max, bad);
@ -724,10 +745,13 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
for (i = 0; i < len; i++) {
int n, j;
struct ceph_pg pgid;
struct ceph_pg_v1 pgid_v1;
struct ceph_pg_mapping *pg;
ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
ceph_decode_copy(p, &pgid, sizeof(pgid));
ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1));
pgid.pool = le32_to_cpu(pgid_v1.pool);
pgid.seed = le16_to_cpu(pgid_v1.ps);
n = ceph_decode_32(p);
err = -EINVAL;
if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
@ -745,7 +769,8 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
err = __insert_pg_mapping(pg, &map->pg_temp);
if (err)
goto bad;
dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, len);
dout(" added pg_temp %lld.%x len %d\n", pgid.pool, pgid.seed,
len);
}
/* crush */
@ -784,16 +809,17 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
struct ceph_fsid fsid;
u32 epoch = 0;
struct ceph_timespec modified;
u32 len, pool;
__s32 new_pool_max, new_flags, max;
s32 len;
u64 pool;
__s64 new_pool_max;
__s32 new_flags, max;
void *start = *p;
int err = -EINVAL;
u16 version;
ceph_decode_16_safe(p, end, version, bad);
if (version > CEPH_OSDMAP_INC_VERSION) {
pr_warning("got unknown v %d > %d of inc osdmap\n", version,
CEPH_OSDMAP_INC_VERSION);
if (version > 6) {
pr_warning("got unknown v %d > %d of inc osdmap\n", version, 6);
goto bad;
}
@ -803,7 +829,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
epoch = ceph_decode_32(p);
BUG_ON(epoch != map->epoch+1);
ceph_decode_copy(p, &modified, sizeof(modified));
new_pool_max = ceph_decode_32(p);
new_pool_max = ceph_decode_64(p);
new_flags = ceph_decode_32(p);
/* full map? */
@ -853,18 +879,9 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
/* new_pool */
ceph_decode_32_safe(p, end, len, bad);
while (len--) {
__u8 ev;
struct ceph_pg_pool_info *pi;
ceph_decode_32_safe(p, end, pool, bad);
ceph_decode_need(p, end, 1 + sizeof(pi->v), bad);
ev = ceph_decode_8(p); /* encoding version */
if (ev > CEPH_PG_POOL_VERSION) {
pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
ev, CEPH_PG_POOL_VERSION);
err = -EINVAL;
goto bad;
}
ceph_decode_64_safe(p, end, pool, bad);
pi = __lookup_pg_pool(&map->pg_pools, pool);
if (!pi) {
pi = kzalloc(sizeof(*pi), GFP_NOFS);
@ -890,7 +907,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
while (len--) {
struct ceph_pg_pool_info *pi;
ceph_decode_32_safe(p, end, pool, bad);
ceph_decode_64_safe(p, end, pool, bad);
pi = __lookup_pg_pool(&map->pg_pools, pool);
if (pi)
__remove_pg_pool(&map->pg_pools, pi);
@ -946,10 +963,13 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
while (len--) {
struct ceph_pg_mapping *pg;
int j;
struct ceph_pg_v1 pgid_v1;
struct ceph_pg pgid;
u32 pglen;
ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
ceph_decode_copy(p, &pgid, sizeof(pgid));
ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1));
pgid.pool = le32_to_cpu(pgid_v1.pool);
pgid.seed = le16_to_cpu(pgid_v1.ps);
pglen = ceph_decode_32(p);
if (pglen) {
@ -975,8 +995,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
kfree(pg);
goto bad;
}
dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
pglen);
dout(" added pg_temp %lld.%x len %d\n", pgid.pool,
pgid.seed, pglen);
} else {
/* remove */
__remove_pg_mapping(&map->pg_temp, pgid);
@ -1010,7 +1030,7 @@ bad:
* pass a stride back to the caller.
*/
int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
u64 off, u64 *plen,
u64 off, u64 len,
u64 *ono,
u64 *oxoff, u64 *oxlen)
{
@ -1021,7 +1041,7 @@ int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
u32 su_per_object;
u64 t, su_offset;
dout("mapping %llu~%llu osize %u fl_su %u\n", off, *plen,
dout("mapping %llu~%llu osize %u fl_su %u\n", off, len,
osize, su);
if (su == 0 || sc == 0)
goto invalid;
@ -1054,11 +1074,10 @@ int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
/*
* Calculate the length of the extent being written to the selected
* object. This is the minimum of the full length requested (plen) or
* object. This is the minimum of the full length requested (len) or
* the remainder of the current stripe being written to.
*/
*oxlen = min_t(u64, *plen, su - su_offset);
*plen = *oxlen;
*oxlen = min_t(u64, len, su - su_offset);
dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
return 0;
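As a concrete illustration of the mapping above, consider the simple case where the stripe unit equals the object size and the stripe count is 1 (the common default layout; these values are assumptions, not part of this diff). The helper below is hypothetical and covers only that case.

/* Sketch of the su == osize, sc == 1 case; the real function handles
 * arbitrary striping.  Example: off = 10 MB, len = 3 MB, osize = 4 MB
 * gives ono = 2, oxoff = 2 MB, oxlen = 2 MB (clipped at the object
 * boundary; the caller resubmits the remainder). */
static void simple_object_mapping(u64 off, u64 len, u32 osize,
				  u64 *ono, u64 *oxoff, u64 *oxlen)
{
	*ono = off / osize;			/* which object */
	*oxoff = off % osize;			/* offset inside it */
	*oxlen = min_t(u64, len, osize - *oxoff); /* clip at object end */
}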
@ -1076,33 +1095,24 @@ EXPORT_SYMBOL(ceph_calc_file_object_mapping);
* calculate an object layout (i.e. pgid) from an oid,
* file_layout, and osdmap
*/
int ceph_calc_object_layout(struct ceph_object_layout *ol,
int ceph_calc_object_layout(struct ceph_pg *pg,
const char *oid,
struct ceph_file_layout *fl,
struct ceph_osdmap *osdmap)
{
unsigned int num, num_mask;
struct ceph_pg pgid;
int poolid = le32_to_cpu(fl->fl_pg_pool);
struct ceph_pg_pool_info *pool;
unsigned int ps;
BUG_ON(!osdmap);
pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
pg->pool = le32_to_cpu(fl->fl_pg_pool);
pool = __lookup_pg_pool(&osdmap->pg_pools, pg->pool);
if (!pool)
return -EIO;
ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
num = le32_to_cpu(pool->v.pg_num);
pg->seed = ceph_str_hash(pool->object_hash, oid, strlen(oid));
num = pool->pg_num;
num_mask = pool->pg_num_mask;
pgid.ps = cpu_to_le16(ps);
pgid.preferred = cpu_to_le16(-1);
pgid.pool = fl->fl_pg_pool;
dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);
ol->ol_pgid = pgid;
ol->ol_stripe_unit = fl->fl_object_stripe_unit;
dout("calc_object_layout '%s' pgid %lld.%x\n", oid, pg->pool, pg->seed);
return 0;
}
EXPORT_SYMBOL(ceph_calc_object_layout);
@ -1117,19 +1127,16 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
struct ceph_pg_mapping *pg;
struct ceph_pg_pool_info *pool;
int ruleno;
unsigned int poolid, ps, pps, t, r;
int r;
u32 pps;
poolid = le32_to_cpu(pgid.pool);
ps = le16_to_cpu(pgid.ps);
pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
pool = __lookup_pg_pool(&osdmap->pg_pools, pgid.pool);
if (!pool)
return NULL;
/* pg_temp? */
t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
pool->pgp_num_mask);
pgid.ps = cpu_to_le16(t);
pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num,
pool->pgp_num_mask);
pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
if (pg) {
*num = pg->len;
@ -1137,26 +1144,39 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
}
/* crush */
ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
pool->v.type, pool->v.size);
ruleno = crush_find_rule(osdmap->crush, pool->crush_ruleset,
pool->type, pool->size);
if (ruleno < 0) {
pr_err("no crush rule pool %d ruleset %d type %d size %d\n",
poolid, pool->v.crush_ruleset, pool->v.type,
pool->v.size);
pr_err("no crush rule pool %lld ruleset %d type %d size %d\n",
pgid.pool, pool->crush_ruleset, pool->type,
pool->size);
return NULL;
}
pps = ceph_stable_mod(ps,
le32_to_cpu(pool->v.pgp_num),
pool->pgp_num_mask);
pps += poolid;
if (pool->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
/* hash pool id and seed so that pool PGs do not overlap */
pps = crush_hash32_2(CRUSH_HASH_RJENKINS1,
ceph_stable_mod(pgid.seed, pool->pgp_num,
pool->pgp_num_mask),
pgid.pool);
} else {
/*
* legacy behavior: add ps and pool together. this is
* not a great approach because the PGs from each pool
* will overlap on top of each other: 0.5 == 1.4 ==
* 2.3 == ...
*/
pps = ceph_stable_mod(pgid.seed, pool->pgp_num,
pool->pgp_num_mask) +
(unsigned)pgid.pool;
}
r = crush_do_rule(osdmap->crush, ruleno, pps, osds,
min_t(int, pool->v.size, *num),
min_t(int, pool->size, *num),
osdmap->osd_weight);
if (r < 0) {
pr_err("error %d from crush rule: pool %d ruleset %d type %d"
" size %d\n", r, poolid, pool->v.crush_ruleset,
pool->v.type, pool->v.size);
pr_err("error %d from crush rule: pool %lld ruleset %d type %d"
" size %d\n", r, pgid.pool, pool->crush_ruleset,
pool->type, pool->size);
return NULL;
}
*num = r;

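The HASHPSPOOL comment in calc_pg_raw() above is easiest to see side by side. The two helpers below are hypothetical and only isolate the seed calculations; the collision example (0.5 == 1.4 == 2.3) matches the one in the code comment.

/* Legacy placement seed: PGs from different pools collide, e.g. with a
 * large pgp_num, pg 0.5 -> 5, pg 1.4 -> 5, pg 2.3 -> 5. */
static u32 pps_legacy(u32 seed, u32 pgp_num, u32 mask, u64 pool)
{
	return ceph_stable_mod(seed, pgp_num, mask) + (unsigned)pool;
}

/* HASHPSPOOL placement seed: mixing the pool id through the Jenkins
 * hash keeps each pool's PGs on distinct CRUSH inputs. */
static u32 pps_hashpspool(u32 seed, u32 pgp_num, u32 mask, u64 pool)
{
	return crush_hash32_2(CRUSH_HASH_RJENKINS1,
			      ceph_stable_mod(seed, pgp_num, mask), pool);
}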

@ -12,7 +12,7 @@
/*
* build a vector of user pages
*/
struct page **ceph_get_direct_page_vector(const char __user *data,
struct page **ceph_get_direct_page_vector(const void __user *data,
int num_pages, bool write_page)
{
struct page **pages;
@ -93,7 +93,7 @@ EXPORT_SYMBOL(ceph_alloc_page_vector);
* copy user data into a page vector
*/
int ceph_copy_user_to_page_vector(struct page **pages,
const char __user *data,
const void __user *data,
loff_t off, size_t len)
{
int i = 0;
@ -118,17 +118,17 @@ int ceph_copy_user_to_page_vector(struct page **pages,
}
EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
int ceph_copy_to_page_vector(struct page **pages,
const char *data,
void ceph_copy_to_page_vector(struct page **pages,
const void *data,
loff_t off, size_t len)
{
int i = 0;
size_t po = off & ~PAGE_CACHE_MASK;
size_t left = len;
size_t l;
while (left > 0) {
l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
memcpy(page_address(pages[i]) + po, data, l);
data += l;
left -= l;
@ -138,21 +138,20 @@ int ceph_copy_to_page_vector(struct page **pages,
i++;
}
}
return len;
}
EXPORT_SYMBOL(ceph_copy_to_page_vector);
int ceph_copy_from_page_vector(struct page **pages,
char *data,
void ceph_copy_from_page_vector(struct page **pages,
void *data,
loff_t off, size_t len)
{
int i = 0;
size_t po = off & ~PAGE_CACHE_MASK;
size_t left = len;
size_t l;
while (left > 0) {
l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
memcpy(data, page_address(pages[i]) + po, l);
data += l;
left -= l;
@ -162,7 +161,6 @@ int ceph_copy_from_page_vector(struct page **pages,
i++;
}
}
return len;
}
EXPORT_SYMBOL(ceph_copy_from_page_vector);
@ -170,7 +168,7 @@ EXPORT_SYMBOL(ceph_copy_from_page_vector);
* copy user data from a page vector into a user pointer
*/
int ceph_copy_page_vector_to_user(struct page **pages,
char __user *data,
void __user *data,
loff_t off, size_t len)
{
int i = 0;