#ifndef _RAID10_H
#define _RAID10_H
struct mirror_info {
|
2011-10-11 05:45:26 +00:00
|
|
|
struct md_rdev *rdev;
|
2005-04-16 22:20:36 +00:00
|
|
|
sector_t head_position;
|
2011-07-27 01:00:36 +00:00
|
|
|
int recovery_disabled; /* matches
|
|
|
|
* mddev->recovery_disabled
|
|
|
|
* when we shouldn't try
|
|
|
|
* recovering this device.
|
|
|
|
*/
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
|
|
|
|
2011-10-11 05:49:02 +00:00
|
|
|
struct r10conf {
|
2011-10-11 05:47:53 +00:00
|
|
|
struct mddev *mddev;
|
2011-10-11 05:48:46 +00:00
|
|
|
struct mirror_info *mirrors;
|
2005-04-16 22:20:36 +00:00
|
|
|
int raid_disks;
|
|
|
|
spinlock_t device_lock;
|
|
|
|
|
|
|
|
/* geometry */
|
2011-03-31 01:57:33 +00:00
|
|
|
int near_copies; /* number of copies laid out raid0 style */
|
|
|
|
int far_copies; /* number of copies laid out
|
2005-04-16 22:20:36 +00:00
|
|
|
* at large strides across drives
|
|
|
|
*/
|
2006-06-26 07:27:41 +00:00
|
|
|
int far_offset; /* far_copies are offset by 1 stripe
|
|
|
|
* instead of many
|
|
|
|
*/
|
2005-04-16 22:20:36 +00:00
|
|
|
int copies; /* near_copies * far_copies.
|
|
|
|
* must be <= raid_disks
|
|
|
|
*/
|
|
|
|
sector_t stride; /* distance between far copies.
|
2006-06-26 07:27:41 +00:00
|
|
|
* This is size / far_copies unless
|
|
|
|
* far_offset, in which case it is
|
|
|
|
* 1 stripe.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
|
|
|
|
2010-03-08 05:02:45 +00:00
|
|
|
sector_t dev_sectors; /* temp copy of mddev->dev_sectors */
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
int chunk_shift; /* shift from chunks to sectors */
|
|
|
|
sector_t chunk_mask;
|
|
|
|
|
|
|
|
struct list_head retry_list;
|
2006-01-06 08:20:16 +00:00
|
|
|
/* queue pending writes and submit them on unplug */
|
|
|
|
struct bio_list pending_bio_list;
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
spinlock_t resync_lock;
|
|
|
|
int nr_pending;
|
2006-01-06 08:20:13 +00:00
|
|
|
int nr_waiting;
|
2006-01-06 08:20:28 +00:00
|
|
|
int nr_queued;
|
2005-04-16 22:20:36 +00:00
|
|
|
int barrier;
|
|
|
|
sector_t next_resync;
|
2006-01-06 08:20:16 +00:00
|
|
|
int fullsync; /* set to 1 if a full sync is needed,
|
|
|
|
* (fresh device added).
|
|
|
|
* Cleared when a sync completes.
|
|
|
|
*/
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-01-06 08:20:13 +00:00
|
|
|
wait_queue_head_t wait_barrier;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
mempool_t *r10bio_pool;
|
|
|
|
mempool_t *r10buf_pool;
|
2006-01-06 08:20:28 +00:00
|
|
|
struct page *tmppage;
|
2010-03-08 05:02:45 +00:00
|
|
|
|
|
|
|
/* When taking over an array from a different personality, we store
|
|
|
|
* the new thread here until we fully activate the array.
|
|
|
|
*/
|
2011-10-11 05:48:23 +00:00
|
|
|
struct md_thread *thread;
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* this is our 'private' RAID10 bio.
|
|
|
|
*
|
|
|
|
* it contains information about what kind of IO operations were started
|
|
|
|
* for this RAID10 operation, and about their status:
|
|
|
|
*/
|
|
|
|
|
2011-10-11 05:48:43 +00:00
|
|
|
struct r10bio {
|
2005-04-16 22:20:36 +00:00
|
|
|
atomic_t remaining; /* 'have we finished' count,
|
|
|
|
* used from IRQ handlers
|
|
|
|
*/
|
|
|
|
sector_t sector; /* virtual sector number */
|
|
|
|
int sectors;
|
|
|
|
unsigned long state;
|
2011-10-11 05:47:53 +00:00
|
|
|
struct mddev *mddev;
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* original bio going to /dev/mdx
|
|
|
|
*/
|
|
|
|
struct bio *master_bio;
|
|
|
|
/*
|
|
|
|
* if the IO is in READ direction, then this is where we read
|
|
|
|
*/
|
|
|
|
int read_slot;
|
|
|
|
|
|
|
|
struct list_head retry_list;
|
|
|
|
/*
|
|
|
|
* if the IO is in WRITE direction, then multiple bios are used,
|
|
|
|
* one for each copy.
|
|
|
|
* When resyncing we also use one for each copy.
|
|
|
|
* When reconstructing, we use 2 bios, one for read, one for write.
|
|
|
|
* We choose the number when they are allocated.
|
|
|
|
*/
|
|
|
|
struct {
|
|
|
|
struct bio *bio;
|
|
|
|
sector_t addr;
|
|
|
|
int devnum;
|
|
|
|
} devs[0];
|
|
|
|
};
|
|
|
|
|
/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error. To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context. So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

/* True for the sentinel values above (and NULL): not a real bio pointer.
 * Argument is parenthesized to keep the cast binding correct for any
 * expression passed in.
 */
#define BIO_SPECIAL(bio) ((unsigned long)(bio) <= 2)
/* bits for r10bio.state */
#define R10BIO_Uptodate	0
#define R10BIO_IsSync	1
#define R10BIO_IsRecover 2
#define R10BIO_Degraded	3
/* Set ReadError on bios that experience a read error
 * so that raid10d knows what to do with them.
 */
#define R10BIO_ReadError 4
/* If a write for this request means we can clear some
 * known-bad-block records, we set this flag.
 */
#define R10BIO_MadeGood	5
#define R10BIO_WriteError 6
#endif /* _RAID10_H */