mdev = b->w.mdev;
nob = b->next;
- if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
+ if (test_and_clear_bit(CREATE_BARRIER, &tconn->flags)) {
_tl_add_barrier(tconn, b);
if (nob)
tconn->oldest_tle = nob;
req = list_entry(le, struct drbd_request, tl_requests);
rv = _req_mod(req, what);
- n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
- n_reads += (rv & MR_READ) >> MR_READ_SHIFT;
+ if (rv & MR_WRITE)
+ n_writes++;
+ if (rv & MR_READ)
+ n_reads++;
}
tmp = b->next;
if (b->w.cb == NULL) {
b->w.cb = w_send_barrier;
inc_ap_pending(b->w.mdev);
- set_bit(CREATE_BARRIER, &b->w.mdev->flags);
+ set_bit(CREATE_BARRIER, &tconn->flags);
}
drbd_queue_work(&tconn->data.work, &b->w);
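The replaced shift arithmetic (n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT) only counted correctly while each MR_* flag stayed a single bit at a fixed position; the explicit tests work no matter how the flags are defined. A standalone sketch of the pattern, with illustrative flag values rather than the DRBD definitions:

#include <stdio.h>

#define MR_WRITE (1 << 0)	/* illustrative values, not DRBD's */
#define MR_READ  (1 << 1)

int main(void)
{
	int rvs[] = { MR_WRITE, MR_WRITE | MR_READ, 0, MR_READ };
	int n_writes = 0, n_reads = 0;
	unsigned int i;

	for (i = 0; i < sizeof(rvs) / sizeof(rvs[0]); i++) {
		if (rvs[i] & MR_WRITE)
			n_writes++;
		if (rvs[i] & MR_READ)
			n_reads++;
	}
	printf("writes=%d reads=%d\n", n_writes, n_reads); /* writes=2 reads=2 */
	return 0;
}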
*/
void tl_clear(struct drbd_tconn *tconn)
{
- struct drbd_conf *mdev;
struct list_head *le, *tle;
struct drbd_request *r;
- int vnr;
spin_lock_irq(&tconn->req_lock);
}
/* ensure the bit indicating that a barrier is required is cleared */
- rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr)
- clear_bit(CREATE_BARRIER, &mdev->flags);
- rcu_read_unlock();
+ clear_bit(CREATE_BARRIER, &tconn->flags);
spin_unlock_irq(&tconn->req_lock);
}
}
/**
- * tl_apply() - Applies an event to all requests for a certain mdev in the TL
+ * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
* @mdev: DRBD device.
- * @what: The action/event to perform with all request objects
- *
- * @what might ony be ABORT_DISK_IO.
*/
-void tl_apply(struct drbd_conf *mdev, enum drbd_req_event what)
+void tl_abort_disk_io(struct drbd_conf *mdev)
{
struct drbd_tconn *tconn = mdev->tconn;
struct drbd_tl_epoch *b;
struct list_head *le, *tle;
struct drbd_request *req;
- D_ASSERT(what == ABORT_DISK_IO);
-
spin_lock_irq(&tconn->req_lock);
b = tconn->oldest_tle;
while (b) {
list_for_each_safe(le, tle, &b->requests) {
req = list_entry(le, struct drbd_request, tl_requests);
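+ /* only requests that still have local disk I/O pending can be aborted */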
+ if (!(req->rq_state & RQ_LOCAL_PENDING))
+ continue;
if (req->w.mdev == mdev)
- _req_mod(req, what);
+ _req_mod(req, ABORT_DISK_IO);
}
b = b->next;
}
list_for_each_safe(le, tle, &tconn->barrier_acked_requests) {
req = list_entry(le, struct drbd_request, tl_requests);
+ if (!(req->rq_state & RQ_LOCAL_PENDING))
+ continue;
if (req->w.mdev == mdev)
- _req_mod(req, what);
+ _req_mod(req, ABORT_DISK_IO);
}
spin_unlock_irq(&tconn->req_lock);
D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
- uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
+ uuid = mdev->ldev->md.uuid[UI_BITMAP];
+ if (uuid && uuid != UUID_JUST_CREATED)
+ uuid = uuid + UUID_NEW_BM_OFFSET;
+ else
+ get_random_bytes(&uuid, sizeof(u64));
drbd_uuid_set(mdev, UI_BITMAP, uuid);
drbd_print_uuids(mdev, "updated sync UUID");
drbd_md_sync(mdev);
}
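The fallback above avoids deriving a sync UUID from an empty or just-created bitmap slot. The decision, restated as a standalone sketch (the constant values are placeholders, not the DRBD definitions, and rand64() stands in for the kernel's get_random_bytes()):

#include <stdint.h>

#define UUID_JUST_CREATED  4ULL		/* placeholder values */
#define UUID_NEW_BM_OFFSET 1ULL

extern uint64_t rand64(void);	/* stand-in for get_random_bytes() */

static uint64_t next_sync_uuid(uint64_t bitmap_uuid)
{
	if (bitmap_uuid && bitmap_uuid != UUID_JUST_CREATED)
		return bitmap_uuid + UUID_NEW_BM_OFFSET;
	/* nothing usable to rotate from: start with a random value */
	return rand64();
}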
/**
- * drbd_send_state() - Sends the drbd state to the peer
+ * drbd_send_current_state() - Sends the drbd state to the peer
* @mdev: DRBD device.
*/
-int drbd_send_state(struct drbd_conf *mdev)
+int drbd_send_current_state(struct drbd_conf *mdev)
{
struct drbd_socket *sock;
struct p_state *p;
return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
}
+/**
+ * drbd_send_state() - After a state change, sends the new state to the peer
+ * @mdev: DRBD device.
+ * @state: the state to send, not necessarily the current state.
+ *
+ * Each state change queues an "after_state_ch" work, which will eventually
+ * send the resulting new state to the peer. If more state changes happen
+ * between queuing and processing of the after_state_ch work, we still
+ * want to send each intermediate state in the order it occurred.
+ */
+int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
+{
+ struct drbd_socket *sock;
+ struct p_state *p;
+
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->state = cpu_to_be32(state.i); /* Within the send mutex */
+ return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
+}
+
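Passing the state by value is what makes that ordering guarantee work: each queued work item carries the state snapshot taken when its change was committed, so a later change cannot overwrite what an earlier invocation sends. A hypothetical caller sketch, not the actual after_state_ch code:

static void example_after_state_change(struct drbd_conf *mdev,
				       union drbd_state ns)
{
	/* ns was captured when the change was committed; even if
	 * mdev->state has moved on by now, the peer still receives
	 * this intermediate state, and in order. */
	drbd_send_state(mdev, ns);
}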
int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
{
struct drbd_socket *sock;
p->mask = cpu_to_be32(mask.i);
p->val = cpu_to_be32(val.i);
return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
-
}
int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
struct bio_vec *bvec;
int i;
/* hint all but last page with MSG_MORE */
- __bio_for_each_segment(bvec, bio, i, 0) {
+ bio_for_each_segment(bvec, bio, i) {
int err;
err = _drbd_no_send_page(mdev, bvec->bv_page,
struct bio_vec *bvec;
int i;
/* hint all but last page with MSG_MORE */
- __bio_for_each_segment(bvec, bio, i, 0) {
+ bio_for_each_segment(bvec, bio, i) {
int err;
err = _drbd_send_page(mdev, bvec->bv_page,
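In both senders, bio_for_each_segment() replaces the deprecated __bio_for_each_segment(..., 0); the non-underscore variant starts at bio->bi_idx instead of hard-coding segment 0. The truncated loop bodies above presumably realize the "all but last page" comment along these lines (a sketch, using the _drbd_send_page()/_drbd_no_send_page() signature implied above):

bio_for_each_segment(bvec, bio, i) {
	int err;

	/* MSG_MORE on every segment except the final one, so the
	 * network layer may coalesce the pages into fewer frames */
	err = _drbd_send_page(mdev, bvec->bv_page,
			      bvec->bv_offset, bvec->bv_len,
			      i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
	if (err)
		return err;
}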
sock = &mdev->tconn->data;
p = drbd_prepare_command(mdev, sock);
- dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_tfm) ?
- crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
+ dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
if (!p)
return -EIO;
p->sector = cpu_to_be64(req->i.sector);
p->block_id = (unsigned long)req;
- p->seq_num = cpu_to_be32(req->seq_num = atomic_inc_return(&mdev->packet_seq));
+ p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
if (mdev->state.conn >= C_SYNC_SOURCE &&
mdev->state.conn <= C_PAUSED_SYNC_T)
sock = &mdev->tconn->data;
p = drbd_prepare_command(mdev, sock);
- dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_tfm) ?
- crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
+ dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
if (!p)
return -EIO;
p->sector = cpu_to_be64(peer_req->i.sector);
p->block_id = peer_req->block_id;
p->seq_num = 0; /* unused */
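+ /* dp_flags is not interpreted for this packet; zero it rather than
+  * send whatever the shared send buffer last held */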
+ p->dp_flags = 0;
if (dgs)
drbd_csum_ee(mdev, mdev->tconn->integrity_tfm, peer_req, p + 1);
err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
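For the digest-carrying senders, the command length sizeof(*p) + dgs covers the fixed header plus the digest that drbd_csum_ee() writes at p + 1, while the payload is streamed separately. Schematically:

/*
 *  +---------------+----------------+------------------------+
 *  | packet header | digest (dgs B) | payload (i.size bytes) |
 *  +---------------+----------------+------------------------+
 *                  ^ written by drbd_csum_ee() at p + 1
 */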
spin_lock_init(&mdev->al_lock);
spin_lock_init(&mdev->peer_seq_lock);
- spin_lock_init(&mdev->epoch_lock);
INIT_LIST_HEAD(&mdev->active_ee);
INIT_LIST_HEAD(&mdev->sync_ee);
init_waitqueue_head(&mdev->al_wait);
init_waitqueue_head(&mdev->seq_wait);
- mdev->write_ordering = WO_bdev_flush;
mdev->resync_wenr = LC_FREE;
mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
mdev->tconn->receiver.t_state);
- /* no need to lock it, I'm the only thread alive */
- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
mdev->al_writ_cnt =
mdev->bm_writ_cnt =
mdev->read_cnt =
kfree(mdev->p_uuid);
/* mdev->p_uuid = NULL; */
- kfree(mdev->current_epoch);
if (mdev->bitmap) /* should no longer be there. */
drbd_bm_cleanup(mdev);
__free_page(mdev->md_io_page);
if (!tl_init(tconn))
goto fail;
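+ /* epoch tracking moves from the device (mdev) to the connection */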
+ tconn->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
+ if (!tconn->current_epoch)
+ goto fail;
+ INIT_LIST_HEAD(&tconn->current_epoch->list);
+ tconn->epochs = 1;
+ spin_lock_init(&tconn->epoch_lock);
+ tconn->write_ordering = WO_bdev_flush;
+
tconn->cstate = C_STANDALONE;
mutex_init(&tconn->cstate_mutex);
spin_lock_init(&tconn->req_lock);
return tconn;
fail:
+ kfree(tconn->current_epoch);
tl_cleanup(tconn);
free_cpumask_var(tconn->cpu_mask);
drbd_free_socket(&tconn->meta);
{
struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
+ if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
+ conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
+ kfree(tconn->current_epoch);
+
idr_destroy(&tconn->volumes);
free_cpumask_var(tconn->cpu_mask);
mdev->read_requests = RB_ROOT;
mdev->write_requests = RB_ROOT;
- mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
- if (!mdev->current_epoch)
- goto out_no_epoch;
-
- INIT_LIST_HEAD(&mdev->current_epoch->list);
- mdev->epochs = 1;
-
if (!idr_pre_get(&minors, GFP_KERNEL))
goto out_no_minor_idr;
if (idr_get_new_above(&minors, mdev, minor, &minor_got))
idr_remove(&minors, minor_got);
synchronize_rcu();
out_no_minor_idr:
- kfree(mdev->current_epoch);
-out_no_epoch:
drbd_bm_cleanup(mdev);
out_no_bitmap:
__free_page(mdev->md_io_page);
for (i = UI_CURRENT; i < UI_SIZE; i++)
buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
- buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
+ buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);
buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
* @bdev: Device from which the meta data should be read in.
*
* Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
- * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
+ * something goes wrong.
*/
int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
struct meta_data_on_disk *buffer;
+ u32 magic, flags;
int i, rv = NO_ERROR;
if (!get_ldev_if_state(mdev, D_ATTACHING))
goto err;
}
- if (buffer->magic != cpu_to_be32(DRBD_MD_MAGIC)) {
- dev_err(DEV, "Error while reading metadata, magic not found.\n");
+ magic = be32_to_cpu(buffer->magic);
+ flags = be32_to_cpu(buffer->flags);
+ if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
+ (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
+ /* note: that's Activity Log clean, not "all" clean. */
+ dev_err(DEV, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
+ rv = ERR_MD_UNCLEAN;
+ goto err;
+ }
+ if (magic != DRBD_MD_MAGIC_08) {
+ if (magic == DRBD_MD_MAGIC_07)
+ dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
+ else
+ dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
rv = ERR_MD_INVALID;
goto err;
}
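Taken together, the checks above sort the on-disk magic into three cases. The same logic as a standalone predicate, purely a restatement for clarity, not a helper that exists in the driver:

enum md_check { MD_VALID, MD_UNCLEAN, MD_INVALID };

static enum md_check classify_md(u32 magic, u32 flags)
{
	/* written while in use, or 8.x without a clean activity log:
	 * run "drbdadm apply-al" before attaching */
	if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
	    (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN)))
		return MD_UNCLEAN;
	/* 0.7 meta data, or no DRBD meta data at all */
	if (magic != DRBD_MD_MAGIC_08)
		return MD_INVALID;
	return MD_VALID;
}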
}
spin_unlock_irq(&mdev->tconn->req_lock);
- /* This blocks wants to be get removed... */
- bdev->disk_conf->al_extents = be32_to_cpu(buffer->al_nr_extents);
- if (bdev->disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
- bdev->disk_conf->al_extents = DRBD_AL_EXTENTS_DEF;
-
err:
drbd_md_put_buffer(mdev);
out: