Commit 2a842aca authored by Christoph Hellwig, committed by Jens Axboe

block: introduce new block status code type


Currently we use normal Linux errno values in the block layer, and while
we accept any error, a few have overloaded magic meanings.  This patch
instead introduces a new blk_status_t value that holds block layer specific
status codes and explicitly explains their meaning.  Helpers to convert from
and to the previous special meanings are provided for now, but I suspect
we want to get rid of them in the long run - those drivers that have an
errno input (e.g. networking) usually get errnos that don't know about
the special block layer overloads, and similarly returning them to userspace
will usually return something that strictly speaking isn't correct
for file system operations, but that's left as an exercise for later.
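
As an illustration, a minimal sketch of that boundary conversion -
my_driver_complete() and its errno source are made up for this sketch;
errno_to_blk_status() and blk_mq_end_request() are the real interfaces
from this patch:

	/*
	 * Hypothetical completion path: "err" is a plain errno, e.g.
	 * handed back by a networking callback that knows nothing about
	 * the block layer overloads.  Convert it exactly once, at the
	 * block layer boundary.
	 */
	static void my_driver_complete(struct request *rq, int err)
	{
		blk_mq_end_request(rq, errno_to_blk_status(err));
	}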

For now the set of errors is a very limited one that closely corresponds
to the previous overloaded errno values, but there is some low-hanging
fruit to improve it.

blk_status_t (ab)uses the sparse __bitwise annotation to allow for sparse
typechecking, so that we can easily catch places passing the wrong values.
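
For reference, a minimal sketch of the annotation, assuming the usual
sparse conventions (the blk_types.h hunk itself is not part of the
excerpt below; the numeric values follow the blk_errors[] table):

	typedef u8 __bitwise blk_status_t;

	#define BLK_STS_OK		0
	#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
	#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
	/* ... values 3-9 as in the blk_errors[] table ... */
	#define BLK_STS_IOERR		((__force blk_status_t)10)

	/* With sparse ("make C=1") implicit conversions now warn: */
	blk_status_t sts = -EIO;	/* warning: incorrect type */
	int ret = BLK_STS_TIMEOUT;	/* warning: incorrect type */
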
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 1be56909
Showing with 156 additions and 126 deletions
......@@ -3,6 +3,7 @@
#include <linux/types.h>
#include <linux/device.h>
#include <linux/blkdev.h>
struct arqb {
u64 data;
......@@ -105,13 +106,14 @@ struct scm_driver {
int (*probe) (struct scm_device *scmdev);
int (*remove) (struct scm_device *scmdev);
void (*notify) (struct scm_device *scmdev, enum scm_event event);
void (*handler) (struct scm_device *scmdev, void *data, int error);
void (*handler) (struct scm_device *scmdev, void *data,
blk_status_t error);
};
int scm_driver_register(struct scm_driver *scmdrv);
void scm_driver_unregister(struct scm_driver *scmdrv);
int eadm_start_aob(struct aob *aob);
void scm_irq_handler(struct aob *aob, int error);
void scm_irq_handler(struct aob *aob, blk_status_t error);
#endif /* _ASM_S390_EADM_H */
......@@ -534,7 +534,7 @@ static void ubd_handler(void)
for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
blk_end_request(
(*irq_req_buffer)[count]->req,
0,
BLK_STS_OK,
(*irq_req_buffer)[count]->length
);
kfree((*irq_req_buffer)[count]);
......
......@@ -129,11 +129,66 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
}
EXPORT_SYMBOL(blk_rq_init);
static const struct {
int errno;
const char *name;
} blk_errors[] = {
[BLK_STS_OK] = { 0, "" },
[BLK_STS_NOTSUPP] = { -EOPNOTSUPP, "operation not supported" },
[BLK_STS_TIMEOUT] = { -ETIMEDOUT, "timeout" },
[BLK_STS_NOSPC] = { -ENOSPC, "critical space allocation" },
[BLK_STS_TRANSPORT] = { -ENOLINK, "recoverable transport" },
[BLK_STS_TARGET] = { -EREMOTEIO, "critical target" },
[BLK_STS_NEXUS] = { -EBADE, "critical nexus" },
[BLK_STS_MEDIUM] = { -ENODATA, "critical medium" },
[BLK_STS_PROTECTION] = { -EILSEQ, "protection" },
[BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" },
/* everything else not covered above: */
[BLK_STS_IOERR] = { -EIO, "I/O" },
};
blk_status_t errno_to_blk_status(int errno)
{
int i;
for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
if (blk_errors[i].errno == errno)
return (__force blk_status_t)i;
}
return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);
int blk_status_to_errno(blk_status_t status)
{
int idx = (__force int)status;
if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
return -EIO;
return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);
static void print_req_error(struct request *req, blk_status_t status)
{
int idx = (__force int)status;
if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
return;
printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
__func__, blk_errors[idx].name, req->rq_disk ?
req->rq_disk->disk_name : "?",
(unsigned long long)blk_rq_pos(req));
}
static void req_bio_endio(struct request *rq, struct bio *bio,
unsigned int nbytes, int error)
unsigned int nbytes, blk_status_t error)
{
if (error)
bio->bi_error = error;
bio->bi_error = blk_status_to_errno(error);
if (unlikely(rq->rq_flags & RQF_QUIET))
bio_set_flag(bio, BIO_QUIET);
......@@ -2177,29 +2232,29 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
* @q: the queue to submit the request
* @rq: the request being queued
*/
int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
unsigned long flags;
int where = ELEVATOR_INSERT_BACK;
if (blk_cloned_rq_check_limits(q, rq))
return -EIO;
return BLK_STS_IOERR;
if (rq->rq_disk &&
should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
return -EIO;
return BLK_STS_IOERR;
if (q->mq_ops) {
if (blk_queue_io_stat(q))
blk_account_io_start(rq, true);
blk_mq_sched_insert_request(rq, false, true, false, false);
return 0;
return BLK_STS_OK;
}
spin_lock_irqsave(q->queue_lock, flags);
if (unlikely(blk_queue_dying(q))) {
spin_unlock_irqrestore(q->queue_lock, flags);
return -ENODEV;
return BLK_STS_IOERR;
}
/*
......@@ -2216,7 +2271,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
__blk_run_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
return 0;
return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
......@@ -2450,15 +2505,14 @@ struct request *blk_peek_request(struct request_queue *q)
rq = NULL;
break;
} else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
rq->rq_flags |= RQF_QUIET;
/*
* Mark this request as started so we don't trigger
* any debug logic in the end I/O path.
*/
blk_start_request(rq);
__blk_end_request_all(rq, err);
__blk_end_request_all(rq, ret == BLKPREP_INVALID ?
BLK_STS_TARGET : BLK_STS_IOERR);
} else {
printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
break;
......@@ -2547,7 +2601,7 @@ EXPORT_SYMBOL(blk_fetch_request);
/**
* blk_update_request - Special helper function for request stacking drivers
* @req: the request being processed
* @error: %0 for success, < %0 for error
* @error: block status code
* @nr_bytes: number of bytes to complete @req
*
* Description:
......@@ -2566,49 +2620,19 @@ EXPORT_SYMBOL(blk_fetch_request);
* %false - this request doesn't have any more data
* %true - this request has more data
**/
bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
bool blk_update_request(struct request *req, blk_status_t error,
unsigned int nr_bytes)
{
int total_bytes;
trace_block_rq_complete(req, error, nr_bytes);
trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
if (!req->bio)
return false;
if (error && !blk_rq_is_passthrough(req) &&
!(req->rq_flags & RQF_QUIET)) {
char *error_type;
switch (error) {
case -ENOLINK:
error_type = "recoverable transport";
break;
case -EREMOTEIO:
error_type = "critical target";
break;
case -EBADE:
error_type = "critical nexus";
break;
case -ETIMEDOUT:
error_type = "timeout";
break;
case -ENOSPC:
error_type = "critical space allocation";
break;
case -ENODATA:
error_type = "critical medium";
break;
case -EIO:
default:
error_type = "I/O";
break;
}
printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
__func__, error_type, req->rq_disk ?
req->rq_disk->disk_name : "?",
(unsigned long long)blk_rq_pos(req));
}
if (unlikely(error && !blk_rq_is_passthrough(req) &&
!(req->rq_flags & RQF_QUIET)))
print_req_error(req, error);
blk_account_io_completion(req, nr_bytes);
......@@ -2674,7 +2698,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
}
EXPORT_SYMBOL_GPL(blk_update_request);
static bool blk_update_bidi_request(struct request *rq, int error,
static bool blk_update_bidi_request(struct request *rq, blk_status_t error,
unsigned int nr_bytes,
unsigned int bidi_bytes)
{
......@@ -2715,7 +2739,7 @@ EXPORT_SYMBOL_GPL(blk_unprep_request);
/*
* queue lock must be held
*/
void blk_finish_request(struct request *req, int error)
void blk_finish_request(struct request *req, blk_status_t error)
{
struct request_queue *q = req->q;
......@@ -2752,7 +2776,7 @@ EXPORT_SYMBOL(blk_finish_request);
/**
* blk_end_bidi_request - Complete a bidi request
* @rq: the request to complete
* @error: %0 for success, < %0 for error
* @error: block status code
* @nr_bytes: number of bytes to complete @rq
* @bidi_bytes: number of bytes to complete @rq->next_rq
*
......@@ -2766,7 +2790,7 @@ EXPORT_SYMBOL(blk_finish_request);
* %false - we are done with this request
* %true - still buffers pending for this request
**/
static bool blk_end_bidi_request(struct request *rq, int error,
static bool blk_end_bidi_request(struct request *rq, blk_status_t error,
unsigned int nr_bytes, unsigned int bidi_bytes)
{
struct request_queue *q = rq->q;
......@@ -2785,7 +2809,7 @@ static bool blk_end_bidi_request(struct request *rq, int error,
/**
* __blk_end_bidi_request - Complete a bidi request with queue lock held
* @rq: the request to complete
* @error: %0 for success, < %0 for error
* @error: block status code
* @nr_bytes: number of bytes to complete @rq
* @bidi_bytes: number of bytes to complete @rq->next_rq
*
......@@ -2797,7 +2821,7 @@ static bool blk_end_bidi_request(struct request *rq, int error,
* %false - we are done with this request
* %true - still buffers pending for this request
**/
static bool __blk_end_bidi_request(struct request *rq, int error,
static bool __blk_end_bidi_request(struct request *rq, blk_status_t error,
unsigned int nr_bytes, unsigned int bidi_bytes)
{
if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
......@@ -2811,7 +2835,7 @@ static bool __blk_end_bidi_request(struct request *rq, int error,
/**
* blk_end_request - Helper function for drivers to complete the request.
* @rq: the request being processed
* @error: %0 for success, < %0 for error
* @error: block status code
* @nr_bytes: number of bytes to complete
*
* Description:
......@@ -2822,7 +2846,8 @@ static bool __blk_end_bidi_request(struct request *rq, int error,
* %false - we are done with this request
* %true - still buffers pending for this request
**/
bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
bool blk_end_request(struct request *rq, blk_status_t error,
unsigned int nr_bytes)
{
return blk_end_bidi_request(rq, error, nr_bytes, 0);
}
......@@ -2831,12 +2856,12 @@ EXPORT_SYMBOL(blk_end_request);
/**
* blk_end_request_all - Helper function for drivers to finish the request.
* @rq: the request to finish
* @error: %0 for success, < %0 for error
* @error: block status code
*
* Description:
* Completely finish @rq.
*/
void blk_end_request_all(struct request *rq, int error)
void blk_end_request_all(struct request *rq, blk_status_t error)
{
bool pending;
unsigned int bidi_bytes = 0;
......@@ -2852,7 +2877,7 @@ EXPORT_SYMBOL(blk_end_request_all);
/**
* __blk_end_request - Helper function for drivers to complete the request.
* @rq: the request being processed
* @error: %0 for success, < %0 for error
* @error: block status code
* @nr_bytes: number of bytes to complete
*
* Description:
......@@ -2862,7 +2887,8 @@ EXPORT_SYMBOL(blk_end_request_all);
* %false - we are done with this request
* %true - still buffers pending for this request
**/
bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
bool __blk_end_request(struct request *rq, blk_status_t error,
unsigned int nr_bytes)
{
return __blk_end_bidi_request(rq, error, nr_bytes, 0);
}
......@@ -2871,12 +2897,12 @@ EXPORT_SYMBOL(__blk_end_request);
/**
* __blk_end_request_all - Helper function for drivers to finish the request.
* @rq: the request to finish
* @error: %0 for success, < %0 for error
* @error: block status code
*
* Description:
* Completely finish @rq. Must be called with queue lock held.
*/
void __blk_end_request_all(struct request *rq, int error)
void __blk_end_request_all(struct request *rq, blk_status_t error)
{
bool pending;
unsigned int bidi_bytes = 0;
......@@ -2892,7 +2918,7 @@ EXPORT_SYMBOL(__blk_end_request_all);
/**
* __blk_end_request_cur - Helper function to finish the current request chunk.
* @rq: the request to finish the current chunk for
* @error: %0 for success, < %0 for error
* @error: block status code
*
* Description:
* Complete the current consecutively mapped chunk from @rq. Must
......@@ -2902,7 +2928,7 @@ EXPORT_SYMBOL(__blk_end_request_all);
* %false - we are done with this request
* %true - still buffers pending for this request
*/
bool __blk_end_request_cur(struct request *rq, int error)
bool __blk_end_request_cur(struct request *rq, blk_status_t error)
{
return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}
......@@ -3243,7 +3269,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
* Short-circuit if @q is dead
*/
if (unlikely(blk_queue_dying(q))) {
__blk_end_request_all(rq, -ENODEV);
__blk_end_request_all(rq, BLK_STS_IOERR);
continue;
}
......
......@@ -16,7 +16,7 @@
* @rq: request to complete
* @error: end I/O status of the request
*/
static void blk_end_sync_rq(struct request *rq, int error)
static void blk_end_sync_rq(struct request *rq, blk_status_t error)
{
struct completion *waiting = rq->end_io_data;
......@@ -69,7 +69,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
if (unlikely(blk_queue_dying(q))) {
rq->rq_flags |= RQF_QUIET;
__blk_end_request_all(rq, -ENXIO);
__blk_end_request_all(rq, BLK_STS_IOERR);
spin_unlock_irq(q->queue_lock);
return;
}
......
......@@ -164,7 +164,7 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front)
*/
static bool blk_flush_complete_seq(struct request *rq,
struct blk_flush_queue *fq,
unsigned int seq, int error)
unsigned int seq, blk_status_t error)
{
struct request_queue *q = rq->q;
struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
......@@ -216,7 +216,7 @@ static bool blk_flush_complete_seq(struct request *rq,
return kicked | queued;
}
static void flush_end_io(struct request *flush_rq, int error)
static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
struct request_queue *q = flush_rq->q;
struct list_head *running;
......@@ -341,7 +341,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
return blk_flush_queue_rq(flush_rq, false);
}
static void flush_data_end_io(struct request *rq, int error)
static void flush_data_end_io(struct request *rq, blk_status_t error)
{
struct request_queue *q = rq->q;
struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
......@@ -382,7 +382,7 @@ static void flush_data_end_io(struct request *rq, int error)
blk_run_queue_async(q);
}
static void mq_flush_data_end_io(struct request *rq, int error)
static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
struct request_queue *q = rq->q;
struct blk_mq_hw_ctx *hctx;
......
......@@ -394,7 +394,7 @@ void blk_mq_free_request(struct request *rq)
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);
inline void __blk_mq_end_request(struct request *rq, int error)
inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
blk_account_io_done(rq);
......@@ -409,7 +409,7 @@ inline void __blk_mq_end_request(struct request *rq, int error)
}
EXPORT_SYMBOL(__blk_mq_end_request);
void blk_mq_end_request(struct request *rq, int error)
void blk_mq_end_request(struct request *rq, blk_status_t error)
{
if (blk_update_request(rq, error, blk_rq_bytes(rq)))
BUG();
......@@ -988,7 +988,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
pr_err("blk-mq: bad return on queue: %d\n", ret);
case BLK_MQ_RQ_QUEUE_ERROR:
errors++;
blk_mq_end_request(rq, -EIO);
blk_mq_end_request(rq, BLK_STS_IOERR);
break;
}
......@@ -1433,7 +1433,7 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
*cookie = BLK_QC_T_NONE;
blk_mq_end_request(rq, -EIO);
blk_mq_end_request(rq, BLK_STS_IOERR);
return;
}
......
......@@ -37,7 +37,7 @@ static void bsg_destroy_job(struct kref *kref)
struct bsg_job *job = container_of(kref, struct bsg_job, kref);
struct request *rq = job->req;
blk_end_request_all(rq, scsi_req(rq)->result);
blk_end_request_all(rq, BLK_STS_OK);
put_device(job->dev); /* release reference for the request */
......@@ -202,7 +202,7 @@ static void bsg_request_fn(struct request_queue *q)
ret = bsg_create_job(dev, req);
if (ret) {
scsi_req(req)->result = ret;
blk_end_request_all(req, ret);
blk_end_request_all(req, BLK_STS_OK);
spin_lock_irq(q->queue_lock);
continue;
}
......
......@@ -294,14 +294,14 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
* async completion call-back from the block layer, when scsi/ide/whatever
* calls end_that_request_last() on a request
*/
static void bsg_rq_end_io(struct request *rq, int uptodate)
static void bsg_rq_end_io(struct request *rq, blk_status_t status)
{
struct bsg_command *bc = rq->end_io_data;
struct bsg_device *bd = bc->bd;
unsigned long flags;
dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
bd->name, rq, bc, bc->bio, uptodate);
dprintk("%s: finished rq %p bc %p, bio %p\n",
bd->name, rq, bc, bc->bio);
bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
......
......@@ -3464,7 +3464,7 @@ static inline bool DAC960_ProcessCompletedRequest(DAC960_Command_T *Command,
bool SuccessfulIO)
{
struct request *Request = Command->Request;
int Error = SuccessfulIO ? 0 : -EIO;
blk_status_t Error = SuccessfulIO ? BLK_STS_OK : BLK_STS_IOERR;
pci_unmap_sg(Command->Controller->PCIDevice, Command->cmd_sglist,
Command->SegmentCount, Command->DmaDirection);
......
......@@ -1378,7 +1378,7 @@ static void redo_fd_request(void)
struct amiga_floppy_struct *floppy;
char *data;
unsigned long flags;
int err;
blk_status_t err;
next_req:
rq = set_next_request();
......@@ -1392,7 +1392,7 @@ static void redo_fd_request(void)
next_segment:
/* Here someone could investigate to be more efficient */
for (cnt = 0, err = 0; cnt < blk_rq_cur_sectors(rq); cnt++) {
for (cnt = 0, err = BLK_STS_OK; cnt < blk_rq_cur_sectors(rq); cnt++) {
#ifdef DEBUG
printk("fd: sector %ld + %d requested for %s\n",
blk_rq_pos(rq), cnt,
......@@ -1400,7 +1400,7 @@ static void redo_fd_request(void)
#endif
block = blk_rq_pos(rq) + cnt;
if ((int)block > floppy->blocks) {
err = -EIO;
err = BLK_STS_IOERR;
break;
}
......@@ -1413,7 +1413,7 @@ static void redo_fd_request(void)
#endif
if (get_track(drive, track) == -1) {
err = -EIO;
err = BLK_STS_IOERR;
break;
}
......@@ -1424,7 +1424,7 @@ static void redo_fd_request(void)
/* keep the drive spinning while writes are scheduled */
if (!fd_motor_on(drive)) {
err = -EIO;
err = BLK_STS_IOERR;
break;
}
/*
......
......@@ -1071,7 +1071,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
do {
bio = rq->bio;
bok = !fastfail && !bio->bi_error;
} while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));
} while (__blk_end_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
/* cf. http://lkml.org/lkml/2006/10/31/28 */
if (!fastfail)
......
......@@ -378,7 +378,7 @@ static DEFINE_TIMER(readtrack_timer, fd_readtrack_check, 0, 0);
static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0);
static DEFINE_TIMER(fd_timer, check_change, 0, 0);
static void fd_end_request_cur(int err)
static void fd_end_request_cur(blk_status_t err)
{
if (!__blk_end_request_cur(fd_request, err))
fd_request = NULL;
......@@ -620,7 +620,7 @@ static void fd_error( void )
fd_request->error_count++;
if (fd_request->error_count >= MAX_ERRORS) {
printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
fd_end_request_cur(-EIO);
fd_end_request_cur(BLK_STS_IOERR);
}
else if (fd_request->error_count == RECALIBRATE_ERRORS) {
printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
......@@ -739,7 +739,7 @@ static void do_fd_action( int drive )
}
else {
/* all sectors finished */
fd_end_request_cur(0);
fd_end_request_cur(BLK_STS_OK);
redo_fd_request();
return;
}
......@@ -1144,7 +1144,7 @@ static void fd_rwsec_done1(int status)
}
else {
/* all sectors finished */
fd_end_request_cur(0);
fd_end_request_cur(BLK_STS_OK);
redo_fd_request();
}
return;
......@@ -1445,7 +1445,7 @@ static void redo_fd_request(void)
if (!UD.connected) {
/* drive not connected */
printk(KERN_ERR "Unknown Device: fd%d\n", drive );
fd_end_request_cur(-EIO);
fd_end_request_cur(BLK_STS_IOERR);
goto repeat;
}
......@@ -1461,12 +1461,12 @@ static void redo_fd_request(void)
/* user supplied disk type */
if (--type >= NUM_DISK_MINORS) {
printk(KERN_WARNING "fd%d: invalid disk format", drive );
fd_end_request_cur(-EIO);
fd_end_request_cur(BLK_STS_IOERR);
goto repeat;
}
if (minor2disktype[type].drive_types > DriveType) {
printk(KERN_WARNING "fd%d: unsupported disk format", drive );
fd_end_request_cur(-EIO);
fd_end_request_cur(BLK_STS_IOERR);
goto repeat;
}
type = minor2disktype[type].index;
......@@ -1476,7 +1476,7 @@ static void redo_fd_request(void)
}
if (blk_rq_pos(fd_request) + 1 > UDT->blocks) {
fd_end_request_cur(-EIO);
fd_end_request_cur(BLK_STS_IOERR);
goto repeat;
}
......
......@@ -1864,7 +1864,8 @@ static void cciss_softirq_done(struct request *rq)
/* set the residual count for pc requests */
if (blk_rq_is_passthrough(rq))
scsi_req(rq)->resid_len = c->err_info->ResidualCnt;
blk_end_request_all(rq, scsi_req(rq)->result ? -EIO : 0);
blk_end_request_all(rq, scsi_req(rq)->result ?
BLK_STS_IOERR : BLK_STS_OK);
spin_lock_irqsave(&h->lock, flags);
cmd_free(h, c);
......
......@@ -2202,7 +2202,7 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
* =============================
*/
static void floppy_end_request(struct request *req, int error)
static void floppy_end_request(struct request *req, blk_status_t error)
{
unsigned int nr_sectors = current_count_sectors;
unsigned int drive = (unsigned long)req->rq_disk->private_data;
......@@ -2263,7 +2263,7 @@ static void request_done(int uptodate)
DRWE->last_error_generation = DRS->generation;
}
spin_lock_irqsave(q->queue_lock, flags);
floppy_end_request(req, -EIO);
floppy_end_request(req, BLK_STS_IOERR);
spin_unlock_irqrestore(q->queue_lock, flags);
}
}
......
......@@ -464,7 +464,7 @@ static void lo_complete_rq(struct request *rq)
zero_fill_bio(bio);
}
blk_mq_end_request(rq, cmd->ret < 0 ? -EIO : 0);
blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK);
}
static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
......
......@@ -532,7 +532,7 @@ static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
struct smart_attr *attrib);
static void mtip_complete_command(struct mtip_cmd *cmd, int status)
static void mtip_complete_command(struct mtip_cmd *cmd, blk_status_t status)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
......@@ -568,7 +568,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
mtip_complete_command(cmd, -EIO);
mtip_complete_command(cmd, BLK_STS_IOERR);
return;
}
......@@ -667,7 +667,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
tag,
fail_reason != NULL ?
fail_reason : "unknown");
mtip_complete_command(cmd, -ENODATA);
mtip_complete_command(cmd, BLK_STS_MEDIUM);
continue;
}
}
......@@ -690,7 +690,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
dev_warn(&port->dd->pdev->dev,
"retiring tag %d\n", tag);
mtip_complete_command(cmd, -EIO);
mtip_complete_command(cmd, BLK_STS_IOERR);
}
}
print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
......@@ -2753,7 +2753,7 @@ static void mtip_abort_cmd(struct request *req, void *data,
dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
clear_bit(req->tag, dd->port->cmds_to_issue);
cmd->status = -EIO;
cmd->status = BLK_STS_IOERR;
mtip_softirq_done_fn(req);
}
......@@ -3597,7 +3597,7 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
int err;
err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq));
blk_mq_end_request(rq, err);
blk_mq_end_request(rq, err ? BLK_STS_IOERR : BLK_STS_OK);
return 0;
}
......@@ -3730,7 +3730,7 @@ static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
if (reserved) {
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
cmd->status = -ETIME;
cmd->status = BLK_STS_TIMEOUT;
return BLK_EH_HANDLED;
}
......@@ -3961,7 +3961,7 @@ static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
{
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
cmd->status = -ENODEV;
cmd->status = BLK_STS_IOERR;
blk_mq_complete_request(rq);
}
......
......@@ -342,7 +342,7 @@ struct mtip_cmd {
int retries; /* The number of retries left for this command. */
int direction; /* Data transfer direction */
int status;
blk_status_t status;
};
/* Structure used to describe a port. */
......
......@@ -116,7 +116,7 @@ struct nbd_cmd {
int index;
int cookie;
struct completion send_complete;
int status;
blk_status_t status;
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
......@@ -286,7 +286,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
struct nbd_config *config;
if (!refcount_inc_not_zero(&nbd->config_refs)) {
cmd->status = -EIO;
cmd->status = BLK_STS_TIMEOUT;
return BLK_EH_HANDLED;
}
......@@ -331,7 +331,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
"Connection timed out\n");
}
set_bit(NBD_TIMEDOUT, &config->runtime_flags);
cmd->status = -EIO;
cmd->status = BLK_STS_IOERR;
sock_shutdown(nbd);
nbd_config_put(nbd);
......@@ -578,7 +578,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
if (ntohl(reply.error)) {
dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
ntohl(reply.error));
cmd->status = -EIO;
cmd->status = BLK_STS_IOERR;
return cmd;
}
......@@ -603,7 +603,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
*/
if (nbd_disconnected(config) ||
config->num_connections <= 1) {
cmd->status = -EIO;
cmd->status = BLK_STS_IOERR;
return cmd;
}
return ERR_PTR(-EIO);
......@@ -655,7 +655,7 @@ static void nbd_clear_req(struct request *req, void *data, bool reserved)
if (!blk_mq_request_started(req))
return;
cmd = blk_mq_rq_to_pdu(req);
cmd->status = -EIO;
cmd->status = BLK_STS_IOERR;
blk_mq_complete_request(req);
}
......@@ -744,7 +744,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
nbd_config_put(nbd);
return -EINVAL;
}
cmd->status = 0;
cmd->status = BLK_STS_OK;
again:
nsock = config->socks[index];
mutex_lock(&nsock->tx_lock);
......
......@@ -229,11 +229,11 @@ static void end_cmd(struct nullb_cmd *cmd)
switch (queue_mode) {
case NULL_Q_MQ:
blk_mq_end_request(cmd->rq, 0);
blk_mq_end_request(cmd->rq, BLK_STS_OK);
return;
case NULL_Q_RQ:
INIT_LIST_HEAD(&cmd->rq->queuelist);
blk_end_request_all(cmd->rq, 0);
blk_end_request_all(cmd->rq, BLK_STS_OK);
break;
case NULL_Q_BIO:
bio_endio(cmd->bio);
......@@ -422,11 +422,12 @@ static void cleanup_queues(struct nullb *nullb)
#ifdef CONFIG_NVM
static void null_lnvm_end_io(struct request *rq, int error)
static void null_lnvm_end_io(struct request *rq, blk_status_t status)
{
struct nvm_rq *rqd = rq->end_io_data;
rqd->error = error;
/* XXX: lightnvm core seems to expect NVM_RSP_* values here.. */
rqd->error = status ? -EIO : 0;
nvm_end_io(rqd);
blk_put_request(rq);
......
......@@ -783,7 +783,7 @@ static void pcd_request(void)
ps_set_intr(do_pcd_read, NULL, 0, nice);
return;
} else {
__blk_end_request_all(pcd_req, -EIO);
__blk_end_request_all(pcd_req, BLK_STS_IOERR);
pcd_req = NULL;
}
}
......@@ -794,7 +794,7 @@ static void do_pcd_request(struct request_queue *q)
pcd_request();
}
static inline void next_request(int err)
static inline void next_request(blk_status_t err)
{
unsigned long saved_flags;
......@@ -837,7 +837,7 @@ static void pcd_start(void)
if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) {
pcd_bufblk = -1;
next_request(-EIO);
next_request(BLK_STS_IOERR);
return;
}
......@@ -871,7 +871,7 @@ static void do_pcd_read_drq(void)
return;
}
pcd_bufblk = -1;
next_request(-EIO);
next_request(BLK_STS_IOERR);
return;
}
......