/*
 *  drivers/s390/char/tape_core.c
 *    basic function of the tape device driver
 *
 *  S390 and zSeries version
 *    Copyright IBM Corp. 2001, 2009
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *		 Michael Holzheu <holzheu@de.ibm.com>
 *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		 Stefan Bader <shbader@de.ibm.com>
 */

#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/init.h>		// for kernel parameters
#include <linux/kmod.h>		// for requesting modules
#include <linux/spinlock.h>	// for locks
#include <linux/vmalloc.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/types.h>		// for variable types

#define TAPE_DBF_AREA	tape_core_dbf

#include "tape.h"
#include "tape_std.h"

#define LONG_BUSY_TIMEOUT 180	/* seconds */

static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
static void tape_delayed_next_request(struct work_struct *);
static void tape_long_busy_timeout(unsigned long data);

/*
 * One list to contain all tape devices of all disciplines, so
 * we can assign the devices to minor numbers of the same major.
 * The list is protected by the rwlock.
 */
static LIST_HEAD(tape_device_list);
static DEFINE_RWLOCK(tape_device_lock);

/*
 * Pointer to debug area.
 */
debug_info_t *TAPE_DBF_AREA = NULL;
EXPORT_SYMBOL(TAPE_DBF_AREA);

/*
 * Printable strings for tape enumerations.
 */
const char *tape_state_verbose[TS_SIZE] =
{
	[TS_UNUSED]   = "UNUSED",
	[TS_IN_USE]   = "IN_USE",
	[TS_BLKUSE]   = "BLKUSE",
	[TS_INIT]     = "INIT  ",
	[TS_NOT_OPER] = "NOT_OP"
};

const char *tape_op_verbose[TO_SIZE] =
{
	[TO_BLOCK] = "BLK",	[TO_BSB] = "BSB",
	[TO_BSF] = "BSF",	[TO_DSE] = "DSE",
	[TO_FSB] = "FSB",	[TO_FSF] = "FSF",
	[TO_LBL] = "LBL",	[TO_NOP] = "NOP",
	[TO_RBA] = "RBA",	[TO_RBI] = "RBI",
	[TO_RFO] = "RFO",	[TO_REW] = "REW",
	[TO_RUN] = "RUN",	[TO_WRI] = "WRI",
	[TO_WTM] = "WTM",	[TO_MSEN] = "MSN",
	[TO_LOAD] = "LOA",	[TO_READ_CONFIG] = "RCF",
	[TO_READ_ATTMSG] = "RAT",
	[TO_DIS] = "DIS",	[TO_ASSIGN] = "ASS",
	[TO_UNASSIGN] = "UAS",	[TO_CRYPT_ON] = "CON",
	[TO_CRYPT_OFF] = "COF",	[TO_KEKL_SET] = "KLS",
	[TO_KEKL_QUERY] = "KLQ", [TO_RDC] = "RDC",
};

static int devid_to_int(struct ccw_dev_id *dev_id)
{
	return dev_id->devno + (dev_id->ssid << 16);
}
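
/*
 * Worked example (bus id made up): for device 0.1.4711, i.e. subchannel
 * set id 1 and device number 0x4711, this yields 0x00014711. The folded
 * integer is what the cdev_id debug entries below print.
 */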

/*
 * Some channel attached tape specific attributes.
 *
 * FIXME: In the future the first_minor and blocksize attribute should be
 *        replaced by a link to the cdev tree.
 */
static ssize_t
tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);
	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
}

static
DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);

static ssize_t
tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);
	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
}

static
DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);

static ssize_t
tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);
	return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
		"OFFLINE" : tape_state_verbose[tdev->tape_state]);
}

static
DEVICE_ATTR(state, 0444, tape_state_show, NULL);

static ssize_t
tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;
	ssize_t rc;

	tdev = dev_get_drvdata(dev);
	if (tdev->first_minor < 0)
		return scnprintf(buf, PAGE_SIZE, "N/A\n");

	spin_lock_irq(get_ccwdev_lock(tdev->cdev));
	if (list_empty(&tdev->req_queue))
		rc = scnprintf(buf, PAGE_SIZE, "---\n");
	else {
		struct tape_request *req;

		req = list_entry(tdev->req_queue.next, struct tape_request,
				 list);
		rc = scnprintf(buf, PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
	}
	spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
	return rc;
}

static
DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);

static ssize_t
tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
}

static
DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL);

static struct attribute *tape_attrs[] = {
	&dev_attr_medium_state.attr,
	&dev_attr_first_minor.attr,
	&dev_attr_state.attr,
	&dev_attr_operation.attr,
	&dev_attr_blocksize.attr,
	NULL
};

static struct attribute_group tape_attr_group = {
	.attrs = tape_attrs,
};
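
/*
 * How these attributes surface to user space (the bus id below is a
 * made-up example); each read lands in the matching *_show() above:
 *
 *	$ cat /sys/bus/ccw/devices/0.0.0151/medium_state
 *	$ cat /sys/bus/ccw/devices/0.0.0151/state
 *	$ cat /sys/bus/ccw/devices/0.0.0151/operation
 */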

/*
 * Tape state functions
 */
void
tape_state_set(struct tape_device *device, enum tape_state newstate)
{
	const char *str;

	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(3, "ts_set err: not oper\n");
		return;
	}
	DBF_EVENT(4, "ts. dev:	%x\n", device->first_minor);
	DBF_EVENT(4, "old ts:\t\n");
	if (device->tape_state < TS_SIZE && device->tape_state >= 0)
		str = tape_state_verbose[device->tape_state];
	else
		str = "UNKNOWN TS";
	DBF_EVENT(4, "%s\n", str);
	DBF_EVENT(4, "new ts:\t\n");
	if (newstate < TS_SIZE && newstate >= 0)
		str = tape_state_verbose[newstate];
	else
		str = "UNKNOWN TS";
	DBF_EVENT(4, "%s\n", str);
	device->tape_state = newstate;
	wake_up(&device->state_change_wq);
}

struct tape_med_state_work_data {
	struct tape_device *device;
	enum tape_medium_state state;
	struct work_struct work;
};

static void
tape_med_state_work_handler(struct work_struct *work)
{
	static char env_state_loaded[] = "MEDIUM_STATE=LOADED";
	static char env_state_unloaded[] = "MEDIUM_STATE=UNLOADED";
	struct tape_med_state_work_data *p =
		container_of(work, struct tape_med_state_work_data, work);
	struct tape_device *device = p->device;
	char *envp[] = { NULL, NULL };

	switch (p->state) {
	case MS_UNLOADED:
		pr_info("%s: The tape cartridge has been successfully "
			"unloaded\n", dev_name(&device->cdev->dev));
		envp[0] = env_state_unloaded;
		kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
		break;
	case MS_LOADED:
		pr_info("%s: A tape cartridge has been mounted\n",
			dev_name(&device->cdev->dev));
		envp[0] = env_state_loaded;
		kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
		break;
	default:
		break;
	}
	tape_put_device(device);
	kfree(p);
}

static void
tape_med_state_work(struct tape_device *device, enum tape_medium_state state)
{
	struct tape_med_state_work_data *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (p) {
		INIT_WORK(&p->work, tape_med_state_work_handler);
		p->device = tape_get_device(device);
		p->state = state;
		schedule_work(&p->work);
	}
}
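
/*
 * The MEDIUM_STATE uevents emitted by the work handler above can be
 * consumed from user space; a hypothetical udev rule (path and script
 * name assumed) reacting to a mounted cartridge might look like:
 *
 *	ACTION=="change", ENV{MEDIUM_STATE}=="LOADED", RUN+="/usr/local/sbin/tape-loaded.sh"
 */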

void
tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
{
	enum tape_medium_state oldstate;

	oldstate = device->medium_state;
	if (oldstate == newstate)
		return;
	device->medium_state = newstate;
	switch (newstate) {
	case MS_UNLOADED:
		device->tape_generic_status |= GMT_DR_OPEN(~0);
		if (oldstate == MS_LOADED)
			tape_med_state_work(device, MS_UNLOADED);
		break;
	case MS_LOADED:
		device->tape_generic_status &= ~GMT_DR_OPEN(~0);
		if (oldstate == MS_UNLOADED)
			tape_med_state_work(device, MS_LOADED);
		break;
	default:
		break;
	}
	wake_up(&device->state_change_wq);
}

/*
 * Stop running ccw. Has to be called with the device lock held.
 */
static int
__tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int retries;
	int rc;

	/* Check if interrupt has already been processed */
	if (request->callback == NULL)
		return 0;

	rc = 0;
	for (retries = 0; retries < 5; retries++) {
		rc = ccw_device_clear(device->cdev, (long) request);

		switch (rc) {
		case 0:
			request->status = TAPE_REQUEST_DONE;
			return 0;
		case -EBUSY:
			request->status = TAPE_REQUEST_CANCEL;
			schedule_delayed_work(&device->tape_dnr, 0);
			return 0;
		case -ENODEV:
			DBF_EXCEPTION(2, "device gone, retry\n");
			break;
		case -EIO:
			DBF_EXCEPTION(2, "I/O error, retry\n");
			break;
		default:
			BUG();
		}
	}

	return rc;
}

/*
 * Add device into the sorted list, giving it the first
 * available minor number.
 */
static int
tape_assign_minor(struct tape_device *device)
{
	struct tape_device *tmp;
	int minor;

	minor = 0;
	write_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (minor < tmp->first_minor)
			break;
		minor += TAPE_MINORS_PER_DEV;
	}
	if (minor >= 256) {
		write_unlock(&tape_device_lock);
		return -ENODEV;
	}
	device->first_minor = minor;
	list_add_tail(&device->node, &tmp->node);
	write_unlock(&tape_device_lock);
	return 0;
}

/* remove device from the list */
static void
tape_remove_minor(struct tape_device *device)
{
	write_lock(&tape_device_lock);
	list_del_init(&device->node);
	device->first_minor = -1;
	write_unlock(&tape_device_lock);
}

/*
 * Set a device online.
 *
 * This function is called by the common I/O layer to move a device from the
 * detected but offline into the online state.
 * If we return an error (RC < 0) the device remains in the offline state. This
 * can happen if the device is assigned somewhere else, for example.
 */
int
tape_generic_online(struct tape_device *device,
		    struct tape_discipline *discipline)
{
	int rc;

	DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);

	if (device->tape_state != TS_INIT) {
		DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
		return -EINVAL;
	}

	init_timer(&device->lb_timeout);
	device->lb_timeout.function = tape_long_busy_timeout;

	/* Let the discipline have a go at the device. */
	device->discipline = discipline;
	if (!try_module_get(discipline->owner)) {
		return -EINVAL;
	}

	rc = discipline->setup_device(device);
	if (rc)
		goto out;
	rc = tape_assign_minor(device);
	if (rc)
		goto out_discipline;

	rc = tapechar_setup_device(device);
	if (rc)
		goto out_minor;
	rc = tapeblock_setup_device(device);
	if (rc)
		goto out_char;

	tape_state_set(device, TS_UNUSED);

	DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);

	return 0;

out_char:
	tapechar_cleanup_device(device);
out_minor:
	tape_remove_minor(device);
out_discipline:
	device->discipline->cleanup_device(device);
	device->discipline = NULL;
out:
	module_put(discipline->owner);
	return rc;
}

static void
tape_cleanup_device(struct tape_device *device)
{
	tapeblock_cleanup_device(device);
	tapechar_cleanup_device(device);
	device->discipline->cleanup_device(device);
	module_put(device->discipline->owner);
	tape_remove_minor(device);
	tape_med_state_set(device, MS_UNKNOWN);
}

/*
 * Suspend device.
 *
 * Called by the common I/O layer if the drive should be suspended on user
 * request. We refuse to suspend if the device is loaded or in use for the
 * following reason:
 * While the Linux guest is suspended, it might be logged off which causes
 * devices to be detached. Tape devices are automatically rewound and unloaded
 * during DETACH processing (unless the tape device was attached with the
 * NOASSIGN or MULTIUSER option). After rewind/unload, there is no way to
 * resume the original state of the tape device, since we would need to
 * manually re-load the cartridge which was active at suspend time.
 */
int tape_generic_pm_suspend(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return -ENODEV;
	}

	DBF_LH(3, "(%08x): tape_generic_pm_suspend(%p)\n",
		device->cdev_id, device);

	if (device->medium_state != MS_UNLOADED) {
		pr_err("A cartridge is loaded in tape device %s, "
		       "refusing to suspend\n", dev_name(&cdev->dev));
		return -EBUSY;
	}

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
	case TS_NOT_OPER:
	case TS_UNUSED:
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	default:
		pr_err("Tape device %s is busy, refusing to "
		       "suspend\n", dev_name(&cdev->dev));
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return -EBUSY;
	}

	DBF_LH(3, "(%08x): Drive suspended.\n", device->cdev_id);
	return 0;
}

/*
 * Set device offline.
 *
 * Called by the common I/O layer if the drive should be set offline on user
 * request. We may prevent this by returning an error.
 * Manual offline is only allowed while the drive is not in use.
 */
int
tape_generic_offline(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return -ENODEV;
	}

	DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
		device->cdev_id, device);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
	case TS_NOT_OPER:
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		tape_state_set(device, TS_INIT);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		DBF_EVENT(3, "(%08x): Set offline failed "
			"- drive in use.\n",
			device->cdev_id);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return -EBUSY;
	}

	DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
	return 0;
}

/*
 * Allocate memory for a new device structure.
 */
static struct tape_device *
tape_alloc_device(void)
{
	struct tape_device *device;

	device = kzalloc(sizeof(struct tape_device), GFP_KERNEL);
	if (device == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		return ERR_PTR(-ENOMEM);
	}
	device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA);
	if (device->modeset_byte == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&device->mutex);
	INIT_LIST_HEAD(&device->req_queue);
	INIT_LIST_HEAD(&device->node);
	init_waitqueue_head(&device->state_change_wq);
	init_waitqueue_head(&device->wait_queue);
	device->tape_state = TS_INIT;
	device->medium_state = MS_UNKNOWN;
	*device->modeset_byte = 0;
	device->first_minor = -1;
	atomic_set(&device->ref_count, 1);
	INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);

	return device;
}

/*
 * Get a reference to an existing device structure. This will automatically
 * increment the reference count.
 */
struct tape_device *
tape_get_device(struct tape_device *device)
{
	int count;

	count = atomic_inc_return(&device->ref_count);
	DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count);
	return device;
}

/*
 * Decrease the reference counter of a device structure. If the
 * reference counter reaches zero free the device structure.
 */
void
tape_put_device(struct tape_device *device)
{
	int count;

	count = atomic_dec_return(&device->ref_count);
	DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count);
	BUG_ON(count < 0);
	if (count == 0) {
		kfree(device->modeset_byte);
		kfree(device);
	}
}
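
/*
 * Usage sketch (illustrative only): callers that hold a tape_device
 * pointer beyond the scope in which they obtained it are expected to
 * own a reference, e.g.
 *
 *	struct tape_device *device;
 *
 *	device = tape_find_device(0);		// returns with a reference
 *	if (!IS_ERR(device)) {
 *		// ... use the device ...
 *		tape_put_device(device);	// drop the reference
 *	}
 */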

/*
 * Find tape device by a device index.
 */
struct tape_device *
tape_find_device(int devindex)
{
	struct tape_device *device, *tmp;

	device = ERR_PTR(-ENODEV);
	read_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
			device = tape_get_device(tmp);
			break;
		}
	}
	read_unlock(&tape_device_lock);
	return device;
}

/*
 * Driverfs tape probe function.
 */
int
tape_generic_probe(struct ccw_device *cdev)
{
	struct tape_device *device;
	int ret;
	struct ccw_dev_id dev_id;

	device = tape_alloc_device();
	if (IS_ERR(device))
		return -ENODEV;
	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
				     CCWDEV_DO_MULTIPATH);
	ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
	if (ret) {
		tape_put_device(device);
		return ret;
	}
	dev_set_drvdata(&cdev->dev, device);
	cdev->handler = __tape_do_irq;
	device->cdev = cdev;
	ccw_device_get_id(cdev, &dev_id);
	device->cdev_id = devid_to_int(&dev_id);
	return ret;
}

static void
__tape_discard_requests(struct tape_device *device)
{
	struct tape_request *request;
	struct list_head *l, *n;

	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);
		if (request->status == TAPE_REQUEST_IN_IO)
			request->status = TAPE_REQUEST_DONE;
		list_del(&request->list);

		/* Decrease ref_count for removed request. */
		request->device = NULL;
		tape_put_device(device);
		request->rc = -EIO;
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}

/*
 * Driverfs tape remove function.
 *
 * This function is called whenever the common I/O layer detects the device
 * gone. This can happen at any time and we cannot refuse.
 */
void
tape_generic_remove(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return;
	}
	DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
		tape_state_set(device, TS_NOT_OPER);
		/* fallthrough */
	case TS_NOT_OPER:
		/*
		 * Nothing to do.
		 */
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		/*
		 * Need only to release the device.
		 */
		tape_state_set(device, TS_NOT_OPER);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		/*
		 * There may be requests on the queue. We will not get
		 * an interrupt for a request that was running. So we
		 * just post them all as I/O errors.
		 */
		DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
			device->cdev_id);
		pr_warning("%s: A tape unit was detached while in "
			   "use\n", dev_name(&device->cdev->dev));
		tape_state_set(device, TS_NOT_OPER);
		__tape_discard_requests(device);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
	}

	device = dev_get_drvdata(&cdev->dev);
	if (device) {
		sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
		dev_set_drvdata(&cdev->dev, NULL);
		tape_put_device(device);
	}
}

/*
 * Allocate a new tape ccw request
 */
struct tape_request *
tape_alloc_request(int cplength, int datasize)
{
	struct tape_request *request;

	BUG_ON(datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);

	request = kzalloc(sizeof(struct tape_request), GFP_KERNEL);
	if (request == NULL) {
		DBF_EXCEPTION(1, "cqra nomem\n");
		return ERR_PTR(-ENOMEM);
	}
	/* allocate channel program */
	if (cplength > 0) {
		request->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
					  GFP_ATOMIC | GFP_DMA);
		if (request->cpaddr == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
	}
	/* alloc small kernel buffer */
	if (datasize > 0) {
		request->cpdata = kzalloc(datasize, GFP_KERNEL | GFP_DMA);
		if (request->cpdata == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request->cpaddr);
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
	}
	DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
		request->cpdata);

	return request;
}

/*
 * Free tape ccw request
 */
void
tape_free_request (struct tape_request * request)
{
	DBF_LH(6, "Free request %p\n", request);

	if (request->device)
		tape_put_device(request->device);
	kfree(request->cpdata);
	kfree(request->cpaddr);
	kfree(request);
}
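
/*
 * Lifecycle sketch (illustrative; disciplines such as tape_std build
 * their channel programs along these lines):
 *
 *	struct tape_request *request;
 *	int rc;
 *
 *	request = tape_alloc_request(2, 0);	// two CCWs, no data buffer
 *	if (IS_ERR(request))
 *		return PTR_ERR(request);
 *	request->op = TO_REW;
 *	// ... fill in request->cpaddr with the channel program ...
 *	rc = tape_do_io(device, request);	// see below
 *	tape_free_request(request);
 */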

static int
__tape_start_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

#ifdef CONFIG_S390_TAPE_BLOCK
	if (request->op == TO_BLOCK)
		device->discipline->check_locate(device, request);
#endif
	rc = ccw_device_start(
		device->cdev,
		request->cpaddr,
		(unsigned long) request,
		0x00,
		request->options
	);
	if (rc == 0) {
		request->status = TAPE_REQUEST_IN_IO;
	} else if (rc == -EBUSY) {
		/* The common I/O subsystem is currently busy. Retry later. */
		request->status = TAPE_REQUEST_QUEUED;
		schedule_delayed_work(&device->tape_dnr, 0);
		rc = 0;
	} else {
		/* Start failed. Remove request and indicate failure. */
		DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc);
	}
	return rc;
}

static void
__tape_start_next_request(struct tape_device *device)
{
	struct list_head *l, *n;
	struct tape_request *request;
	int rc;

	DBF_LH(6, "__tape_start_next_request(%p)\n", device);
	/*
	 * Try to start each request on the request queue until one is
	 * started successfully.
	 */
	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);

		/*
		 * Avoid race condition if bottom-half was triggered more than
		 * once.
		 */
		if (request->status == TAPE_REQUEST_IN_IO)
			return;
		/*
		 * Request has already been stopped. We have to wait until
		 * the request is removed from the queue in the interrupt
		 * handling.
		 */
		if (request->status == TAPE_REQUEST_DONE)
			return;

		/*
		 * We wanted to cancel the request but the common I/O layer
		 * was busy at that time. This can only happen if this
		 * function is called by delayed_next_request.
		 * Otherwise we start the next request on the queue.
		 */
		if (request->status == TAPE_REQUEST_CANCEL) {
			rc = __tape_cancel_io(device, request);
		} else {
			rc = __tape_start_io(device, request);
		}
		if (rc == 0)
			return;

		/* Set ending status. */
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}

static void
tape_delayed_next_request(struct work_struct *work)
{
	struct tape_device *device =
		container_of(work, struct tape_device, tape_dnr.work);

	DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	__tape_start_next_request(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}

static void tape_long_busy_timeout(unsigned long data)
{
	struct tape_request *request;
	struct tape_device *device;

	device = (struct tape_device *) data;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	request = list_entry(device->req_queue.next, struct tape_request, list);
	BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
	DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
	__tape_start_next_request(device);
	device->lb_timeout.data = 0UL;
	tape_put_device(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}

static void
__tape_end_request(
	struct tape_device *	device,
	struct tape_request *	request,
	int			rc)
{
	DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc);
	if (request) {
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}

	/* Start next request. */
	if (!list_empty(&device->req_queue))
		__tape_start_next_request(device);
}

/*
 * Write sense data to dbf
 */
void
tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
		    struct irb *irb)
{
	unsigned int *sptr;
	const char *op;

	if (request != NULL)
		op = tape_op_verbose[request->op];
	else
		op = "---";
	DBF_EVENT(3, "DSTAT : %02x  CSTAT: %02x\n",
		  irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
	DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
	sptr = (unsigned int *) irb->ecw;
	DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
	DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
	DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
	DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
}

/*
 * I/O helper function. Adds the request to the request queue
 * and starts it if the tape is idle. Has to be called with
 * the device lock held.
 */
static int
__tape_start_request(struct tape_device *device, struct tape_request *request)
{
	int rc;

	switch (request->op) {
	case TO_MSEN:
	case TO_ASSIGN:
	case TO_UNASSIGN:
	case TO_READ_ATTMSG:
	case TO_RDC:
		if (device->tape_state == TS_INIT)
			break;
		if (device->tape_state == TS_UNUSED)
			break;
		/* fallthrough */
	default:
		if (device->tape_state == TS_BLKUSE)
			break;
		if (device->tape_state != TS_IN_USE)
			return -ENODEV;
	}

	/* Increase use count of device for the added request. */
	request->device = tape_get_device(device);

	if (list_empty(&device->req_queue)) {
		/* No other requests are on the queue. Start this one. */
		rc = __tape_start_io(device, request);
		if (rc)
			return rc;

		DBF_LH(5, "Request %p added for execution.\n", request);
		list_add(&request->list, &device->req_queue);
	} else {
		DBF_LH(5, "Request %p add to queue.\n", request);
		request->status = TAPE_REQUEST_QUEUED;
		list_add_tail(&request->list, &device->req_queue);
	}
	return 0;
}

/*
 * Add the request to the request queue, try to start it if the
 * tape is idle. Return without waiting for end of i/o.
 */
int
tape_do_io_async(struct tape_device *device, struct tape_request *request)
{
	int rc;

	DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * tape_do_io/__tape_wake_up
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait uninterruptibly for its completion.
 */
static void
__tape_wake_up(struct tape_request *request, void *data)
{
	request->callback = NULL;
	wake_up((wait_queue_head_t *) data);
}

int
tape_do_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up;
	request->callback_data = &device->wait_queue;
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	wait_event(device->wait_queue, (request->callback == NULL));
	/* Get rc from request */
	return request->rc;
}
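
/*
 * Choosing a variant (sketch, not lifted from any particular caller):
 * tape_do_io() sleeps uninterruptibly until the channel program ends,
 * tape_do_io_interruptible() below additionally lets a signal cancel the
 * request, and tape_do_io_async() returns at once and signals completion
 * through the callback, e.g.
 *
 *	request->callback = my_done_fn;		// hypothetical callback
 *	request->callback_data = my_cookie;	// hypothetical cookie
 *	rc = tape_do_io_async(device, request);
 */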

/*
 * tape_do_io_interruptible/__tape_wake_up_interruptible
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait interruptibly for its completion.
 */
static void
__tape_wake_up_interruptible(struct tape_request *request, void *data)
{
	request->callback = NULL;
	wake_up_interruptible((wait_queue_head_t *) data);
}

int
tape_do_io_interruptible(struct tape_device *device,
			 struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up_interruptible;
	request->callback_data = &device->wait_queue;
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	rc = wait_event_interruptible(device->wait_queue,
				      (request->callback == NULL));
	if (rc != -ERESTARTSYS)
		/* Request finished normally. */
		return request->rc;

	/* Interrupted by a signal. We have to stop the current request. */
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc == 0) {
		/* Wait for the interrupt that acknowledges the halt. */
		do {
			rc = wait_event_interruptible(
				device->wait_queue,
				(request->callback == NULL)
			);
		} while (rc == -ERESTARTSYS);

		DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
		rc = -ERESTARTSYS;
	}
	return rc;
}

/*
 * Stop running ccw.
 */
int
tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Tape interrupt routine, called from the ccw_device layer
 */
static void
__tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct tape_device *device;
	struct tape_request *request;
	int rc;

	kstat_cpu(smp_processor_id()).irqs[IOINT_TAP]++;
	device = dev_get_drvdata(&cdev->dev);
	if (device == NULL) {
		return;
	}
	request = (struct tape_request *) intparm;

	DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);

	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb)) {
		/* FIXME: What to do with the request? */
		switch (PTR_ERR(irb)) {
		case -ETIMEDOUT:
			DBF_LH(1, "(%08x): Request timed out\n",
				device->cdev_id);
			/* fallthrough */
		case -EIO:
			__tape_end_request(device, request, -EIO);
			break;
		default:
			DBF_LH(1, "(%08x): Unexpected i/o error %li\n",
				device->cdev_id, PTR_ERR(irb));
		}
		return;
	}

	/*
	 * If the condition code is not zero and the start function bit is
	 * still set, this is a deferred error and the last start I/O did
	 * not succeed. At this point the condition that caused the deferred
	 * error might still apply. So we just schedule the request to be
	 * started later.
	 */
	if (irb->scsw.cmd.cc != 0 &&
	    (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
	    (request->status == TAPE_REQUEST_IN_IO)) {
		DBF_EVENT(3, "(%08x): deferred cc=%i, fctl=%i. restarting\n",
			device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl);
		request->status = TAPE_REQUEST_QUEUED;
		schedule_delayed_work(&device->tape_dnr, HZ);
		return;
	}

	/* May be an unsolicited irq */
	if (request != NULL)
		request->rescnt = irb->scsw.cmd.count;
	else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) &&
		 !list_empty(&device->req_queue)) {
		/* Not Ready to Ready after long busy ? */
		struct tape_request *req;

		req = list_entry(device->req_queue.next,
				 struct tape_request, list);
		if (req->status == TAPE_REQUEST_LONG_BUSY) {
			DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
			if (del_timer(&device->lb_timeout)) {
				device->lb_timeout.data = 0UL;
				tape_put_device(device);
				__tape_start_next_request(device);
			}
			return;
		}
	}
	if (irb->scsw.cmd.dstat != 0x0c) {
		/* Set the 'ONLINE' flag depending on sense byte 1 */
		if (*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
			device->tape_generic_status |= GMT_ONLINE(~0);
		else
			device->tape_generic_status &= ~GMT_ONLINE(~0);

		/*
		 * Any request that does not come back with channel end
		 * and device end is unusual. Log the sense data.
		 */
		DBF_EVENT(3, "-- Tape Interrupthandler --\n");
		tape_dump_sense_dbf(device, request, irb);
	} else {
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
	}
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "tape:device is not operational\n");
		return;
	}

	/*
	 * Requests that were canceled still come back with an interrupt.
	 * To detect these requests the state will be set to TAPE_REQUEST_DONE.
	 */
	if (request != NULL && request->status == TAPE_REQUEST_DONE) {
		__tape_end_request(device, request, -EIO);
		return;
	}

	rc = device->discipline->irq(device, request, irb);
	/*
	 * rc < 0 : request finished unsuccessfully.
	 * rc == TAPE_IO_SUCCESS: request finished successfully.
	 * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
	 * rc == TAPE_IO_RETRY: request finished but needs another go.
	 * rc == TAPE_IO_STOP: request needs to get terminated.
	 */
	switch (rc) {
	case TAPE_IO_SUCCESS:
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
		__tape_end_request(device, request, rc);
		break;
	case TAPE_IO_PENDING:
		break;
	case TAPE_IO_LONG_BUSY:
		device->lb_timeout.data =
			(unsigned long) tape_get_device(device);
		device->lb_timeout.expires = jiffies +
			LONG_BUSY_TIMEOUT * HZ;
		DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
		add_timer(&device->lb_timeout);
		request->status = TAPE_REQUEST_LONG_BUSY;
		break;
	case TAPE_IO_RETRY:
		rc = __tape_start_io(device, request);
		if (rc)
			__tape_end_request(device, request, rc);
		break;
	case TAPE_IO_STOP:
		rc = __tape_cancel_io(device, request);
		if (rc)
			__tape_end_request(device, request, rc);
		break;
	default:
		if (rc > 0) {
			DBF_EVENT(6, "xunknownrc\n");
			__tape_end_request(device, request, -EIO);
		} else {
			__tape_end_request(device, request, rc);
		}
		break;
	}
}

/*
 * Tape device open function used by tape_char & tape_block frontends.
 */
int
tape_open(struct tape_device *device)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "TAPE:nodev\n");
		rc = -ENODEV;
	} else if (device->tape_state == TS_IN_USE) {
		DBF_EVENT(6, "TAPE:dbusy\n");
		rc = -EBUSY;
	} else if (device->tape_state == TS_BLKUSE) {
		DBF_EVENT(6, "TAPE:dbusy\n");
		rc = -EBUSY;
	} else if (device->discipline != NULL &&
		   !try_module_get(device->discipline->owner)) {
		DBF_EVENT(6, "TAPE:nodisc\n");
		rc = -ENODEV;
	} else {
		tape_state_set(device, TS_IN_USE);
		rc = 0;
	}
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Tape device release function used by tape_char & tape_block frontends.
 */
int
tape_release(struct tape_device *device)
{
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_IN_USE)
		tape_state_set(device, TS_UNUSED);
	module_put(device->discipline->owner);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return 0;
}

/*
 * Execute a magnetic tape command a number of times.
 */
int
tape_mtop(struct tape_device *device, int mt_op, int mt_count)
{
	tape_mtop_fn fn;
	int rc;

	DBF_EVENT(6, "TAPE:mtio\n");
	DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
	DBF_EVENT(6, "TAPE:arg:  %x\n", mt_count);

	if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
		return -EINVAL;
	fn = device->discipline->mtop_array[mt_op];
	if (fn == NULL)
		return -EINVAL;

	/* We assume that the backends can handle count up to 500. */
	if (mt_op == MTBSR  || mt_op == MTFSR  || mt_op == MTFSF  ||
	    mt_op == MTBSF  || mt_op == MTFSFM || mt_op == MTBSFM) {
		rc = 0;
		for (; mt_count > 500; mt_count -= 500)
			if ((rc = fn(device, 500)) != 0)
				break;
		if (rc == 0)
			rc = fn(device, mt_count);
	} else
		rc = fn(device, mt_count);
	return rc;
}
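
/*
 * Worked example of the chunking above: a request to space forward over
 * 1200 files (MTFSF with mt_count == 1200) reaches the backend as
 * fn(device, 500), fn(device, 500) and finally fn(device, 200), stopping
 * early if any of the calls fails.
 */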

/*
 * Tape init function.
 */
static int
tape_init (void)
{
	TAPE_DBF_AREA = debug_register ( "tape", 2, 2, 4*sizeof(long));
	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
#ifdef DBF_LIKE_HELL
	debug_set_level(TAPE_DBF_AREA, 6);
#endif
	DBF_EVENT(3, "tape init\n");
	tape_proc_init();
	tapechar_init ();
	tapeblock_init ();
	return 0;
}

/*
 * Tape exit function.
 */
static void
tape_exit(void)
{
	DBF_EVENT(6, "tape exit\n");

	/* Get rid of the frontends */
	tapechar_exit();
	tapeblock_exit();
	tape_proc_cleanup();
	debug_unregister (TAPE_DBF_AREA);
}

MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
	      "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
MODULE_DESCRIPTION("Linux on zSeries channel attached tape device driver");
MODULE_LICENSE("GPL");

module_init(tape_init);
module_exit(tape_exit);

EXPORT_SYMBOL(tape_generic_remove);
EXPORT_SYMBOL(tape_generic_probe);
EXPORT_SYMBOL(tape_generic_online);
EXPORT_SYMBOL(tape_generic_offline);
EXPORT_SYMBOL(tape_generic_pm_suspend);
EXPORT_SYMBOL(tape_put_device);
EXPORT_SYMBOL(tape_get_device);
EXPORT_SYMBOL(tape_state_verbose);
EXPORT_SYMBOL(tape_op_verbose);
EXPORT_SYMBOL(tape_state_set);
EXPORT_SYMBOL(tape_med_state_set);
EXPORT_SYMBOL(tape_alloc_request);
EXPORT_SYMBOL(tape_free_request);
EXPORT_SYMBOL(tape_dump_sense_dbf);
EXPORT_SYMBOL(tape_do_io);
EXPORT_SYMBOL(tape_do_io_async);
EXPORT_SYMBOL(tape_do_io_interruptible);
EXPORT_SYMBOL(tape_cancel_io);
EXPORT_SYMBOL(tape_mtop);