staging: unisys: remove BOOL,TRUE,FALSE definitions
These shouldn't be defined in the code and can be replaced with the
standard bool, true, and false usage that the kernel uses.

Signed-off-by: Prarit Bhargava <prarit@redhat.com>
Signed-off-by: Benjamin Romer <benjamin.romer@unisys.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 779d0752bc
parent 434cbf28b5
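The change is mechanical: the driver-local BOOL/TRUE/FALSE macros (an int plus 0/1 constants, removed in the hunks below) are replaced by the kernel's native bool, true, and false from <linux/types.h>. A minimal sketch of the resulting style, using a hypothetical type and function that are not part of this commit:

#include <linux/types.h>	/* bool, true, false for kernel code */

/* Hypothetical example structure, standing in for the driver's own types. */
struct example_queue {
	unsigned int head;
	unsigned int tail;
};

/* Before this commit, a driver-local "BOOL" (really int) would be used here. */
static bool example_queue_is_empty(const struct example_queue *q)
{
	return q->head == q->tail;	/* evaluates to true or false */
}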
@@ -31,8 +31,8 @@ struct periodic_work *visor_periodic_work_create(ulong jiffy_interval,
                                               void *workfuncarg,
                                               const char *devnam);
 void visor_periodic_work_destroy(struct periodic_work *pw);
-BOOL visor_periodic_work_nextperiod(struct periodic_work *pw);
-BOOL visor_periodic_work_start(struct periodic_work *pw);
-BOOL visor_periodic_work_stop(struct periodic_work *pw);
+bool visor_periodic_work_nextperiod(struct periodic_work *pw);
+bool visor_periodic_work_start(struct periodic_work *pw);
+bool visor_periodic_work_stop(struct periodic_work *pw);
 
 #endif
@@ -52,11 +52,6 @@
 #include <linux/mm.h>
 
 /* #define DEBUG */
-#ifndef BOOL
-#define BOOL int
-#endif
-#define FALSE 0
-#define TRUE 1
 #if !defined SUCCESS
 #define SUCCESS 0
 #endif
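Beyond style, an int-backed BOOL does not behave like a real boolean: any nonzero value is "truthy", so a comparison against TRUE (literally 1) can silently fail, while C99 bool normalizes every assignment to 0 or 1. A small standalone illustration of the general pitfall (not code from this driver):

#include <stdbool.h>
#include <stdio.h>

#define BOOL  int
#define TRUE  1
#define FALSE 0

int main(void)
{
	BOOL old_flag = 0x4;	/* "true" under the old scheme, but != TRUE */
	bool new_flag = 0x4;	/* normalized to 1 (true) on assignment */

	printf("%d\n", old_flag == TRUE);	/* prints 0: the comparison fails */
	printf("%d\n", new_flag == true);	/* prints 1 */
	return 0;
}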
@@ -182,9 +182,9 @@ int visorchannel_write(struct visorchannel *channel, ulong offset,
                        void *local, ulong nbytes);
 int visorchannel_clear(struct visorchannel *channel, ulong offset,
                        u8 ch, ulong nbytes);
-BOOL visorchannel_signalremove(struct visorchannel *channel, u32 queue,
+bool visorchannel_signalremove(struct visorchannel *channel, u32 queue,
                                void *msg);
-BOOL visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
+bool visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
                                void *msg);
 int visorchannel_signalqueue_slots_avail(struct visorchannel *channel,
                                          u32 queue);
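With these declarations now returning bool, a caller can drain a signal queue with an ordinary boolean loop. A hypothetical sketch, assuming the declarations above; the buffer size and helper name are illustrative, not taken from this commit:

/* Hypothetical consumer; visorchannel_signalremove() is declared above. */
static void drain_queue_sketch(struct visorchannel *channel, u32 queue)
{
	u8 msg[128];	/* illustrative buffer size for this example only */

	/* signalremove returns false once the queue is empty */
	while (visorchannel_signalremove(channel, queue, msg))
		;	/* a real driver would dispatch each message here */
}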
@@ -1076,7 +1076,7 @@ visordriver_probe_device(struct device *xdev)
 	drv = to_visor_driver(xdev->driver);
 	dev = to_visor_device(xdev);
 	down(&dev->visordriver_callback_lock);
-	dev->being_removed = FALSE;
+	dev->being_removed = false;
 	/*
 	 * ensure that the dev->being_removed flag is cleared before
 	 * we start the probe
@@ -1106,7 +1106,7 @@ away:
 	 * initialized.
 	 */
 	if (!dev->responded_to_device_create) {
-		dev->responded_to_device_create = TRUE;
+		dev->responded_to_device_create = true;
 		if (chipset_responders.device_create)
 			(*chipset_responders.device_create)(dev->chipset_bus_no,
 							    dev->chipset_dev_no,
@@ -1129,7 +1129,7 @@ visordriver_remove_device(struct device *xdev)
 	dev = to_visor_device(xdev);
 	drv = to_visor_driver(xdev->driver);
 	down(&dev->visordriver_callback_lock);
-	dev->being_removed = TRUE;
+	dev->being_removed = true;
 	/*
 	 * ensure that the dev->being_removed flag is set before we start the
 	 * actual removal
@@ -1303,7 +1303,7 @@ create_visor_device(struct visorbus_devdata *devdata,
 	int rc = -1;
 	struct visorchannel *visorchannel = NULL;
 	struct visor_device *dev = NULL;
-	bool gotten = FALSE, registered1 = FALSE, registered2 = FALSE;
+	bool gotten = false, registered1 = false, registered2 = false;
 
 	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, chipset_dev_no, chipset_bus_no,
 			 POSTCODE_SEVERITY_INFO);
@@ -1337,7 +1337,7 @@ create_visor_device(struct visorbus_devdata *devdata,
 	dev->device.release = visorbus_release_device;
 	/* keep a reference just for us (now 2) */
 	get_device(&dev->device);
-	gotten = TRUE;
+	gotten = true;
 	dev->periodic_work =
 		visor_periodic_work_create(POLLJIFFIES_NORMALCHANNEL,
 					   periodic_dev_workqueue,
@@ -1387,7 +1387,7 @@ create_visor_device(struct visorbus_devdata *devdata,
 		goto away;
 	}
 
-	registered1 = TRUE;
+	registered1 = true;
 
 	rc = register_devmajorminor_attributes(dev);
 	if (rc < 0) {
@@ -1396,7 +1396,7 @@ create_visor_device(struct visorbus_devdata *devdata,
 		goto away;
 	}
 
-	registered2 = TRUE;
+	registered2 = true;
 	rc = 0;
 
 away:
@@ -1687,7 +1687,7 @@ create_bus_instance(int id)
 	if (get_vbus_header_info(devdata->chan,
 				 &devdata->
 				 vbus_hdr_info) >= 0) {
-		devdata->vbus_valid = TRUE;
+		devdata->vbus_valid = true;
 		write_vbus_chp_info(devdata->chan,
 				    &devdata->
 				    vbus_hdr_info,
@@ -1775,7 +1775,7 @@ remove_all_visor_devices(void)
 	}
 }
 
-static bool entered_testing_mode = FALSE;
+static bool entered_testing_mode;
 static struct visorchipset_channel_info test_channel_infos[MAXDEVICETEST];
 static unsigned long test_bus_nos[MAXDEVICETEST];
 static unsigned long test_dev_nos[MAXDEVICETEST];
@@ -1909,7 +1909,7 @@ pause_state_change_complete(struct visor_device *dev, int status)
 	if (!dev->pausing)
 		return;
 
-	dev->pausing = FALSE;
+	dev->pausing = false;
 	if (!chipset_responders.device_pause) /* this can never happen! */
 		return;
 
@@ -1930,7 +1930,7 @@ resume_state_change_complete(struct visor_device *dev, int status)
 	if (!dev->resuming)
 		return;
 
-	dev->resuming = FALSE;
+	dev->resuming = false;
 	if (!chipset_responders.device_resume) /* this can never happen! */
 		return;
 
@@ -1986,7 +1986,7 @@ initiate_chipset_device_pause_resume(u32 bus_no, u32 dev_no, bool is_pause)
 		if (!drv->pause)
 			goto away;
 
-		dev->pausing = TRUE;
+		dev->pausing = true;
 		x = drv->pause(dev, pause_state_change_complete);
 	} else {
 		/* This should be done at BUS resume time, but an
@@ -1998,14 +1998,14 @@ initiate_chipset_device_pause_resume(u32 bus_no, u32 dev_no, bool is_pause)
 		if (!drv->resume)
 			goto away;
 
-		dev->resuming = TRUE;
+		dev->resuming = true;
 		x = drv->resume(dev, resume_state_change_complete);
 	}
 	if (x < 0) {
 		if (is_pause)
-			dev->pausing = FALSE;
+			dev->pausing = false;
 		else
-			dev->resuming = FALSE;
+			dev->resuming = false;
 		goto away;
 	}
 	rc = 0;
@@ -2019,13 +2019,13 @@ away:
 static void
 chipset_device_pause(u32 bus_no, u32 dev_no)
 {
-	initiate_chipset_device_pause_resume(bus_no, dev_no, TRUE);
+	initiate_chipset_device_pause_resume(bus_no, dev_no, true);
 }
 
 static void
 chipset_device_resume(u32 bus_no, u32 dev_no)
 {
-	initiate_chipset_device_pause_resume(bus_no, dev_no, FALSE);
+	initiate_chipset_device_pause_resume(bus_no, dev_no, false);
 }
 
 struct channel_size_info {
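The pause/resume hunks above follow a common idiom: one worker takes a bool selector and thin wrappers pass true or false. A reduced sketch of that shape, with simplified names that are not the driver's actual code:

/* Schematic only: the real worker looks up the device and driver first. */
static int do_pause_resume_sketch(u32 bus_no, u32 dev_no, bool is_pause)
{
	if (is_pause)
		return 0;	/* pause path would run here */
	return 0;		/* resume path would run here */
}

static void device_pause_sketch(u32 bus_no, u32 dev_no)
{
	do_pause_resume_sketch(bus_no, dev_no, true);
}

static void device_resume_sketch(u32 bus_no, u32 dev_no)
{
	do_pause_resume_sketch(bus_no, dev_no, false);
}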
@@ -33,7 +33,7 @@ struct visorchannel {
 	struct channel_header chan_hdr;
 	uuid_le guid;
 	ulong size;
-	BOOL needs_lock;	/* channel creator knows if more than one
+	bool needs_lock;	/* channel creator knows if more than one
 				 * thread will be inserting or removing */
 	spinlock_t insert_lock; /* protect head writes in chan_hdr */
 	spinlock_t remove_lock;	/* protect tail writes in chan_hdr */
@@ -51,7 +51,7 @@ struct visorchannel {
  */
 static struct visorchannel *
 visorchannel_create_guts(HOSTADDRESS physaddr, ulong channel_bytes,
-			 ulong off, uuid_le guid, BOOL needs_lock)
+			 ulong off, uuid_le guid, bool needs_lock)
 {
 	struct visorchannel *channel;
 	int err;
@@ -115,7 +115,7 @@ struct visorchannel *
 visorchannel_create(HOSTADDRESS physaddr, ulong channel_bytes, uuid_le guid)
 {
 	return visorchannel_create_guts(physaddr, channel_bytes, 0, guid,
-					FALSE);
+					false);
 }
 EXPORT_SYMBOL_GPL(visorchannel_create);
 
@@ -124,7 +124,7 @@ visorchannel_create_with_lock(HOSTADDRESS physaddr, ulong channel_bytes,
 			      uuid_le guid)
 {
 	return visorchannel_create_guts(physaddr, channel_bytes, 0, guid,
-					TRUE);
+					true);
 }
 EXPORT_SYMBOL_GPL(visorchannel_create_with_lock);
 
@@ -292,26 +292,26 @@ EXPORT_SYMBOL_GPL(visorchannel_get_header);
 			 &((sig_hdr)->FIELD), \
 			 sizeof((sig_hdr)->FIELD)) >= 0)
 
-static BOOL
+static bool
 sig_read_header(struct visorchannel *channel, u32 queue,
 		struct signal_queue_header *sig_hdr)
 {
 	int err;
 
 	if (channel->chan_hdr.ch_space_offset < sizeof(struct channel_header))
-		return FALSE;
+		return false;
 
 	/* Read the appropriate SIGNAL_QUEUE_HEADER into local memory. */
 	err = visorchannel_read(channel,
 				SIG_QUEUE_OFFSET(&channel->chan_hdr, queue),
 				sig_hdr, sizeof(struct signal_queue_header));
 	if (err)
-		return FALSE;
+		return false;
 
-	return TRUE;
+	return true;
 }
 
-static inline BOOL
+static inline bool
 sig_read_data(struct visorchannel *channel, u32 queue,
 	      struct signal_queue_header *sig_hdr, u32 slot, void *data)
 {
@@ -322,12 +322,12 @@ sig_read_data(struct visorchannel *channel, u32 queue,
 	err = visorchannel_read(channel, signal_data_offset,
 				data, sig_hdr->signal_size);
 	if (err)
-		return FALSE;
+		return false;
 
-	return TRUE;
+	return true;
 }
 
-static inline BOOL
+static inline bool
 sig_write_data(struct visorchannel *channel, u32 queue,
 	       struct signal_queue_header *sig_hdr, u32 slot, void *data)
 {
@@ -338,24 +338,24 @@ sig_write_data(struct visorchannel *channel, u32 queue,
 	err = visorchannel_write(channel, signal_data_offset,
 				 data, sig_hdr->signal_size);
 	if (err)
-		return FALSE;
+		return false;
 
-	return TRUE;
+	return true;
 }
 
-static BOOL
+static bool
 signalremove_inner(struct visorchannel *channel, u32 queue, void *msg)
 {
 	struct signal_queue_header sig_hdr;
 
 	if (!sig_read_header(channel, queue, &sig_hdr))
-		return FALSE;
+		return false;
 	if (sig_hdr.head == sig_hdr.tail)
-		return FALSE;	/* no signals to remove */
+		return false;	/* no signals to remove */
 
 	sig_hdr.tail = (sig_hdr.tail + 1) % sig_hdr.max_slots;
 	if (!sig_read_data(channel, queue, &sig_hdr, sig_hdr.tail, msg))
-		return FALSE;
+		return false;
 	sig_hdr.num_received++;
 
 	/* For each data field in SIGNAL_QUEUE_HEADER that was modified,
@@ -363,16 +363,16 @@ signalremove_inner(struct visorchannel *channel, u32 queue, void *msg)
 	 */
 	mb(); /* required for channel synch */
 	if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, tail))
-		return FALSE;
+		return false;
 	if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_received))
-		return FALSE;
-	return TRUE;
+		return false;
+	return true;
 }
 
-BOOL
+bool
 visorchannel_signalremove(struct visorchannel *channel, u32 queue, void *msg)
 {
-	BOOL rc;
+	bool rc;
 
 	if (channel->needs_lock) {
 		spin_lock(&channel->remove_lock);
@@ -386,13 +386,13 @@ visorchannel_signalremove(struct visorchannel *channel, u32 queue, void *msg)
 }
 EXPORT_SYMBOL_GPL(visorchannel_signalremove);
 
-static BOOL
+static bool
 signalinsert_inner(struct visorchannel *channel, u32 queue, void *msg)
 {
 	struct signal_queue_header sig_hdr;
 
 	if (!sig_read_header(channel, queue, &sig_hdr))
-		return FALSE;
+		return false;
 
 	sig_hdr.head = ((sig_hdr.head + 1) % sig_hdr.max_slots);
 	if (sig_hdr.head == sig_hdr.tail) {
@@ -403,11 +403,11 @@ signalinsert_inner(struct visorchannel *channel, u32 queue, void *msg)
 					      num_overflows),
 				     &(sig_hdr.num_overflows),
 				     sizeof(sig_hdr.num_overflows));
-		return FALSE;
+		return false;
 	}
 
 	if (!sig_write_data(channel, queue, &sig_hdr, sig_hdr.head, msg))
-		return FALSE;
+		return false;
 
 	sig_hdr.num_sent++;
 
@@ -416,17 +416,17 @@ signalinsert_inner(struct visorchannel *channel, u32 queue, void *msg)
 	 */
 	mb(); /* required for channel synch */
 	if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, head))
-		return FALSE;
+		return false;
 	if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_sent))
-		return FALSE;
+		return false;
 
-	return TRUE;
+	return true;
 }
 
-BOOL
+bool
 visorchannel_signalinsert(struct visorchannel *channel, u32 queue, void *msg)
 {
-	BOOL rc;
+	bool rc;
 
 	if (channel->needs_lock) {
 		spin_lock(&channel->insert_lock);
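visorchannel_signalinsert() and visorchannel_signalremove() above are thin wrappers over lock-free *_inner() helpers and only take a spinlock when the channel was created with needs_lock. The hunks cut off before the unlock, so the following condensed sketch fills in the obvious remainder (the else branch and spin_unlock are inferred, not quoted from the commit):

/* Condensed form of the wrapper pattern shown above; not a drop-in copy. */
static bool signalinsert_wrapper_sketch(struct visorchannel *channel,
					u32 queue, void *msg)
{
	bool rc;

	if (channel->needs_lock) {
		spin_lock(&channel->insert_lock);
		rc = signalinsert_inner(channel, queue, msg);
		spin_unlock(&channel->insert_lock);
	} else {
		rc = signalinsert_inner(channel, queue, msg);
	}
	return rc;
}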
@@ -2045,7 +2045,7 @@ parahotplug_process_message(struct controlvm_message *inmsg)
 
 /* Process a controlvm message.
  * Return result:
- *    false - this function will return FALSE only in the case where the
+ *    false - this function will return false only in the case where the
  *            controlvm message was NOT processed, but processing must be
  *            retried before reading the next controlvm message; a
  *            scenario where this can occur is when we need to throttle
@@ -64,9 +64,9 @@ void visor_charqueue_enqueue(struct charqueue *charqueue, unsigned char c)
 }
 EXPORT_SYMBOL_GPL(visor_charqueue_enqueue);
 
-BOOL visor_charqueue_is_empty(struct charqueue *charqueue)
+bool visor_charqueue_is_empty(struct charqueue *charqueue)
 {
-	BOOL b;
+	bool b;
 
 	spin_lock(&charqueue->lock);
 	b = IS_EMPTY(charqueue);
@@ -30,7 +30,7 @@ void visor_charqueue_enqueue(struct charqueue *charqueue, unsigned char c);
 int charqueue_dequeue(struct charqueue *charqueue);
 int visor_charqueue_dequeue_n(struct charqueue *charqueue, unsigned char *buf,
 			      int n);
-BOOL visor_charqueue_is_empty(struct charqueue *charqueue);
+bool visor_charqueue_is_empty(struct charqueue *charqueue);
 void visor_charqueue_destroy(struct charqueue *charqueue);
 
 #endif
@@ -29,8 +29,8 @@ struct periodic_work {
 	struct delayed_work work;
 	void (*workfunc)(void *);
 	void *workfuncarg;
-	BOOL is_scheduled;
-	BOOL want_to_stop;
+	bool is_scheduled;
+	bool want_to_stop;
 	ulong jiffy_interval;
 	struct workqueue_struct *workqueue;
 	const char *devnam;
@@ -74,64 +74,64 @@ EXPORT_SYMBOL_GPL(visor_periodic_work_destroy);
 
 /** Call this from your periodic work worker function to schedule the next
  *  call.
- *  If this function returns FALSE, there was a failure and the
+ *  If this function returns false, there was a failure and the
  *  periodic work is no longer scheduled
  */
-BOOL visor_periodic_work_nextperiod(struct periodic_work *pw)
+bool visor_periodic_work_nextperiod(struct periodic_work *pw)
 {
-	BOOL rc = FALSE;
+	bool rc = false;
 
 	write_lock(&pw->lock);
 	if (pw->want_to_stop) {
-		pw->is_scheduled = FALSE;
-		pw->want_to_stop = FALSE;
-		rc = TRUE; /* yes, TRUE; see visor_periodic_work_stop() */
+		pw->is_scheduled = false;
+		pw->want_to_stop = false;
+		rc = true; /* yes, true; see visor_periodic_work_stop() */
 		goto unlock;
 	} else if (queue_delayed_work(pw->workqueue, &pw->work,
 				      pw->jiffy_interval) < 0) {
-		pw->is_scheduled = FALSE;
-		rc = FALSE;
+		pw->is_scheduled = false;
+		rc = false;
 		goto unlock;
 	}
-	rc = TRUE;
+	rc = true;
 unlock:
 	write_unlock(&pw->lock);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(visor_periodic_work_nextperiod);
 
-/** This function returns TRUE iff new periodic work was actually started.
- *  If this function returns FALSE, then no work was started
+/** This function returns true iff new periodic work was actually started.
+ *  If this function returns false, then no work was started
  *  (either because it was already started, or because of a failure).
  */
-BOOL visor_periodic_work_start(struct periodic_work *pw)
+bool visor_periodic_work_start(struct periodic_work *pw)
 {
-	BOOL rc = FALSE;
+	bool rc = false;
 
 	write_lock(&pw->lock);
 	if (pw->is_scheduled) {
-		rc = FALSE;
+		rc = false;
 		goto unlock;
 	}
 	if (pw->want_to_stop) {
-		rc = FALSE;
+		rc = false;
 		goto unlock;
 	}
 	INIT_DELAYED_WORK(&pw->work, &periodic_work_func);
 	if (queue_delayed_work(pw->workqueue, &pw->work,
 			       pw->jiffy_interval) < 0) {
-		rc = FALSE;
+		rc = false;
 		goto unlock;
 	}
-	pw->is_scheduled = TRUE;
-	rc = TRUE;
+	pw->is_scheduled = true;
+	rc = true;
 unlock:
 	write_unlock(&pw->lock);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(visor_periodic_work_start);
 
-/** This function returns TRUE iff your call actually stopped the periodic
+/** This function returns true iff your call actually stopped the periodic
  *  work.
  *
  *  -- PAY ATTENTION... this is important --
@@ -165,20 +165,20 @@ EXPORT_SYMBOL_GPL(visor_periodic_work_start);
  *  this deadlock, you will get hung up in an infinite loop saying
  *  "waiting for delayed work...".
  */
-BOOL visor_periodic_work_stop(struct periodic_work *pw)
+bool visor_periodic_work_stop(struct periodic_work *pw)
 {
-	BOOL stopped_something = FALSE;
+	bool stopped_something = false;
 
 	write_lock(&pw->lock);
 	stopped_something = pw->is_scheduled && (!pw->want_to_stop);
 	while (pw->is_scheduled) {
-		pw->want_to_stop = TRUE;
+		pw->want_to_stop = true;
 		if (cancel_delayed_work(&pw->work)) {
 			/* We get here if the delayed work was pending as
 			 * delayed work, but was NOT run.
 			 */
			WARN_ON(!pw->is_scheduled);
-			pw->is_scheduled = FALSE;
+			pw->is_scheduled = false;
 		} else {
 			/* If we get here, either the delayed work:
 			 * - was run, OR,
@@ -195,7 +195,7 @@ BOOL visor_periodic_work_stop(struct periodic_work *pw)
 			SLEEPJIFFIES(10);
 			write_lock(&pw->lock);
 		} else {
-			pw->want_to_stop = FALSE;
+			pw->want_to_stop = false;
 		}
 	}
 	write_unlock(&pw->lock);
@@ -53,7 +53,7 @@ static __init uint32_t visorutil_spar_detect(void)
 static __init int visorutil_mod_init(void)
 {
 	if (visorutil_spar_detect()) {
-		unisys_spar_platform = TRUE;
+		unisys_spar_platform = true;
 		return 0;
 	} else {
 		return -ENODEV;