#ifndef __I8254_H
#define __I8254_H

#include <linux/kthread.h>

#include <kvm/iodev.h>
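/* Architectural state of one i8254 counter; the PIT has three (channels 0-2). */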
struct kvm_kpit_channel_state {
	u32 count; /* can be 65536 */
	u16 latched_count;
	u8 count_latched;
	u8 status_latched;
	u8 status;
	u8 read_state;
	u8 write_state;
	u8 write_latch;
	u8 rw_mode;
	u8 mode;
	u8 bcd; /* not supported */
	u8 gate; /* timer start */
	ktime_t count_load_time;
};
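/*
 * Software state of the emulated PIT: the three channels plus the timer,
 * interrupt-injection bookkeeping, and the lock shared between them.
 */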
struct kvm_kpit_state {
	struct kvm_kpit_channel_state channels[3];
	u32 flags;
	bool is_periodic;
	s64 period; /* unit: ns */
	struct hrtimer timer;
	atomic_t pending; /* accumulated triggered timers */
	bool reinject;
	struct kvm *kvm;
	u32 speaker_data_on;
	struct mutex lock;
	struct kvm_pit *pit;
	/*
	 * irq_ack is an atomic rather than a lock: the old pit.inject_lock
	 * was overkill, as the same can be done with atomics.  An mb() in
	 * kvm_pit_ack_irq pairs with the implicit barrier between
	 * pit_timer_fn and pit_do_work.  The mb() prevents a race that
	 * could happen if pending == 0 and irq_ack == 0:
	 *
	 *	kvm_pit_ack_irq:		| pit_timer_fn:
	 *	p = atomic_read(&ps->pending);	|
	 *					| atomic_inc(&ps->pending);
	 *					| queue_work(pit_do_work);
	 *					| pit_do_work:
	 *					|   atomic_xchg(&ps->irq_ack, 0);
	 *					|   return;
	 *	atomic_set(&ps->irq_ack, 1);	|
	 *	if (p == 0) return;		|
	 *
	 * where the interrupt would not be delivered in this tick of
	 * pit_timer_fn.  The PIT would eventually have delivered it, but we
	 * sacrifice performance to make sure interrupts are not needlessly
	 * delayed.
	 *
	 * sfence is not enough: atomic_dec_if_positive does an atomic_read
	 * first, and x86 can reorder loads before stores.  lfence is not
	 * enough either: a store can pass lfence, turning it into a nop.  A
	 * compiler barrier would be more than enough, as the CPU would need
	 * to stall for an unbelievably long time for the fences to matter.
	 *
	 * Nothing special is done in kvm_pit_reset_reinject: any order of
	 * resets can race, but the result differs by at most one interrupt,
	 * which is the same result as if the reset had happened at a
	 * slightly different time.  (The original code did not protect the
	 * reset path with a proper lock either, so users have to be
	 * robust.)  See the sketch after this struct.
	 */
	atomic_t irq_ack;
	struct kvm_irq_ack_notifier irq_ack_notifier;
};
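/*
 * A minimal sketch of the ack path described above -- illustrative only,
 * not the actual i8254.c implementation (the real function takes a
 * struct kvm_irq_ack_notifier and also consults ps->reinject):
 *
 *	static void kvm_pit_ack_irq(struct kvm_kpit_state *ps)
 *	{
 *		atomic_set(&ps->irq_ack, 1);
 *		mb();	<- makes the irq_ack store visible before
 *			   pending is read
 *		if (atomic_dec_if_positive(&ps->pending) > 0)
 *			queue_kthread_work(&ps->pit->worker,
 *					   &ps->pit->expired);
 *	}
 *
 * If ticks remain pending after the decrement, the worker is queued again
 * so pit_do_work can inject the next interrupt.
 */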
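/*
 * The PIT device proper: the I/O devices registered with KVM, a pointer
 * back to the VM, and the kthread worker that injects expired ticks.
 */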
struct kvm_pit {
	struct kvm_io_device dev;
	struct kvm_io_device speaker_dev;
	struct kvm *kvm;
	struct kvm_kpit_state pit_state;
	int irq_source_id;
	struct kvm_irq_mask_notifier mask_notifier;
	struct kthread_worker worker;
	struct task_struct *worker_task;
	struct kthread_work expired;
};
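/* I/O port layout and timing constants; KVM_PIT_FREQ is the i8254 input clock in Hz. */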
#define KVM_PIT_BASE_ADDRESS        0x40
#define KVM_SPEAKER_BASE_ADDRESS    0x61
#define KVM_PIT_MEM_LENGTH          4
#define KVM_PIT_FREQ                1193181
#define KVM_MAX_PIT_INTR_INTERVAL   (HZ / 100)
#define KVM_PIT_CHANNEL_MASK        0x3
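/* Construction, teardown, and counter manipulation, implemented in i8254.c. */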
struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags);
void kvm_free_pit(struct kvm *kvm);
void kvm_pit_reset(struct kvm_pit *pit);
void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val,
			int hpet_legacy_start);
#endif /* __I8254_H */