Merge 5.16-rc4 into usb-next
We need the USB fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -53,11 +53,10 @@ The number of bits that the PAC occupies in a pointer is 55 minus the
 virtual address size configured by the kernel. For example, with a
 virtual address size of 48, the PAC is 7 bits wide.

-Recent versions of GCC can compile code with APIAKey-based return
-address protection when passed the -msign-return-address option. This
-uses instructions in the HINT space (unless -march=armv8.3-a or higher
-is also passed), and such code can run on systems without the pointer
-authentication extension.
+When ARM64_PTR_AUTH_KERNEL is selected, the kernel will be compiled
+with HINT space pointer authentication instructions protecting
+function returns. Kernels built with this option will work on hardware
+with or without pointer authentication support.

 In addition to exec(), keys can also be reinitialized to random values
 using the PR_PAC_RESET_KEYS prctl. A bitmask of PR_PAC_APIAKEY,
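The documentation context above mentions the PR_PAC_RESET_KEYS prctl. A minimal
userspace sketch of that call (illustrative only, not part of this commit) might
look like:

	#include <stdio.h>
	#include <sys/prctl.h>
	#include <linux/prctl.h>

	/* Ask the kernel to regenerate this thread's instruction A key. */
	if (prctl(PR_PAC_RESET_KEYS, PR_PAC_APIAKEY, 0, 0, 0))
		perror("PR_PAC_RESET_KEYS");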
@@ -73,12 +73,12 @@ CPUFREQ_POSTCHANGE.
 The third argument is a struct cpufreq_freqs with the following
 values:

-=====  ===========================
-cpu    number of the affected CPU
+====== ======================================
+policy a pointer to the struct cpufreq_policy
 old    old frequency
 new    new frequency
 flags  flags of the cpufreq driver
-=====  ===========================
+====== ======================================

 3. CPUFreq Table Generation with Operating Performance Point (OPP)
 ==================================================================
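A transition notifier that consumes the struct cpufreq_freqs fields listed in the
table above could be sketched as follows (illustrative only, not part of this
commit; the function and variable names are made up):

	#include <linux/cpufreq.h>
	#include <linux/notifier.h>
	#include <linux/printk.h>

	/* Called around every frequency transition; reads policy/old/new. */
	static int freq_transition(struct notifier_block *nb,
				   unsigned long val, void *data)
	{
		struct cpufreq_freqs *freqs = data;

		if (val == CPUFREQ_POSTCHANGE)
			pr_info("cpu%u: %u kHz -> %u kHz\n",
				freqs->policy->cpu, freqs->old, freqs->new);
		return NOTIFY_OK;
	}

	static struct notifier_block freq_nb = {
		.notifier_call = freq_transition,
	};
	/* registered with cpufreq_register_notifier(&freq_nb,
	   CPUFREQ_TRANSITION_NOTIFIER) */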
@@ -1,7 +1,7 @@
 .. SPDX-License-Identifier: GPL-2.0

 =================================
-NETWORK FILESYSTEM HELPER LIBRARY
+Network Filesystem Helper Library
 =================================

 .. Contents:
@@ -37,22 +37,22 @@ into a common call framework.

 The following services are provided:

-* Handles transparent huge pages (THPs).
+* Handle folios that span multiple pages.

-* Insulates the netfs from VM interface changes.
+* Insulate the netfs from VM interface changes.

-* Allows the netfs to arbitrarily split reads up into pieces, even ones that
-  don't match page sizes or page alignments and that may cross pages.
+* Allow the netfs to arbitrarily split reads up into pieces, even ones that
+  don't match folio sizes or folio alignments and that may cross folios.

-* Allows the netfs to expand a readahead request in both directions to meet
-  its needs.
+* Allow the netfs to expand a readahead request in both directions to meet its
+  needs.

-* Allows the netfs to partially fulfil a read, which will then be resubmitted.
+* Allow the netfs to partially fulfil a read, which will then be resubmitted.

-* Handles local caching, allowing cached data and server-read data to be
+* Handle local caching, allowing cached data and server-read data to be
   interleaved for a single request.

-* Handles clearing of bufferage that aren't on the server.
+* Handle clearing of bufferage that aren't on the server.

 * Handle retrying of reads that failed, switching reads from the cache to the
   server as necessary.
@@ -70,22 +70,22 @@ Read Helper Functions

 Three read helpers are provided::

-* void netfs_readahead(struct readahead_control *ractl,
+ void netfs_readahead(struct readahead_control *ractl,
        const struct netfs_read_request_ops *ops,
-       void *netfs_priv);``
-* int netfs_readpage(struct file *file,
-       struct page *page,
+       void *netfs_priv);
+ int netfs_readpage(struct file *file,
+       struct folio *folio,
        const struct netfs_read_request_ops *ops,
        void *netfs_priv);
-* int netfs_write_begin(struct file *file,
+ int netfs_write_begin(struct file *file,
        struct address_space *mapping,
        loff_t pos,
        unsigned int len,
        unsigned int flags,
-       struct page **_page,
+       struct folio **_folio,
        void **_fsdata,
        const struct netfs_read_request_ops *ops,
        void *netfs_priv);

 Each corresponds to a VM operation, with the addition of a couple of parameters
 for the use of the read helpers:
@@ -103,8 +103,8 @@ Both of these values will be stored into the read request structure.
 For ->readahead() and ->readpage(), the network filesystem should just jump
 into the corresponding read helper; whereas for ->write_begin(), it may be a
 little more complicated as the network filesystem might want to flush
-conflicting writes or track dirty data and needs to put the acquired page if an
-error occurs after calling the helper.
+conflicting writes or track dirty data and needs to put the acquired folio if
+an error occurs after calling the helper.

 The helpers manage the read request, calling back into the network filesystem
 through the suppplied table of operations. Waits will be performed as
@@ -253,7 +253,7 @@ through which it can issue requests and negotiate::
        void (*issue_op)(struct netfs_read_subrequest *subreq);
        bool (*is_still_valid)(struct netfs_read_request *rreq);
        int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
-                                struct page *page, void **_fsdata);
+                                struct folio *folio, void **_fsdata);
        void (*done)(struct netfs_read_request *rreq);
        void (*cleanup)(struct address_space *mapping, void *netfs_priv);
 };
@@ -313,13 +313,14 @@ The operations are as follows:

   There is no return value; the netfs_subreq_terminated() function should be
   called to indicate whether or not the operation succeeded and how much data
-  it transferred. The filesystem also should not deal with setting pages
+  it transferred. The filesystem also should not deal with setting folios
   uptodate, unlocking them or dropping their refs - the helpers need to deal
   with this as they have to coordinate with copying to the local cache.

-  Note that the helpers have the pages locked, but not pinned. It is possible
-  to use the ITER_XARRAY iov iterator to refer to the range of the inode that
-  is being operated upon without the need to allocate large bvec tables.
+  Note that the helpers have the folios locked, but not pinned. It is
+  possible to use the ITER_XARRAY iov iterator to refer to the range of the
+  inode that is being operated upon without the need to allocate large bvec
+  tables.

 * ``is_still_valid()``

@@ -330,15 +331,15 @@ The operations are as follows:
 * ``check_write_begin()``

   [Optional] This is called from the netfs_write_begin() helper once it has
-  allocated/grabbed the page to be modified to allow the filesystem to flush
+  allocated/grabbed the folio to be modified to allow the filesystem to flush
   conflicting state before allowing it to be modified.

-  It should return 0 if everything is now fine, -EAGAIN if the page should be
+  It should return 0 if everything is now fine, -EAGAIN if the folio should be
   regrabbed and any other error code to abort the operation.

 * ``done``

-  [Optional] This is called after the pages in the request have all been
+  [Optional] This is called after the folios in the request have all been
   unlocked (and marked uptodate if applicable).

 * ``cleanup``
@@ -390,7 +391,7 @@ The read helpers work by the following general procedure:
     * If NETFS_SREQ_CLEAR_TAIL was set, a short read will be cleared to the
       end of the slice instead of reissuing.

-* Once the data is read, the pages that have been fully read/cleared:
+* Once the data is read, the folios that have been fully read/cleared:

   * Will be marked uptodate.

@@ -398,11 +399,11 @@ The read helpers work by the following general procedure:

   * Unlocked

-* Any pages that need writing to the cache will then have DIO writes issued.
+* Any folios that need writing to the cache will then have DIO writes issued.

 * Synchronous operations will wait for reading to be complete.

-* Writes to the cache will proceed asynchronously and the pages will have the
+* Writes to the cache will proceed asynchronously and the folios will have the
   PG_fscache mark removed when that completes.

 * The request structures will be cleaned up when everything has completed.
@@ -452,6 +453,9 @@ operation table looks like the following::
                      netfs_io_terminated_t term_func,
                      void *term_func_priv);

+       int (*prepare_write)(struct netfs_cache_resources *cres,
+                            loff_t *_start, size_t *_len, loff_t i_size);
+
        int (*write)(struct netfs_cache_resources *cres,
                     loff_t start_pos,
                     struct iov_iter *iter,
@@ -509,6 +513,14 @@ The methods defined in the table are:
   indicating whether the termination is definitely happening in the caller's
   context.

+* ``prepare_write()``
+
+  [Required] Called to adjust a write to the cache and check that there is
+  sufficient space in the cache. The start and length values indicate the
+  size of the write that netfslib is proposing, and this can be adjusted by
+  the cache to respect DIO boundaries. The file size is passed for
+  information.
+
 * ``write()``

   [Required] Called to write to the cache. The start file offset is given
@@ -525,4 +537,9 @@ not the read request structure as they could be used in other situations where
 there isn't a read request structure as well, such as writing dirty data to the
 cache.

+
+API Function Reference
+======================
+
 .. kernel-doc:: include/linux/netfs.h
+.. kernel-doc:: fs/netfs/read_helper.c
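The documentation hunks above say that a network filesystem should "just jump
into the corresponding read helper" from its ->readahead() and ->readpage()
operations. A minimal sketch of that wiring (illustrative only, not part of this
commit; "myfs" and its ops table are hypothetical names) could look like:

	#include <linux/fs.h>
	#include <linux/netfs.h>
	#include <linux/pagemap.h>

	/* Assumes a filled-in ops table:
	 * static const struct netfs_read_request_ops myfs_req_ops = { ... };
	 */

	static int myfs_readpage(struct file *file, struct page *page)
	{
		struct folio *folio = page_folio(page);

		/* Hand the folio straight to the read helper. */
		return netfs_readpage(file, folio, &myfs_req_ops, NULL);
	}

	static void myfs_readahead(struct readahead_control *ractl)
	{
		netfs_readahead(ractl, &myfs_req_ops, NULL);
	}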
MAINTAINERS
@@ -15979,6 +15979,7 @@ F: arch/mips/generic/board-ranchu.c

 RANDOM NUMBER DRIVER
 M: "Theodore Ts'o" <tytso@mit.edu>
+M: Jason A. Donenfeld <Jason@zx2c4.com>
 S: Maintained
 F: drivers/char/random.c

@@ -16501,6 +16502,12 @@ T: git git://linuxtv.org/media_tree.git
 F: Documentation/devicetree/bindings/media/allwinner,sun8i-a83t-de2-rotate.yaml
 F: drivers/media/platform/sunxi/sun8i-rotate/

+RPMSG TTY DRIVER
+M: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
+L: linux-remoteproc@vger.kernel.org
+S: Maintained
+F: drivers/tty/rpmsg_tty.c
+
 RTL2830 MEDIA DRIVER
 M: Antti Palosaari <crope@iki.fi>
 L: linux-media@vger.kernel.org
@@ -16623,7 +16630,8 @@ F: drivers/iommu/s390-iommu.c

 S390 IUCV NETWORK LAYER
 M: Julian Wiedmann <jwi@linux.ibm.com>
-M: Karsten Graul <kgraul@linux.ibm.com>
+M: Alexandra Winter <wintera@linux.ibm.com>
+M: Wenjia Zhang <wenjia@linux.ibm.com>
 L: linux-s390@vger.kernel.org
 L: netdev@vger.kernel.org
 S: Supported
@@ -16634,7 +16642,8 @@ F: net/iucv/

 S390 NETWORK DRIVERS
 M: Julian Wiedmann <jwi@linux.ibm.com>
-M: Karsten Graul <kgraul@linux.ibm.com>
+M: Alexandra Winter <wintera@linux.ibm.com>
+M: Wenjia Zhang <wenjia@linux.ibm.com>
 L: linux-s390@vger.kernel.org
 L: netdev@vger.kernel.org
 S: Supported
Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Gobble Gobble

 # *DOCUMENTATION*
@@ -91,7 +91,7 @@
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)

 /* TCR_EL2 Registers bits */
-#define TCR_EL2_RES1 ((1 << 31) | (1 << 23))
+#define TCR_EL2_RES1 ((1U << 31) | (1 << 23))
 #define TCR_EL2_TBI (1 << 20)
 #define TCR_EL2_PS_SHIFT 16
 #define TCR_EL2_PS_MASK (7 << TCR_EL2_PS_SHIFT)
@@ -276,7 +276,7 @@
 #define CPTR_EL2_TFP_SHIFT 10

 /* Hyp Coprocessor Trap Register */
-#define CPTR_EL2_TCPAC (1 << 31)
+#define CPTR_EL2_TCPAC (1U << 31)
 #define CPTR_EL2_TAM (1 << 30)
 #define CPTR_EL2_TTA (1 << 20)
 #define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
@@ -77,11 +77,17 @@
 .endm

 SYM_CODE_START(ftrace_regs_caller)
+#ifdef BTI_C
+	BTI_C
+#endif
 	ftrace_regs_entry 1
 	b ftrace_common
 SYM_CODE_END(ftrace_regs_caller)

 SYM_CODE_START(ftrace_caller)
+#ifdef BTI_C
+	BTI_C
+#endif
 	ftrace_regs_entry 0
 	b ftrace_common
 SYM_CODE_END(ftrace_caller)
@@ -147,7 +147,7 @@ int machine_kexec_post_load(struct kimage *kimage)
 	if (rc)
 		return rc;
 	kimage->arch.ttbr1 = __pa(trans_pgd);
-	kimage->arch.zero_page = __pa(empty_zero_page);
+	kimage->arch.zero_page = __pa_symbol(empty_zero_page);

 	reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start;
 	memcpy(reloc_code, __relocate_new_kernel_start, reloc_size);
@@ -403,6 +403,8 @@ typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);

 static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);

+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
+
 /*
  * Allow the hypervisor to handle the exit with an exit handler if it has one.
  *
@@ -429,6 +431,18 @@ static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
  */
 static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
+	/*
+	 * Save PSTATE early so that we can evaluate the vcpu mode
+	 * early on.
+	 */
+	vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
+
+	/*
+	 * Check whether we want to repaint the state one way or
+	 * another.
+	 */
+	early_exit_filter(vcpu, exit_code);
+
 	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
 		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

@@ -70,7 +70,12 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
 {
 	ctxt->regs.pc = read_sysreg_el2(SYS_ELR);
-	ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);
+	/*
+	 * Guest PSTATE gets saved at guest fixup time in all
+	 * cases. We still need to handle the nVHE host side here.
+	 */
+	if (!has_vhe() && ctxt->__hyp_running_vcpu)
+		ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);

 	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
 		ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
@@ -233,7 +233,7 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
  * Returns false if the guest ran in AArch32 when it shouldn't have, and
  * thus should exit to the host, or true if a the guest run loop can continue.
  */
-static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	struct kvm *kvm = kern_hyp_va(vcpu->kvm);

@@ -248,10 +248,7 @@ static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
 		vcpu->arch.target = -1;
 		*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
 		*exit_code |= ARM_EXCEPTION_IL;
-		return false;
 	}
-
-	return true;
 }

 /* Switch to the guest for legacy non-VHE systems */
@@ -316,9 +313,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 		/* Jump in the fire! */
 		exit_code = __guest_enter(vcpu);

-		if (unlikely(!handle_aarch32_guest(vcpu, &exit_code)))
-			break;
-
 		/* And we're baaack! */
 	} while (fixup_guest_exit(vcpu, &exit_code));

@@ -112,6 +112,10 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
 	return hyp_exit_handlers;
 }

+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+}
+
 /* Switch to the guest for VHE systems running in EL2 */
 static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 {
@@ -15,7 +15,12 @@
 # Mike Shaver, Helge Deller and Martin K. Petersen
 #

+ifdef CONFIG_PARISC_SELF_EXTRACT
+boot := arch/parisc/boot
+KBUILD_IMAGE := $(boot)/bzImage
+else
 KBUILD_IMAGE := vmlinuz
+endif

 NM = sh $(srctree)/arch/parisc/nm
 CHECKFLAGS += -D__hppa__=1
@@ -1,7 +1,9 @@
 CONFIG_LOCALVERSION="-64bit"
 # CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_LZ4=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -35,6 +37,7 @@ CONFIG_MODVERSIONS=y
 CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BINFMT_MISC=m
 # CONFIG_COMPACTION is not set
+CONFIG_MEMORY_FAILURE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -65,12 +68,15 @@ CONFIG_SCSI_ISCSI_ATTRS=y
 CONFIG_SCSI_SRP_ATTRS=y
 CONFIG_ISCSI_BOOT_SYSFS=y
 CONFIG_SCSI_MPT2SAS=y
-CONFIG_SCSI_LASI700=m
+CONFIG_SCSI_LASI700=y
 CONFIG_SCSI_SYM53C8XX_2=y
 CONFIG_SCSI_ZALON=y
 CONFIG_SCSI_QLA_ISCSI=m
 CONFIG_SCSI_DH=y
 CONFIG_ATA=y
+CONFIG_SATA_SIL=y
+CONFIG_SATA_SIS=y
+CONFIG_SATA_VIA=y
 CONFIG_PATA_NS87415=y
 CONFIG_PATA_SIL680=y
 CONFIG_ATA_GENERIC=y
@@ -79,6 +85,7 @@ CONFIG_MD_LINEAR=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_RAID=m
 CONFIG_DM_UEVENT=y
+CONFIG_DM_AUDIT=y
 CONFIG_FUSION=y
 CONFIG_FUSION_SPI=y
 CONFIG_FUSION_SAS=y
@@ -196,10 +203,15 @@ CONFIG_FB_MATROX_G=y
 CONFIG_FB_MATROX_I2C=y
 CONFIG_FB_MATROX_MAVEN=y
 CONFIG_FB_RADEON=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_CLUT224 is not set
 CONFIG_HIDRAW=y
 CONFIG_HID_PID=y
 CONFIG_USB_HIDDEV=y
 CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_UIO=y
 CONFIG_UIO_PDRV_GENIRQ=m
 CONFIG_UIO_AEC=m
@@ -39,6 +39,7 @@ verify "$3"
 if [ -n "${INSTALLKERNEL}" ]; then
   if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
   if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+  if [ -x /usr/sbin/${INSTALLKERNEL} ]; then exec /usr/sbin/${INSTALLKERNEL} "$@"; fi
 fi

 # Default install
@@ -249,30 +249,16 @@ void __init time_init(void)
 static int __init init_cr16_clocksource(void)
 {
 	/*
-	 * The cr16 interval timers are not syncronized across CPUs on
-	 * different sockets, so mark them unstable and lower rating on
-	 * multi-socket SMP systems.
+	 * The cr16 interval timers are not syncronized across CPUs, even if
+	 * they share the same socket.
 	 */
 	if (num_online_cpus() > 1 && !running_on_qemu) {
-		int cpu;
-		unsigned long cpu0_loc;
-		cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
-
-		for_each_online_cpu(cpu) {
-			if (cpu == 0)
-				continue;
-			if ((cpu0_loc != 0) &&
-			    (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
-				continue;
-
-			/* mark sched_clock unstable */
-			clear_sched_clock_stable();
-
-			clocksource_cr16.name = "cr16_unstable";
-			clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
-			clocksource_cr16.rating = 0;
-			break;
-		}
+		/* mark sched_clock unstable */
+		clear_sched_clock_stable();
+
+		clocksource_cr16.name = "cr16_unstable";
+		clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
+		clocksource_cr16.rating = 0;
 	}

 	/* register at clocksource framework */
@@ -12,14 +12,12 @@
 #include <linux/types.h>
 #include <linux/kvm.h>
 #include <linux/kvm_types.h>
+#include <asm/csr.h>
 #include <asm/kvm_vcpu_fp.h>
 #include <asm/kvm_vcpu_timer.h>

-#ifdef CONFIG_64BIT
-#define KVM_MAX_VCPUS (1U << 16)
-#else
-#define KVM_MAX_VCPUS (1U << 9)
-#endif
+#define KVM_MAX_VCPUS \
+	((HGATP_VMID_MASK >> HGATP_VMID_SHIFT) + 1)

 #define KVM_HALT_POLL_NS_DEFAULT 500000

@@ -453,6 +453,12 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 				   struct kvm_memory_slot *slot)
 {
+	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
+	phys_addr_t size = slot->npages << PAGE_SHIFT;
+
+	spin_lock(&kvm->mmu_lock);
+	stage2_unmap_range(kvm, gpa, size, false);
+	spin_unlock(&kvm->mmu_lock);
 }

 void kvm_arch_commit_memory_region(struct kvm *kvm,
@@ -403,7 +403,6 @@ CONFIG_DEVTMPFS=y
 CONFIG_CONNECTOR=y
 CONFIG_ZRAM=y
 CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
@@ -476,6 +475,7 @@ CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_VXLAN=m
 CONFIG_BAREUDP=m
+CONFIG_AMT=m
 CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
@@ -489,6 +489,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_AMD is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ASIX is not set
 # CONFIG_NET_VENDOR_ATHEROS is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
 # CONFIG_NET_VENDOR_BROCADE is not set
@@ -571,6 +572,7 @@ CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
 CONFIG_DIAG288_WATCHDOG=m
+# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
 CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
@@ -775,12 +777,14 @@ CONFIG_CRC4=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
 CONFIG_RANDOM32_SELFTEST=y
+CONFIG_XZ_DEC_MICROLZMA=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_DEBUG_INFO_BTF=y
 CONFIG_GDB_SCRIPTS=y
 CONFIG_HEADERS_INSTALL=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
@@ -807,6 +811,7 @@ CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
 CONFIG_DEBUG_PER_CPU_MAPS=y
 CONFIG_KFENCE=y
+CONFIG_KFENCE_STATIC_KEYS=y
 CONFIG_DEBUG_SHIRQ=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_DETECT_HUNG_TASK=y
@@ -842,6 +847,7 @@ CONFIG_FTRACE_STARTUP_TEST=y
 CONFIG_SAMPLES=y
 CONFIG_SAMPLE_TRACE_PRINTK=m
 CONFIG_SAMPLE_FTRACE_DIRECT=m
+CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
 CONFIG_DEBUG_ENTRY=y
 CONFIG_CIO_INJECT=y
 CONFIG_KUNIT=m
@@ -860,7 +866,7 @@ CONFIG_FAIL_FUNCTION=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LKDTM=m
 CONFIG_TEST_MIN_HEAP=y
-CONFIG_KPROBES_SANITY_TEST=y
+CONFIG_KPROBES_SANITY_TEST=m
 CONFIG_RBTREE_TEST=y
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
@@ -394,7 +394,6 @@ CONFIG_DEVTMPFS=y
 CONFIG_CONNECTOR=y
 CONFIG_ZRAM=y
 CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
@@ -467,6 +466,7 @@ CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_VXLAN=m
 CONFIG_BAREUDP=m
+CONFIG_AMT=m
 CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
@@ -480,6 +480,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_AMD is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ASIX is not set
 # CONFIG_NET_VENDOR_ATHEROS is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
 # CONFIG_NET_VENDOR_BROCADE is not set
@@ -762,12 +763,14 @@ CONFIG_PRIME_NUMBERS=m
 CONFIG_CRC4=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
+CONFIG_XZ_DEC_MICROLZMA=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_DEBUG_INFO_BTF=y
 CONFIG_GDB_SCRIPTS=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_MAGIC_SYSRQ=y
@@ -792,9 +795,11 @@ CONFIG_HIST_TRIGGERS=y
 CONFIG_SAMPLES=y
 CONFIG_SAMPLE_TRACE_PRINTK=m
 CONFIG_SAMPLE_FTRACE_DIRECT=m
+CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
 CONFIG_KUNIT=m
 CONFIG_KUNIT_DEBUGFS=y
 CONFIG_LKDTM=m
+CONFIG_KPROBES_SANITY_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
@@ -65,9 +65,11 @@ CONFIG_ZFCP=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_LSM="yama,loadpin,safesetid,integrity"
 # CONFIG_ZLIB_DFLTCC is not set
+CONFIG_XZ_DEC_MICROLZMA=y
 CONFIG_PRINTK_TIME=y
 # CONFIG_SYMBOLIC_ERRNAME is not set
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_BTF=y
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_PANIC_ON_OOPS=y
@@ -14,12 +14,13 @@

 /* I/O Map */
 #define ZPCI_IOMAP_SHIFT 48
-#define ZPCI_IOMAP_ADDR_BASE 0x8000000000000000UL
+#define ZPCI_IOMAP_ADDR_SHIFT 62
+#define ZPCI_IOMAP_ADDR_BASE (1UL << ZPCI_IOMAP_ADDR_SHIFT)
 #define ZPCI_IOMAP_ADDR_OFF_MASK ((1UL << ZPCI_IOMAP_SHIFT) - 1)
 #define ZPCI_IOMAP_MAX_ENTRIES \
-	((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT))
+	(1UL << (ZPCI_IOMAP_ADDR_SHIFT - ZPCI_IOMAP_SHIFT))
 #define ZPCI_IOMAP_ADDR_IDX_MASK \
-	(~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE)
+	((ZPCI_IOMAP_ADDR_BASE - 1) & ~ZPCI_IOMAP_ADDR_OFF_MASK)

 struct zpci_iomap_entry {
 	u32 fh;
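For reference, the new constants in the hunk above work out as follows
(illustrative arithmetic, not part of the commit): with ZPCI_IOMAP_ADDR_SHIFT = 62
and ZPCI_IOMAP_SHIFT = 48, ZPCI_IOMAP_ADDR_BASE is 1UL << 62 and
ZPCI_IOMAP_MAX_ENTRIES is 1UL << (62 - 48) = 16384, so an iomap address keeps
bits 48-61 as the entry index (selected by ZPCI_IOMAP_ADDR_IDX_MASK) and
bits 0-47 as the offset within that entry.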
@@ -173,10 +173,11 @@ static noinline int unwindme_func4(struct unwindme *u)
 		}

 		/*
-		 * trigger specification exception
+		 * Trigger operation exception; use insn notation to bypass
+		 * llvm's integrated assembler sanity checks.
 		 */
 		asm volatile(
-			"	mvcl	%%r1,%%r1\n"
+			"	.insn	e,0x0000\n"	/* illegal opcode */
 			"0:	nopr	%%r7\n"
 			EX_TABLE(0b, 0b)
 			:);
@@ -574,6 +574,10 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
 	ud2
 1:
 #endif
+#ifdef CONFIG_XEN_PV
+	ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
+#endif
+
 	POP_REGS pop_rdi=0

 	/*
@@ -890,6 +894,7 @@ SYM_CODE_START_LOCAL(paranoid_entry)
 .Lparanoid_entry_checkgs:
 	/* EBX = 1 -> kernel GSBASE active, no restore required */
 	movl	$1, %ebx
+
 	/*
 	 * The kernel-enforced convention is a negative GSBASE indicates
 	 * a kernel value. No SWAPGS needed on entry and exit.
@@ -897,21 +902,14 @@ SYM_CODE_START_LOCAL(paranoid_entry)
 	movl	$MSR_GS_BASE, %ecx
 	rdmsr
 	testl	%edx, %edx
-	jns	.Lparanoid_entry_swapgs
-	ret
-
-.Lparanoid_entry_swapgs:
-	swapgs
-
-	/*
-	 * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
-	 * unconditional CR3 write, even in the PTI case. So do an lfence
-	 * to prevent GS speculation, regardless of whether PTI is enabled.
-	 */
-	FENCE_SWAPGS_KERNEL_ENTRY
+	js	.Lparanoid_kernel_gsbase

 	/* EBX = 0 -> SWAPGS required on exit */
 	xorl	%ebx, %ebx
+	swapgs
+.Lparanoid_kernel_gsbase:

+	FENCE_SWAPGS_KERNEL_ENTRY
 	ret
 SYM_CODE_END(paranoid_entry)

@@ -993,11 +991,6 @@ SYM_CODE_START_LOCAL(error_entry)
 	pushq	%r12
 	ret

-.Lerror_entry_done_lfence:
-	FENCE_SWAPGS_KERNEL_ENTRY
-.Lerror_entry_done:
-	ret
-
 	/*
 	 * There are two places in the kernel that can potentially fault with
 	 * usergs. Handle them here. B stepping K8s sometimes report a
@@ -1020,8 +1013,14 @@ SYM_CODE_START_LOCAL(error_entry)
 	 * .Lgs_change's error handler with kernel gsbase.
 	 */
 	SWAPGS
-	FENCE_SWAPGS_USER_ENTRY
-	jmp .Lerror_entry_done
+
+	/*
+	 * Issue an LFENCE to prevent GS speculation, regardless of whether it is a
+	 * kernel or user gsbase.
+	 */
+.Lerror_entry_done_lfence:
+	FENCE_SWAPGS_KERNEL_ENTRY
+	ret

 .Lbstep_iret:
 	/* Fix truncated RIP */
@@ -108,7 +108,7 @@
 #define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */
 #define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */

-#define INTEL_FAM6_RAPTOR_LAKE 0xB7
+#define INTEL_FAM6_RAPTORLAKE 0xB7

 /* "Small Core" Processors (Atom) */

@@ -1036,6 +1036,7 @@ struct kvm_x86_msr_filter {
 #define APICV_INHIBIT_REASON_PIT_REINJ 4
 #define APICV_INHIBIT_REASON_X2APIC 5
 #define APICV_INHIBIT_REASON_BLOCKIRQ 6
+#define APICV_INHIBIT_REASON_ABSENT 7

 struct kvm_arch {
 	unsigned long n_used_mmu_pages;
@@ -73,4 +73,15 @@

 #define GHCB_RESP_CODE(v) ((v) & GHCB_MSR_INFO_MASK)

+/*
+ * Error codes related to GHCB input that can be communicated back to the guest
+ * by setting the lower 32-bits of the GHCB SW_EXITINFO1 field to 2.
+ */
+#define GHCB_ERR_NOT_REGISTERED 1
+#define GHCB_ERR_INVALID_USAGE 2
+#define GHCB_ERR_INVALID_SCRATCH_AREA 3
+#define GHCB_ERR_MISSING_INPUT 4
+#define GHCB_ERR_INVALID_INPUT 5
+#define GHCB_ERR_INVALID_EVENT 6
+
 #endif
@@ -118,7 +118,7 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
 				      struct fpstate *fpstate)
 {
 	struct xregs_state __user *x = buf;
-	struct _fpx_sw_bytes sw_bytes;
+	struct _fpx_sw_bytes sw_bytes = {};
 	u32 xfeatures;
 	int err;

@@ -294,11 +294,6 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
 				   char *dst, char *buf, size_t size)
 {
 	unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
-	char __user *target = (char __user *)dst;
-	u64 d8;
-	u32 d4;
-	u16 d2;
-	u8 d1;

 	/*
 	 * This function uses __put_user() independent of whether kernel or user
@@ -320,26 +315,42 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
 	 * instructions here would cause infinite nesting.
 	 */
 	switch (size) {
-	case 1:
+	case 1: {
+		u8 d1;
+		u8 __user *target = (u8 __user *)dst;
+
 		memcpy(&d1, buf, 1);
 		if (__put_user(d1, target))
 			goto fault;
 		break;
-	case 2:
+	}
+	case 2: {
+		u16 d2;
+		u16 __user *target = (u16 __user *)dst;
+
 		memcpy(&d2, buf, 2);
 		if (__put_user(d2, target))
 			goto fault;
 		break;
-	case 4:
+	}
+	case 4: {
+		u32 d4;
+		u32 __user *target = (u32 __user *)dst;
+
 		memcpy(&d4, buf, 4);
 		if (__put_user(d4, target))
 			goto fault;
 		break;
-	case 8:
+	}
+	case 8: {
+		u64 d8;
+		u64 __user *target = (u64 __user *)dst;
+
 		memcpy(&d8, buf, 8);
 		if (__put_user(d8, target))
 			goto fault;
 		break;
+	}
 	default:
 		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
 		return ES_UNSUPPORTED;
@@ -362,11 +373,6 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
 				  char *src, char *buf, size_t size)
 {
 	unsigned long error_code = X86_PF_PROT;
-	char __user *s = (char __user *)src;
-	u64 d8;
-	u32 d4;
-	u16 d2;
-	u8 d1;

 	/*
 	 * This function uses __get_user() independent of whether kernel or user
@@ -388,26 +394,41 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
 	 * instructions here would cause infinite nesting.
 	 */
 	switch (size) {
-	case 1:
+	case 1: {
+		u8 d1;
+		u8 __user *s = (u8 __user *)src;
+
 		if (__get_user(d1, s))
 			goto fault;
 		memcpy(buf, &d1, 1);
 		break;
-	case 2:
+	}
+	case 2: {
+		u16 d2;
+		u16 __user *s = (u16 __user *)src;
+
 		if (__get_user(d2, s))
 			goto fault;
 		memcpy(buf, &d2, 2);
 		break;
-	case 4:
+	}
+	case 4: {
+		u32 d4;
+		u32 __user *s = (u32 __user *)src;
+
 		if (__get_user(d4, s))
 			goto fault;
 		memcpy(buf, &d4, 4);
 		break;
-	case 8:
+	}
+	case 8: {
+		u64 d8;
+		u64 __user *s = (u64 __user *)src;
 		if (__get_user(d8, s))
 			goto fault;
 		memcpy(buf, &d8, 8);
 		break;
+	}
 	default:
 		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
 		return ES_UNSUPPORTED;
@@ -1180,6 +1180,12 @@ void mark_tsc_unstable(char *reason)

 EXPORT_SYMBOL_GPL(mark_tsc_unstable);

+static void __init tsc_disable_clocksource_watchdog(void)
+{
+	clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+	clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+}
+
 static void __init check_system_tsc_reliable(void)
 {
 #if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
@@ -1196,6 +1202,23 @@ static void __init check_system_tsc_reliable(void)
 #endif
 	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
 		tsc_clocksource_reliable = 1;
+
+	/*
+	 * Disable the clocksource watchdog when the system has:
+	 *  - TSC running at constant frequency
+	 *  - TSC which does not stop in C-States
+	 *  - the TSC_ADJUST register which allows to detect even minimal
+	 *    modifications
+	 *  - not more than two sockets. As the number of sockets cannot be
+	 *    evaluated at the early boot stage where this has to be
+	 *    invoked, check the number of online memory nodes as a
+	 *    fallback solution which is an reasonable estimate.
+	 */
+	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
+	    boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
+	    boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
+	    nr_online_nodes <= 2)
+		tsc_disable_clocksource_watchdog();
 }

 /*
@@ -1387,9 +1410,6 @@ static int __init init_tsc_clocksource(void)
 	if (tsc_unstable)
 		goto unreg;

-	if (tsc_clocksource_reliable || no_tsc_watchdog)
-		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
-
 	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
 		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;

@@ -1527,7 +1547,7 @@ void __init tsc_init(void)
 	}

 	if (tsc_clocksource_reliable || no_tsc_watchdog)
-		clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+		tsc_disable_clocksource_watchdog();

 	clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
 	detect_art();
@@ -30,6 +30,7 @@ struct tsc_adjust {
 };

 static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
+static struct timer_list tsc_sync_check_timer;

 /*
  * TSC's on different sockets may be reset asynchronously.
@@ -77,6 +78,46 @@ void tsc_verify_tsc_adjust(bool resume)
 	}
 }

+/*
+ * Normally the tsc_sync will be checked every time system enters idle
+ * state, but there is still caveat that a system won't enter idle,
+ * either because it's too busy or configured purposely to not enter
+ * idle.
+ *
+ * So setup a periodic timer (every 10 minutes) to make sure the check
+ * is always on.
+ */
+
+#define SYNC_CHECK_INTERVAL (HZ * 600)
+
+static void tsc_sync_check_timer_fn(struct timer_list *unused)
+{
+	int next_cpu;
+
+	tsc_verify_tsc_adjust(false);
+
+	/* Run the check for all onlined CPUs in turn */
+	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
+	if (next_cpu >= nr_cpu_ids)
+		next_cpu = cpumask_first(cpu_online_mask);
+
+	tsc_sync_check_timer.expires += SYNC_CHECK_INTERVAL;
+	add_timer_on(&tsc_sync_check_timer, next_cpu);
+}
+
+static int __init start_sync_check_timer(void)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable)
+		return 0;
+
+	timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0);
+	tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL;
+	add_timer(&tsc_sync_check_timer);
+
+	return 0;
+}
+late_initcall(start_sync_check_timer);
+
 static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
 				   unsigned int cpu, bool bootcpu)
 {
@@ -81,7 +81,6 @@ struct kvm_ioapic {
 	unsigned long irq_states[IOAPIC_NUM_PINS];
 	struct kvm_io_device dev;
 	struct kvm *kvm;
-	void (*ack_notifier)(void *opaque, int irq);
 	spinlock_t lock;
 	struct rtc_status rtc_status;
 	struct delayed_work eoi_inject;
@@ -56,7 +56,6 @@ struct kvm_pic {
 	struct kvm_io_device dev_master;
 	struct kvm_io_device dev_slave;
 	struct kvm_io_device dev_elcr;
-	void (*ack_notifier)(void *opaque, int irq);
 	unsigned long irq_states[PIC_NUM_PINS];
 };

@@ -707,7 +707,7 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
 static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
 {
 	int highest_irr;
-	if (apic->vcpu->arch.apicv_active)
+	if (kvm_x86_ops.sync_pir_to_irr)
 		highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
 	else
 		highest_irr = apic_find_highest_irr(apic);
@@ -1582,7 +1582,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
|
|||||||
flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
|
flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
|
||||||
|
|
||||||
if (is_tdp_mmu_enabled(kvm))
|
if (is_tdp_mmu_enabled(kvm))
|
||||||
flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
|
flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
|
||||||
|
|
||||||
return flush;
|
return flush;
|
||||||
}
|
}
|
||||||
@@ -1936,7 +1936,11 @@ static void mmu_audit_disable(void) { }
|
|||||||
|
|
||||||
static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
|
static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
|
||||||
{
|
{
|
||||||
return sp->role.invalid ||
|
if (sp->role.invalid)
|
||||||
|
return true;
|
||||||
|
|
||||||
|
/* TDP MMU pages due not use the MMU generation. */
|
||||||
|
return !sp->tdp_mmu_page &&
|
||||||
unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
|
unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
|
||||||
}
|
}
|
||||||
|
|
||||||
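The generation check in is_obsolete_sp() is the usual "bump a counter to invalidate everything at once" pattern: each shadow page records the generation it was created in, and a zap-all simply advances the current generation so stale pages fail the comparison lazily. A hedged userspace sketch of the pattern (illustrative only, not KVM's data structures):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative cache entry stamped with the generation it was created in. */
struct entry {
	unsigned long valid_gen;
	bool invalid;			/* individually force-invalidated */
};

static unsigned long current_gen;

static bool entry_is_obsolete(const struct entry *e)
{
	return e->invalid || e->valid_gen != current_gen;
}

int main(void)
{
	struct entry a = { .valid_gen = current_gen };
	struct entry b = { .valid_gen = current_gen };

	current_gen++;		/* "zap all": everything existing becomes stale */
	struct entry c = { .valid_gen = current_gen };

	printf("a obsolete: %d\n", entry_is_obsolete(&a));	/* 1 */
	printf("b obsolete: %d\n", entry_is_obsolete(&b));	/* 1 */
	printf("c obsolete: %d\n", entry_is_obsolete(&c));	/* 0 */
	return 0;
}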
@@ -2173,10 +2177,10 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
 	iterator->shadow_addr = root;
 	iterator->level = vcpu->arch.mmu->shadow_root_level;

-	if (iterator->level == PT64_ROOT_4LEVEL &&
+	if (iterator->level >= PT64_ROOT_4LEVEL &&
 	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
 	    !vcpu->arch.mmu->direct_map)
-		--iterator->level;
+		iterator->level = PT32E_ROOT_LEVEL;

 	if (iterator->level == PT32E_ROOT_LEVEL) {
 		/*
@@ -3976,6 +3980,20 @@ out_retry:
 	return true;
 }

+/*
+ * Returns true if the page fault is stale and needs to be retried, i.e. if the
+ * root was invalidated by a memslot update or a relevant mmu_notifier fired.
+ */
+static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
+				struct kvm_page_fault *fault, int mmu_seq)
+{
+	if (is_obsolete_sp(vcpu->kvm, to_shadow_page(vcpu->arch.mmu->root_hpa)))
+		return true;
+
+	return fault->slot &&
+	       mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
+}
+
 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
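is_page_fault_stale() bundles the two staleness checks (obsolete root, and mmu_notifier activity since mmu_seq was sampled) behind one predicate so both fault paths retry the same way. A hedged userspace sketch of the underlying sample-then-revalidate shape; none of the names below are KVM API, they are made up for illustration:

#include <stdio.h>

static unsigned long notifier_seq;	/* bumped by invalidations */
static int invalidations_left = 1;	/* simulate one racing invalidation */

static int do_slow_lookup(unsigned long *seq_snapshot)
{
	*seq_snapshot = notifier_seq;		/* sample before the slow work */
	if (invalidations_left-- > 0)
		notifier_seq++;			/* an invalidation races with us */
	return 42;				/* the translated value */
}

static int is_stale(unsigned long seq_snapshot)
{
	return notifier_seq != seq_snapshot;
}

int main(void)
{
	unsigned long seq;
	int val, tries = 0;

	do {
		tries++;
		val = do_slow_lookup(&seq);
	} while (is_stale(seq));	/* retry rather than install stale data */

	printf("installed %d after %d tries\n", val, tries);	/* 2 tries */
	return 0;
}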
@@ -4013,8 +4031,9 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	else
 		write_lock(&vcpu->kvm->mmu_lock);

-	if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
+	if (is_page_fault_stale(vcpu, fault, mmu_seq))
 		goto out_unlock;
+
 	r = make_mmu_pages_available(vcpu);
 	if (r)
 		goto out_unlock;
@@ -4855,7 +4874,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
 	struct kvm_mmu_role_regs regs = {
 		.cr0 = cr0,
-		.cr4 = cr4,
+		.cr4 = cr4 & ~X86_CR4_PKE,
 		.efer = efer,
 	};
 	union kvm_mmu_role new_role;
@@ -4919,7 +4938,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 	context->direct_map = false;

 	update_permission_bitmask(context, true);
-	update_pkru_bitmask(context);
+	context->pkru_mask = 0;
 	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
 	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
 }
@@ -5025,6 +5044,14 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	/*
 	 * Invalidate all MMU roles to force them to reinitialize as CPUID
 	 * information is factored into reserved bit calculations.
+	 *
+	 * Correctly handling multiple vCPU models with respect to paging and
+	 * physical address properties) in a single VM would require tracking
+	 * all relevant CPUID information in kvm_mmu_page_role. That is very
+	 * undesirable as it would increase the memory requirements for
+	 * gfn_track (see struct kvm_mmu_page_role comments). For now that
+	 * problem is swept under the rug; KVM's CPUID API is horrific and
+	 * it's all but impossible to solve it without introducing a new API.
 	 */
 	vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
 	vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
@@ -5032,24 +5059,10 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	kvm_mmu_reset_context(vcpu);

 	/*
-	 * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
-	 * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
-	 * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page
-	 * faults due to reusing SPs/SPTEs. Alert userspace, but otherwise
-	 * sweep the problem under the rug.
-	 *
-	 * KVM's horrific CPUID ABI makes the problem all but impossible to
-	 * solve, as correctly handling multiple vCPU models (with respect to
-	 * paging and physical address properties) in a single VM would require
-	 * tracking all relevant CPUID information in kvm_mmu_page_role. That
-	 * is very undesirable as it would double the memory requirements for
-	 * gfn_track (see struct kvm_mmu_page_role comments), and in practice
-	 * no sane VMM mucks with the core vCPU model on the fly.
+	 * Changing guest CPUID after KVM_RUN is forbidden, see the comment in
+	 * kvm_arch_vcpu_ioctl().
 	 */
-	if (vcpu->arch.last_vmentry_cpu != -1) {
-		pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} after KVM_RUN may cause guest instability\n");
-		pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} will fail after KVM_RUN starting with Linux 5.16\n");
-	}
+	KVM_BUG_ON(vcpu->arch.last_vmentry_cpu != -1, vcpu->kvm);
 }

 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
@@ -5369,7 +5382,7 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,

 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
+	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
 	++vcpu->stat.invlpg;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
@@ -5854,8 +5867,6 @@ restart:
 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
 				   const struct kvm_memory_slot *slot)
 {
-	bool flush = false;
-
 	if (kvm_memslots_have_rmaps(kvm)) {
 		write_lock(&kvm->mmu_lock);
 		/*
@@ -5863,17 +5874,14 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
 		 * logging at a 4k granularity and never creates collapsible
 		 * 2m SPTEs during dirty logging.
 		 */
-		flush = slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
-		if (flush)
+		if (slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true))
 			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
 		write_unlock(&kvm->mmu_lock);
 	}

 	if (is_tdp_mmu_enabled(kvm)) {
 		read_lock(&kvm->mmu_lock);
-		flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
-		if (flush)
-			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+		kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
 		read_unlock(&kvm->mmu_lock);
 	}
 }
@@ -6182,23 +6190,46 @@ void kvm_mmu_module_exit(void)
 	mmu_audit_disable();
 }

+/*
+ * Calculate the effective recovery period, accounting for '0' meaning "let KVM
+ * select a halving time of 1 hour".  Returns true if recovery is enabled.
+ */
+static bool calc_nx_huge_pages_recovery_period(uint *period)
+{
+	/*
+	 * Use READ_ONCE to get the params, this may be called outside of the
+	 * param setters, e.g. by the kthread to compute its next timeout.
+	 */
+	bool enabled = READ_ONCE(nx_huge_pages);
+	uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
+
+	if (!enabled || !ratio)
+		return false;
+
+	*period = READ_ONCE(nx_huge_pages_recovery_period_ms);
+	if (!*period) {
+		/* Make sure the period is not less than one second. */
+		ratio = min(ratio, 3600u);
+		*period = 60 * 60 * 1000 / ratio;
+	}
+	return true;
+}
+
 static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
 {
 	bool was_recovery_enabled, is_recovery_enabled;
 	uint old_period, new_period;
 	int err;

-	was_recovery_enabled = nx_huge_pages_recovery_ratio;
-	old_period = nx_huge_pages_recovery_period_ms;
+	was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period);

 	err = param_set_uint(val, kp);
 	if (err)
 		return err;

-	is_recovery_enabled = nx_huge_pages_recovery_ratio;
-	new_period = nx_huge_pages_recovery_period_ms;
+	is_recovery_enabled = calc_nx_huge_pages_recovery_period(&new_period);

-	if (READ_ONCE(nx_huge_pages) && is_recovery_enabled &&
+	if (is_recovery_enabled &&
 	    (!was_recovery_enabled || old_period > new_period)) {
 		struct kvm *kvm;

@@ -6262,18 +6293,13 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)

 static long get_nx_lpage_recovery_timeout(u64 start_time)
 {
-	uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
-	uint period = READ_ONCE(nx_huge_pages_recovery_period_ms);
+	bool enabled;
+	uint period;

-	if (!period && ratio) {
-		/* Make sure the period is not less than one second. */
-		ratio = min(ratio, 3600u);
-		period = 60 * 60 * 1000 / ratio;
-	}
+	enabled = calc_nx_huge_pages_recovery_period(&period);

-	return READ_ONCE(nx_huge_pages) && ratio
-		? start_time + msecs_to_jiffies(period) - get_jiffies_64()
-		: MAX_SCHEDULE_TIMEOUT;
+	return enabled ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
+		       : MAX_SCHEDULE_TIMEOUT;
 }

 static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
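The helper added above centralizes the defaulting rule: a period of 0 means "derive it from the ratio", with the ratio clamped to 3600 so the derived period never drops below one second. A small worked example of just that arithmetic (userspace, illustrative only; the function name and parameters are stand-ins, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* Derived period in ms: 0 means "split one hour by the ratio". */
static bool calc_period(bool enabled, unsigned ratio, unsigned period_ms,
			unsigned *out)
{
	if (!enabled || !ratio)
		return false;

	*out = period_ms;
	if (!*out) {
		if (ratio > 3600)	/* clamp: keeps the period >= 1000 ms */
			ratio = 3600;
		*out = 60 * 60 * 1000 / ratio;
	}
	return true;
}

int main(void)
{
	unsigned p;

	if (calc_period(true, 60, 0, &p))
		printf("ratio 60, period 0    -> %u ms\n", p);	/* 60000 */
	if (calc_period(true, 10000, 0, &p))
		printf("ratio 10000, period 0 -> %u ms\n", p);	/* 1000, clamped */
	if (calc_period(true, 60, 30000, &p))
		printf("explicit period 30000 -> %u ms\n", p);	/* 30000 */
	printf("disabled -> %d\n", calc_period(false, 60, 0, &p));	/* 0 */
	return 0;
}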
@@ -911,7 +911,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault

 	r = RET_PF_RETRY;
 	write_lock(&vcpu->kvm->mmu_lock);
-	if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
+
+	if (is_page_fault_stale(vcpu, fault, mmu_seq))
 		goto out_unlock;

 	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
@@ -317,9 +317,6 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
 	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
 	int level = sp->role.level;
 	gfn_t base_gfn = sp->gfn;
-	u64 old_child_spte;
-	u64 *sptep;
-	gfn_t gfn;
 	int i;

 	trace_kvm_mmu_prepare_zap_page(sp);
@@ -327,8 +324,9 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
 	tdp_mmu_unlink_page(kvm, sp, shared);

 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
-		sptep = rcu_dereference(pt) + i;
-		gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
+		u64 *sptep = rcu_dereference(pt) + i;
+		gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
+		u64 old_child_spte;

 		if (shared) {
 			/*
@@ -374,7 +372,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
 						    shared);
 	}

-	kvm_flush_remote_tlbs_with_address(kvm, gfn,
+	kvm_flush_remote_tlbs_with_address(kvm, base_gfn,
 					   KVM_PAGES_PER_HPAGE(level + 1));

 	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
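The flush above now starts at base_gfn and covers KVM_PAGES_PER_HPAGE(level + 1), i.e. the whole region the removed table mapped, rather than starting at whatever child gfn the loop happened to end on. The arithmetic, assuming the usual x86 geometry of 512 entries per level (a hedged illustration, not kernel code):

#include <stdio.h>

/* x86-style paging: 9 index bits per level, 4 KiB base pages. */
static unsigned long long pages_per_hpage(int level)
{
	return 1ULL << ((level - 1) * 9);	/* level 1 = 4K, 2 = 2M, 3 = 1G */
}

int main(void)
{
	int level = 1;				/* the removed table held 4 KiB leaves */
	unsigned long long base_gfn = 0x80000;

	/* Each of the 512 children covers pages_per_hpage(level) pages... */
	unsigned long long child = pages_per_hpage(level);
	/* ...so flushing the parent means covering pages_per_hpage(level + 1). */
	unsigned long long parent = pages_per_hpage(level + 1);

	printf("child span : %llu page(s)\n", child);			/* 1 */
	printf("parent span: %llu page(s) = 512 * child\n", parent);	/* 512 */
	printf("flush gfns [%#llx, %#llx)\n", base_gfn, base_gfn + parent);
	return 0;
}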
@@ -1033,9 +1031,9 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
 {
 	struct kvm_mmu_page *root;

-	for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
-		flush |= zap_gfn_range(kvm, root, range->start, range->end,
-				       range->may_block, flush, false);
+	for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
+		flush = zap_gfn_range(kvm, root, range->start, range->end,
+				      range->may_block, flush, false);

 	return flush;
 }
@@ -1364,10 +1362,9 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
  * Clear leaf entries which could be replaced by large mappings, for
  * GFNs within the slot.
  */
-static bool zap_collapsible_spte_range(struct kvm *kvm,
+static void zap_collapsible_spte_range(struct kvm *kvm,
 				       struct kvm_mmu_page *root,
-				       const struct kvm_memory_slot *slot,
-				       bool flush)
+				       const struct kvm_memory_slot *slot)
 {
 	gfn_t start = slot->base_gfn;
 	gfn_t end = start + slot->npages;
@@ -1378,10 +1375,8 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,

 	tdp_root_for_each_pte(iter, root, start, end) {
 retry:
-		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
-			flush = false;
+		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
 			continue;
-		}

 		if (!is_shadow_present_pte(iter.old_spte) ||
 		    !is_last_spte(iter.old_spte, iter.level))
@@ -1393,6 +1388,7 @@ retry:
 							  pfn, PG_LEVEL_NUM))
 			continue;

+		/* Note, a successful atomic zap also does a remote TLB flush. */
 		if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
 			/*
 			 * The iter must explicitly re-read the SPTE because
@@ -1401,30 +1397,24 @@ retry:
 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
 			goto retry;
 		}
-		flush = true;
 	}

 	rcu_read_unlock();
-
-	return flush;
 }

 /*
  * Clear non-leaf entries (and free associated page tables) which could
  * be replaced by large mappings, for GFNs within the slot.
  */
-bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
-				       const struct kvm_memory_slot *slot,
-				       bool flush)
+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+				       const struct kvm_memory_slot *slot)
 {
 	struct kvm_mmu_page *root;

 	lockdep_assert_held_read(&kvm->mmu_lock);

 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
-		flush = zap_collapsible_spte_range(kvm, root, slot, flush);
-
-	return flush;
+		zap_collapsible_spte_range(kvm, root, slot);
 }

 /*
@@ -64,9 +64,8 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 				       struct kvm_memory_slot *slot,
 				       gfn_t gfn, unsigned long mask,
 				       bool wrprot);
-bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
-				       const struct kvm_memory_slot *slot,
-				       bool flush);
+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+				       const struct kvm_memory_slot *slot);

 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
 				   struct kvm_memory_slot *slot, gfn_t gfn,
@@ -900,6 +900,7 @@ out:
|
|||||||
bool svm_check_apicv_inhibit_reasons(ulong bit)
|
bool svm_check_apicv_inhibit_reasons(ulong bit)
|
||||||
{
|
{
|
||||||
ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
|
ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
|
||||||
|
BIT(APICV_INHIBIT_REASON_ABSENT) |
|
||||||
BIT(APICV_INHIBIT_REASON_HYPERV) |
|
BIT(APICV_INHIBIT_REASON_HYPERV) |
|
||||||
BIT(APICV_INHIBIT_REASON_NESTED) |
|
BIT(APICV_INHIBIT_REASON_NESTED) |
|
||||||
BIT(APICV_INHIBIT_REASON_IRQWIN) |
|
BIT(APICV_INHIBIT_REASON_IRQWIN) |
|
||||||
@@ -989,16 +990,18 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
|
|||||||
static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
|
static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
|
||||||
{
|
{
|
||||||
struct vcpu_svm *svm = to_svm(vcpu);
|
struct vcpu_svm *svm = to_svm(vcpu);
|
||||||
|
int cpu = get_cpu();
|
||||||
|
|
||||||
|
WARN_ON(cpu != vcpu->cpu);
|
||||||
svm->avic_is_running = is_run;
|
svm->avic_is_running = is_run;
|
||||||
|
|
||||||
if (!kvm_vcpu_apicv_active(vcpu))
|
if (kvm_vcpu_apicv_active(vcpu)) {
|
||||||
return;
|
if (is_run)
|
||||||
|
avic_vcpu_load(vcpu, cpu);
|
||||||
if (is_run)
|
else
|
||||||
avic_vcpu_load(vcpu, vcpu->cpu);
|
avic_vcpu_put(vcpu);
|
||||||
else
|
}
|
||||||
avic_vcpu_put(vcpu);
|
put_cpu();
|
||||||
}
|
}
|
||||||
|
|
||||||
void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
|
void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
|
||||||
|
|||||||
@@ -281,7 +281,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
|
|||||||
pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
|
pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
|
||||||
|
|
||||||
pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
|
pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
|
||||||
pmu->reserved_bits = 0xffffffff00200000ull;
|
pmu->reserved_bits = 0xfffffff000280000ull;
|
||||||
pmu->version = 1;
|
pmu->version = 1;
|
||||||
/* not applicable to AMD; but clean them to prevent any fall out */
|
/* not applicable to AMD; but clean them to prevent any fall out */
|
||||||
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
|
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
|
||||||
|
|||||||
@@ -1543,28 +1543,50 @@ static bool is_cmd_allowed_from_mirror(u32 cmd_id)
 	return false;
 }

-static int sev_lock_for_migration(struct kvm *kvm)
+static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
 {
-	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
+	struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
+	int r = -EBUSY;
+
+	if (dst_kvm == src_kvm)
+		return -EINVAL;

 	/*
-	 * Bail if this VM is already involved in a migration to avoid deadlock
-	 * between two VMs trying to migrate to/from each other.
+	 * Bail if these VMs are already involved in a migration to avoid
+	 * deadlock between two VMs trying to migrate to/from each other.
 	 */
-	if (atomic_cmpxchg_acquire(&sev->migration_in_progress, 0, 1))
+	if (atomic_cmpxchg_acquire(&dst_sev->migration_in_progress, 0, 1))
 		return -EBUSY;

-	mutex_lock(&kvm->lock);
+	if (atomic_cmpxchg_acquire(&src_sev->migration_in_progress, 0, 1))
+		goto release_dst;

+	r = -EINTR;
+	if (mutex_lock_killable(&dst_kvm->lock))
+		goto release_src;
+	if (mutex_lock_killable(&src_kvm->lock))
+		goto unlock_dst;
 	return 0;
+
+unlock_dst:
+	mutex_unlock(&dst_kvm->lock);
+release_src:
+	atomic_set_release(&src_sev->migration_in_progress, 0);
+release_dst:
+	atomic_set_release(&dst_sev->migration_in_progress, 0);
+	return r;
 }

-static void sev_unlock_after_migration(struct kvm *kvm)
+static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
 {
-	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
+	struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;

-	mutex_unlock(&kvm->lock);
-	atomic_set_release(&sev->migration_in_progress, 0);
+	mutex_unlock(&dst_kvm->lock);
+	mutex_unlock(&src_kvm->lock);
+	atomic_set_release(&dst_sev->migration_in_progress, 0);
+	atomic_set_release(&src_sev->migration_in_progress, 0);
 }
|
||||||
|
|
||||||
|
|
||||||
@@ -1607,14 +1629,15 @@ static void sev_migrate_from(struct kvm_sev_info *dst,
|
|||||||
dst->asid = src->asid;
|
dst->asid = src->asid;
|
||||||
dst->handle = src->handle;
|
dst->handle = src->handle;
|
||||||
dst->pages_locked = src->pages_locked;
|
dst->pages_locked = src->pages_locked;
|
||||||
|
dst->enc_context_owner = src->enc_context_owner;
|
||||||
|
|
||||||
src->asid = 0;
|
src->asid = 0;
|
||||||
src->active = false;
|
src->active = false;
|
||||||
src->handle = 0;
|
src->handle = 0;
|
||||||
src->pages_locked = 0;
|
src->pages_locked = 0;
|
||||||
|
src->enc_context_owner = NULL;
|
||||||
|
|
||||||
INIT_LIST_HEAD(&dst->regions_list);
|
list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
|
||||||
list_replace_init(&src->regions_list, &dst->regions_list);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
|
static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
|
||||||
@@ -1666,15 +1689,6 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
|
|||||||
bool charged = false;
|
bool charged = false;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
ret = sev_lock_for_migration(kvm);
|
|
||||||
if (ret)
|
|
||||||
return ret;
|
|
||||||
|
|
||||||
if (sev_guest(kvm)) {
|
|
||||||
ret = -EINVAL;
|
|
||||||
goto out_unlock;
|
|
||||||
}
|
|
||||||
|
|
||||||
source_kvm_file = fget(source_fd);
|
source_kvm_file = fget(source_fd);
|
||||||
if (!file_is_kvm(source_kvm_file)) {
|
if (!file_is_kvm(source_kvm_file)) {
|
||||||
ret = -EBADF;
|
ret = -EBADF;
|
||||||
@@ -1682,16 +1696,26 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
|
|||||||
}
|
}
|
||||||
|
|
||||||
source_kvm = source_kvm_file->private_data;
|
source_kvm = source_kvm_file->private_data;
|
||||||
ret = sev_lock_for_migration(source_kvm);
|
ret = sev_lock_two_vms(kvm, source_kvm);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto out_fput;
|
goto out_fput;
|
||||||
|
|
||||||
if (!sev_guest(source_kvm)) {
|
if (sev_guest(kvm) || !sev_guest(source_kvm)) {
|
||||||
ret = -EINVAL;
|
ret = -EINVAL;
|
||||||
goto out_source;
|
goto out_unlock;
|
||||||
}
|
}
|
||||||
|
|
||||||
src_sev = &to_kvm_svm(source_kvm)->sev_info;
|
src_sev = &to_kvm_svm(source_kvm)->sev_info;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* VMs mirroring src's encryption context rely on it to keep the
|
||||||
|
* ASID allocated, but below we are clearing src_sev->asid.
|
||||||
|
*/
|
||||||
|
if (src_sev->num_mirrored_vms) {
|
||||||
|
ret = -EBUSY;
|
||||||
|
goto out_unlock;
|
||||||
|
}
|
||||||
|
|
||||||
dst_sev->misc_cg = get_current_misc_cg();
|
dst_sev->misc_cg = get_current_misc_cg();
|
||||||
cg_cleanup_sev = dst_sev;
|
cg_cleanup_sev = dst_sev;
|
||||||
if (dst_sev->misc_cg != src_sev->misc_cg) {
|
if (dst_sev->misc_cg != src_sev->misc_cg) {
|
||||||
@@ -1728,13 +1752,11 @@ out_dst_cgroup:
|
|||||||
sev_misc_cg_uncharge(cg_cleanup_sev);
|
sev_misc_cg_uncharge(cg_cleanup_sev);
|
||||||
put_misc_cg(cg_cleanup_sev->misc_cg);
|
put_misc_cg(cg_cleanup_sev->misc_cg);
|
||||||
cg_cleanup_sev->misc_cg = NULL;
|
cg_cleanup_sev->misc_cg = NULL;
|
||||||
out_source:
|
out_unlock:
|
||||||
sev_unlock_after_migration(source_kvm);
|
sev_unlock_two_vms(kvm, source_kvm);
|
||||||
out_fput:
|
out_fput:
|
||||||
if (source_kvm_file)
|
if (source_kvm_file)
|
||||||
fput(source_kvm_file);
|
fput(source_kvm_file);
|
||||||
out_unlock:
|
|
||||||
sev_unlock_after_migration(kvm);
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1953,76 +1975,60 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
|
|||||||
{
|
{
|
||||||
struct file *source_kvm_file;
|
struct file *source_kvm_file;
|
||||||
struct kvm *source_kvm;
|
struct kvm *source_kvm;
|
||||||
struct kvm_sev_info source_sev, *mirror_sev;
|
struct kvm_sev_info *source_sev, *mirror_sev;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
source_kvm_file = fget(source_fd);
|
source_kvm_file = fget(source_fd);
|
||||||
if (!file_is_kvm(source_kvm_file)) {
|
if (!file_is_kvm(source_kvm_file)) {
|
||||||
ret = -EBADF;
|
ret = -EBADF;
|
||||||
goto e_source_put;
|
goto e_source_fput;
|
||||||
}
|
}
|
||||||
|
|
||||||
source_kvm = source_kvm_file->private_data;
|
source_kvm = source_kvm_file->private_data;
|
||||||
mutex_lock(&source_kvm->lock);
|
ret = sev_lock_two_vms(kvm, source_kvm);
|
||||||
|
if (ret)
|
||||||
|
goto e_source_fput;
|
||||||
|
|
||||||
if (!sev_guest(source_kvm)) {
|
/*
|
||||||
|
* Mirrors of mirrors should work, but let's not get silly. Also
|
||||||
|
* disallow out-of-band SEV/SEV-ES init if the target is already an
|
||||||
|
* SEV guest, or if vCPUs have been created. KVM relies on vCPUs being
|
||||||
|
* created after SEV/SEV-ES initialization, e.g. to init intercepts.
|
||||||
|
*/
|
||||||
|
if (sev_guest(kvm) || !sev_guest(source_kvm) ||
|
||||||
|
is_mirroring_enc_context(source_kvm) || kvm->created_vcpus) {
|
||||||
ret = -EINVAL;
|
ret = -EINVAL;
|
||||||
goto e_source_unlock;
|
goto e_unlock;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Mirrors of mirrors should work, but let's not get silly */
|
|
||||||
if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) {
|
|
||||||
ret = -EINVAL;
|
|
||||||
goto e_source_unlock;
|
|
||||||
}
|
|
||||||
|
|
||||||
memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info,
|
|
||||||
sizeof(source_sev));
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The mirror kvm holds an enc_context_owner ref so its asid can't
|
* The mirror kvm holds an enc_context_owner ref so its asid can't
|
||||||
* disappear until we're done with it
|
* disappear until we're done with it
|
||||||
*/
|
*/
|
||||||
|
source_sev = &to_kvm_svm(source_kvm)->sev_info;
|
||||||
kvm_get_kvm(source_kvm);
|
kvm_get_kvm(source_kvm);
|
||||||
|
source_sev->num_mirrored_vms++;
|
||||||
fput(source_kvm_file);
|
|
||||||
mutex_unlock(&source_kvm->lock);
|
|
||||||
mutex_lock(&kvm->lock);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Disallow out-of-band SEV/SEV-ES init if the target is already an
|
|
||||||
* SEV guest, or if vCPUs have been created. KVM relies on vCPUs being
|
|
||||||
* created after SEV/SEV-ES initialization, e.g. to init intercepts.
|
|
||||||
*/
|
|
||||||
if (sev_guest(kvm) || kvm->created_vcpus) {
|
|
||||||
ret = -EINVAL;
|
|
||||||
goto e_mirror_unlock;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Set enc_context_owner and copy its encryption context over */
|
/* Set enc_context_owner and copy its encryption context over */
|
||||||
mirror_sev = &to_kvm_svm(kvm)->sev_info;
|
mirror_sev = &to_kvm_svm(kvm)->sev_info;
|
||||||
mirror_sev->enc_context_owner = source_kvm;
|
mirror_sev->enc_context_owner = source_kvm;
|
||||||
mirror_sev->active = true;
|
mirror_sev->active = true;
|
||||||
mirror_sev->asid = source_sev.asid;
|
mirror_sev->asid = source_sev->asid;
|
||||||
mirror_sev->fd = source_sev.fd;
|
mirror_sev->fd = source_sev->fd;
|
||||||
mirror_sev->es_active = source_sev.es_active;
|
mirror_sev->es_active = source_sev->es_active;
|
||||||
mirror_sev->handle = source_sev.handle;
|
mirror_sev->handle = source_sev->handle;
|
||||||
|
INIT_LIST_HEAD(&mirror_sev->regions_list);
|
||||||
|
ret = 0;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Do not copy ap_jump_table. Since the mirror does not share the same
|
* Do not copy ap_jump_table. Since the mirror does not share the same
|
||||||
* KVM contexts as the original, and they may have different
|
* KVM contexts as the original, and they may have different
|
||||||
* memory-views.
|
* memory-views.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
mutex_unlock(&kvm->lock);
|
e_unlock:
|
||||||
return 0;
|
sev_unlock_two_vms(kvm, source_kvm);
|
||||||
|
e_source_fput:
|
||||||
e_mirror_unlock:
|
|
||||||
mutex_unlock(&kvm->lock);
|
|
||||||
kvm_put_kvm(source_kvm);
|
|
||||||
return ret;
|
|
||||||
e_source_unlock:
|
|
||||||
mutex_unlock(&source_kvm->lock);
|
|
||||||
e_source_put:
|
|
||||||
if (source_kvm_file)
|
if (source_kvm_file)
|
||||||
fput(source_kvm_file);
|
fput(source_kvm_file);
|
||||||
return ret;
|
return ret;
|
||||||
@@ -2034,17 +2040,24 @@ void sev_vm_destroy(struct kvm *kvm)
|
|||||||
struct list_head *head = &sev->regions_list;
|
struct list_head *head = &sev->regions_list;
|
||||||
struct list_head *pos, *q;
|
struct list_head *pos, *q;
|
||||||
|
|
||||||
|
WARN_ON(sev->num_mirrored_vms);
|
||||||
|
|
||||||
if (!sev_guest(kvm))
|
if (!sev_guest(kvm))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
/* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */
|
/* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */
|
||||||
if (is_mirroring_enc_context(kvm)) {
|
if (is_mirroring_enc_context(kvm)) {
|
||||||
kvm_put_kvm(sev->enc_context_owner);
|
struct kvm *owner_kvm = sev->enc_context_owner;
|
||||||
|
struct kvm_sev_info *owner_sev = &to_kvm_svm(owner_kvm)->sev_info;
|
||||||
|
|
||||||
|
mutex_lock(&owner_kvm->lock);
|
||||||
|
if (!WARN_ON(!owner_sev->num_mirrored_vms))
|
||||||
|
owner_sev->num_mirrored_vms--;
|
||||||
|
mutex_unlock(&owner_kvm->lock);
|
||||||
|
kvm_put_kvm(owner_kvm);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
mutex_lock(&kvm->lock);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Ensure that all guest tagged cache entries are flushed before
|
* Ensure that all guest tagged cache entries are flushed before
|
||||||
* releasing the pages back to the system for use. CLFLUSH will
|
* releasing the pages back to the system for use. CLFLUSH will
|
||||||
@@ -2064,8 +2077,6 @@ void sev_vm_destroy(struct kvm *kvm)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
mutex_unlock(&kvm->lock);
|
|
||||||
|
|
||||||
sev_unbind_asid(kvm, sev->handle);
|
sev_unbind_asid(kvm, sev->handle);
|
||||||
sev_asid_free(sev);
|
sev_asid_free(sev);
|
||||||
}
|
}
|
||||||
@@ -2249,7 +2260,7 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
|
|||||||
__free_page(virt_to_page(svm->sev_es.vmsa));
|
__free_page(virt_to_page(svm->sev_es.vmsa));
|
||||||
|
|
||||||
if (svm->sev_es.ghcb_sa_free)
|
if (svm->sev_es.ghcb_sa_free)
|
||||||
kfree(svm->sev_es.ghcb_sa);
|
kvfree(svm->sev_es.ghcb_sa);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void dump_ghcb(struct vcpu_svm *svm)
|
static void dump_ghcb(struct vcpu_svm *svm)
|
||||||
@@ -2341,24 +2352,29 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
|
|||||||
memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
|
memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
|
||||||
}
|
}
|
||||||
|
|
||||||
static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
|
static bool sev_es_validate_vmgexit(struct vcpu_svm *svm)
|
||||||
{
|
{
|
||||||
struct kvm_vcpu *vcpu;
|
struct kvm_vcpu *vcpu;
|
||||||
struct ghcb *ghcb;
|
struct ghcb *ghcb;
|
||||||
u64 exit_code = 0;
|
u64 exit_code;
|
||||||
|
u64 reason;
|
||||||
|
|
||||||
ghcb = svm->sev_es.ghcb;
|
ghcb = svm->sev_es.ghcb;
|
||||||
|
|
||||||
/* Only GHCB Usage code 0 is supported */
|
|
||||||
if (ghcb->ghcb_usage)
|
|
||||||
goto vmgexit_err;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Retrieve the exit code now even though is may not be marked valid
|
* Retrieve the exit code now even though it may not be marked valid
|
||||||
* as it could help with debugging.
|
* as it could help with debugging.
|
||||||
*/
|
*/
|
||||||
exit_code = ghcb_get_sw_exit_code(ghcb);
|
exit_code = ghcb_get_sw_exit_code(ghcb);
|
||||||
|
|
||||||
|
/* Only GHCB Usage code 0 is supported */
|
||||||
|
if (ghcb->ghcb_usage) {
|
||||||
|
reason = GHCB_ERR_INVALID_USAGE;
|
||||||
|
goto vmgexit_err;
|
||||||
|
}
|
||||||
|
|
||||||
|
reason = GHCB_ERR_MISSING_INPUT;
|
||||||
|
|
||||||
if (!ghcb_sw_exit_code_is_valid(ghcb) ||
|
if (!ghcb_sw_exit_code_is_valid(ghcb) ||
|
||||||
!ghcb_sw_exit_info_1_is_valid(ghcb) ||
|
!ghcb_sw_exit_info_1_is_valid(ghcb) ||
|
||||||
!ghcb_sw_exit_info_2_is_valid(ghcb))
|
!ghcb_sw_exit_info_2_is_valid(ghcb))
|
||||||
@@ -2437,30 +2453,34 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
|
|||||||
case SVM_VMGEXIT_UNSUPPORTED_EVENT:
|
case SVM_VMGEXIT_UNSUPPORTED_EVENT:
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
|
reason = GHCB_ERR_INVALID_EVENT;
|
||||||
goto vmgexit_err;
|
goto vmgexit_err;
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return true;
|
||||||
|
|
||||||
vmgexit_err:
|
vmgexit_err:
|
||||||
vcpu = &svm->vcpu;
|
vcpu = &svm->vcpu;
|
||||||
|
|
||||||
if (ghcb->ghcb_usage) {
|
if (reason == GHCB_ERR_INVALID_USAGE) {
|
||||||
vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
|
vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
|
||||||
ghcb->ghcb_usage);
|
ghcb->ghcb_usage);
|
||||||
|
} else if (reason == GHCB_ERR_INVALID_EVENT) {
|
||||||
|
vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n",
|
||||||
|
exit_code);
|
||||||
} else {
|
} else {
|
||||||
vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n",
|
vcpu_unimpl(vcpu, "vmgexit: exit code %#llx input is not valid\n",
|
||||||
exit_code);
|
exit_code);
|
||||||
dump_ghcb(svm);
|
dump_ghcb(svm);
|
||||||
}
|
}
|
||||||
|
|
||||||
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
|
/* Clear the valid entries fields */
|
||||||
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
|
memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
|
||||||
vcpu->run->internal.ndata = 2;
|
|
||||||
vcpu->run->internal.data[0] = exit_code;
|
|
||||||
vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
|
|
||||||
|
|
||||||
return -EINVAL;
|
ghcb_set_sw_exit_info_1(ghcb, 2);
|
||||||
|
ghcb_set_sw_exit_info_2(ghcb, reason);
|
||||||
|
|
||||||
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
void sev_es_unmap_ghcb(struct vcpu_svm *svm)
|
void sev_es_unmap_ghcb(struct vcpu_svm *svm)
|
||||||
@@ -2482,7 +2502,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
|
|||||||
svm->sev_es.ghcb_sa_sync = false;
|
svm->sev_es.ghcb_sa_sync = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
kfree(svm->sev_es.ghcb_sa);
|
kvfree(svm->sev_es.ghcb_sa);
|
||||||
svm->sev_es.ghcb_sa = NULL;
|
svm->sev_es.ghcb_sa = NULL;
|
||||||
svm->sev_es.ghcb_sa_free = false;
|
svm->sev_es.ghcb_sa_free = false;
|
||||||
}
|
}
|
||||||
@@ -2530,14 +2550,14 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
|
|||||||
scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
|
scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
|
||||||
if (!scratch_gpa_beg) {
|
if (!scratch_gpa_beg) {
|
||||||
pr_err("vmgexit: scratch gpa not provided\n");
|
pr_err("vmgexit: scratch gpa not provided\n");
|
||||||
return false;
|
goto e_scratch;
|
||||||
}
|
}
|
||||||
|
|
||||||
scratch_gpa_end = scratch_gpa_beg + len;
|
scratch_gpa_end = scratch_gpa_beg + len;
|
||||||
if (scratch_gpa_end < scratch_gpa_beg) {
|
if (scratch_gpa_end < scratch_gpa_beg) {
|
||||||
pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
|
pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
|
||||||
len, scratch_gpa_beg);
|
len, scratch_gpa_beg);
|
||||||
return false;
|
goto e_scratch;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
|
if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
|
||||||
@@ -2555,7 +2575,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
|
|||||||
scratch_gpa_end > ghcb_scratch_end) {
|
scratch_gpa_end > ghcb_scratch_end) {
|
||||||
pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
|
pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
|
||||||
scratch_gpa_beg, scratch_gpa_end);
|
scratch_gpa_beg, scratch_gpa_end);
|
||||||
return false;
|
goto e_scratch;
|
||||||
}
|
}
|
||||||
|
|
||||||
scratch_va = (void *)svm->sev_es.ghcb;
|
scratch_va = (void *)svm->sev_es.ghcb;
|
||||||
@@ -2568,18 +2588,18 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
|
|||||||
if (len > GHCB_SCRATCH_AREA_LIMIT) {
|
if (len > GHCB_SCRATCH_AREA_LIMIT) {
|
||||||
pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
|
pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
|
||||||
len, GHCB_SCRATCH_AREA_LIMIT);
|
len, GHCB_SCRATCH_AREA_LIMIT);
|
||||||
return false;
|
goto e_scratch;
|
||||||
}
|
}
|
||||||
scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT);
|
scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT);
|
||||||
if (!scratch_va)
|
if (!scratch_va)
|
||||||
return false;
|
goto e_scratch;
|
||||||
|
|
||||||
if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
|
if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
|
||||||
/* Unable to copy scratch area from guest */
|
/* Unable to copy scratch area from guest */
|
||||||
pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
|
pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
|
||||||
|
|
||||||
kfree(scratch_va);
|
kvfree(scratch_va);
|
||||||
return false;
|
goto e_scratch;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -2596,6 +2616,12 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
|
|||||||
svm->sev_es.ghcb_sa_len = len;
|
svm->sev_es.ghcb_sa_len = len;
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
|
|
||||||
|
e_scratch:
|
||||||
|
ghcb_set_sw_exit_info_1(ghcb, 2);
|
||||||
|
ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
|
||||||
|
|
||||||
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
|
static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
|
||||||
@@ -2646,7 +2672,7 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
|
|||||||
|
|
||||||
ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
|
ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
|
||||||
if (!ret) {
|
if (!ret) {
|
||||||
ret = -EINVAL;
|
/* Error, keep GHCB MSR value as-is */
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2682,10 +2708,13 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
|
|||||||
GHCB_MSR_TERM_REASON_POS);
|
GHCB_MSR_TERM_REASON_POS);
|
||||||
pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
|
pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
|
||||||
reason_set, reason_code);
|
reason_set, reason_code);
|
||||||
fallthrough;
|
|
||||||
|
ret = -EINVAL;
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
ret = -EINVAL;
|
/* Error, keep GHCB MSR value as-is */
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
|
trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
|
||||||
@@ -2709,14 +2738,18 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
|
|||||||
|
|
||||||
if (!ghcb_gpa) {
|
if (!ghcb_gpa) {
|
||||||
vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
|
vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
|
||||||
return -EINVAL;
|
|
||||||
|
/* Without a GHCB, just return right back to the guest */
|
||||||
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
|
if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
|
||||||
/* Unable to map GHCB from guest */
|
/* Unable to map GHCB from guest */
|
||||||
vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
|
vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
|
||||||
ghcb_gpa);
|
ghcb_gpa);
|
||||||
return -EINVAL;
|
|
||||||
|
/* Without a GHCB, just return right back to the guest */
|
||||||
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
|
svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
|
||||||
@@ -2726,15 +2759,14 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
|
|||||||
|
|
||||||
exit_code = ghcb_get_sw_exit_code(ghcb);
|
exit_code = ghcb_get_sw_exit_code(ghcb);
|
||||||
|
|
||||||
ret = sev_es_validate_vmgexit(svm);
|
if (!sev_es_validate_vmgexit(svm))
|
||||||
if (ret)
|
return 1;
|
||||||
return ret;
|
|
||||||
|
|
||||||
sev_es_sync_from_ghcb(svm);
|
sev_es_sync_from_ghcb(svm);
|
||||||
ghcb_set_sw_exit_info_1(ghcb, 0);
|
ghcb_set_sw_exit_info_1(ghcb, 0);
|
||||||
ghcb_set_sw_exit_info_2(ghcb, 0);
|
ghcb_set_sw_exit_info_2(ghcb, 0);
|
||||||
|
|
||||||
ret = -EINVAL;
|
ret = 1;
|
||||||
switch (exit_code) {
|
switch (exit_code) {
|
||||||
case SVM_VMGEXIT_MMIO_READ:
|
case SVM_VMGEXIT_MMIO_READ:
|
||||||
if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
|
if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
|
||||||
@@ -2775,20 +2807,17 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
|
|||||||
default:
|
default:
|
||||||
pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
|
pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
|
||||||
control->exit_info_1);
|
control->exit_info_1);
|
||||||
ghcb_set_sw_exit_info_1(ghcb, 1);
|
ghcb_set_sw_exit_info_1(ghcb, 2);
|
||||||
ghcb_set_sw_exit_info_2(ghcb,
|
ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_INPUT);
|
||||||
X86_TRAP_UD |
|
|
||||||
SVM_EVTINJ_TYPE_EXEPT |
|
|
||||||
SVM_EVTINJ_VALID);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = 1;
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case SVM_VMGEXIT_UNSUPPORTED_EVENT:
|
case SVM_VMGEXIT_UNSUPPORTED_EVENT:
|
||||||
vcpu_unimpl(vcpu,
|
vcpu_unimpl(vcpu,
|
||||||
"vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
|
"vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
|
||||||
control->exit_info_1, control->exit_info_2);
|
control->exit_info_1, control->exit_info_2);
|
||||||
|
ret = -EINVAL;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
ret = svm_invoke_exit_handler(vcpu, exit_code);
|
ret = svm_invoke_exit_handler(vcpu, exit_code);
|
||||||
@@ -2810,7 +2839,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
|
|||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
if (!setup_vmgexit_scratch(svm, in, bytes))
|
if (!setup_vmgexit_scratch(svm, in, bytes))
|
||||||
return -EINVAL;
|
return 1;
|
||||||
|
|
||||||
return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
|
return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
|
||||||
count, in);
|
count, in);
|
||||||
|
|||||||
@@ -4651,7 +4651,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
|
|||||||
.load_eoi_exitmap = svm_load_eoi_exitmap,
|
.load_eoi_exitmap = svm_load_eoi_exitmap,
|
||||||
.hwapic_irr_update = svm_hwapic_irr_update,
|
.hwapic_irr_update = svm_hwapic_irr_update,
|
||||||
.hwapic_isr_update = svm_hwapic_isr_update,
|
.hwapic_isr_update = svm_hwapic_isr_update,
|
||||||
.sync_pir_to_irr = kvm_lapic_find_highest_irr,
|
|
||||||
.apicv_post_state_restore = avic_post_state_restore,
|
.apicv_post_state_restore = avic_post_state_restore,
|
||||||
|
|
||||||
.set_tss_addr = svm_set_tss_addr,
|
.set_tss_addr = svm_set_tss_addr,
|
||||||
|
|||||||
@@ -79,6 +79,7 @@ struct kvm_sev_info {
|
|||||||
struct list_head regions_list; /* List of registered regions */
|
struct list_head regions_list; /* List of registered regions */
|
||||||
u64 ap_jump_table; /* SEV-ES AP Jump Table address */
|
u64 ap_jump_table; /* SEV-ES AP Jump Table address */
|
||||||
struct kvm *enc_context_owner; /* Owner of copied encryption context */
|
struct kvm *enc_context_owner; /* Owner of copied encryption context */
|
||||||
|
unsigned long num_mirrored_vms; /* Number of VMs sharing this ASID */
|
||||||
struct misc_cg *misc_cg; /* For misc cgroup accounting */
|
struct misc_cg *misc_cg; /* For misc cgroup accounting */
|
||||||
atomic_t migration_in_progress;
|
atomic_t migration_in_progress;
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -1162,29 +1162,26 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
|
|||||||
WARN_ON(!enable_vpid);
|
WARN_ON(!enable_vpid);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If VPID is enabled and used by vmc12, but L2 does not have a unique
|
* VPID is enabled and in use by vmcs12. If vpid12 is changing, then
|
||||||
* TLB tag (ASID), i.e. EPT is disabled and KVM was unable to allocate
|
* emulate a guest TLB flush as KVM does not track vpid12 history nor
|
||||||
* a VPID for L2, flush the current context as the effective ASID is
|
* is the VPID incorporated into the MMU context. I.e. KVM must assume
|
||||||
* common to both L1 and L2.
|
* that the new vpid12 has never been used and thus represents a new
|
||||||
*
|
* guest ASID that cannot have entries in the TLB.
|
||||||
* Defer the flush so that it runs after vmcs02.EPTP has been set by
|
|
||||||
* KVM_REQ_LOAD_MMU_PGD (if nested EPT is enabled) and to avoid
|
|
||||||
* redundant flushes further down the nested pipeline.
|
|
||||||
*
|
|
||||||
* If a TLB flush isn't required due to any of the above, and vpid12 is
|
|
||||||
* changing then the new "virtual" VPID (vpid12) will reuse the same
|
|
||||||
* "real" VPID (vpid02), and so needs to be flushed. There's no direct
|
|
||||||
* mapping between vpid02 and vpid12, vpid02 is per-vCPU and reused for
|
|
||||||
* all nested vCPUs. Remember, a flush on VM-Enter does not invalidate
|
|
||||||
* guest-physical mappings, so there is no need to sync the nEPT MMU.
|
|
||||||
*/
|
*/
|
||||||
if (!nested_has_guest_tlb_tag(vcpu)) {
|
if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
|
||||||
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
|
|
||||||
} else if (is_vmenter &&
|
|
||||||
vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
|
|
||||||
vmx->nested.last_vpid = vmcs12->virtual_processor_id;
|
vmx->nested.last_vpid = vmcs12->virtual_processor_id;
|
||||||
vpid_sync_context(nested_get_vpid02(vcpu));
|
kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If VPID is enabled, used by vmc12, and vpid12 is not changing but
|
||||||
|
* does not have a unique TLB tag (ASID), i.e. EPT is disabled and
|
||||||
|
* KVM was unable to allocate a VPID for L2, flush the current context
|
||||||
|
* as the effective ASID is common to both L1 and L2.
|
||||||
|
*/
|
||||||
|
if (!nested_has_guest_tlb_tag(vcpu))
|
||||||
|
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
|
static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
|
||||||
@@ -2594,8 +2591,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
|
|||||||
|
|
||||||
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
|
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
|
||||||
WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
|
WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
|
||||||
vmcs12->guest_ia32_perf_global_ctrl)))
|
vmcs12->guest_ia32_perf_global_ctrl))) {
|
||||||
|
*entry_failure_code = ENTRY_FAIL_DEFAULT;
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
kvm_rsp_write(vcpu, vmcs12->guest_rsp);
|
kvm_rsp_write(vcpu, vmcs12->guest_rsp);
|
||||||
kvm_rip_write(vcpu, vmcs12->guest_rip);
|
kvm_rip_write(vcpu, vmcs12->guest_rip);
|
||||||
@@ -3344,8 +3343,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
|
|||||||
};
|
};
|
||||||
u32 failed_index;
|
u32 failed_index;
|
||||||
|
|
||||||
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
|
kvm_service_local_tlb_flush_requests(vcpu);
|
||||||
kvm_vcpu_flush_tlb_current(vcpu);
|
|
||||||
|
|
||||||
evaluate_pending_interrupts = exec_controls_get(vmx) &
|
evaluate_pending_interrupts = exec_controls_get(vmx) &
|
||||||
(CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
|
(CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
|
||||||
@@ -4502,9 +4500,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
|
|||||||
(void)nested_get_evmcs_page(vcpu);
|
(void)nested_get_evmcs_page(vcpu);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Service the TLB flush request for L2 before switching to L1. */
|
/* Service pending TLB flush requests for L2 before switching to L1. */
|
||||||
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
|
kvm_service_local_tlb_flush_requests(vcpu);
|
||||||
kvm_vcpu_flush_tlb_current(vcpu);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
|
* VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
|
||||||
@@ -4857,6 +4854,7 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
|
|||||||
if (!vmx->nested.cached_vmcs12)
|
if (!vmx->nested.cached_vmcs12)
|
||||||
goto out_cached_vmcs12;
|
goto out_cached_vmcs12;
|
||||||
|
|
||||||
|
vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA;
|
||||||
vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
|
vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
|
||||||
if (!vmx->nested.cached_shadow_vmcs12)
|
if (!vmx->nested.cached_shadow_vmcs12)
|
||||||
goto out_cached_shadow_vmcs12;
|
goto out_cached_shadow_vmcs12;
|
||||||
@@ -5289,8 +5287,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
|
|||||||
struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache;
|
struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache;
|
||||||
struct vmcs_hdr hdr;
|
struct vmcs_hdr hdr;
|
||||||
|
|
||||||
if (ghc->gpa != vmptr &&
|
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) {
|
||||||
kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) {
|
|
||||||
/*
|
/*
|
||||||
* Reads from an unbacked page return all 1s,
|
* Reads from an unbacked page return all 1s,
|
||||||
* which means that the 32 bits located at the
|
* which means that the 32 bits located at the
|
||||||
|
|||||||
@@ -5,6 +5,7 @@
|
|||||||
#include <asm/cpu.h>
|
#include <asm/cpu.h>
|
||||||
|
|
||||||
#include "lapic.h"
|
#include "lapic.h"
|
||||||
|
#include "irq.h"
|
||||||
#include "posted_intr.h"
|
#include "posted_intr.h"
|
||||||
#include "trace.h"
|
#include "trace.h"
|
||||||
#include "vmx.h"
|
#include "vmx.h"
|
||||||
@@ -77,13 +78,18 @@ after_clear_sn:
|
|||||||
pi_set_on(pi_desc);
|
pi_set_on(pi_desc);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static bool vmx_can_use_vtd_pi(struct kvm *kvm)
|
||||||
|
{
|
||||||
|
return irqchip_in_kernel(kvm) && enable_apicv &&
|
||||||
|
kvm_arch_has_assigned_device(kvm) &&
|
||||||
|
irq_remapping_cap(IRQ_POSTING_CAP);
|
||||||
|
}
|
||||||
|
|
||||||
void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
|
void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
|
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
|
||||||
|
|
||||||
if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
|
if (!vmx_can_use_vtd_pi(vcpu->kvm))
|
||||||
!irq_remapping_cap(IRQ_POSTING_CAP) ||
|
|
||||||
!kvm_vcpu_apicv_active(vcpu))
|
|
||||||
return;
|
return;
|
||||||
|
|
||||||
/* Set SN when the vCPU is preempted */
|
/* Set SN when the vCPU is preempted */
|
||||||
@@ -141,9 +147,7 @@ int pi_pre_block(struct kvm_vcpu *vcpu)
|
|||||||
struct pi_desc old, new;
|
struct pi_desc old, new;
|
||||||
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
|
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
|
||||||
|
|
||||||
if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
|
if (!vmx_can_use_vtd_pi(vcpu->kvm))
|
||||||
!irq_remapping_cap(IRQ_POSTING_CAP) ||
|
|
||||||
!kvm_vcpu_apicv_active(vcpu))
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
WARN_ON(irqs_disabled());
|
WARN_ON(irqs_disabled());
|
||||||
@@ -270,9 +274,7 @@ int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq,
|
|||||||
struct vcpu_data vcpu_info;
|
struct vcpu_data vcpu_info;
|
||||||
int idx, ret = 0;
|
int idx, ret = 0;
|
||||||
|
|
||||||
if (!kvm_arch_has_assigned_device(kvm) ||
|
if (!vmx_can_use_vtd_pi(kvm))
|
||||||
!irq_remapping_cap(IRQ_POSTING_CAP) ||
|
|
||||||
!kvm_vcpu_apicv_active(kvm->vcpus[0]))
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
idx = srcu_read_lock(&kvm->irq_srcu);
|
idx = srcu_read_lock(&kvm->irq_srcu);
|
||||||
|
|||||||
@@ -2918,6 +2918,13 @@ static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu)
|
||||||
|
{
|
||||||
|
if (is_guest_mode(vcpu))
|
||||||
|
return nested_get_vpid02(vcpu);
|
||||||
|
return to_vmx(vcpu)->vpid;
|
||||||
|
}
|
||||||
|
|
||||||
static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
|
static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
struct kvm_mmu *mmu = vcpu->arch.mmu;
|
struct kvm_mmu *mmu = vcpu->arch.mmu;
|
||||||
@@ -2930,31 +2937,29 @@ static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
|
|||||||
if (enable_ept)
|
if (enable_ept)
|
||||||
ept_sync_context(construct_eptp(vcpu, root_hpa,
|
ept_sync_context(construct_eptp(vcpu, root_hpa,
|
||||||
mmu->shadow_root_level));
|
mmu->shadow_root_level));
|
||||||
else if (!is_guest_mode(vcpu))
|
|
||||||
vpid_sync_context(to_vmx(vcpu)->vpid);
|
|
||||||
else
|
else
|
||||||
vpid_sync_context(nested_get_vpid02(vcpu));
|
vpid_sync_context(vmx_get_current_vpid(vcpu));
|
||||||
}
|
}
|
||||||
|
|
||||||
static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
|
static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
* vpid_sync_vcpu_addr() is a nop if vmx->vpid==0, see the comment in
|
* vpid_sync_vcpu_addr() is a nop if vpid==0, see the comment in
|
||||||
* vmx_flush_tlb_guest() for an explanation of why this is ok.
|
* vmx_flush_tlb_guest() for an explanation of why this is ok.
|
||||||
*/
|
*/
|
||||||
vpid_sync_vcpu_addr(to_vmx(vcpu)->vpid, addr);
|
vpid_sync_vcpu_addr(vmx_get_current_vpid(vcpu), addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
|
static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
* vpid_sync_context() is a nop if vmx->vpid==0, e.g. if enable_vpid==0
|
* vpid_sync_context() is a nop if vpid==0, e.g. if enable_vpid==0 or a
|
||||||
* or a vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit
|
* vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit are
|
||||||
* are required to flush GVA->{G,H}PA mappings from the TLB if vpid is
|
* required to flush GVA->{G,H}PA mappings from the TLB if vpid is
|
||||||
* disabled (VM-Enter with vpid enabled and vpid==0 is disallowed),
|
* disabled (VM-Enter with vpid enabled and vpid==0 is disallowed),
|
||||||
* i.e. no explicit INVVPID is necessary.
|
* i.e. no explicit INVVPID is necessary.
|
||||||
*/
|
*/
|
||||||
vpid_sync_context(to_vmx(vcpu)->vpid);
|
vpid_sync_context(vmx_get_current_vpid(vcpu));
|
||||||
}
|
}
|
||||||
|
|
||||||
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
|
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
|
||||||
@@ -6262,9 +6267,9 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
|
|||||||
{
|
{
|
||||||
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
||||||
int max_irr;
|
int max_irr;
|
||||||
bool max_irr_updated;
|
bool got_posted_interrupt;
|
||||||
|
|
||||||
if (KVM_BUG_ON(!vcpu->arch.apicv_active, vcpu->kvm))
|
if (KVM_BUG_ON(!enable_apicv, vcpu->kvm))
|
||||||
return -EIO;
|
return -EIO;
|
||||||
|
|
||||||
if (pi_test_on(&vmx->pi_desc)) {
|
if (pi_test_on(&vmx->pi_desc)) {
|
||||||
@@ -6274,22 +6279,33 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
|
|||||||
* But on x86 this is just a compiler barrier anyway.
|
* But on x86 this is just a compiler barrier anyway.
|
||||||
*/
|
*/
|
||||||
smp_mb__after_atomic();
|
smp_mb__after_atomic();
|
||||||
max_irr_updated =
|
got_posted_interrupt =
|
||||||
kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
|
kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
|
||||||
|
|
||||||
/*
|
|
||||||
* If we are running L2 and L1 has a new pending interrupt
|
|
||||||
* which can be injected, this may cause a vmexit or it may
|
|
||||||
* be injected into L2. Either way, this interrupt will be
|
|
||||||
* processed via KVM_REQ_EVENT, not RVI, because we do not use
|
|
||||||
* virtual interrupt delivery to inject L1 interrupts into L2.
|
|
||||||
*/
|
|
||||||
if (is_guest_mode(vcpu) && max_irr_updated)
|
|
||||||
kvm_make_request(KVM_REQ_EVENT, vcpu);
|
|
||||||
} else {
|
} else {
|
||||||
max_irr = kvm_lapic_find_highest_irr(vcpu);
|
max_irr = kvm_lapic_find_highest_irr(vcpu);
|
||||||
|
got_posted_interrupt = false;
|
||||||
}
|
}
|
||||||
vmx_hwapic_irr_update(vcpu, max_irr);
|
|
||||||
|
/*
|
||||||
|
* Newly recognized interrupts are injected via either virtual interrupt
|
||||||
|
* delivery (RVI) or KVM_REQ_EVENT. Virtual interrupt delivery is
|
||||||
|
* disabled in two cases:
|
||||||
|
*
|
||||||
|
* 1) If L2 is running and the vCPU has a new pending interrupt. If L1
|
||||||
|
* wants to exit on interrupts, KVM_REQ_EVENT is needed to synthesize a
|
||||||
|
* VM-Exit to L1. If L1 doesn't want to exit, the interrupt is injected
|
||||||
|
* into L2, but KVM doesn't use virtual interrupt delivery to inject
|
||||||
|
* interrupts into L2, and so KVM_REQ_EVENT is again needed.
|
||||||
|
*
|
||||||
|
* 2) If APICv is disabled for this vCPU, assigned devices may still
|
||||||
|
* attempt to post interrupts. The posted interrupt vector will cause
|
||||||
|
* a VM-Exit and the subsequent entry will call sync_pir_to_irr.
|
||||||
|
*/
|
||||||
|
if (!is_guest_mode(vcpu) && kvm_vcpu_apicv_active(vcpu))
|
||||||
|
vmx_set_rvi(max_irr);
|
||||||
|
else if (got_posted_interrupt)
|
||||||
|
kvm_make_request(KVM_REQ_EVENT, vcpu);
|
||||||
|
|
||||||
return max_irr;
|
return max_irr;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -7509,6 +7525,7 @@ static void hardware_unsetup(void)
|
|||||||
static bool vmx_check_apicv_inhibit_reasons(ulong bit)
|
static bool vmx_check_apicv_inhibit_reasons(ulong bit)
|
||||||
{
|
{
|
||||||
ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
|
ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
|
||||||
|
BIT(APICV_INHIBIT_REASON_ABSENT) |
|
||||||
BIT(APICV_INHIBIT_REASON_HYPERV) |
|
BIT(APICV_INHIBIT_REASON_HYPERV) |
|
||||||
BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
|
BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
|
||||||
|
|
||||||
@@ -7761,10 +7778,10 @@ static __init int hardware_setup(void)
|
|||||||
ple_window_shrink = 0;
|
ple_window_shrink = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!cpu_has_vmx_apicv()) {
|
if (!cpu_has_vmx_apicv())
|
||||||
enable_apicv = 0;
|
enable_apicv = 0;
|
||||||
|
if (!enable_apicv)
|
||||||
vmx_x86_ops.sync_pir_to_irr = NULL;
|
vmx_x86_ops.sync_pir_to_irr = NULL;
|
||||||
}
|
|
||||||
|
|
||||||
if (cpu_has_vmx_tsc_scaling()) {
|
if (cpu_has_vmx_tsc_scaling()) {
|
||||||
kvm_has_tsc_control = true;
|
kvm_has_tsc_control = true;
|
||||||
|
|||||||
@@ -3258,6 +3258,29 @@ static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
|
|||||||
static_call(kvm_x86_tlb_flush_guest)(vcpu);
|
static_call(kvm_x86_tlb_flush_guest)(vcpu);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
|
||||||
|
{
|
||||||
|
++vcpu->stat.tlb_flush;
|
||||||
|
static_call(kvm_x86_tlb_flush_current)(vcpu);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Service "local" TLB flush requests, which are specific to the current MMU
|
||||||
|
* context. In addition to the generic event handling in vcpu_enter_guest(),
|
||||||
|
* TLB flushes that are targeted at an MMU context also need to be serviced
|
||||||
|
* prior before nested VM-Enter/VM-Exit.
|
||||||
|
*/
|
||||||
|
void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu)
|
||||||
|
{
|
||||||
|
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
|
||||||
|
kvm_vcpu_flush_tlb_current(vcpu);
|
||||||
|
|
||||||
|
if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
|
||||||
|
kvm_vcpu_flush_tlb_guest(vcpu);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests);
|
||||||
|
|
||||||
static void record_steal_time(struct kvm_vcpu *vcpu)
|
static void record_steal_time(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
|
struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
|
||||||
@@ -4133,6 +4156,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
|||||||
case KVM_CAP_SGX_ATTRIBUTE:
|
case KVM_CAP_SGX_ATTRIBUTE:
|
||||||
#endif
|
#endif
|
||||||
case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
|
case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
|
||||||
|
case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
|
||||||
case KVM_CAP_SREGS2:
|
case KVM_CAP_SREGS2:
|
||||||
case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
|
case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
|
||||||
case KVM_CAP_VCPU_ATTRIBUTES:
|
case KVM_CAP_VCPU_ATTRIBUTES:
|
||||||
@@ -4448,8 +4472,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
|
|||||||
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
|
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
|
||||||
struct kvm_lapic_state *s)
|
struct kvm_lapic_state *s)
|
||||||
{
|
{
|
||||||
if (vcpu->arch.apicv_active)
|
static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
|
||||||
static_call(kvm_x86_sync_pir_to_irr)(vcpu);
|
|
||||||
|
|
||||||
return kvm_apic_get_state(vcpu, s);
|
return kvm_apic_get_state(vcpu, s);
|
||||||
}
|
}
|
||||||
@@ -5124,6 +5147,17 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
|
|||||||
struct kvm_cpuid __user *cpuid_arg = argp;
|
struct kvm_cpuid __user *cpuid_arg = argp;
|
||||||
struct kvm_cpuid cpuid;
|
struct kvm_cpuid cpuid;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* KVM does not correctly handle changing guest CPUID after KVM_RUN, as
|
||||||
|
* MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
|
||||||
|
* tracked in kvm_mmu_page_role. As a result, KVM may miss guest page
|
||||||
|
* faults due to reusing SPs/SPTEs. In practice no sane VMM mucks with
|
||||||
|
* the core vCPU model on the fly, so fail.
|
||||||
|
*/
|
||||||
|
r = -EINVAL;
|
||||||
|
if (vcpu->arch.last_vmentry_cpu != -1)
|
||||||
|
goto out;
|
||||||
|
|
||||||
r = -EFAULT;
|
r = -EFAULT;
|
||||||
if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
|
if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
|
||||||
goto out;
|
goto out;
|
||||||
@@ -5134,6 +5168,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
|
|||||||
struct kvm_cpuid2 __user *cpuid_arg = argp;
|
struct kvm_cpuid2 __user *cpuid_arg = argp;
|
||||||
struct kvm_cpuid2 cpuid;
|
struct kvm_cpuid2 cpuid;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* KVM_SET_CPUID{,2} after KVM_RUN is forbidded, see the comment in
|
||||||
|
* KVM_SET_CPUID case above.
|
||||||
|
*/
|
||||||
|
r = -EINVAL;
|
||||||
|
if (vcpu->arch.last_vmentry_cpu != -1)
|
||||||
|
goto out;
|
||||||
|
|
||||||
r = -EFAULT;
|
r = -EFAULT;
|
||||||
if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
|
if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
|
||||||
goto out;
|
goto out;
|
||||||
@@ -5698,6 +5740,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
|
|||||||
smp_wmb();
|
smp_wmb();
|
||||||
kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
|
kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
|
||||||
kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
|
kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
|
||||||
|
kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
|
||||||
r = 0;
|
r = 0;
|
||||||
split_irqchip_unlock:
|
split_irqchip_unlock:
|
||||||
mutex_unlock(&kvm->lock);
|
mutex_unlock(&kvm->lock);
|
||||||
@@ -6078,6 +6121,7 @@ set_identity_unlock:
|
|||||||
/* Write kvm->irq_routing before enabling irqchip_in_kernel. */
|
/* Write kvm->irq_routing before enabling irqchip_in_kernel. */
|
||||||
smp_wmb();
|
smp_wmb();
|
||||||
kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
|
kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
|
||||||
|
kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
|
||||||
create_irqchip_unlock:
|
create_irqchip_unlock:
|
||||||
mutex_unlock(&kvm->lock);
|
mutex_unlock(&kvm->lock);
|
||||||
break;
|
break;
|
||||||
@@ -8776,10 +8820,9 @@ static void kvm_apicv_init(struct kvm *kvm)
|
|||||||
{
|
{
|
||||||
init_rwsem(&kvm->arch.apicv_update_lock);
|
init_rwsem(&kvm->arch.apicv_update_lock);
|
||||||
|
|
||||||
if (enable_apicv)
|
set_bit(APICV_INHIBIT_REASON_ABSENT,
|
||||||
clear_bit(APICV_INHIBIT_REASON_DISABLE,
|
&kvm->arch.apicv_inhibit_reasons);
|
||||||
&kvm->arch.apicv_inhibit_reasons);
|
if (!enable_apicv)
|
||||||
else
|
|
||||||
set_bit(APICV_INHIBIT_REASON_DISABLE,
|
set_bit(APICV_INHIBIT_REASON_DISABLE,
|
||||||
&kvm->arch.apicv_inhibit_reasons);
|
&kvm->arch.apicv_inhibit_reasons);
|
||||||
}
|
}
|
||||||
@@ -9528,8 +9571,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
|
|||||||
if (irqchip_split(vcpu->kvm))
|
if (irqchip_split(vcpu->kvm))
|
||||||
kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
|
kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
|
||||||
else {
|
else {
|
||||||
if (vcpu->arch.apicv_active)
|
static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
|
||||||
static_call(kvm_x86_sync_pir_to_irr)(vcpu);
|
|
||||||
if (ioapic_in_kernel(vcpu->kvm))
|
if (ioapic_in_kernel(vcpu->kvm))
|
||||||
kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
|
kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
|
||||||
}
|
}
|
||||||
@@ -9648,10 +9690,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
|
|||||||
/* Flushing all ASIDs flushes the current ASID... */
|
/* Flushing all ASIDs flushes the current ASID... */
|
||||||
kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
|
kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
|
||||||
}
|
}
|
||||||
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
|
kvm_service_local_tlb_flush_requests(vcpu);
|
||||||
kvm_vcpu_flush_tlb_current(vcpu);
|
|
||||||
if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
|
|
||||||
kvm_vcpu_flush_tlb_guest(vcpu);
|
|
||||||
|
|
||||||
if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
|
if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
|
||||||
vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
|
vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
|
||||||
@@ -9802,10 +9841,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
* This handles the case where a posted interrupt was
|
* This handles the case where a posted interrupt was
|
||||||
* notified with kvm_vcpu_kick.
|
* notified with kvm_vcpu_kick. Assigned devices can
|
||||||
|
* use the POSTED_INTR_VECTOR even if APICv is disabled,
|
||||||
|
* so do it even if APICv is disabled on this vCPU.
|
||||||
*/
|
*/
|
||||||
if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
|
if (kvm_lapic_enabled(vcpu))
|
||||||
static_call(kvm_x86_sync_pir_to_irr)(vcpu);
|
static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
|
||||||
|
|
||||||
if (kvm_vcpu_exit_request(vcpu)) {
|
if (kvm_vcpu_exit_request(vcpu)) {
|
||||||
vcpu->mode = OUTSIDE_GUEST_MODE;
|
vcpu->mode = OUTSIDE_GUEST_MODE;
|
||||||
@@ -9849,8 +9890,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
|
|||||||
if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
|
if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
|
||||||
break;
|
break;
|
||||||
|
|
||||||
if (vcpu->arch.apicv_active)
|
if (kvm_lapic_enabled(vcpu))
|
||||||
static_call(kvm_x86_sync_pir_to_irr)(vcpu);
|
static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
|
||||||
|
|
||||||
if (unlikely(kvm_vcpu_exit_request(vcpu))) {
|
if (unlikely(kvm_vcpu_exit_request(vcpu))) {
|
||||||
exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
|
exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
|
||||||
|
|||||||
@@ -103,6 +103,7 @@ static inline unsigned int __shrink_ple_window(unsigned int val,
|
|||||||
|
|
||||||
#define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL
|
#define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL
|
||||||
|
|
||||||
|
void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
|
||||||
int kvm_check_nested_events(struct kvm_vcpu *vcpu);
|
int kvm_check_nested_events(struct kvm_vcpu *vcpu);
|
||||||
|
|
||||||
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
|
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
|
||||||
@@ -185,12 +186,6 @@ static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
|
|||||||
return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
|
return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
|
|
||||||
{
|
|
||||||
++vcpu->stat.tlb_flush;
|
|
||||||
static_call(kvm_x86_tlb_flush_current)(vcpu);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int is_pae(struct kvm_vcpu *vcpu)
|
static inline int is_pae(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
|
return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
|
||||||
|
|||||||
@@ -72,6 +72,7 @@ static void __init setup_real_mode(void)
|
|||||||
#ifdef CONFIG_X86_64
|
#ifdef CONFIG_X86_64
|
||||||
u64 *trampoline_pgd;
|
u64 *trampoline_pgd;
|
||||||
u64 efer;
|
u64 efer;
|
||||||
|
int i;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
base = (unsigned char *)real_mode_header;
|
base = (unsigned char *)real_mode_header;
|
||||||
@@ -128,8 +129,17 @@ static void __init setup_real_mode(void)
|
|||||||
trampoline_header->flags = 0;
|
trampoline_header->flags = 0;
|
||||||
|
|
||||||
trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
|
trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
|
||||||
|
|
||||||
|
/* Map the real mode stub as virtual == physical */
|
||||||
trampoline_pgd[0] = trampoline_pgd_entry.pgd;
|
trampoline_pgd[0] = trampoline_pgd_entry.pgd;
|
||||||
trampoline_pgd[511] = init_top_pgt[511].pgd;
|
|
||||||
|
/*
|
||||||
|
* Include the entirety of the kernel mapping into the trampoline
|
||||||
|
* PGD. This way, all mappings present in the normal kernel page
|
||||||
|
* tables are usable while running on trampoline_pgd.
|
||||||
|
*/
|
||||||
|
for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
|
||||||
|
trampoline_pgd[i] = init_top_pgt[i].pgd;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
sme_sev_setup_real_mode(trampoline_header);
|
sme_sev_setup_real_mode(trampoline_header);
|
||||||
|
|||||||
@@ -20,6 +20,7 @@
|
|||||||
|
|
||||||
#include <linux/init.h>
|
#include <linux/init.h>
|
||||||
#include <linux/linkage.h>
|
#include <linux/linkage.h>
|
||||||
|
#include <../entry/calling.h>
|
||||||
|
|
||||||
.pushsection .noinstr.text, "ax"
|
.pushsection .noinstr.text, "ax"
|
||||||
/*
|
/*
|
||||||
@@ -192,6 +193,25 @@ SYM_CODE_START(xen_iret)
|
|||||||
jmp hypercall_iret
|
jmp hypercall_iret
|
||||||
SYM_CODE_END(xen_iret)
|
SYM_CODE_END(xen_iret)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* XEN pv doesn't use trampoline stack, PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is
|
||||||
|
* also the kernel stack. Reusing swapgs_restore_regs_and_return_to_usermode()
|
||||||
|
* in XEN pv would cause %rsp to move up to the top of the kernel stack and
|
||||||
|
* leave the IRET frame below %rsp, which is dangerous to be corrupted if #NMI
|
||||||
|
* interrupts. And swapgs_restore_regs_and_return_to_usermode() pushing the IRET
|
||||||
|
* frame at the same address is useless.
|
||||||
|
*/
|
||||||
|
SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
|
||||||
|
UNWIND_HINT_REGS
|
||||||
|
POP_REGS
|
||||||
|
|
||||||
|
/* stackleak_erase() can work safely on the kernel stack. */
|
||||||
|
STACKLEAK_ERASE_NOCLOBBER
|
||||||
|
|
||||||
|
addq $8, %rsp /* skip regs->orig_ax */
|
||||||
|
jmp xen_iret
|
||||||
|
SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Xen handles syscall callbacks much like ordinary exceptions, which
|
* Xen handles syscall callbacks much like ordinary exceptions, which
|
||||||
* means we have:
|
* means we have:
|
||||||
|
|||||||
@@ -827,7 +827,7 @@ static ssize_t ata_scsi_lpm_show(struct device *dev,
|
|||||||
if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
|
if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
return snprintf(buf, PAGE_SIZE, "%s\n",
|
return sysfs_emit(buf, "%s\n",
|
||||||
ata_lpm_policy_names[ap->target_lpm_policy]);
|
ata_lpm_policy_names[ap->target_lpm_policy]);
|
||||||
}
|
}
|
||||||
DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
|
DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
|
||||||
|
|||||||
@@ -55,14 +55,14 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
|
|||||||
/* Transfer multiple of 2 bytes */
|
/* Transfer multiple of 2 bytes */
|
||||||
if (rw == READ) {
|
if (rw == READ) {
|
||||||
if (swap)
|
if (swap)
|
||||||
raw_insw_swapw((u16 *)data_addr, (u16 *)buf, words);
|
raw_insw_swapw(data_addr, (u16 *)buf, words);
|
||||||
else
|
else
|
||||||
raw_insw((u16 *)data_addr, (u16 *)buf, words);
|
raw_insw(data_addr, (u16 *)buf, words);
|
||||||
} else {
|
} else {
|
||||||
if (swap)
|
if (swap)
|
||||||
raw_outsw_swapw((u16 *)data_addr, (u16 *)buf, words);
|
raw_outsw_swapw(data_addr, (u16 *)buf, words);
|
||||||
else
|
else
|
||||||
raw_outsw((u16 *)data_addr, (u16 *)buf, words);
|
raw_outsw(data_addr, (u16 *)buf, words);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Transfer trailing byte, if any. */
|
/* Transfer trailing byte, if any. */
|
||||||
@@ -74,16 +74,16 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
|
|||||||
|
|
||||||
if (rw == READ) {
|
if (rw == READ) {
|
||||||
if (swap)
|
if (swap)
|
||||||
raw_insw_swapw((u16 *)data_addr, (u16 *)pad, 1);
|
raw_insw_swapw(data_addr, (u16 *)pad, 1);
|
||||||
else
|
else
|
||||||
raw_insw((u16 *)data_addr, (u16 *)pad, 1);
|
raw_insw(data_addr, (u16 *)pad, 1);
|
||||||
*buf = pad[0];
|
*buf = pad[0];
|
||||||
} else {
|
} else {
|
||||||
pad[0] = *buf;
|
pad[0] = *buf;
|
||||||
if (swap)
|
if (swap)
|
||||||
raw_outsw_swapw((u16 *)data_addr, (u16 *)pad, 1);
|
raw_outsw_swapw(data_addr, (u16 *)pad, 1);
|
||||||
else
|
else
|
||||||
raw_outsw((u16 *)data_addr, (u16 *)pad, 1);
|
raw_outsw(data_addr, (u16 *)pad, 1);
|
||||||
}
|
}
|
||||||
words++;
|
words++;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1394,6 +1394,14 @@ static int sata_fsl_init_controller(struct ata_host *host)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void sata_fsl_host_stop(struct ata_host *host)
|
||||||
|
{
|
||||||
|
struct sata_fsl_host_priv *host_priv = host->private_data;
|
||||||
|
|
||||||
|
iounmap(host_priv->hcr_base);
|
||||||
|
kfree(host_priv);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* scsi mid-layer and libata interface structures
|
* scsi mid-layer and libata interface structures
|
||||||
*/
|
*/
|
||||||
@@ -1426,6 +1434,8 @@ static struct ata_port_operations sata_fsl_ops = {
|
|||||||
.port_start = sata_fsl_port_start,
|
.port_start = sata_fsl_port_start,
|
||||||
.port_stop = sata_fsl_port_stop,
|
.port_stop = sata_fsl_port_stop,
|
||||||
|
|
||||||
|
.host_stop = sata_fsl_host_stop,
|
||||||
|
|
||||||
.pmp_attach = sata_fsl_pmp_attach,
|
.pmp_attach = sata_fsl_pmp_attach,
|
||||||
.pmp_detach = sata_fsl_pmp_detach,
|
.pmp_detach = sata_fsl_pmp_detach,
|
||||||
};
|
};
|
||||||
@@ -1480,9 +1490,9 @@ static int sata_fsl_probe(struct platform_device *ofdev)
|
|||||||
host_priv->ssr_base = ssr_base;
|
host_priv->ssr_base = ssr_base;
|
||||||
host_priv->csr_base = csr_base;
|
host_priv->csr_base = csr_base;
|
||||||
|
|
||||||
irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
|
irq = platform_get_irq(ofdev, 0);
|
||||||
if (!irq) {
|
if (irq < 0) {
|
||||||
dev_err(&ofdev->dev, "invalid irq from platform\n");
|
retval = irq;
|
||||||
goto error_exit_with_cleanup;
|
goto error_exit_with_cleanup;
|
||||||
}
|
}
|
||||||
host_priv->irq = irq;
|
host_priv->irq = irq;
|
||||||
@@ -1557,10 +1567,6 @@ static int sata_fsl_remove(struct platform_device *ofdev)
|
|||||||
|
|
||||||
ata_host_detach(host);
|
ata_host_detach(host);
|
||||||
|
|
||||||
irq_dispose_mapping(host_priv->irq);
|
|
||||||
iounmap(host_priv->hcr_base);
|
|
||||||
kfree(host_priv);
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -2103,7 +2103,7 @@ static int loop_control_remove(int idx)
|
|||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (idx < 0) {
|
if (idx < 0) {
|
||||||
pr_warn("deleting an unspecified loop device is not supported.\n");
|
pr_warn_once("deleting an unspecified loop device is not supported.\n");
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -281,7 +281,7 @@ agp_ioc_init(void __iomem *ioc_regs)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int __init
|
||||||
lba_find_capability(int cap)
|
lba_find_capability(int cap)
|
||||||
{
|
{
|
||||||
struct _parisc_agp_info *info = &parisc_agp_info;
|
struct _parisc_agp_info *info = &parisc_agp_info;
|
||||||
@@ -366,7 +366,7 @@ fail:
|
|||||||
return error;
|
return error;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int __init
|
||||||
find_quicksilver(struct device *dev, void *data)
|
find_quicksilver(struct device *dev, void *data)
|
||||||
{
|
{
|
||||||
struct parisc_device **lba = data;
|
struct parisc_device **lba = data;
|
||||||
@@ -378,7 +378,7 @@ find_quicksilver(struct device *dev, void *data)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int __init
|
||||||
parisc_agp_init(void)
|
parisc_agp_init(void)
|
||||||
{
|
{
|
||||||
extern struct sba_device *sba_list;
|
extern struct sba_device *sba_list;
|
||||||
|
|||||||
@@ -191,6 +191,8 @@ struct ipmi_user {
|
|||||||
struct work_struct remove_work;
|
struct work_struct remove_work;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
static struct workqueue_struct *remove_work_wq;
|
||||||
|
|
||||||
static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
|
static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
|
||||||
__acquires(user->release_barrier)
|
__acquires(user->release_barrier)
|
||||||
{
|
{
|
||||||
@@ -1297,7 +1299,7 @@ static void free_user(struct kref *ref)
|
|||||||
struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
|
struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
|
||||||
|
|
||||||
/* SRCU cleanup must happen in task context. */
|
/* SRCU cleanup must happen in task context. */
|
||||||
schedule_work(&user->remove_work);
|
queue_work(remove_work_wq, &user->remove_work);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void _ipmi_destroy_user(struct ipmi_user *user)
|
static void _ipmi_destroy_user(struct ipmi_user *user)
|
||||||
@@ -3918,9 +3920,11 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
|
|||||||
/* We didn't find a user, deliver an error response. */
|
/* We didn't find a user, deliver an error response. */
|
||||||
ipmi_inc_stat(intf, unhandled_commands);
|
ipmi_inc_stat(intf, unhandled_commands);
|
||||||
|
|
||||||
msg->data[0] = ((netfn + 1) << 2) | (msg->rsp[4] & 0x3);
|
msg->data[0] = (netfn + 1) << 2;
|
||||||
msg->data[1] = msg->rsp[2];
|
msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */
|
||||||
msg->data[2] = msg->rsp[4] & ~0x3;
|
msg->data[1] = msg->rsp[1]; /* Addr */
|
||||||
|
msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */
|
||||||
|
msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */
|
||||||
msg->data[3] = cmd;
|
msg->data[3] = cmd;
|
||||||
msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
|
msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
|
||||||
msg->data_size = 5;
|
msg->data_size = 5;
|
||||||
@@ -4455,13 +4459,24 @@ return_unspecified:
|
|||||||
msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
|
msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
|
||||||
msg->rsp_size = 3;
|
msg->rsp_size = 3;
|
||||||
} else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
|
} else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
|
||||||
/* commands must have at least 3 bytes, responses 4. */
|
/* commands must have at least 4 bytes, responses 5. */
|
||||||
if (is_cmd && (msg->rsp_size < 3)) {
|
if (is_cmd && (msg->rsp_size < 4)) {
|
||||||
ipmi_inc_stat(intf, invalid_commands);
|
ipmi_inc_stat(intf, invalid_commands);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
if (!is_cmd && (msg->rsp_size < 4))
|
if (!is_cmd && (msg->rsp_size < 5)) {
|
||||||
goto return_unspecified;
|
ipmi_inc_stat(intf, invalid_ipmb_responses);
|
||||||
|
/* Construct a valid error response. */
|
||||||
|
msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */
|
||||||
|
msg->rsp[0] |= (1 << 2); /* Make it a response */
|
||||||
|
msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */
|
||||||
|
msg->rsp[1] = msg->data[1]; /* Addr */
|
||||||
|
msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */
|
||||||
|
msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */
|
||||||
|
msg->rsp[3] = msg->data[3]; /* Cmd */
|
||||||
|
msg->rsp[4] = IPMI_ERR_UNSPECIFIED;
|
||||||
|
msg->rsp_size = 5;
|
||||||
|
}
|
||||||
} else if ((msg->data_size >= 2)
|
} else if ((msg->data_size >= 2)
|
||||||
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
|
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
|
||||||
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
|
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
|
||||||
@@ -5031,6 +5046,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
|
|||||||
if (rv) {
|
if (rv) {
|
||||||
rv->done = free_smi_msg;
|
rv->done = free_smi_msg;
|
||||||
rv->user_data = NULL;
|
rv->user_data = NULL;
|
||||||
|
rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
|
||||||
atomic_inc(&smi_msg_inuse_count);
|
atomic_inc(&smi_msg_inuse_count);
|
||||||
}
|
}
|
||||||
return rv;
|
return rv;
|
||||||
@@ -5383,6 +5399,13 @@ static int ipmi_init_msghandler(void)
|
|||||||
|
|
||||||
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
|
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
|
||||||
|
|
||||||
|
remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
|
||||||
|
if (!remove_work_wq) {
|
||||||
|
pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
|
||||||
|
rv = -ENOMEM;
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
initialized = true;
|
initialized = true;
|
||||||
|
|
||||||
out:
|
out:
|
||||||
@@ -5408,6 +5431,8 @@ static void __exit cleanup_ipmi(void)
|
|||||||
int count;
|
int count;
|
||||||
|
|
||||||
if (initialized) {
|
if (initialized) {
|
||||||
|
destroy_workqueue(remove_work_wq);
|
||||||
|
|
||||||
atomic_notifier_chain_unregister(&panic_notifier_list,
|
atomic_notifier_chain_unregister(&panic_notifier_list,
|
||||||
&panic_block);
|
&panic_block);
|
||||||
|
|
||||||
|
|||||||
@@ -1004,10 +1004,9 @@ static struct kobj_type ktype_cpufreq = {
|
|||||||
.release = cpufreq_sysfs_release,
|
.release = cpufreq_sysfs_release,
|
||||||
};
|
};
|
||||||
|
|
||||||
static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
|
static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
|
||||||
|
struct device *dev)
|
||||||
{
|
{
|
||||||
struct device *dev = get_cpu_device(cpu);
|
|
||||||
|
|
||||||
if (unlikely(!dev))
|
if (unlikely(!dev))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
@@ -1296,8 +1295,9 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
|
|||||||
|
|
||||||
if (policy->max_freq_req) {
|
if (policy->max_freq_req) {
|
||||||
/*
|
/*
|
||||||
* CPUFREQ_CREATE_POLICY notification is sent only after
|
* Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
|
||||||
* successfully adding max_freq_req request.
|
* notification, since CPUFREQ_CREATE_POLICY notification was
|
||||||
|
* sent after adding max_freq_req earlier.
|
||||||
*/
|
*/
|
||||||
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
|
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
|
||||||
CPUFREQ_REMOVE_POLICY, policy);
|
CPUFREQ_REMOVE_POLICY, policy);
|
||||||
@@ -1391,7 +1391,7 @@ static int cpufreq_online(unsigned int cpu)
|
|||||||
if (new_policy) {
|
if (new_policy) {
|
||||||
for_each_cpu(j, policy->related_cpus) {
|
for_each_cpu(j, policy->related_cpus) {
|
||||||
per_cpu(cpufreq_cpu_data, j) = policy;
|
per_cpu(cpufreq_cpu_data, j) = policy;
|
||||||
add_cpu_dev_symlink(policy, j);
|
add_cpu_dev_symlink(policy, j, get_cpu_device(j));
|
||||||
}
|
}
|
||||||
|
|
||||||
policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
|
policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
|
||||||
@@ -1565,7 +1565,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
|
|||||||
/* Create sysfs link on CPU registration */
|
/* Create sysfs link on CPU registration */
|
||||||
policy = per_cpu(cpufreq_cpu_data, cpu);
|
policy = per_cpu(cpufreq_cpu_data, cpu);
|
||||||
if (policy)
|
if (policy)
|
||||||
add_cpu_dev_symlink(policy, cpu);
|
add_cpu_dev_symlink(policy, cpu, dev);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -290,7 +290,7 @@ static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
|
|||||||
int i;
|
int i;
|
||||||
|
|
||||||
table = &buffer->sg_table;
|
table = &buffer->sg_table;
|
||||||
for_each_sg(table->sgl, sg, table->nents, i) {
|
for_each_sgtable_sg(table, sg, i) {
|
||||||
struct page *page = sg_page(sg);
|
struct page *page = sg_page(sg);
|
||||||
|
|
||||||
__free_pages(page, compound_order(page));
|
__free_pages(page, compound_order(page));
|
||||||
|
|||||||
@@ -1396,7 +1396,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
|
|||||||
struct sg_table *sg = NULL;
|
struct sg_table *sg = NULL;
|
||||||
uint64_t user_addr = 0;
|
uint64_t user_addr = 0;
|
||||||
struct amdgpu_bo *bo;
|
struct amdgpu_bo *bo;
|
||||||
struct drm_gem_object *gobj;
|
struct drm_gem_object *gobj = NULL;
|
||||||
u32 domain, alloc_domain;
|
u32 domain, alloc_domain;
|
||||||
u64 alloc_flags;
|
u64 alloc_flags;
|
||||||
int ret;
|
int ret;
|
||||||
@@ -1506,14 +1506,16 @@ allocate_init_user_pages_failed:
|
|||||||
remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
|
remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
|
||||||
drm_vma_node_revoke(&gobj->vma_node, drm_priv);
|
drm_vma_node_revoke(&gobj->vma_node, drm_priv);
|
||||||
err_node_allow:
|
err_node_allow:
|
||||||
drm_gem_object_put(gobj);
|
|
||||||
/* Don't unreserve system mem limit twice */
|
/* Don't unreserve system mem limit twice */
|
||||||
goto err_reserve_limit;
|
goto err_reserve_limit;
|
||||||
err_bo_create:
|
err_bo_create:
|
||||||
unreserve_mem_limit(adev, size, alloc_domain, !!sg);
|
unreserve_mem_limit(adev, size, alloc_domain, !!sg);
|
||||||
err_reserve_limit:
|
err_reserve_limit:
|
||||||
mutex_destroy(&(*mem)->lock);
|
mutex_destroy(&(*mem)->lock);
|
||||||
kfree(*mem);
|
if (gobj)
|
||||||
|
drm_gem_object_put(gobj);
|
||||||
|
else
|
||||||
|
kfree(*mem);
|
||||||
err:
|
err:
|
||||||
if (sg) {
|
if (sg) {
|
||||||
sg_free_table(sg);
|
sg_free_table(sg);
|
||||||
|
|||||||
@@ -3833,7 +3833,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
|
|||||||
/* disable all interrupts */
|
/* disable all interrupts */
|
||||||
amdgpu_irq_disable_all(adev);
|
amdgpu_irq_disable_all(adev);
|
||||||
if (adev->mode_info.mode_config_initialized){
|
if (adev->mode_info.mode_config_initialized){
|
||||||
if (!amdgpu_device_has_dc_support(adev))
|
if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
|
||||||
drm_helper_force_disable_all(adev_to_drm(adev));
|
drm_helper_force_disable_all(adev_to_drm(adev));
|
||||||
else
|
else
|
||||||
drm_atomic_helper_shutdown(adev_to_drm(adev));
|
drm_atomic_helper_shutdown(adev_to_drm(adev));
|
||||||
@@ -4289,6 +4289,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
|
|||||||
{
|
{
|
||||||
int r;
|
int r;
|
||||||
|
|
||||||
|
amdgpu_amdkfd_pre_reset(adev);
|
||||||
|
|
||||||
if (from_hypervisor)
|
if (from_hypervisor)
|
||||||
r = amdgpu_virt_request_full_gpu(adev, true);
|
r = amdgpu_virt_request_full_gpu(adev, true);
|
||||||
else
|
else
|
||||||
@@ -4316,6 +4318,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
|
|||||||
|
|
||||||
amdgpu_irq_gpu_reset_resume_helper(adev);
|
amdgpu_irq_gpu_reset_resume_helper(adev);
|
||||||
r = amdgpu_ib_ring_tests(adev);
|
r = amdgpu_ib_ring_tests(adev);
|
||||||
|
amdgpu_amdkfd_post_reset(adev);
|
||||||
|
|
||||||
error:
|
error:
|
||||||
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
|
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
|
||||||
@@ -5030,7 +5033,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
|
|||||||
|
|
||||||
cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
|
cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
|
||||||
|
|
||||||
amdgpu_amdkfd_pre_reset(tmp_adev);
|
if (!amdgpu_sriov_vf(tmp_adev))
|
||||||
|
amdgpu_amdkfd_pre_reset(tmp_adev);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Mark these ASICs to be reseted as untracked first
|
* Mark these ASICs to be reseted as untracked first
|
||||||
@@ -5129,7 +5133,7 @@ skip_hw_reset:
|
|||||||
drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
|
drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
|
if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
|
||||||
drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
|
drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -5148,9 +5152,9 @@ skip_hw_reset:
|
|||||||
|
|
||||||
skip_sched_resume:
|
skip_sched_resume:
|
||||||
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
|
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
|
||||||
/* unlock kfd */
|
/* unlock kfd: SRIOV would do it separately */
|
||||||
if (!need_emergency_restart)
|
if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
|
||||||
amdgpu_amdkfd_post_reset(tmp_adev);
|
amdgpu_amdkfd_post_reset(tmp_adev);
|
||||||
|
|
||||||
/* kfd_post_reset will do nothing if kfd device is not initialized,
|
/* kfd_post_reset will do nothing if kfd device is not initialized,
|
||||||
* need to bring up kfd here if it's not be initialized before
|
* need to bring up kfd here if it's not be initialized before
|
||||||
|
|||||||
@@ -157,6 +157,8 @@ static int hw_id_map[MAX_HWIP] = {
|
|||||||
[HDP_HWIP] = HDP_HWID,
|
[HDP_HWIP] = HDP_HWID,
|
||||||
[SDMA0_HWIP] = SDMA0_HWID,
|
[SDMA0_HWIP] = SDMA0_HWID,
|
||||||
[SDMA1_HWIP] = SDMA1_HWID,
|
[SDMA1_HWIP] = SDMA1_HWID,
|
||||||
|
[SDMA2_HWIP] = SDMA2_HWID,
|
||||||
|
[SDMA3_HWIP] = SDMA3_HWID,
|
||||||
[MMHUB_HWIP] = MMHUB_HWID,
|
[MMHUB_HWIP] = MMHUB_HWID,
|
||||||
[ATHUB_HWIP] = ATHUB_HWID,
|
[ATHUB_HWIP] = ATHUB_HWID,
|
||||||
[NBIO_HWIP] = NBIF_HWID,
|
[NBIO_HWIP] = NBIF_HWID,
|
||||||
@@ -918,6 +920,7 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
|
|||||||
case IP_VERSION(3, 0, 64):
|
case IP_VERSION(3, 0, 64):
|
||||||
case IP_VERSION(3, 1, 1):
|
case IP_VERSION(3, 1, 1):
|
||||||
case IP_VERSION(3, 0, 2):
|
case IP_VERSION(3, 0, 2):
|
||||||
|
case IP_VERSION(3, 0, 192):
|
||||||
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
|
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
|
||||||
if (!amdgpu_sriov_vf(adev))
|
if (!amdgpu_sriov_vf(adev))
|
||||||
amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
|
amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
|
||||||
|
|||||||
@@ -135,6 +135,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
|
|||||||
break;
|
break;
|
||||||
case IP_VERSION(3, 0, 0):
|
case IP_VERSION(3, 0, 0):
|
||||||
case IP_VERSION(3, 0, 64):
|
case IP_VERSION(3, 0, 64):
|
||||||
|
case IP_VERSION(3, 0, 192):
|
||||||
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
|
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
|
||||||
fw_name = FIRMWARE_SIENNA_CICHLID;
|
fw_name = FIRMWARE_SIENNA_CICHLID;
|
||||||
else
|
else
|
||||||
|
|||||||
@@ -504,8 +504,8 @@ static int amdgpu_vkms_sw_fini(void *handle)
|
|||||||
int i = 0;
|
int i = 0;
|
||||||
|
|
||||||
for (i = 0; i < adev->mode_info.num_crtc; i++)
|
for (i = 0; i < adev->mode_info.num_crtc; i++)
|
||||||
if (adev->mode_info.crtcs[i])
|
if (adev->amdgpu_vkms_output[i].vblank_hrtimer.function)
|
||||||
hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);
|
hrtimer_cancel(&adev->amdgpu_vkms_output[i].vblank_hrtimer);
|
||||||
|
|
||||||
kfree(adev->mode_info.bios_hardcoded_edid);
|
kfree(adev->mode_info.bios_hardcoded_edid);
|
||||||
kfree(adev->amdgpu_vkms_output);
|
kfree(adev->amdgpu_vkms_output);
|
||||||
|
|||||||
@@ -4060,9 +4060,10 @@ static int gfx_v9_0_hw_fini(void *handle)
|
|||||||
|
|
||||||
gfx_v9_0_cp_enable(adev, false);
|
gfx_v9_0_cp_enable(adev, false);
|
||||||
|
|
||||||
/* Skip suspend with A+A reset */
|
/* Skip stopping RLC with A+A reset or when RLC controls GFX clock */
|
||||||
if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) {
|
if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) ||
|
||||||
dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n");
|
(adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2))) {
|
||||||
|
dev_dbg(adev->dev, "Skipping RLC halt\n");
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -183,6 +183,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
|
|||||||
switch (adev->ip_versions[UVD_HWIP][0]) {
|
switch (adev->ip_versions[UVD_HWIP][0]) {
|
||||||
case IP_VERSION(3, 0, 0):
|
case IP_VERSION(3, 0, 0):
|
||||||
case IP_VERSION(3, 0, 64):
|
case IP_VERSION(3, 0, 64):
|
||||||
|
case IP_VERSION(3, 0, 192):
|
||||||
if (amdgpu_sriov_vf(adev)) {
|
if (amdgpu_sriov_vf(adev)) {
|
||||||
if (encode)
|
if (encode)
|
||||||
*codecs = &sriov_sc_video_codecs_encode;
|
*codecs = &sriov_sc_video_codecs_encode;
|
||||||
|
|||||||
@@ -1574,7 +1574,6 @@ retry_flush_work:
|
|||||||
static void svm_range_restore_work(struct work_struct *work)
|
static void svm_range_restore_work(struct work_struct *work)
|
||||||
{
|
{
|
||||||
struct delayed_work *dwork = to_delayed_work(work);
|
struct delayed_work *dwork = to_delayed_work(work);
|
||||||
struct amdkfd_process_info *process_info;
|
|
||||||
struct svm_range_list *svms;
|
struct svm_range_list *svms;
|
||||||
struct svm_range *prange;
|
struct svm_range *prange;
|
||||||
struct kfd_process *p;
|
struct kfd_process *p;
|
||||||
@@ -1594,12 +1593,10 @@ static void svm_range_restore_work(struct work_struct *work)
|
|||||||
* the lifetime of this thread, kfd_process and mm will be valid.
|
* the lifetime of this thread, kfd_process and mm will be valid.
|
||||||
*/
|
*/
|
||||||
p = container_of(svms, struct kfd_process, svms);
|
p = container_of(svms, struct kfd_process, svms);
|
||||||
process_info = p->kgd_process_info;
|
|
||||||
mm = p->mm;
|
mm = p->mm;
|
||||||
if (!mm)
|
if (!mm)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
mutex_lock(&process_info->lock);
|
|
||||||
svm_range_list_lock_and_flush_work(svms, mm);
|
svm_range_list_lock_and_flush_work(svms, mm);
|
||||||
mutex_lock(&svms->lock);
|
mutex_lock(&svms->lock);
|
||||||
|
|
||||||
@@ -1652,7 +1649,6 @@ static void svm_range_restore_work(struct work_struct *work)
|
|||||||
out_reschedule:
|
out_reschedule:
|
||||||
mutex_unlock(&svms->lock);
|
mutex_unlock(&svms->lock);
|
||||||
mmap_write_unlock(mm);
|
mmap_write_unlock(mm);
|
||||||
mutex_unlock(&process_info->lock);
|
|
||||||
|
|
||||||
/* If validation failed, reschedule another attempt */
|
/* If validation failed, reschedule another attempt */
|
||||||
if (evicted_ranges) {
|
if (evicted_ranges) {
|
||||||
@@ -2614,6 +2610,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
|
|||||||
|
|
||||||
if (atomic_read(&svms->drain_pagefaults)) {
|
if (atomic_read(&svms->drain_pagefaults)) {
|
||||||
pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
|
pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
|
||||||
|
r = 0;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2623,6 +2620,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
|
|||||||
mm = get_task_mm(p->lead_thread);
|
mm = get_task_mm(p->lead_thread);
|
||||||
if (!mm) {
|
if (!mm) {
|
||||||
pr_debug("svms 0x%p failed to get mm\n", svms);
|
pr_debug("svms 0x%p failed to get mm\n", svms);
|
||||||
|
r = 0;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2660,6 +2658,7 @@ retry_write_locked:
|
|||||||
|
|
||||||
if (svm_range_skip_recover(prange)) {
|
if (svm_range_skip_recover(prange)) {
|
||||||
amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
|
amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
|
||||||
|
r = 0;
|
||||||
goto out_unlock_range;
|
goto out_unlock_range;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2668,6 +2667,7 @@ retry_write_locked:
|
|||||||
if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) {
|
if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) {
|
||||||
pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
|
pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
|
||||||
svms, prange->start, prange->last);
|
svms, prange->start, prange->last);
|
||||||
|
r = 0;
|
||||||
goto out_unlock_range;
|
goto out_unlock_range;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -3177,7 +3177,6 @@ static int
|
|||||||
svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
|
svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
|
||||||
uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
|
uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
|
||||||
{
|
{
|
||||||
struct amdkfd_process_info *process_info = p->kgd_process_info;
|
|
||||||
struct mm_struct *mm = current->mm;
|
struct mm_struct *mm = current->mm;
|
||||||
struct list_head update_list;
|
struct list_head update_list;
|
||||||
struct list_head insert_list;
|
struct list_head insert_list;
|
||||||
@@ -3196,8 +3195,6 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
|
|||||||
|
|
||||||
svms = &p->svms;
|
svms = &p->svms;
|
||||||
|
|
||||||
mutex_lock(&process_info->lock);
|
|
||||||
|
|
||||||
svm_range_list_lock_and_flush_work(svms, mm);
|
svm_range_list_lock_and_flush_work(svms, mm);
|
||||||
|
|
||||||
r = svm_range_is_valid(p, start, size);
|
r = svm_range_is_valid(p, start, size);
|
||||||
@@ -3273,8 +3270,6 @@ out_unlock_range:
|
|||||||
mutex_unlock(&svms->lock);
|
mutex_unlock(&svms->lock);
|
||||||
mmap_read_unlock(mm);
|
mmap_read_unlock(mm);
|
||||||
out:
|
out:
|
||||||
mutex_unlock(&process_info->lock);
|
|
||||||
|
|
||||||
pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
|
pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
|
||||||
&p->svms, start, start + size - 1, r);
|
&p->svms, start, start + size - 1, r);
|
||||||
|
|
||||||
|
|||||||
@@ -314,6 +314,14 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
|
|||||||
ret = -EINVAL;
|
ret = -EINVAL;
|
||||||
goto cleanup;
|
goto cleanup;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
|
||||||
|
(aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) {
|
||||||
|
DRM_DEBUG_DRIVER("No DP connector available for CRC source\n");
|
||||||
|
ret = -EINVAL;
|
||||||
|
goto cleanup;
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
|
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
|
||||||
|
|||||||
@@ -36,6 +36,8 @@
|
|||||||
#include "dm_helpers.h"
|
#include "dm_helpers.h"
|
||||||
|
|
||||||
#include "dc_link_ddc.h"
|
#include "dc_link_ddc.h"
|
||||||
|
#include "ddc_service_types.h"
|
||||||
|
#include "dpcd_defs.h"
|
||||||
|
|
||||||
#include "i2caux_interface.h"
|
#include "i2caux_interface.h"
|
||||||
#include "dmub_cmd.h"
|
#include "dmub_cmd.h"
|
||||||
@@ -157,6 +159,16 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
|
|||||||
};
|
};
|
||||||
|
|
||||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||||
|
static bool needs_dsc_aux_workaround(struct dc_link *link)
|
||||||
|
{
|
||||||
|
if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
|
||||||
|
(link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) &&
|
||||||
|
link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2)
|
||||||
|
return true;
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
|
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
|
||||||
{
|
{
|
||||||
struct dc_sink *dc_sink = aconnector->dc_sink;
|
struct dc_sink *dc_sink = aconnector->dc_sink;
|
||||||
@@ -166,7 +178,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
|
|||||||
u8 *dsc_branch_dec_caps = NULL;
|
u8 *dsc_branch_dec_caps = NULL;
|
||||||
|
|
||||||
aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
|
aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
|
||||||
#if defined(CONFIG_HP_HOOK_WORKAROUND)
|
|
||||||
/*
|
/*
|
||||||
* drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
|
* drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
|
||||||
* because it only check the dsc/fec caps of the "port variable" and not the dock
|
* because it only check the dsc/fec caps of the "port variable" and not the dock
|
||||||
@@ -176,10 +188,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
|
|||||||
* Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux
|
* Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
|
if (!aconnector->dsc_aux && !port->parent->port_parent &&
|
||||||
if (!aconnector->dsc_aux && !port->parent->port_parent)
|
needs_dsc_aux_workaround(aconnector->dc_link))
|
||||||
aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
|
aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
|
||||||
#endif
|
|
||||||
if (!aconnector->dsc_aux)
|
if (!aconnector->dsc_aux)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
|
|||||||
@@ -758,6 +758,18 @@ static bool detect_dp(struct dc_link *link,
|
|||||||
dal_ddc_service_set_transaction_type(link->ddc,
|
dal_ddc_service_set_transaction_type(link->ddc,
|
||||||
sink_caps->transaction_type);
|
sink_caps->transaction_type);
|
||||||
|
|
||||||
|
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||||
|
/* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock
|
||||||
|
* reports DSC support.
|
||||||
|
*/
|
||||||
|
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
|
||||||
|
link->type == dc_connection_mst_branch &&
|
||||||
|
link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
|
||||||
|
link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
|
||||||
|
!link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around)
|
||||||
|
link->wa_flags.dpia_mst_dsc_always_on = true;
|
||||||
|
#endif
|
||||||
|
|
||||||
#if defined(CONFIG_DRM_AMD_DC_HDCP)
|
#if defined(CONFIG_DRM_AMD_DC_HDCP)
|
||||||
/* In case of fallback to SST when topology discovery below fails
|
/* In case of fallback to SST when topology discovery below fails
|
||||||
* HDCP caps will be querried again later by the upper layer (caller
|
* HDCP caps will be querried again later by the upper layer (caller
|
||||||
@@ -1203,6 +1215,10 @@ static bool dc_link_detect_helper(struct dc_link *link,
|
|||||||
LINK_INFO("link=%d, mst branch is now Disconnected\n",
|
LINK_INFO("link=%d, mst branch is now Disconnected\n",
|
||||||
link->link_index);
|
link->link_index);
|
||||||
|
|
||||||
|
/* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. */
|
||||||
|
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
|
||||||
|
link->wa_flags.dpia_mst_dsc_always_on = false;
|
||||||
|
|
||||||
dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
|
dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
|
||||||
|
|
||||||
link->mst_stream_alloc_table.stream_count = 0;
|
link->mst_stream_alloc_table.stream_count = 0;
|
||||||
|
|||||||
@@ -2138,7 +2138,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(
|
|||||||
}
|
}
|
||||||
|
|
||||||
for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++)
|
for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++)
|
||||||
lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = VOLTAGE_SWING_LEVEL0;
|
lt_settings->dpcd_lane_settings[lane].raw = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (status == LINK_TRAINING_SUCCESS) {
|
if (status == LINK_TRAINING_SUCCESS) {
|
||||||
|
|||||||
@@ -1664,6 +1664,10 @@ bool dc_is_stream_unchanged(
|
|||||||
if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
|
if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
|
// Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks
|
||||||
|
if (old_stream->audio_info.mode_count != stream->audio_info.mode_count)
|
||||||
|
return false;
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2252,16 +2256,6 @@ enum dc_status dc_validate_global_state(
|
|||||||
|
|
||||||
if (!new_ctx)
|
if (!new_ctx)
|
||||||
return DC_ERROR_UNEXPECTED;
|
return DC_ERROR_UNEXPECTED;
|
||||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Update link encoder to stream assignment.
|
|
||||||
* TODO: Split out reason allocation from validation.
|
|
||||||
*/
|
|
||||||
if (dc->res_pool->funcs->link_encs_assign && fast_validate == false)
|
|
||||||
dc->res_pool->funcs->link_encs_assign(
|
|
||||||
dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
if (dc->res_pool->funcs->validate_global) {
|
if (dc->res_pool->funcs->validate_global) {
|
||||||
result = dc->res_pool->funcs->validate_global(dc, new_ctx);
|
result = dc->res_pool->funcs->validate_global(dc, new_ctx);
|
||||||
@@ -2313,6 +2307,16 @@ enum dc_status dc_validate_global_state(
|
|||||||
if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
|
if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
|
||||||
result = DC_FAIL_BANDWIDTH_VALIDATE;
|
result = DC_FAIL_BANDWIDTH_VALIDATE;
|
||||||
|
|
||||||
|
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||||
|
/*
|
||||||
|
* Only update link encoder to stream assignment after bandwidth validation passed.
|
||||||
|
* TODO: Split out assignment and validation.
|
||||||
|
*/
|
||||||
|
if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false)
|
||||||
|
dc->res_pool->funcs->link_encs_assign(
|
||||||
|
dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
|
||||||
|
#endif
|
||||||
|
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -508,7 +508,8 @@ union dpia_debug_options {
|
|||||||
uint32_t disable_dpia:1;
|
uint32_t disable_dpia:1;
|
||||||
uint32_t force_non_lttpr:1;
|
uint32_t force_non_lttpr:1;
|
||||||
uint32_t extend_aux_rd_interval:1;
|
uint32_t extend_aux_rd_interval:1;
|
||||||
uint32_t reserved:29;
|
uint32_t disable_mst_dsc_work_around:1;
|
||||||
|
uint32_t reserved:28;
|
||||||
} bits;
|
} bits;
|
||||||
uint32_t raw;
|
uint32_t raw;
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -191,6 +191,8 @@ struct dc_link {
|
|||||||
bool dp_skip_DID2;
|
bool dp_skip_DID2;
|
||||||
bool dp_skip_reset_segment;
|
bool dp_skip_reset_segment;
|
||||||
bool dp_mot_reset_segment;
|
bool dp_mot_reset_segment;
|
||||||
|
/* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
|
||||||
|
bool dpia_mst_dsc_always_on;
|
||||||
} wa_flags;
|
} wa_flags;
|
||||||
struct link_mst_stream_allocation_table mst_stream_alloc_table;
|
struct link_mst_stream_allocation_table mst_stream_alloc_table;
|
||||||
|
|
||||||
|
|||||||
@@ -1468,7 +1468,7 @@ static int smu_disable_dpms(struct smu_context *smu)
|
|||||||
dev_err(adev->dev, "Failed to disable smu features.\n");
|
dev_err(adev->dev, "Failed to disable smu features.\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0) &&
|
if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) &&
|
||||||
adev->gfx.rlc.funcs->stop)
|
adev->gfx.rlc.funcs->stop)
|
||||||
adev->gfx.rlc.funcs->stop(adev);
|
adev->gfx.rlc.funcs->stop(adev);
|
||||||
|
|
||||||
|
|||||||
@@ -1640,6 +1640,9 @@ struct intel_dp {
     struct intel_dp_pcon_frl frl;
 
     struct intel_psr psr;
+
+    /* When we last wrote the OUI for eDP */
+    unsigned long last_oui_write;
 };
 
 enum lspcon_vendor {
@@ -29,6 +29,7 @@
 #include <linux/i2c.h>
 #include <linux/notifier.h>
 #include <linux/slab.h>
+#include <linux/timekeeping.h>
 #include <linux/types.h>
 
 #include <asm/byteorder.h>
@@ -1955,6 +1956,16 @@ intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
 
     if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
         drm_err(&i915->drm, "Failed to write source OUI\n");
+
+    intel_dp->last_oui_write = jiffies;
+}
+
+void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
+{
+    struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+    drm_dbg_kms(&i915->drm, "Performing OUI wait\n");
+    wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 30);
 }
 
 /* If the device supports it, try to set the power state appropriately */
@@ -119,4 +119,6 @@ void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
                  const struct intel_crtc_state *crtc_state);
 void intel_dp_phy_test(struct intel_encoder *encoder);
 
+void intel_dp_wait_source_oui(struct intel_dp *intel_dp);
+
 #endif /* __INTEL_DP_H__ */
@@ -36,6 +36,7 @@
 
 #include "intel_backlight.h"
 #include "intel_display_types.h"
+#include "intel_dp.h"
 #include "intel_dp_aux_backlight.h"
 
 /* TODO:
@@ -106,6 +107,8 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
     int ret;
     u8 tcon_cap[4];
 
+    intel_dp_wait_source_oui(intel_dp);
+
     ret = drm_dp_dpcd_read(aux, INTEL_EDP_HDR_TCON_CAP0, tcon_cap, sizeof(tcon_cap));
     if (ret != sizeof(tcon_cap))
         return false;
@@ -204,6 +207,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
     int ret;
     u8 old_ctrl, ctrl;
 
+    intel_dp_wait_source_oui(intel_dp);
+
     ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl);
     if (ret != 1) {
         drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret);
@@ -293,6 +298,13 @@ intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state,
     struct intel_panel *panel = &connector->panel;
     struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
 
+    if (!panel->backlight.edp.vesa.info.aux_enable) {
+        u32 pwm_level = intel_backlight_invert_pwm_level(connector,
+                                                         panel->backlight.pwm_level_max);
+
+        panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level);
+    }
+
     drm_edp_backlight_enable(&intel_dp->aux, &panel->backlight.edp.vesa.info, level);
 }
 
@@ -304,6 +316,10 @@ static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state
     struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
 
     drm_edp_backlight_disable(&intel_dp->aux, &panel->backlight.edp.vesa.info);
+
+    if (!panel->backlight.edp.vesa.info.aux_enable)
+        panel->backlight.pwm_funcs->disable(old_conn_state,
+                                            intel_backlight_invert_pwm_level(connector, 0));
 }
 
 static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, enum pipe pipe)
@@ -321,6 +337,15 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
     if (ret < 0)
         return ret;
 
+    if (!panel->backlight.edp.vesa.info.aux_enable) {
+        ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+        if (ret < 0) {
+            drm_err(&i915->drm,
+                    "Failed to setup PWM backlight controls for eDP backlight: %d\n",
+                    ret);
+            return ret;
+        }
+    }
     panel->backlight.max = panel->backlight.edp.vesa.info.max;
     panel->backlight.min = 0;
     if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
@@ -340,12 +365,7 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
     struct intel_dp *intel_dp = intel_attached_dp(connector);
     struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 
-    /* TODO: We currently only support AUX only backlight configurations, not backlights which
-     * require a mix of PWM and AUX controls to work. In the mean time, these machines typically
-     * work just fine using normal PWM controls anyway.
-     */
-    if ((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
-        drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
+    if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
         drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
         return true;
     }
@@ -621,13 +621,6 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
                FF_MODE2_GS_TIMER_MASK,
                FF_MODE2_GS_TIMER_224,
                0, false);
-
-    /*
-     * Wa_14012131227:dg1
-     * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
-     */
-    wa_masked_en(wal, GEN7_COMMON_SLICE_CHICKEN1,
-                 GEN9_RHWO_OPTIMIZATION_DISABLE);
 }
 
 static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -4,8 +4,8 @@ config DRM_MSM
     tristate "MSM DRM"
     depends on DRM
     depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST
+    depends on COMMON_CLK
     depends on IOMMU_SUPPORT
-    depends on (OF && COMMON_CLK) || COMPILE_TEST
     depends on QCOM_OCMEM || QCOM_OCMEM=n
     depends on QCOM_LLCC || QCOM_LLCC=n
     depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n
@@ -23,8 +23,10 @@ msm-y := \
     hdmi/hdmi_i2c.o \
     hdmi/hdmi_phy.o \
     hdmi/hdmi_phy_8960.o \
+    hdmi/hdmi_phy_8996.o \
     hdmi/hdmi_phy_8x60.o \
     hdmi/hdmi_phy_8x74.o \
+    hdmi/hdmi_pll_8960.o \
     edp/edp.o \
     edp/edp_aux.o \
     edp/edp_bridge.o \
@@ -37,6 +39,7 @@ msm-y := \
     disp/mdp4/mdp4_dtv_encoder.o \
     disp/mdp4/mdp4_lcdc_encoder.o \
     disp/mdp4/mdp4_lvds_connector.o \
+    disp/mdp4/mdp4_lvds_pll.o \
     disp/mdp4/mdp4_irq.o \
     disp/mdp4/mdp4_kms.o \
     disp/mdp4/mdp4_plane.o \
@@ -116,9 +119,6 @@ msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
     dp/dp_audio.o
 
 msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
-msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
-msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
-msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
 
 msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
 
@@ -1424,17 +1424,24 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
 {
     struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
     struct msm_gpu *gpu = &adreno_gpu->base;
-    u32 gpu_scid, cntl1_regval = 0;
+    u32 cntl1_regval = 0;
 
     if (IS_ERR(a6xx_gpu->llc_mmio))
         return;
 
     if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
-        gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
+        u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
 
         gpu_scid &= 0x1f;
         cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
                        (gpu_scid << 15) | (gpu_scid << 20);
+
+        /* On A660, the SCID programming for UCHE traffic is done in
+         * A6XX_GBIF_SCACHE_CNTL0[14:10]
+         */
+        if (adreno_is_a660_family(adreno_gpu))
+            gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
+                    (1 << 8), (gpu_scid << 10) | (1 << 8));
     }
 
     /*
@@ -1471,13 +1478,6 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
     }
 
     gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
-
-    /* On A660, the SCID programming for UCHE traffic is done in
-     * A6XX_GBIF_SCACHE_CNTL0[14:10]
-     */
-    if (adreno_is_a660_family(adreno_gpu))
-        gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
-                (1 << 8), (gpu_scid << 10) | (1 << 8));
 }
 
 static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
@@ -1640,7 +1640,7 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
     return (unsigned long)busy_time;
 }
 
-void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
+static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
 {
     struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
     struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -777,12 +777,12 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
     struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 
     a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
-        2, sizeof(*a6xx_state->gmu_registers));
+        3, sizeof(*a6xx_state->gmu_registers));
 
     if (!a6xx_state->gmu_registers)
         return;
 
-    a6xx_state->nr_gmu_registers = 2;
+    a6xx_state->nr_gmu_registers = 3;
 
     /* Get the CX GMU registers from AHB */
     _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
@@ -33,6 +33,7 @@ struct dp_aux_private {
     bool read;
     bool no_send_addr;
     bool no_send_stop;
+    bool initted;
     u32 offset;
     u32 segment;
 
@@ -331,6 +332,10 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
     }
 
     mutex_lock(&aux->mutex);
+    if (!aux->initted) {
+        ret = -EIO;
+        goto exit;
+    }
 
     dp_aux_update_offset_and_segment(aux, msg);
     dp_aux_transfer_helper(aux, msg, true);
@@ -380,6 +385,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
     }
 
     aux->cmd_busy = false;
+
+exit:
     mutex_unlock(&aux->mutex);
 
     return ret;
@@ -431,8 +438,13 @@ void dp_aux_init(struct drm_dp_aux *dp_aux)
 
     aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
 
+    mutex_lock(&aux->mutex);
+
     dp_catalog_aux_enable(aux->catalog, true);
     aux->retry_cnt = 0;
+    aux->initted = true;
+
+    mutex_unlock(&aux->mutex);
 }
 
 void dp_aux_deinit(struct drm_dp_aux *dp_aux)
@@ -441,7 +453,12 @@ void dp_aux_deinit(struct drm_dp_aux *dp_aux)
 
     aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
 
+    mutex_lock(&aux->mutex);
+
+    aux->initted = false;
     dp_catalog_aux_enable(aux->catalog, false);
+
+    mutex_unlock(&aux->mutex);
 }
 
 int dp_aux_register(struct drm_dp_aux *dp_aux)
@@ -1658,6 +1658,8 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
     if (!prop) {
         DRM_DEV_DEBUG(dev,
             "failed to find data lane mapping, using default\n");
+        /* Set the number of date lanes to 4 by default. */
+        msm_host->num_data_lanes = 4;
         return 0;
     }
 
@@ -77,6 +77,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
         goto free_priv;
 
     pm_runtime_get_sync(&gpu->pdev->dev);
+    msm_gpu_hw_init(gpu);
     show_priv->state = gpu->funcs->gpu_state_get(gpu);
     pm_runtime_put_sync(&gpu->pdev->dev);
 
@@ -967,29 +967,18 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
     return ret;
 }
 
-static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
-        struct drm_file *file)
+static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
+        ktime_t timeout)
 {
-    struct msm_drm_private *priv = dev->dev_private;
-    struct drm_msm_wait_fence *args = data;
-    ktime_t timeout = to_ktime(args->timeout);
-    struct msm_gpu_submitqueue *queue;
-    struct msm_gpu *gpu = priv->gpu;
     struct dma_fence *fence;
     int ret;
 
-    if (args->pad) {
-        DRM_ERROR("invalid pad: %08x\n", args->pad);
+    if (fence_id > queue->last_fence) {
+        DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
+                              fence_id, queue->last_fence);
         return -EINVAL;
     }
 
-    if (!gpu)
-        return 0;
-
-    queue = msm_submitqueue_get(file->driver_priv, args->queueid);
-    if (!queue)
-        return -ENOENT;
-
     /*
      * Map submitqueue scoped "seqno" (which is actually an idr key)
      * back to underlying dma-fence
@@ -1001,7 +990,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
     ret = mutex_lock_interruptible(&queue->lock);
     if (ret)
         return ret;
-    fence = idr_find(&queue->fence_idr, args->fence);
+    fence = idr_find(&queue->fence_idr, fence_id);
     if (fence)
         fence = dma_fence_get_rcu(fence);
     mutex_unlock(&queue->lock);
@@ -1017,6 +1006,32 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
     }
 
     dma_fence_put(fence);
+
+    return ret;
+}
+
+static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
+        struct drm_file *file)
+{
+    struct msm_drm_private *priv = dev->dev_private;
+    struct drm_msm_wait_fence *args = data;
+    struct msm_gpu_submitqueue *queue;
+    int ret;
+
+    if (args->pad) {
+        DRM_ERROR("invalid pad: %08x\n", args->pad);
+        return -EINVAL;
+    }
+
+    if (!priv->gpu)
+        return 0;
+
+    queue = msm_submitqueue_get(file->driver_priv, args->queueid);
+    if (!queue)
+        return -ENOENT;
+
+    ret = wait_fence(queue, args->fence, to_ktime(args->timeout));
+
     msm_submitqueue_put(queue);
 
     return ret;
@@ -1056,8 +1056,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
 {
     struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-    vma->vm_flags &= ~VM_PFNMAP;
-    vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+    vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
     vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
 
     return 0;
@@ -1121,7 +1120,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
         break;
         fallthrough;
     default:
-        DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
+        DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
                 (flags & MSM_BO_CACHE_MASK));
         return -EINVAL;
     }
@@ -772,6 +772,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
             args->nr_cmds);
     if (IS_ERR(submit)) {
         ret = PTR_ERR(submit);
+        submit = NULL;
         goto out_unlock;
     }
 
@@ -904,6 +905,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
     drm_sched_entity_push_job(&submit->base);
 
     args->fence = submit->fence_id;
+    queue->last_fence = submit->fence_id;
 
     msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
     msm_process_post_deps(post_deps, args->nr_out_syncobjs,
@@ -359,6 +359,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
 * @ring_nr: the ringbuffer used by this submitqueue, which is determined
 *           by the submitqueue's priority
 * @faults: the number of GPU hangs associated with this submitqueue
+ * @last_fence: the sequence number of the last allocated fence (for error
+ *           checking)
 * @ctx: the per-drm_file context associated with the submitqueue (ie.
 *           which set of pgtables do submits jobs associated with the
 *           submitqueue use)
@@ -374,6 +376,7 @@ struct msm_gpu_submitqueue {
     u32 flags;
     u32 ring_nr;
     int faults;
+    uint32_t last_fence;
     struct msm_file_private *ctx;
     struct list_head node;
     struct idr fence_idr;
@@ -20,6 +20,10 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
     struct msm_gpu *gpu = dev_to_gpu(dev);
     struct dev_pm_opp *opp;
 
+    /*
+     * Note that devfreq_recommended_opp() can modify the freq
+     * to something that actually is in the opp table:
+     */
     opp = devfreq_recommended_opp(dev, freq, flags);
 
     /*
@@ -28,6 +32,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
      */
     if (gpu->devfreq.idle_freq) {
         gpu->devfreq.idle_freq = *freq;
+        dev_pm_opp_put(opp);
         return 0;
     }
 
@@ -203,9 +208,6 @@ static void msm_devfreq_idle_work(struct kthread_work *work)
     struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq);
     unsigned long idle_freq, target_freq = 0;
 
-    if (!df->devfreq)
-        return;
-
     /*
      * Hold devfreq lock to synchronize with get_dev_status()/
      * target() callbacks
@@ -227,6 +229,9 @@ void msm_devfreq_idle(struct msm_gpu *gpu)
 {
     struct msm_gpu_devfreq *df = &gpu->devfreq;
 
+    if (!df->devfreq)
+        return;
+
     msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1),
-                           HRTIMER_MODE_ABS);
+                           HRTIMER_MODE_REL);
 }
@@ -337,10 +337,10 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
|
|||||||
struct drm_device *dev = state->dev;
|
struct drm_device *dev = state->dev;
|
||||||
struct vc4_dev *vc4 = to_vc4_dev(dev);
|
struct vc4_dev *vc4 = to_vc4_dev(dev);
|
||||||
struct vc4_hvs *hvs = vc4->hvs;
|
struct vc4_hvs *hvs = vc4->hvs;
|
||||||
struct drm_crtc_state *old_crtc_state;
|
|
||||||
struct drm_crtc_state *new_crtc_state;
|
struct drm_crtc_state *new_crtc_state;
|
||||||
struct drm_crtc *crtc;
|
struct drm_crtc *crtc;
|
||||||
struct vc4_hvs_state *old_hvs_state;
|
struct vc4_hvs_state *old_hvs_state;
|
||||||
|
unsigned int channel;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
|
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
|
||||||
@@ -353,30 +353,32 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
|
|||||||
vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
|
vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (vc4->hvs->hvs5)
|
|
||||||
clk_set_min_rate(hvs->core_clk, 500000000);
|
|
||||||
|
|
||||||
old_hvs_state = vc4_hvs_get_old_global_state(state);
|
old_hvs_state = vc4_hvs_get_old_global_state(state);
|
||||||
if (!old_hvs_state)
|
if (IS_ERR(old_hvs_state))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
|
for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
|
||||||
struct vc4_crtc_state *vc4_crtc_state =
|
struct drm_crtc_commit *commit;
|
||||||
to_vc4_crtc_state(old_crtc_state);
|
|
||||||
unsigned int channel = vc4_crtc_state->assigned_channel;
|
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (channel == VC4_HVS_CHANNEL_DISABLED)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
if (!old_hvs_state->fifo_state[channel].in_use)
|
if (!old_hvs_state->fifo_state[channel].in_use)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit);
|
commit = old_hvs_state->fifo_state[channel].pending_commit;
|
||||||
|
if (!commit)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
ret = drm_crtc_commit_wait(commit);
|
||||||
if (ret)
|
if (ret)
|
||||||
drm_err(dev, "Timed out waiting for commit\n");
|
drm_err(dev, "Timed out waiting for commit\n");
|
||||||
|
|
||||||
|
drm_crtc_commit_put(commit);
|
||||||
|
old_hvs_state->fifo_state[channel].pending_commit = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (vc4->hvs->hvs5)
|
||||||
|
clk_set_min_rate(hvs->core_clk, 500000000);
|
||||||
|
|
||||||
drm_atomic_helper_commit_modeset_disables(dev, state);
|
drm_atomic_helper_commit_modeset_disables(dev, state);
|
||||||
|
|
||||||
vc4_ctm_commit(vc4, state);
|
vc4_ctm_commit(vc4, state);
|
||||||
@@ -410,8 +412,8 @@ static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
|
|||||||
unsigned int i;
|
unsigned int i;
|
||||||
|
|
||||||
hvs_state = vc4_hvs_get_new_global_state(state);
|
hvs_state = vc4_hvs_get_new_global_state(state);
|
||||||
if (!hvs_state)
|
if (WARN_ON(IS_ERR(hvs_state)))
|
||||||
return -EINVAL;
|
return PTR_ERR(hvs_state);
|
||||||
|
|
||||||
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
|
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
|
||||||
struct vc4_crtc_state *vc4_crtc_state =
|
struct vc4_crtc_state *vc4_crtc_state =
|
||||||
@@ -668,12 +670,6 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
|
|||||||
|
|
||||||
for (i = 0; i < HVS_NUM_CHANNELS; i++) {
|
for (i = 0; i < HVS_NUM_CHANNELS; i++) {
|
||||||
state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
|
state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
|
||||||
|
|
||||||
if (!old_state->fifo_state[i].pending_commit)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
state->fifo_state[i].pending_commit =
|
|
||||||
drm_crtc_commit_get(old_state->fifo_state[i].pending_commit);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return &state->base;
|
return &state->base;
|
||||||
@@ -762,8 +758,8 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
|
|||||||
unsigned int i;
|
unsigned int i;
|
||||||
|
|
||||||
hvs_new_state = vc4_hvs_get_global_state(state);
|
hvs_new_state = vc4_hvs_get_global_state(state);
|
||||||
if (!hvs_new_state)
|
if (IS_ERR(hvs_new_state))
|
||||||
return -EINVAL;
|
return PTR_ERR(hvs_new_state);
|
||||||
|
|
||||||
for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
|
for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
|
||||||
if (!hvs_new_state->fifo_state[i].in_use)
|
if (!hvs_new_state->fifo_state[i].in_use)
|
||||||
|
|||||||
@@ -157,36 +157,6 @@ static void virtio_gpu_config_changed(struct virtio_device *vdev)
|
|||||||
schedule_work(&vgdev->config_changed_work);
|
schedule_work(&vgdev->config_changed_work);
|
||||||
}
|
}
|
||||||
|
|
||||||
static __poll_t virtio_gpu_poll(struct file *filp,
|
|
||||||
struct poll_table_struct *wait)
|
|
||||||
{
|
|
||||||
struct drm_file *drm_file = filp->private_data;
|
|
||||||
struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
|
|
||||||
struct drm_device *dev = drm_file->minor->dev;
|
|
||||||
struct virtio_gpu_device *vgdev = dev->dev_private;
|
|
||||||
struct drm_pending_event *e = NULL;
|
|
||||||
__poll_t mask = 0;
|
|
||||||
|
|
||||||
if (!vgdev->has_virgl_3d || !vfpriv || !vfpriv->ring_idx_mask)
|
|
||||||
return drm_poll(filp, wait);
|
|
||||||
|
|
||||||
poll_wait(filp, &drm_file->event_wait, wait);
|
|
||||||
|
|
||||||
if (!list_empty(&drm_file->event_list)) {
|
|
||||||
spin_lock_irq(&dev->event_lock);
|
|
||||||
e = list_first_entry(&drm_file->event_list,
|
|
||||||
struct drm_pending_event, link);
|
|
||||||
drm_file->event_space += e->event->length;
|
|
||||||
list_del(&e->link);
|
|
||||||
spin_unlock_irq(&dev->event_lock);
|
|
||||||
|
|
||||||
kfree(e);
|
|
||||||
mask |= EPOLLIN | EPOLLRDNORM;
|
|
||||||
}
|
|
||||||
|
|
||||||
return mask;
|
|
||||||
}
|
|
||||||
|
|
||||||
static struct virtio_device_id id_table[] = {
|
static struct virtio_device_id id_table[] = {
|
||||||
{ VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID },
|
{ VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID },
|
||||||
{ 0 },
|
{ 0 },
|
||||||
@@ -226,17 +196,7 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>");
|
|||||||
MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
|
MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
|
||||||
MODULE_AUTHOR("Alon Levy");
|
MODULE_AUTHOR("Alon Levy");
|
||||||
|
|
||||||
static const struct file_operations virtio_gpu_driver_fops = {
|
DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
|
||||||
.owner = THIS_MODULE,
|
|
||||||
.open = drm_open,
|
|
||||||
.release = drm_release,
|
|
||||||
.unlocked_ioctl = drm_ioctl,
|
|
||||||
.compat_ioctl = drm_compat_ioctl,
|
|
||||||
.poll = virtio_gpu_poll,
|
|
||||||
.read = drm_read,
|
|
||||||
.llseek = noop_llseek,
|
|
||||||
.mmap = drm_gem_mmap
|
|
||||||
};
|
|
||||||
|
|
||||||
static const struct drm_driver driver = {
|
static const struct drm_driver driver = {
|
||||||
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
|
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
|
||||||
|
|||||||
@@ -138,7 +138,6 @@ struct virtio_gpu_fence_driver {
|
|||||||
spinlock_t lock;
|
spinlock_t lock;
|
||||||
};
|
};
|
||||||
|
|
||||||
#define VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL 0x10000000
|
|
||||||
struct virtio_gpu_fence_event {
|
struct virtio_gpu_fence_event {
|
||||||
struct drm_pending_event base;
|
struct drm_pending_event base;
|
||||||
struct drm_event event;
|
struct drm_event event;
|
||||||
|
|||||||
@@ -54,7 +54,7 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev,
|
|||||||
if (!e)
|
if (!e)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL;
|
e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
|
||||||
e->event.length = sizeof(e->event);
|
e->event.length = sizeof(e->event);
|
||||||
|
|
||||||
ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
|
ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
|
||||||
|
|||||||
@@ -195,8 +195,9 @@ static u32 cbus_i2c_func(struct i2c_adapter *adapter)
 }
 
 static const struct i2c_algorithm cbus_i2c_algo = {
     .smbus_xfer = cbus_i2c_smbus_xfer,
+    .smbus_xfer_atomic = cbus_i2c_smbus_xfer,
     .functionality = cbus_i2c_func,
 };
 
 static int cbus_i2c_remove(struct platform_device *pdev)
@@ -423,8 +423,8 @@ static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd)
     if (!(ipd & REG_INT_MBRF))
         return;
 
-    /* ack interrupt */
-    i2c_writel(i2c, REG_INT_MBRF, REG_IPD);
+    /* ack interrupt (read also produces a spurious START flag, clear it too) */
+    i2c_writel(i2c, REG_INT_MBRF | REG_INT_START, REG_IPD);
 
     /* Can only handle a maximum of 32 bytes at a time */
     if (len > 32)
|||||||
@@ -1493,6 +1493,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
|
|||||||
{
|
{
|
||||||
struct stm32f7_i2c_dev *i2c_dev = data;
|
struct stm32f7_i2c_dev *i2c_dev = data;
|
||||||
struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
|
struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
|
||||||
|
struct stm32_i2c_dma *dma = i2c_dev->dma;
|
||||||
void __iomem *base = i2c_dev->base;
|
void __iomem *base = i2c_dev->base;
|
||||||
u32 status, mask;
|
u32 status, mask;
|
||||||
int ret = IRQ_HANDLED;
|
int ret = IRQ_HANDLED;
|
||||||
@@ -1518,6 +1519,10 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
|
|||||||
dev_dbg(i2c_dev->dev, "<%s>: Receive NACK (addr %x)\n",
|
dev_dbg(i2c_dev->dev, "<%s>: Receive NACK (addr %x)\n",
|
||||||
__func__, f7_msg->addr);
|
__func__, f7_msg->addr);
|
||||||
writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR);
|
writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR);
|
||||||
|
if (i2c_dev->use_dma) {
|
||||||
|
stm32f7_i2c_disable_dma_req(i2c_dev);
|
||||||
|
dmaengine_terminate_async(dma->chan_using);
|
||||||
|
}
|
||||||
f7_msg->result = -ENXIO;
|
f7_msg->result = -ENXIO;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1533,7 +1538,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
|
|||||||
/* Clear STOP flag */
|
/* Clear STOP flag */
|
||||||
writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR);
|
writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR);
|
||||||
|
|
||||||
if (i2c_dev->use_dma) {
|
if (i2c_dev->use_dma && !f7_msg->result) {
|
||||||
ret = IRQ_WAKE_THREAD;
|
ret = IRQ_WAKE_THREAD;
|
||||||
} else {
|
} else {
|
||||||
i2c_dev->master_mode = false;
|
i2c_dev->master_mode = false;
|
||||||
@@ -1546,7 +1551,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
|
|||||||
if (f7_msg->stop) {
|
if (f7_msg->stop) {
|
||||||
mask = STM32F7_I2C_CR2_STOP;
|
mask = STM32F7_I2C_CR2_STOP;
|
||||||
stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask);
|
stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask);
|
||||||
} else if (i2c_dev->use_dma) {
|
} else if (i2c_dev->use_dma && !f7_msg->result) {
|
||||||
ret = IRQ_WAKE_THREAD;
|
ret = IRQ_WAKE_THREAD;
|
||||||
} else if (f7_msg->smbus) {
|
} else if (f7_msg->smbus) {
|
||||||
stm32f7_i2c_smbus_rep_start(i2c_dev);
|
stm32f7_i2c_smbus_rep_start(i2c_dev);
|
||||||
@@ -1583,7 +1588,7 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
|
|||||||
if (!ret) {
|
if (!ret) {
|
||||||
dev_dbg(i2c_dev->dev, "<%s>: Timed out\n", __func__);
|
dev_dbg(i2c_dev->dev, "<%s>: Timed out\n", __func__);
|
||||||
stm32f7_i2c_disable_dma_req(i2c_dev);
|
stm32f7_i2c_disable_dma_req(i2c_dev);
|
||||||
dmaengine_terminate_all(dma->chan_using);
|
dmaengine_terminate_async(dma->chan_using);
|
||||||
f7_msg->result = -ETIMEDOUT;
|
f7_msg->result = -ETIMEDOUT;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1660,7 +1665,7 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
|
|||||||
/* Disable dma */
|
/* Disable dma */
|
||||||
if (i2c_dev->use_dma) {
|
if (i2c_dev->use_dma) {
|
||||||
stm32f7_i2c_disable_dma_req(i2c_dev);
|
stm32f7_i2c_disable_dma_req(i2c_dev);
|
||||||
dmaengine_terminate_all(dma->chan_using);
|
dmaengine_terminate_async(dma->chan_using);
|
||||||
}
|
}
|
||||||
|
|
||||||
i2c_dev->master_mode = false;
|
i2c_dev->master_mode = false;
|
||||||
@@ -1696,12 +1701,26 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
|
|||||||
time_left = wait_for_completion_timeout(&i2c_dev->complete,
|
time_left = wait_for_completion_timeout(&i2c_dev->complete,
|
||||||
i2c_dev->adap.timeout);
|
i2c_dev->adap.timeout);
|
||||||
ret = f7_msg->result;
|
ret = f7_msg->result;
|
||||||
|
if (ret) {
|
||||||
|
if (i2c_dev->use_dma)
|
||||||
|
dmaengine_synchronize(dma->chan_using);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* It is possible that some unsent data have already been
|
||||||
|
* written into TXDR. To avoid sending old data in a
|
||||||
|
* further transfer, flush TXDR in case of any error
|
||||||
|
*/
|
||||||
|
writel_relaxed(STM32F7_I2C_ISR_TXE,
|
||||||
|
i2c_dev->base + STM32F7_I2C_ISR);
|
||||||
|
goto pm_free;
|
||||||
|
}
|
||||||
|
|
||||||
if (!time_left) {
|
if (!time_left) {
|
||||||
dev_dbg(i2c_dev->dev, "Access to slave 0x%x timed out\n",
|
dev_dbg(i2c_dev->dev, "Access to slave 0x%x timed out\n",
|
||||||
i2c_dev->msg->addr);
|
i2c_dev->msg->addr);
|
||||||
if (i2c_dev->use_dma)
|
if (i2c_dev->use_dma)
|
||||||
dmaengine_terminate_all(dma->chan_using);
|
dmaengine_terminate_sync(dma->chan_using);
|
||||||
|
stm32f7_i2c_wait_free_bus(i2c_dev);
|
||||||
ret = -ETIMEDOUT;
|
ret = -ETIMEDOUT;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1744,13 +1763,25 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
|
|||||||
timeout = wait_for_completion_timeout(&i2c_dev->complete,
|
timeout = wait_for_completion_timeout(&i2c_dev->complete,
|
||||||
i2c_dev->adap.timeout);
|
i2c_dev->adap.timeout);
|
||||||
ret = f7_msg->result;
|
ret = f7_msg->result;
|
||||||
if (ret)
|
if (ret) {
|
||||||
|
if (i2c_dev->use_dma)
|
||||||
|
dmaengine_synchronize(dma->chan_using);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* It is possible that some unsent data have already been
|
||||||
|
* written into TXDR. To avoid sending old data in a
|
||||||
|
* further transfer, flush TXDR in case of any error
|
||||||
|
*/
|
||||||
|
writel_relaxed(STM32F7_I2C_ISR_TXE,
|
||||||
|
i2c_dev->base + STM32F7_I2C_ISR);
|
||||||
goto pm_free;
|
goto pm_free;
|
||||||
|
}
|
||||||
|
|
||||||
if (!timeout) {
|
if (!timeout) {
|
||||||
dev_dbg(dev, "Access to slave 0x%x timed out\n", f7_msg->addr);
|
dev_dbg(dev, "Access to slave 0x%x timed out\n", f7_msg->addr);
|
||||||
if (i2c_dev->use_dma)
|
if (i2c_dev->use_dma)
|
||||||
dmaengine_terminate_all(dma->chan_using);
|
dmaengine_terminate_sync(dma->chan_using);
|
||||||
|
stm32f7_i2c_wait_free_bus(i2c_dev);
|
||||||
ret = -ETIMEDOUT;
|
ret = -ETIMEDOUT;
|
||||||
goto pm_free;
|
goto pm_free;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -349,6 +349,19 @@ static const struct of_device_id b53_spi_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, b53_spi_of_match);
 
+static const struct spi_device_id b53_spi_ids[] = {
+    { .name = "bcm5325" },
+    { .name = "bcm5365" },
+    { .name = "bcm5395" },
+    { .name = "bcm5397" },
+    { .name = "bcm5398" },
+    { .name = "bcm53115" },
+    { .name = "bcm53125" },
+    { .name = "bcm53128" },
+    { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, b53_spi_ids);
+
 static struct spi_driver b53_spi_driver = {
     .driver = {
         .name = "b53-switch",
@@ -357,6 +370,7 @@ static struct spi_driver b53_spi_driver = {
     .probe = b53_spi_probe,
     .remove = b53_spi_remove,
     .shutdown = b53_spi_shutdown,
+    .id_table = b53_spi_ids,
 };
 
 module_spi_driver(b53_spi_driver);
|||||||
@@ -50,11 +50,22 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
|
|||||||
}
|
}
|
||||||
|
|
||||||
static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
|
static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
|
||||||
u16 status, u16 lpa,
|
u16 ctrl, u16 status, u16 lpa,
|
||||||
struct phylink_link_state *state)
|
struct phylink_link_state *state)
|
||||||
{
|
{
|
||||||
|
state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
|
||||||
|
|
||||||
if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
|
if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
|
||||||
state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
|
/* The Spped and Duplex Resolved register is 1 if AN is enabled
|
||||||
|
* and complete, or if AN is disabled. So with disabled AN we
|
||||||
|
* still get here on link up. But we want to set an_complete
|
||||||
|
* only if AN was enabled, thus we look at BMCR_ANENABLE.
|
||||||
|
* (According to 802.3-2008 section 22.2.4.2.10, we should be
|
||||||
|
* able to get this same value from BMSR_ANEGCAPABLE, but tests
|
||||||
|
* show that these Marvell PHYs don't conform to this part of
|
||||||
|
* the specificaion - BMSR_ANEGCAPABLE is simply always 1.)
|
||||||
|
*/
|
||||||
|
state->an_complete = !!(ctrl & BMCR_ANENABLE);
|
||||||
state->duplex = status &
|
state->duplex = status &
|
||||||
MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
|
MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
|
||||||
DUPLEX_FULL : DUPLEX_HALF;
|
DUPLEX_FULL : DUPLEX_HALF;
|
||||||
@@ -81,6 +92,18 @@ static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
|
|||||||
dev_err(chip->dev, "invalid PHY speed\n");
|
dev_err(chip->dev, "invalid PHY speed\n");
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
} else if (state->link &&
|
||||||
|
state->interface != PHY_INTERFACE_MODE_SGMII) {
|
||||||
|
/* If Speed and Duplex Resolved register is 0 and link is up, it
|
||||||
|
* means that AN was enabled, but link partner had it disabled
|
||||||
|
* and the PHY invoked the Auto-Negotiation Bypass feature and
|
||||||
|
* linked anyway.
|
||||||
|
*/
|
||||||
|
state->duplex = DUPLEX_FULL;
|
||||||
|
if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
|
||||||
|
state->speed = SPEED_2500;
|
||||||
|
else
|
||||||
|
state->speed = SPEED_1000;
|
||||||
} else {
|
} else {
|
||||||
state->link = false;
|
state->link = false;
|
||||||
}
|
}
|
||||||
@@ -168,9 +191,15 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
|
|||||||
int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
|
int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
|
||||||
int lane, struct phylink_link_state *state)
|
int lane, struct phylink_link_state *state)
|
||||||
{
|
{
|
||||||
u16 lpa, status;
|
u16 lpa, status, ctrl;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
|
err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl);
|
||||||
|
if (err) {
|
||||||
|
dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
err = mv88e6352_serdes_read(chip, 0x11, &status);
|
err = mv88e6352_serdes_read(chip, 0x11, &status);
|
||||||
if (err) {
|
if (err) {
|
||||||
dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err);
|
dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err);
|
||||||
@@ -183,7 +212,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
|
|||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state);
|
return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
|
||||||
}
|
}
|
||||||
|
|
||||||
int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
|
int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
|
||||||
@@ -883,9 +912,16 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
|
|||||||
static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
|
static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
|
||||||
int port, int lane, struct phylink_link_state *state)
|
int port, int lane, struct phylink_link_state *state)
|
||||||
{
|
{
|
||||||
u16 lpa, status;
|
u16 lpa, status, ctrl;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
|
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
|
||||||
|
MV88E6390_SGMII_BMCR, &ctrl);
|
||||||
|
if (err) {
|
||||||
|
dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
|
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
|
||||||
MV88E6390_SGMII_PHY_STATUS, &status);
|
MV88E6390_SGMII_PHY_STATUS, &status);
|
||||||
if (err) {
|
if (err) {
|
||||||
@@ -900,7 +936,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
|
|||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state);
|
return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
|
static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
|
||||||
@@ -1271,9 +1307,31 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
|
static int mv88e6393x_serdes_power_lane(struct mv88e6xxx_chip *chip, int lane,
|
||||||
|
bool on)
|
||||||
{
|
{
|
||||||
u16 reg, pcs;
|
u16 reg;
|
||||||
|
int err;
|
||||||
|
|
||||||
|
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
|
||||||
|
MV88E6393X_SERDES_CTRL1, ®);
|
||||||
|
if (err)
|
||||||
|
return err;
|
||||||
|
|
||||||
|
if (on)
|
||||||
|
reg &= ~(MV88E6393X_SERDES_CTRL1_TX_PDOWN |
|
||||||
|
MV88E6393X_SERDES_CTRL1_RX_PDOWN);
|
||||||
|
else
|
||||||
|
reg |= MV88E6393X_SERDES_CTRL1_TX_PDOWN |
|
||||||
|
MV88E6393X_SERDES_CTRL1_RX_PDOWN;
|
||||||
|
|
||||||
|
return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
|
||||||
|
MV88E6393X_SERDES_CTRL1, reg);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int mv88e6393x_serdes_erratum_4_6(struct mv88e6xxx_chip *chip, int lane)
|
||||||
|
{
|
||||||
|
u16 reg;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
/* mv88e6393x family errata 4.6:
|
/* mv88e6393x family errata 4.6:
|
||||||
@@ -1284,26 +1342,45 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
|
|||||||
* It seems that after this workaround the SERDES is automatically
|
* It seems that after this workaround the SERDES is automatically
|
||||||
* powered up (the bit is cleared), so power it down.
|
* powered up (the bit is cleared), so power it down.
|
||||||
*/
|
*/
|
||||||
if (lane == MV88E6393X_PORT0_LANE || lane == MV88E6393X_PORT9_LANE ||
|
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
|
||||||
lane == MV88E6393X_PORT10_LANE) {
|
MV88E6393X_SERDES_POC, ®);
|
||||||
err = mv88e6390_serdes_read(chip, lane,
|
if (err)
|
||||||
MDIO_MMD_PHYXS,
|
return err;
|
||||||
MV88E6393X_SERDES_POC, ®);
|
|
||||||
if (err)
|
|
||||||
return err;
|
|
||||||
|
|
||||||
reg &= ~MV88E6393X_SERDES_POC_PDOWN;
|
reg &= ~MV88E6393X_SERDES_POC_PDOWN;
|
||||||
reg |= MV88E6393X_SERDES_POC_RESET;
|
reg |= MV88E6393X_SERDES_POC_RESET;
|
||||||
|
|
||||||
err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
|
err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
|
||||||
MV88E6393X_SERDES_POC, reg);
|
MV88E6393X_SERDES_POC, reg);
|
||||||
if (err)
|
if (err)
|
||||||
return err;
|
return err;
|
||||||
|
|
||||||
err = mv88e6390_serdes_power_sgmii(chip, lane, false);
|
err = mv88e6390_serdes_power_sgmii(chip, lane, false);
|
||||||
if (err)
|
if (err)
|
||||||
return err;
|
return err;
|
||||||
}
|
|
||||||
|
return mv88e6393x_serdes_power_lane(chip, lane, false);
|
||||||
|
}
|
||||||
|
|
||||||
|
int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip)
|
||||||
|
{
|
||||||
|
int err;
|
||||||
|
|
||||||
|
err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT0_LANE);
|
||||||
|
if (err)
|
||||||
|
return err;
|
||||||
|
|
||||||
|
err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT9_LANE);
|
||||||
|
if (err)
|
||||||
|
return err;
|
||||||
|
|
||||||
|
return mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT10_LANE);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int mv88e6393x_serdes_erratum_4_8(struct mv88e6xxx_chip *chip, int lane)
|
||||||
|
{
|
||||||
|
u16 reg, pcs;
|
||||||
|
int err;
|
||||||
|
|
||||||
/* mv88e6393x family errata 4.8:
|
/* mv88e6393x family errata 4.8:
|
||||||
* When a SERDES port is operating in 1000BASE-X or SGMII mode link may
|
* When a SERDES port is operating in 1000BASE-X or SGMII mode link may
|
||||||
@@ -1334,38 +1411,149 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
|
|||||||
MV88E6393X_ERRATA_4_8_REG, reg);
|
MV88E6393X_ERRATA_4_8_REG, reg);
|
||||||
}
|
}
|
||||||
|
|
||||||
int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip)
|
static int mv88e6393x_serdes_erratum_5_2(struct mv88e6xxx_chip *chip, int lane,
|
||||||
|
u8 cmode)
|
||||||
{
|
{
|
||||||
|
static const struct {
|
||||||
|
u16 dev, reg, val, mask;
|
||||||
|
} fixes[] = {
|
||||||
|
{ MDIO_MMD_VEND1, 0x8093, 0xcb5a, 0xffff },
|
||||||
|
{ MDIO_MMD_VEND1, 0x8171, 0x7088, 0xffff },
|
||||||
|
{ MDIO_MMD_VEND1, 0x80c9, 0x311a, 0xffff },
|
||||||
|
{ MDIO_MMD_VEND1, 0x80a2, 0x8000, 0xff7f },
|
||||||
|
{ MDIO_MMD_VEND1, 0x80a9, 0x0000, 0xfff0 },
|
||||||
|
{ MDIO_MMD_VEND1, 0x80a3, 0x0000, 0xf8ff },
|
||||||
|
{ MDIO_MMD_PHYXS, MV88E6393X_SERDES_POC,
|
||||||
|
MV88E6393X_SERDES_POC_RESET, MV88E6393X_SERDES_POC_RESET },
|
||||||
|
};
|
||||||
|
int err, i;
|
||||||
|
u16 reg;
|
||||||
|
|
||||||
|
/* mv88e6393x family errata 5.2:
|
||||||
|
* For optimal signal integrity the following sequence should be applied
|
||||||
|
* to SERDES operating in 10G mode. These registers only apply to 10G
|
||||||
|
* operation and have no effect on other speeds.
|
||||||
|
*/
|
||||||
|
if (cmode != MV88E6393X_PORT_STS_CMODE_10GBASER)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
for (i = 0; i < ARRAY_SIZE(fixes); ++i) {
|
||||||
|
err = mv88e6390_serdes_read(chip, lane, fixes[i].dev,
|
||||||
|
fixes[i].reg, ®);
|
||||||
|
if (err)
|
||||||
|
return err;
|
||||||
|
|
||||||
|
reg &= ~fixes[i].mask;
|
||||||
|
reg |= fixes[i].val;
|
||||||
|
|
||||||
|
err = mv88e6390_serdes_write(chip, lane, fixes[i].dev,
|
||||||
|
fixes[i].reg, reg);
|
||||||
|
if (err)
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int mv88e6393x_serdes_fix_2500basex_an(struct mv88e6xxx_chip *chip,
|
||||||
|
int lane, u8 cmode, bool on)
|
||||||
|
{
|
||||||
|
u16 reg;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT0_LANE);
|
if (cmode != MV88E6XXX_PORT_STS_CMODE_2500BASEX)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
/* Inband AN is broken on Amethyst in 2500base-x mode when set by
|
||||||
|
* standard mechanism (via cmode).
|
||||||
|
* We can get around this by configuring the PCS mode to 1000base-x
|
||||||
|
* and then writing value 0x58 to register 1e.8000. (This must be done
|
||||||
|
* while SerDes receiver and transmitter are disabled, which is, when
|
||||||
|
* this function is called.)
|
||||||
|
* It seem that when we do this configuration to 2500base-x mode (by
|
||||||
|
* changing PCS mode to 1000base-x and frequency to 3.125 GHz from
|
||||||
|
* 1.25 GHz) and then configure to sgmii or 1000base-x, the device
|
||||||
|
* thinks that it already has SerDes at 1.25 GHz and does not change
|
||||||
|
* the 1e.8000 register, leaving SerDes at 3.125 GHz.
|
||||||
|
* To avoid this, change PCS mode back to 2500base-x when disabling
|
||||||
|
* SerDes from 2500base-x mode.
|
||||||
|
*/
|
||||||
|
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
|
||||||
|
MV88E6393X_SERDES_POC, ®);
|
||||||
if (err)
|
if (err)
|
||||||
return err;
|
return err;
|
||||||
|
|
||||||
err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT9_LANE);
|
reg &= ~(MV88E6393X_SERDES_POC_PCS_MASK | MV88E6393X_SERDES_POC_AN);
|
||||||
|
if (on)
|
||||||
|
reg |= MV88E6393X_SERDES_POC_PCS_1000BASEX |
|
||||||
|
MV88E6393X_SERDES_POC_AN;
|
||||||
|
else
|
||||||
|
reg |= MV88E6393X_SERDES_POC_PCS_2500BASEX;
|
||||||
|
reg |= MV88E6393X_SERDES_POC_RESET;
|
||||||
|
|
||||||
|
err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
|
||||||
|
MV88E6393X_SERDES_POC, reg);
|
||||||
if (err)
|
if (err)
|
||||||
return err;
|
return err;
|
||||||
|
|
||||||
return mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT10_LANE);
|
err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_VEND1, 0x8000, 0x58);
|
||||||
|
if (err)
|
||||||
|
return err;
|
||||||
|
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
|
int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
|
||||||
bool on)
|
bool on)
|
||||||
{
|
{
|
||||||
u8 cmode = chip->ports[port].cmode;
|
u8 cmode = chip->ports[port].cmode;
|
||||||
|
int err;
|
||||||
|
|
||||||
if (port != 0 && port != 9 && port != 10)
|
if (port != 0 && port != 9 && port != 10)
|
||||||
return -EOPNOTSUPP;
|
return -EOPNOTSUPP;
|
||||||
|
|
||||||
|
if (on) {
|
||||||
|
err = mv88e6393x_serdes_erratum_4_8(chip, lane);
|
||||||
|
if (err)
|
||||||
|
return err;
|
||||||
|
|
||||||
|
err = mv88e6393x_serdes_erratum_5_2(chip, lane, cmode);
|
||||||
|
if (err)
|
||||||
|
return err;
|
||||||
|
|
||||||
|
err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode,
|
||||||
|
true);
|
||||||
|
if (err)
|
||||||
|
return err;
|
||||||
|
|
||||||
|
err = mv88e6393x_serdes_power_lane(chip, lane, true);
|
||||||
|
if (err)
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
switch (cmode) {
|
switch (cmode) {
|
||||||
case MV88E6XXX_PORT_STS_CMODE_SGMII:
|
case MV88E6XXX_PORT_STS_CMODE_SGMII:
|
||||||
case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
|
case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
|
||||||
case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
|
case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
|
||||||
return mv88e6390_serdes_power_sgmii(chip, lane, on);
|
err = mv88e6390_serdes_power_sgmii(chip, lane, on);
|
||||||
|
break;
|
||||||
case MV88E6393X_PORT_STS_CMODE_5GBASER:
|
case MV88E6393X_PORT_STS_CMODE_5GBASER:
|
||||||
case MV88E6393X_PORT_STS_CMODE_10GBASER:
|
case MV88E6393X_PORT_STS_CMODE_10GBASER:
|
||||||
return mv88e6390_serdes_power_10g(chip, lane, on);
|
err = mv88e6390_serdes_power_10g(chip, lane, on);
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
if (err)
|
||||||
|
return err;
|
||||||
|
|
||||||
|
if (!on) {
|
||||||
|
err = mv88e6393x_serdes_power_lane(chip, lane, false);
|
||||||
|
if (err)
|
||||||
|
return err;
|
||||||
|
|
||||||
|
err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode,
|
||||||
|
false);
|
||||||
|
}
|
||||||
|
|
||||||
|
return err;
|
||||||
}
|
}
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff.