diff --git a/Documentation/virt/kvm/s390/s390-diag.rst b/Documentation/virt/kvm/s390/s390-diag.rst
index ca85f030eb0b..3e4f9e3bef81 100644
--- a/Documentation/virt/kvm/s390/s390-diag.rst
+++ b/Documentation/virt/kvm/s390/s390-diag.rst
@@ -35,20 +35,24 @@ DIAGNOSE function codes not specific to KVM, please refer to the
 documentation for the s390 hypervisors defining them.
 
 
-DIAGNOSE function code 'X'500' - KVM virtio functions
------------------------------------------------------
+DIAGNOSE function code 'X'500' - KVM functions
+----------------------------------------------
 
-If the function code specifies 0x500, various virtio-related functions
-are performed.
+If the function code specifies 0x500, various KVM-specific functions
+are performed, including virtio functions.
 
-General register 1 contains the virtio subfunction code. Supported
-virtio subfunctions depend on KVM's userspace. Generally, userspace
-provides either s390-virtio (subcodes 0-2) or virtio-ccw (subcode 3).
+General register 1 contains the subfunction code. Supported subfunctions
+depend on KVM's userspace. For virtio subfunctions, userspace generally
+provides either s390-virtio (subcodes 0-2) or virtio-ccw (subcode 3).
 
 Upon completion of the DIAGNOSE instruction, general register 2 contains
 the function's return code, which is either a return code or a subcode
 specific value.
 
+If the specified subfunction is not supported, a SPECIFICATION exception
+will be triggered.
+
 Subcode 0 - s390-virtio notification and early console printk
     Handled by userspace.
 
@@ -76,6 +80,23 @@ Subcode 3 - virtio-ccw notification
 
     See also the virtio standard for a discussion of this hypercall.
 
+Subcode 4 - storage-limit
+    Handled by userspace.
+
+    After completion of the DIAGNOSE call, general register 2 will
+    contain the storage limit: the maximum physical address that might be
+    used for storage throughout the lifetime of the VM.
+
+    The storage limit does not indicate currently usable storage; it may
+    include holes, standby storage and areas reserved for other purposes,
+    such as memory hotplug or virtio-mem devices. Other interfaces for
+    detecting actually usable storage, such as SCLP, must be used in
+    conjunction with this subfunction.
+
+    Note that the storage limit can be larger than, but never smaller
+    than, the maximum storage address indicated by SCLP via the "maximum
+    storage increment" and the "increment size".
+
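+    A minimal sketch (illustrative only, not part of the interface
+    definition) of how a guest might retrieve the storage limit; handling
+    of the specification exception raised by hypervisors that do not
+    support subcode 4 is omitted::
+
+        unsigned long storage_limit;
+
+        asm volatile(
+                "lghi 1,4\n"            /* subcode in general register 1 */
+                "lghi 2,0\n"
+                "diag 2,4,0x500\n"      /* storage limit in general register 2 */
+                "lgr  %[limit],2\n"
+                : [limit] "=d" (storage_limit)
+                :
+                : "cc", "memory", "1", "2");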
 
 DIAGNOSE function code 'X'501 - KVM breakpoint
 ----------------------------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index fa5e4f3ba76d..195a004e6ddf 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -20224,6 +20224,16 @@ L:	linux-s390@vger.kernel.org
 S:	Supported
 F:	drivers/s390/cio/
 
+S390 CRYPTO MODULES, PRNG DRIVER, ARCH RANDOM
+M:	Harald Freudenberger <freude@linux.ibm.com>
+M:	Holger Dengler <dengler@linux.ibm.com>
+L:	linux-crypto@vger.kernel.org
+L:	linux-s390@vger.kernel.org
+S:	Supported
+F:	arch/s390/crypto/
+F:	arch/s390/include/asm/archrandom.h
+F:	arch/s390/include/asm/cpacf.h
+
 S390 DASD DRIVER
 M:	Stefan Haberland <sth@linux.ibm.com>
 M:	Jan Hoeppner <hoeppner@linux.ibm.com>
@@ -20233,6 +20243,14 @@ F:	block/partitions/ibm.c
 F:	drivers/s390/block/dasd*
 F:	include/linux/dasd_mod.h
 
+S390 HWRANDOM TRNG DRIVER
+M:	Harald Freudenberger <freude@linux.ibm.com>
+M:	Holger Dengler <dengler@linux.ibm.com>
+L:	linux-crypto@vger.kernel.org
+L:	linux-s390@vger.kernel.org
+S:	Supported
+F:	drivers/char/hw_random/s390-trng.c
+
 S390 IOMMU (PCI)
 M:	Niklas Schnelle <schnelle@linux.ibm.com>
 M:	Matthew Rosato <mjrosato@linux.ibm.com>
@@ -20314,10 +20332,16 @@ F:	arch/s390/kvm/pci*
 F:	drivers/vfio/pci/vfio_pci_zdev.c
 F:	include/uapi/linux/vfio_zdev.h
 
-S390 ZCRYPT DRIVER
+S390 ZCRYPT AND PKEY DRIVER AND AP BUS
 M:	Harald Freudenberger <freude@linux.ibm.com>
+M:	Holger Dengler <dengler@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 S:	Supported
+F:	arch/s390/include/asm/ap.h
+F:	arch/s390/include/asm/pkey.h
+F:	arch/s390/include/asm/trace/zcrypt.h
+F:	arch/s390/include/uapi/asm/pkey.h
+F:	arch/s390/include/uapi/asm/zcrypt.h
 F:	drivers/s390/crypto/
 
 S390 ZFCP DRIVER
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index cc1f9cffe2a5..a45259d4c0a5 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -52,6 +52,13 @@ config KASAN_SHADOW_OFFSET
 	depends on KASAN
 	default 0x1C000000000000
 
+config GCC_ASM_FLAG_OUTPUT_BROKEN
+	def_bool CC_IS_GCC && GCC_VERSION < 140200
+	help
+	  GCC versions before 14.2.0 may die with an internal
+	  compiler error in some configurations if flag output
+	  operands are used within inline assemblies.
+
 config S390
 	def_bool y
 	#
@@ -224,6 +231,7 @@ config S390
 	select HAVE_VIRT_CPU_ACCOUNTING_IDLE
 	select IOMMU_HELPER		if PCI
 	select IOMMU_SUPPORT		if PCI
+	select LOCK_MM_AND_FIND_VMA
 	select MMU_GATHER_MERGE_VMAS
 	select MMU_GATHER_NO_GATHER
 	select MMU_GATHER_RCU_TABLE_FREE
diff --git a/arch/s390/boot/physmem_info.c b/arch/s390/boot/physmem_info.c
index 1d131a81cb8b..7617aa2d2f7e 100644
--- a/arch/s390/boot/physmem_info.c
+++ b/arch/s390/boot/physmem_info.c
@@ -9,6 +9,7 @@
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/sclp.h>
+#include <asm/asm.h>
 #include <asm/uv.h>
 #include "decompressor.h"
 #include "boot.h"
@@ -59,13 +60,13 @@ static int __diag260(unsigned long rx1, unsigned long rx2)
 {
 	unsigned long reg1, reg2, ry;
 	union register_pair rx;
+	int cc, exception;
 	psw_t old;
-	int rc;
 
 	rx.even = rx1;
 	rx.odd	= rx2;
 	ry = 0x10; /* storage configuration */
-	rc = -1;   /* fail */
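+	/*
+	 * "exception" is preset to 1 and cleared again by the "lhi" that
+	 * directly follows the diag instruction below; a program check
+	 * branches to label 1 and skips the clear, so a nonzero value
+	 * afterwards means the diag faulted.
+	 */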
+	exception = 1;
 	asm volatile(
 		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
 		"	epsw	%[reg1],%[reg2]\n"
@@ -74,20 +75,22 @@ static int __diag260(unsigned long rx1, unsigned long rx2)
 		"	larl	%[reg1],1f\n"
 		"	stg	%[reg1],8(%[psw_pgm])\n"
 		"	diag	%[rx],%[ry],0x260\n"
-		"	ipm	%[rc]\n"
-		"	srl	%[rc],28\n"
+		"	lhi	%[exc],0\n"
 		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
-		: [reg1] "=&d" (reg1),
+		CC_IPM(cc)
+		: CC_OUT(cc, cc),
+		  [exc] "+d" (exception),
+		  [reg1] "=&d" (reg1),
 		  [reg2] "=&a" (reg2),
-		  [rc] "+&d" (rc),
 		  [ry] "+&d" (ry),
 		  "+Q" (get_lowcore()->program_new_psw),
 		  "=Q" (old)
 		: [rx] "d" (rx.pair),
 		  [psw_old] "a" (&old),
 		  [psw_pgm] "a" (&get_lowcore()->program_new_psw)
-		: "cc", "memory");
-	return rc == 0 ? ry : -1;
+		: CC_CLOBBER_LIST("memory"));
+	cc = exception ? -1 : CC_TRANSFORM(cc);
+	return cc == 0 ? ry : -1;
 }
 
 static int diag260(void)
@@ -109,10 +112,12 @@ static int diag260(void)
 	return 0;
 }
 
-static int tprot(unsigned long addr)
+#define DIAG500_SC_STOR_LIMIT 4
+
+static int diag500_storage_limit(unsigned long *max_physmem_end)
 {
+	unsigned long storage_limit;
 	unsigned long reg1, reg2;
-	int rc = -EFAULT;
 	psw_t old;
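+
+	/*
+	 * General register 2 is zeroed before the diag; if the hypervisor
+	 * does not support subcode 4, the program check handler installed
+	 * below returns to label 1 with register 2 still zero, and the
+	 * zero storage limit is reported as -EINVAL.
+	 */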
 
 	asm volatile(
@@ -122,20 +127,57 @@ static int tprot(unsigned long addr)
 		"	st	%[reg2],4(%[psw_pgm])\n"
 		"	larl	%[reg1],1f\n"
 		"	stg	%[reg1],8(%[psw_pgm])\n"
-		"	tprot	0(%[addr]),0\n"
-		"	ipm	%[rc]\n"
-		"	srl	%[rc],28\n"
+		"	lghi	1,%[subcode]\n"
+		"	lghi	2,0\n"
+		"	diag	2,4,0x500\n"
 		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
+		"	lgr	%[slimit],2\n"
 		: [reg1] "=&d" (reg1),
 		  [reg2] "=&a" (reg2),
-		  [rc] "+&d" (rc),
+		  [slimit] "=d" (storage_limit),
+		  "=Q" (get_lowcore()->program_new_psw),
+		  "=Q" (old)
+		: [psw_old] "a" (&old),
+		  [psw_pgm] "a" (&get_lowcore()->program_new_psw),
+		  [subcode] "i" (DIAG500_SC_STOR_LIMIT)
+		: "memory", "1", "2");
+	if (!storage_limit)
+		return -EINVAL;
+	/* Convert inclusive end to exclusive end */
+	*max_physmem_end = storage_limit + 1;
+	return 0;
+}
+
+static int tprot(unsigned long addr)
+{
+	unsigned long reg1, reg2;
+	int cc, exception;
+	psw_t old;
+
+	exception = 1;
+	asm volatile(
+		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
+		"	epsw	%[reg1],%[reg2]\n"
+		"	st	%[reg1],0(%[psw_pgm])\n"
+		"	st	%[reg2],4(%[psw_pgm])\n"
+		"	larl	%[reg1],1f\n"
+		"	stg	%[reg1],8(%[psw_pgm])\n"
+		"	tprot	0(%[addr]),0\n"
+		"	lhi	%[exc],0\n"
+		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
+		CC_IPM(cc)
+		: CC_OUT(cc, cc),
+		  [exc] "+d" (exception),
+		  [reg1] "=&d" (reg1),
+		  [reg2] "=&a" (reg2),
 		  "=Q" (get_lowcore()->program_new_psw.addr),
 		  "=Q" (old)
 		: [psw_old] "a" (&old),
 		  [psw_pgm] "a" (&get_lowcore()->program_new_psw),
 		  [addr] "a" (addr)
-		: "cc", "memory");
-	return rc;
+		: CC_CLOBBER_LIST("memory"));
+	cc = exception ? -EFAULT : CC_TRANSFORM(cc);
+	return cc;
 }
 
 static unsigned long search_mem_end(void)
@@ -157,7 +199,9 @@ unsigned long detect_max_physmem_end(void)
 {
 	unsigned long max_physmem_end = 0;
 
-	if (!sclp_early_get_memsize(&max_physmem_end)) {
+	if (!diag500_storage_limit(&max_physmem_end)) {
+		physmem_info.info_source = MEM_DETECT_DIAG500_STOR_LIMIT;
+	} else if (!sclp_early_get_memsize(&max_physmem_end)) {
 		physmem_info.info_source = MEM_DETECT_SCLP_READ_INFO;
 	} else {
 		max_physmem_end = search_mem_end();
@@ -170,6 +214,13 @@ void detect_physmem_online_ranges(unsigned long max_physmem_end)
 {
 	if (!sclp_early_read_storage_info()) {
 		physmem_info.info_source = MEM_DETECT_SCLP_STOR_INFO;
+	} else if (physmem_info.info_source == MEM_DETECT_DIAG500_STOR_LIMIT) {
+		unsigned long online_end;
+
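+		/*
+		 * diag500 provided only an upper limit; query SCLP for the
+		 * memory that is actually online and register [0, online_end)
+		 * as the single online range.
+		 */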
+		if (!sclp_early_get_memsize(&online_end)) {
+			physmem_info.info_source = MEM_DETECT_SCLP_READ_INFO;
+			add_physmem_online_range(0, online_end);
+		}
 	} else if (!diag260()) {
 		physmem_info.info_source = MEM_DETECT_DIAG260;
 	} else if (max_physmem_end) {
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index c8f149ad77e5..abe6e6c0ab98 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -182,12 +182,15 @@ static void kaslr_adjust_got(unsigned long offset)
  * Merge information from several sources into a single ident_map_size value.
  * "ident_map_size" represents the upper limit of physical memory we may ever
  * reach. It might not be all online memory, but also include standby (offline)
- * memory. "ident_map_size" could be lower then actual standby or even online
+ * memory or memory areas reserved for other purposes (e.g., memory devices
+ * such as virtio-mem).
+ *
+ * "ident_map_size" could be lower than actual standby/reserved or even online
  * memory present, due to limiting factors. We should never go above this limit.
  * It is the size of our identity mapping.
  *
  * Consider the following factors:
- * 1. max_physmem_end - end of physical memory online or standby.
+ * 1. max_physmem_end - end of physical memory online, standby or reserved.
  *    Always >= end of the last online memory range (get_physmem_online_end()).
  * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
  *    kernel is able to support.
@@ -480,7 +483,7 @@ void startup_kernel(void)
 	 * __vmlinux_relocs_64_end as the lower range address. However,
 	 * .amode31 section is written to by the decompressed kernel - at
 	 * that time the contents of .vmlinux.relocs is not needed anymore.
-	 * Conversly, .vmlinux.relocs is read only by the decompressor, even
+	 * Conversely, .vmlinux.relocs is read only by the decompressor, even
 	 * before the kernel started. Therefore, in case the two sections
 	 * overlap there is no risk of corrupting any data.
 	 */
diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c
index 318e6ba95bfd..4568e8f81dac 100644
--- a/arch/s390/boot/uv.c
+++ b/arch/s390/boot/uv.c
@@ -22,8 +22,8 @@ void uv_query_info(void)
 	if (!test_facility(158))
 		return;
 
-	/* rc==0x100 means that there is additional data we do not process */
-	if (uv_call(0, (uint64_t)&uvcb) && uvcb.header.rc != 0x100)
+	/* Ignore that there might be more data we do not process */
+	if (uv_call(0, (uint64_t)&uvcb) && uvcb.header.rc != UVC_RC_MORE_DATA)
 		return;
 
 	if (IS_ENABLED(CONFIG_KVM)) {
@@ -46,7 +46,8 @@ void uv_query_info(void)
 		uv_info.supp_add_secret_req_ver = uvcb.supp_add_secret_req_ver;
 		uv_info.supp_add_secret_pcf = uvcb.supp_add_secret_pcf;
 		uv_info.supp_secret_types = uvcb.supp_secret_types;
-		uv_info.max_secrets = uvcb.max_secrets;
+		uv_info.max_assoc_secrets = uvcb.max_assoc_secrets;
+		uv_info.max_retr_secrets = uvcb.max_retr_secrets;
 	}
 
 	if (test_bit_inv(BIT_UVC_CMD_SET_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list) &&
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index fb0e9a1d9be2..d8d227ab82de 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -625,6 +625,7 @@ CONFIG_VFIO_PCI=m
 CONFIG_MLX5_VFIO_PCI=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
+CONFIG_VIRTIO_MEM=m
 CONFIG_VIRTIO_INPUT=y
 CONFIG_VHOST_NET=m
 CONFIG_VHOST_VSOCK=m
@@ -810,6 +811,7 @@ CONFIG_PKEY=m
 CONFIG_PKEY_CCA=m
 CONFIG_PKEY_EP11=m
 CONFIG_PKEY_PCKMO=m
+CONFIG_PKEY_UV=m
 CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_DEV_VIRTIO=m
 CONFIG_SYSTEM_BLACKLIST_KEYRING=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 88be0a734b60..6c2f2bb4fbf8 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -615,6 +615,7 @@ CONFIG_VFIO_PCI=m
 CONFIG_MLX5_VFIO_PCI=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
+CONFIG_VIRTIO_MEM=m
 CONFIG_VIRTIO_INPUT=y
 CONFIG_VHOST_NET=m
 CONFIG_VHOST_VSOCK=m
@@ -797,6 +798,7 @@ CONFIG_PKEY=m
 CONFIG_PKEY_CCA=m
 CONFIG_PKEY_EP11=m
 CONFIG_PKEY_PCKMO=m
+CONFIG_PKEY_UV=m
 CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_DEV_VIRTIO=m
 CONFIG_SYSTEM_BLACKLIST_KEYRING=y
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index ef4491ccbbf8..511093713a6f 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -34,14 +34,22 @@
  * is called. As paes can handle different kinds of key blobs
  * and padding is also possible, the limits need to be generous.
  */
-#define PAES_MIN_KEYSIZE 16
-#define PAES_MAX_KEYSIZE MAXEP11AESKEYBLOBSIZE
+#define PAES_MIN_KEYSIZE	16
+#define PAES_MAX_KEYSIZE	MAXEP11AESKEYBLOBSIZE
+#define PAES_256_PROTKEY_SIZE	(32 + 32)	/* key + verification pattern */
+#define PXTS_256_PROTKEY_SIZE	(32 + 32 + 32)	/* k1 + k2 + verification pattern */
 
 static u8 *ctrblk;
 static DEFINE_MUTEX(ctrblk_lock);
 
 static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
 
+struct paes_protkey {
+	u32 type;
+	u32 len;
+	u8 protkey[PXTS_256_PROTKEY_SIZE];
+};
+
 struct key_blob {
 	/*
 	 * Small keys will be stored in the keybuf. Larger keys are
@@ -55,31 +63,43 @@ struct key_blob {
 	unsigned int keylen;
 };
 
-static inline int _key_to_kb(struct key_blob *kb,
-			     const u8 *key,
-			     unsigned int keylen)
+/*
+ * make_clrkey_token() - wrap the raw key ck with pkey clearkey token
+ * information.
+ * @returns the size of the clearkey token
+ */
+static inline u32 make_clrkey_token(const u8 *ck, size_t cklen, u8 *dest)
 {
-	struct clearkey_header {
+	struct clrkey_token {
 		u8  type;
 		u8  res0[3];
 		u8  version;
 		u8  res1[3];
 		u32 keytype;
 		u32 len;
-	} __packed * h;
+		u8 key[];
+	} __packed *token = (struct clrkey_token *)dest;
 
+	token->type = 0x00;
+	token->version = 0x02;
+	token->keytype = (cklen - 8) >> 3;
+	token->len = cklen;
+	memcpy(token->key, ck, cklen);
+
+	return sizeof(*token) + cklen;
+}
+
+static inline int _key_to_kb(struct key_blob *kb,
+			     const u8 *key,
+			     unsigned int keylen)
+{
 	switch (keylen) {
 	case 16:
 	case 24:
 	case 32:
 		/* clear key value, prepare pkey clear key token in keybuf */
 		memset(kb->keybuf, 0, sizeof(kb->keybuf));
-		h = (struct clearkey_header *) kb->keybuf;
-		h->version = 0x02; /* TOKVER_CLEAR_KEY */
-		h->keytype = (keylen - 8) >> 3;
-		h->len = keylen;
-		memcpy(kb->keybuf + sizeof(*h), key, keylen);
-		kb->keylen = sizeof(*h) + keylen;
+		kb->keylen = make_clrkey_token(key, keylen, kb->keybuf);
 		kb->key = kb->keybuf;
 		break;
 	default:
@@ -99,6 +119,40 @@ static inline int _key_to_kb(struct key_blob *kb,
 	return 0;
 }
 
+static inline int _xts_key_to_kb(struct key_blob *kb,
+				 const u8 *key,
+				 unsigned int keylen)
+{
+	size_t cklen = keylen / 2;
+
+	memset(kb->keybuf, 0, sizeof(kb->keybuf));
+
+	switch (keylen) {
+	case 32:
+	case 64:
+		/* clear key value, prepare pkey clear key tokens in keybuf */
+		kb->key = kb->keybuf;
+		kb->keylen  = make_clrkey_token(key, cklen, kb->key);
+		kb->keylen += make_clrkey_token(key + cklen, cklen,
+						kb->key + kb->keylen);
+		break;
+	default:
+		/* other key material, let pkey handle this */
+		if (keylen <= sizeof(kb->keybuf)) {
+			kb->key = kb->keybuf;
+		} else {
+			kb->key = kmalloc(keylen, GFP_KERNEL);
+			if (!kb->key)
+				return -ENOMEM;
+		}
+		memcpy(kb->key, key, keylen);
+		kb->keylen = keylen;
+		break;
+	}
+
+	return 0;
+}
+
 static inline void _free_kb_keybuf(struct key_blob *kb)
 {
 	if (kb->key && kb->key != kb->keybuf
@@ -106,52 +160,53 @@ static inline void _free_kb_keybuf(struct key_blob *kb)
 		kfree_sensitive(kb->key);
 		kb->key = NULL;
 	}
+	memzero_explicit(kb->keybuf, sizeof(kb->keybuf));
 }
 
 struct s390_paes_ctx {
 	struct key_blob kb;
-	struct pkey_protkey pk;
+	struct paes_protkey pk;
 	spinlock_t pk_lock;
 	unsigned long fc;
 };
 
 struct s390_pxts_ctx {
-	struct key_blob kb[2];
-	struct pkey_protkey pk[2];
+	struct key_blob kb;
+	struct paes_protkey pk[2];
 	spinlock_t pk_lock;
 	unsigned long fc;
 };
 
-static inline int __paes_keyblob2pkey(struct key_blob *kb,
-				     struct pkey_protkey *pk)
+static inline int __paes_keyblob2pkey(const u8 *key, unsigned int keylen,
+				      struct paes_protkey *pk)
 {
-	int i, ret = -EIO;
+	int i, rc = -EIO;
 
 	/* try three times in case of busy card */
-	for (i = 0; ret && i < 3; i++) {
-		if (ret == -EBUSY && in_task()) {
+	for (i = 0; rc && i < 3; i++) {
+		if (rc == -EBUSY && in_task()) {
 			if (msleep_interruptible(1000))
 				return -EINTR;
 		}
-		ret = pkey_key2protkey(kb->key, kb->keylen,
-				       pk->protkey, &pk->len, &pk->type);
+		rc = pkey_key2protkey(key, keylen, pk->protkey, &pk->len,
+				      &pk->type);
 	}
 
-	return ret;
+	return rc;
 }
 
 static inline int __paes_convert_key(struct s390_paes_ctx *ctx)
 {
-	int ret;
-	struct pkey_protkey pkey;
+	struct paes_protkey pk;
+	int rc;
 
-	pkey.len = sizeof(pkey.protkey);
-	ret = __paes_keyblob2pkey(&ctx->kb, &pkey);
-	if (ret)
-		return ret;
+	pk.len = sizeof(pk.protkey);
+	rc = __paes_keyblob2pkey(ctx->kb.key, ctx->kb.keylen, &pk);
+	if (rc)
+		return rc;
 
 	spin_lock_bh(&ctx->pk_lock);
-	memcpy(&ctx->pk, &pkey, sizeof(pkey));
+	memcpy(&ctx->pk, &pk, sizeof(pk));
 	spin_unlock_bh(&ctx->pk_lock);
 
 	return 0;
@@ -176,8 +231,8 @@ static void ecb_paes_exit(struct crypto_skcipher *tfm)
 
 static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx)
 {
-	int rc;
 	unsigned long fc;
+	int rc;
 
 	rc = __paes_convert_key(ctx);
 	if (rc)
@@ -197,8 +252,8 @@ static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx)
 static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 			    unsigned int key_len)
 {
-	int rc;
 	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int rc;
 
 	_free_kb_keybuf(&ctx->kb);
 	rc = _key_to_kb(&ctx->kb, in_key, key_len);
@@ -212,19 +267,19 @@ static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct {
+		u8 key[PAES_256_PROTKEY_SIZE];
+	} param;
 	struct skcipher_walk walk;
 	unsigned int nbytes, n, k;
-	int ret;
-	struct {
-		u8 key[MAXPROTKEYSIZE];
-	} param;
+	int rc;
 
-	ret = skcipher_walk_virt(&walk, req, false);
-	if (ret)
-		return ret;
+	rc = skcipher_walk_virt(&walk, req, false);
+	if (rc)
+		return rc;
 
 	spin_lock_bh(&ctx->pk_lock);
-	memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+	memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
 	spin_unlock_bh(&ctx->pk_lock);
 
 	while ((nbytes = walk.nbytes) != 0) {
@@ -233,16 +288,16 @@ static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
 		k = cpacf_km(ctx->fc | modifier, &param,
 			     walk.dst.virt.addr, walk.src.virt.addr, n);
 		if (k)
-			ret = skcipher_walk_done(&walk, nbytes - k);
+			rc = skcipher_walk_done(&walk, nbytes - k);
 		if (k < n) {
 			if (__paes_convert_key(ctx))
 				return skcipher_walk_done(&walk, -EIO);
 			spin_lock_bh(&ctx->pk_lock);
-			memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+			memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
 			spin_unlock_bh(&ctx->pk_lock);
 		}
 	}
-	return ret;
+	return rc;
 }
 
 static int ecb_paes_encrypt(struct skcipher_request *req)
@@ -291,8 +346,8 @@ static void cbc_paes_exit(struct crypto_skcipher *tfm)
 
 static inline int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
 {
-	int rc;
 	unsigned long fc;
+	int rc;
 
 	rc = __paes_convert_key(ctx);
 	if (rc)
@@ -312,8 +367,8 @@ static inline int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
 static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 			    unsigned int key_len)
 {
-	int rc;
 	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int rc;
 
 	_free_kb_keybuf(&ctx->kb);
 	rc = _key_to_kb(&ctx->kb, in_key, key_len);
@@ -327,21 +382,21 @@ static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct skcipher_walk walk;
-	unsigned int nbytes, n, k;
-	int ret;
 	struct {
 		u8 iv[AES_BLOCK_SIZE];
-		u8 key[MAXPROTKEYSIZE];
+		u8 key[PAES_256_PROTKEY_SIZE];
 	} param;
+	struct skcipher_walk walk;
+	unsigned int nbytes, n, k;
+	int rc;
 
-	ret = skcipher_walk_virt(&walk, req, false);
-	if (ret)
-		return ret;
+	rc = skcipher_walk_virt(&walk, req, false);
+	if (rc)
+		return rc;
 
 	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
 	spin_lock_bh(&ctx->pk_lock);
-	memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+	memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
 	spin_unlock_bh(&ctx->pk_lock);
 
 	while ((nbytes = walk.nbytes) != 0) {
@@ -351,17 +406,17 @@ static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
 			      walk.dst.virt.addr, walk.src.virt.addr, n);
 		if (k) {
 			memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
-			ret = skcipher_walk_done(&walk, nbytes - k);
+			rc = skcipher_walk_done(&walk, nbytes - k);
 		}
 		if (k < n) {
 			if (__paes_convert_key(ctx))
 				return skcipher_walk_done(&walk, -EIO);
 			spin_lock_bh(&ctx->pk_lock);
-			memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+			memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
 			spin_unlock_bh(&ctx->pk_lock);
 		}
 	}
-	return ret;
+	return rc;
 }
 
 static int cbc_paes_encrypt(struct skcipher_request *req)
@@ -396,8 +451,7 @@ static int xts_paes_init(struct crypto_skcipher *tfm)
 {
 	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	ctx->kb[0].key = NULL;
-	ctx->kb[1].key = NULL;
+	ctx->kb.key = NULL;
 	spin_lock_init(&ctx->pk_lock);
 
 	return 0;
@@ -407,24 +461,51 @@ static void xts_paes_exit(struct crypto_skcipher *tfm)
 {
 	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	_free_kb_keybuf(&ctx->kb[0]);
-	_free_kb_keybuf(&ctx->kb[1]);
+	_free_kb_keybuf(&ctx->kb);
 }
 
 static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx)
 {
-	struct pkey_protkey pkey0, pkey1;
+	struct paes_protkey pk0, pk1;
+	size_t split_keylen;
+	int rc;
 
-	pkey0.len = sizeof(pkey0.protkey);
-	pkey1.len = sizeof(pkey1.protkey);
+	pk0.len = sizeof(pk0.protkey);
+	pk1.len = sizeof(pk1.protkey);
 
-	if (__paes_keyblob2pkey(&ctx->kb[0], &pkey0) ||
-	    __paes_keyblob2pkey(&ctx->kb[1], &pkey1))
+	rc = __paes_keyblob2pkey(ctx->kb.key, ctx->kb.keylen, &pk0);
+	if (rc)
+		return rc;
+
+	switch (pk0.type) {
+	case PKEY_KEYTYPE_AES_128:
+	case PKEY_KEYTYPE_AES_256:
+		/* second keytoken required */
+		if (ctx->kb.keylen % 2)
+			return -EINVAL;
+		split_keylen = ctx->kb.keylen / 2;
+
+		rc = __paes_keyblob2pkey(ctx->kb.key + split_keylen,
+					 split_keylen, &pk1);
+		if (rc)
+			return rc;
+
+		if (pk0.type != pk1.type)
+			return -EINVAL;
+		break;
+	case PKEY_KEYTYPE_AES_XTS_128:
+	case PKEY_KEYTYPE_AES_XTS_256:
+		/* single key */
+		pk1.type = 0;
+		break;
+	default:
+		/* unsupported protected keytype */
 		return -EINVAL;
+	}
 
 	spin_lock_bh(&ctx->pk_lock);
-	memcpy(&ctx->pk[0], &pkey0, sizeof(pkey0));
-	memcpy(&ctx->pk[1], &pkey1, sizeof(pkey1));
+	ctx->pk[0] = pk0;
+	ctx->pk[1] = pk1;
 	spin_unlock_bh(&ctx->pk_lock);
 
 	return 0;
@@ -433,17 +514,30 @@ static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx)
 static inline int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
 {
 	unsigned long fc;
+	int rc;
 
-	if (__xts_paes_convert_key(ctx))
-		return -EINVAL;
-
-	if (ctx->pk[0].type != ctx->pk[1].type)
-		return -EINVAL;
+	rc = __xts_paes_convert_key(ctx);
+	if (rc)
+		return rc;
 
 	/* Pick the correct function code based on the protected key type */
-	fc = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PXTS_128 :
-		(ctx->pk[0].type == PKEY_KEYTYPE_AES_256) ?
-		CPACF_KM_PXTS_256 : 0;
+	switch (ctx->pk[0].type) {
+	case PKEY_KEYTYPE_AES_128:
+		fc = CPACF_KM_PXTS_128;
+		break;
+	case PKEY_KEYTYPE_AES_256:
+		fc = CPACF_KM_PXTS_256;
+		break;
+	case PKEY_KEYTYPE_AES_XTS_128:
+		fc = CPACF_KM_PXTS_128_FULL;
+		break;
+	case PKEY_KEYTYPE_AES_XTS_256:
+		fc = CPACF_KM_PXTS_256_FULL;
+		break;
+	default:
+		fc = 0;
+		break;
+	}
 
 	/* Check if the function code is available */
 	ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
@@ -452,24 +546,19 @@ static inline int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
 }
 
 static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
-			    unsigned int xts_key_len)
+			    unsigned int in_keylen)
 {
-	int rc;
 	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	u8 ckey[2 * AES_MAX_KEY_SIZE];
-	unsigned int ckey_len, key_len;
+	unsigned int ckey_len;
+	int rc;
 
-	if (xts_key_len % 2)
+	if ((in_keylen == 32 || in_keylen == 64) &&
+	    xts_verify_key(tfm, in_key, in_keylen))
 		return -EINVAL;
 
-	key_len = xts_key_len / 2;
-
-	_free_kb_keybuf(&ctx->kb[0]);
-	_free_kb_keybuf(&ctx->kb[1]);
-	rc = _key_to_kb(&ctx->kb[0], in_key, key_len);
-	if (rc)
-		return rc;
-	rc = _key_to_kb(&ctx->kb[1], in_key + key_len, key_len);
+	_free_kb_keybuf(&ctx->kb);
+	rc = _xts_key_to_kb(&ctx->kb, in_key, in_keylen);
 	if (rc)
 		return rc;
 
@@ -477,6 +566,13 @@ static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 	if (rc)
 		return rc;
 
+	/*
+	 * With a single protected key (e.g. full AES-XTS) it is not possible
+	 * to check whether k1 and k2 are the same.
+	 */
+	if (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128 ||
+	    ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_256)
+		return 0;
 	/*
 	 * xts_verify_key verifies the key length is not odd and makes
 	 * sure that the two keys are not the same. This can be done
@@ -489,28 +585,82 @@ static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 	return xts_verify_key(tfm, ckey, 2*ckey_len);
 }
 
-static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
+static int paes_xts_crypt_full(struct skcipher_request *req,
+			       unsigned long modifier)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct skcipher_walk walk;
 	unsigned int keylen, offset, nbytes, n, k;
-	int ret;
 	struct {
-		u8 key[MAXPROTKEYSIZE];	/* key + verification pattern */
+		u8 key[64];
+		u8 tweak[16];
+		u8 nap[16];
+		u8 wkvp[32];
+	} fxts_param = {
+		.nap = {0},
+	};
+	struct skcipher_walk walk;
+	int rc;
+
+	rc = skcipher_walk_virt(&walk, req, false);
+	if (rc)
+		return rc;
+
+	keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128) ? 32 : 64;
+	offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128) ? 32 : 0;
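+	/*
+	 * The protected key is stored right-aligned within the 64-byte key
+	 * field, so that fxts_param.key + offset always yields a contiguous
+	 * parameter block (key, tweak, nap, wkvp) of the size the full-XTS
+	 * function code expects.
+	 */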
+
+	spin_lock_bh(&ctx->pk_lock);
+	memcpy(fxts_param.key + offset, ctx->pk[0].protkey, keylen);
+	memcpy(fxts_param.wkvp, ctx->pk[0].protkey + keylen,
+	       sizeof(fxts_param.wkvp));
+	spin_unlock_bh(&ctx->pk_lock);
+	memcpy(fxts_param.tweak, walk.iv, sizeof(fxts_param.tweak));
+	fxts_param.nap[0] = 0x01; /* initial alpha power (1, little-endian) */
+
+	while ((nbytes = walk.nbytes) != 0) {
+		/* only use complete blocks */
+		n = nbytes & ~(AES_BLOCK_SIZE - 1);
+		k = cpacf_km(ctx->fc | modifier, fxts_param.key + offset,
+			     walk.dst.virt.addr, walk.src.virt.addr, n);
+		if (k)
+			rc = skcipher_walk_done(&walk, nbytes - k);
+		if (k < n) {
+			if (__xts_paes_convert_key(ctx))
+				return skcipher_walk_done(&walk, -EIO);
+			spin_lock_bh(&ctx->pk_lock);
+			memcpy(fxts_param.key + offset, ctx->pk[0].protkey,
+			       keylen);
+			memcpy(fxts_param.wkvp, ctx->pk[0].protkey + keylen,
+			       sizeof(fxts_param.wkvp));
+			spin_unlock_bh(&ctx->pk_lock);
+		}
+	}
+
+	return rc;
+}
+
+static int paes_xts_crypt(struct skcipher_request *req, unsigned long modifier)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
+	unsigned int keylen, offset, nbytes, n, k;
+	struct {
+		u8 key[PAES_256_PROTKEY_SIZE];
 		u8 tweak[16];
 		u8 block[16];
 		u8 bit[16];
 		u8 xts[16];
 	} pcc_param;
 	struct {
-		u8 key[MAXPROTKEYSIZE];	/* key + verification pattern */
+		u8 key[PAES_256_PROTKEY_SIZE];
 		u8 init[16];
 	} xts_param;
+	struct skcipher_walk walk;
+	int rc;
 
-	ret = skcipher_walk_virt(&walk, req, false);
-	if (ret)
-		return ret;
+	rc = skcipher_walk_virt(&walk, req, false);
+	if (rc)
+		return rc;
 
 	keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
 	offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;
@@ -530,7 +680,7 @@ static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
 		k = cpacf_km(ctx->fc | modifier, xts_param.key + offset,
 			     walk.dst.virt.addr, walk.src.virt.addr, n);
 		if (k)
-			ret = skcipher_walk_done(&walk, nbytes - k);
+			rc = skcipher_walk_done(&walk, nbytes - k);
 		if (k < n) {
 			if (__xts_paes_convert_key(ctx))
 				return skcipher_walk_done(&walk, -EIO);
@@ -541,7 +691,24 @@ static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
 		}
 	}
 
-	return ret;
+	return rc;
+}
+
+static inline int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	switch (ctx->fc) {
+	case CPACF_KM_PXTS_128:
+	case CPACF_KM_PXTS_256:
+		return paes_xts_crypt(req, modifier);
+	case CPACF_KM_PXTS_128_FULL:
+	case CPACF_KM_PXTS_256_FULL:
+		return paes_xts_crypt_full(req, modifier);
+	default:
+		return -EINVAL;
+	}
 }
 
 static int xts_paes_encrypt(struct skcipher_request *req)
@@ -591,8 +758,8 @@ static void ctr_paes_exit(struct crypto_skcipher *tfm)
 
 static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
 {
-	int rc;
 	unsigned long fc;
+	int rc;
 
 	rc = __paes_convert_key(ctx);
 	if (rc)
@@ -613,8 +780,8 @@ static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
 static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 			    unsigned int key_len)
 {
-	int rc;
 	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int rc;
 
 	_free_kb_keybuf(&ctx->kb);
 	rc = _key_to_kb(&ctx->kb, in_key, key_len);
@@ -644,19 +811,19 @@ static int ctr_paes_crypt(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
 	u8 buf[AES_BLOCK_SIZE], *ctrptr;
+	struct {
+		u8 key[PAES_256_PROTKEY_SIZE];
+	} param;
 	struct skcipher_walk walk;
 	unsigned int nbytes, n, k;
-	int ret, locked;
-	struct {
-		u8 key[MAXPROTKEYSIZE];
-	} param;
+	int rc, locked;
 
-	ret = skcipher_walk_virt(&walk, req, false);
-	if (ret)
-		return ret;
+	rc = skcipher_walk_virt(&walk, req, false);
+	if (rc)
+		return rc;
 
 	spin_lock_bh(&ctx->pk_lock);
-	memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+	memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
 	spin_unlock_bh(&ctx->pk_lock);
 
 	locked = mutex_trylock(&ctrblk_lock);
@@ -673,7 +840,7 @@ static int ctr_paes_crypt(struct skcipher_request *req)
 				memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE,
 				       AES_BLOCK_SIZE);
 			crypto_inc(walk.iv, AES_BLOCK_SIZE);
-			ret = skcipher_walk_done(&walk, nbytes - k);
+			rc = skcipher_walk_done(&walk, nbytes - k);
 		}
 		if (k < n) {
 			if (__paes_convert_key(ctx)) {
@@ -682,7 +849,7 @@ static int ctr_paes_crypt(struct skcipher_request *req)
 				return skcipher_walk_done(&walk, -EIO);
 			}
 			spin_lock_bh(&ctx->pk_lock);
-			memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+			memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
 			spin_unlock_bh(&ctx->pk_lock);
 		}
 	}
@@ -702,15 +869,15 @@ static int ctr_paes_crypt(struct skcipher_request *req)
 			if (__paes_convert_key(ctx))
 				return skcipher_walk_done(&walk, -EIO);
 			spin_lock_bh(&ctx->pk_lock);
-			memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+			memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
 			spin_unlock_bh(&ctx->pk_lock);
 		}
 		memcpy(walk.dst.virt.addr, buf, nbytes);
 		crypto_inc(walk.iv, AES_BLOCK_SIZE);
-		ret = skcipher_walk_done(&walk, nbytes);
+		rc = skcipher_walk_done(&walk, nbytes);
 	}
 
-	return ret;
+	return rc;
 }
 
 static struct skcipher_alg ctr_paes_alg = {
@@ -750,7 +917,7 @@ static void paes_s390_fini(void)
 
 static int __init paes_s390_init(void)
 {
-	int ret;
+	int rc;
 
 	/* Query available functions for KM, KMC and KMCTR */
 	cpacf_query(CPACF_KM, &km_functions);
@@ -760,23 +927,23 @@ static int __init paes_s390_init(void)
 	if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) ||
 	    cpacf_test_func(&km_functions, CPACF_KM_PAES_192) ||
 	    cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) {
-		ret = crypto_register_skcipher(&ecb_paes_alg);
-		if (ret)
+		rc = crypto_register_skcipher(&ecb_paes_alg);
+		if (rc)
 			goto out_err;
 	}
 
 	if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
 	    cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
 	    cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) {
-		ret = crypto_register_skcipher(&cbc_paes_alg);
-		if (ret)
+		rc = crypto_register_skcipher(&cbc_paes_alg);
+		if (rc)
 			goto out_err;
 	}
 
 	if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) ||
 	    cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) {
-		ret = crypto_register_skcipher(&xts_paes_alg);
-		if (ret)
+		rc = crypto_register_skcipher(&xts_paes_alg);
+		if (rc)
 			goto out_err;
 	}
 
@@ -785,18 +952,18 @@ static int __init paes_s390_init(void)
 	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
 		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
 		if (!ctrblk) {
-			ret = -ENOMEM;
+			rc = -ENOMEM;
 			goto out_err;
 		}
-		ret = crypto_register_skcipher(&ctr_paes_alg);
-		if (ret)
+		rc = crypto_register_skcipher(&ctr_paes_alg);
+		if (rc)
 			goto out_err;
 	}
 
 	return 0;
 out_err:
 	paes_s390_fini();
-	return ret;
+	return rc;
 }
 
 module_init(paes_s390_init);
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index a077087bc6cc..2becd77df741 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -679,7 +679,7 @@ static ssize_t prng_chunksize_show(struct device *dev,
 				   struct device_attribute *attr,
 				   char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%u\n", prng_chunk_size);
+	return sysfs_emit(buf, "%u\n", prng_chunk_size);
 }
 static DEVICE_ATTR(chunksize, 0444, prng_chunksize_show, NULL);
 
@@ -698,7 +698,7 @@ static ssize_t prng_counter_show(struct device *dev,
 		counter = prng_data->prngws.byte_counter;
 	mutex_unlock(&prng_data->mutex);
 
-	return scnprintf(buf, PAGE_SIZE, "%llu\n", counter);
+	return sysfs_emit(buf, "%llu\n", counter);
 }
 static DEVICE_ATTR(byte_counter, 0444, prng_counter_show, NULL);
 
@@ -707,7 +707,7 @@ static ssize_t prng_errorflag_show(struct device *dev,
 				   struct device_attribute *attr,
 				   char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%d\n", prng_errorflag);
+	return sysfs_emit(buf, "%d\n", prng_errorflag);
 }
 static DEVICE_ATTR(errorflag, 0444, prng_errorflag_show, NULL);
 
@@ -717,9 +717,9 @@ static ssize_t prng_mode_show(struct device *dev,
 			      char *buf)
 {
 	if (prng_mode == PRNG_MODE_TDES)
-		return scnprintf(buf, PAGE_SIZE, "TDES\n");
+		return sysfs_emit(buf, "TDES\n");
 	else
-		return scnprintf(buf, PAGE_SIZE, "SHA512\n");
+		return sysfs_emit(buf, "SHA512\n");
 }
 static DEVICE_ATTR(mode, 0444, prng_mode_show, NULL);
 
@@ -742,7 +742,7 @@ static ssize_t prng_reseed_limit_show(struct device *dev,
 				      struct device_attribute *attr,
 				      char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%u\n", prng_reseed_limit);
+	return sysfs_emit(buf, "%u\n", prng_reseed_limit);
 }
 static ssize_t prng_reseed_limit_store(struct device *dev,
 				       struct device_attribute *attr,
@@ -773,7 +773,7 @@ static ssize_t prng_strength_show(struct device *dev,
 				  struct device_attribute *attr,
 				  char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "256\n");
+	return sysfs_emit(buf, "256\n");
 }
 static DEVICE_ATTR(strength, 0444, prng_strength_show, NULL);
 
diff --git a/arch/s390/include/asm/asm.h b/arch/s390/include/asm/asm.h
new file mode 100644
index 000000000000..ec011b94af2a
--- /dev/null
+++ b/arch/s390/include/asm/asm.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_ASM_H
+#define _ASM_S390_ASM_H
+
+#include <linux/stringify.h>
+
+/*
+ * Helper macros to be used for flag output operand handling.
+ * Inline assemblies must use four of the five supplied macros:
+ *
+ * Use CC_IPM(sym) at the end of the inline assembly; this extracts the
+ * condition code and program mask with the ipm instruction and writes them to
+ * the variable with symbolic name [sym] if the compiler has no support for
+ * flag output operands. If the compiler has support for flag output operands,
+ * this generates no code.
+ *
+ * Use CC_OUT(sym, var) in the output operand list of an inline assembly. This
+ * defines an output operand with symbolic name [sym] for the variable
+ * [var]. [var] must be an int variable and [sym] must be identical to the
+ * [sym] used with CC_IPM().
+ *
+ * Use either CC_CLOBBER or CC_CLOBBER_LIST() for the clobber list. Use
+ * CC_CLOBBER if the clobber list contains only "cc", otherwise use
+ * CC_CLOBBER_LIST() and add all clobbers as argument to the macro.
+ *
+ * Use CC_TRANSFORM() to convert the variable [var] which contains the
+ * extracted condition code. If the condition code is extracted with ipm, the
+ * [var] also contains the program mask. CC_TRANSFORM() moves the condition
+ * code to the two least significant bits and sets all other bits to zero.
+ */
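+
+/*
+ * A minimal usage sketch (illustrative only; "foo" is a made-up example,
+ * following the pattern of the inline assemblies converted in this patch):
+ *
+ *	static inline int foo(u64 ctl)
+ *	{
+ *		int cc;
+ *
+ *		asm volatile(
+ *			"	lcctl	%[ctl]\n"
+ *			CC_IPM(cc)
+ *			: CC_OUT(cc, cc)
+ *			: [ctl] "Q" (ctl)
+ *			: CC_CLOBBER);
+ *		return CC_TRANSFORM(cc);
+ *	}
+ */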
+#if defined(__GCC_ASM_FLAG_OUTPUTS__) && !(IS_ENABLED(CONFIG_GCC_ASM_FLAG_OUTPUT_BROKEN))
+
+#define __HAVE_ASM_FLAG_OUTPUTS__
+
+#define CC_IPM(sym)
+#define CC_OUT(sym, var)	"=@cc" (var)
+#define CC_TRANSFORM(cc)	({ cc; })
+#define CC_CLOBBER
+#define CC_CLOBBER_LIST(...)	__VA_ARGS__
+
+#else
+
+#define CC_IPM(sym)		"	ipm	%[" __stringify(sym) "]\n"
+#define CC_OUT(sym, var)	[sym] "=d" (var)
+#define CC_TRANSFORM(cc)	({ (cc) >> 28; })
+#define CC_CLOBBER		"cc"
+#define CC_CLOBBER_LIST(...)	"cc", __VA_ARGS__
+
+#endif
+
+#endif /* _ASM_S390_ASM_H */
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 0c4cad7d5a5b..6723fca64018 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -72,14 +72,24 @@ ATOMIC_OPS(xor)
 #define arch_atomic_fetch_or		arch_atomic_fetch_or
 #define arch_atomic_fetch_xor		arch_atomic_fetch_xor
 
-#define arch_atomic_xchg(v, new)	(arch_xchg(&((v)->counter), new))
+static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
+{
+	return arch_xchg(&v->counter, new);
+}
+#define arch_atomic_xchg arch_atomic_xchg
 
 static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
-	return __atomic_cmpxchg(&v->counter, old, new);
+	return arch_cmpxchg(&v->counter, old, new);
 }
 #define arch_atomic_cmpxchg arch_atomic_cmpxchg
 
+static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+	return arch_try_cmpxchg(&v->counter, old, new);
+}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
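+
+/*
+ * Typical retry-loop usage (illustrative only): on failure *old is
+ * updated to the current counter value, so the new value is recomputed
+ * on every iteration:
+ *
+ *	int old = arch_atomic_read(v);
+ *
+ *	do {
+ *	} while (!arch_atomic_try_cmpxchg(v, &old, old + 1));
+ */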
+
 #define ATOMIC64_INIT(i)  { (i) }
 
 static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
@@ -112,14 +122,24 @@ static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
 }
 #define arch_atomic64_add arch_atomic64_add
 
-#define arch_atomic64_xchg(v, new)	(arch_xchg(&((v)->counter), new))
+static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
+{
+	return arch_xchg(&v->counter, new);
+}
+#define arch_atomic64_xchg arch_atomic64_xchg
 
 static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 {
-	return __atomic64_cmpxchg((long *)&v->counter, old, new);
+	return arch_cmpxchg(&v->counter, old, new);
 }
 #define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
 
+static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+	return arch_try_cmpxchg(&v->counter, old, new);
+}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
+
 #define ATOMIC64_OPS(op)							\
 static __always_inline void arch_atomic64_##op(s64 i, atomic64_t *v)		\
 {										\
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
index 65380da9e75f..1d6b2056fad8 100644
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -169,79 +169,4 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
 
 #endif /* MARCH_HAS_Z196_FEATURES */
 
-static __always_inline int __atomic_cmpxchg(int *ptr, int old, int new)
-{
-	asm volatile(
-		"	cs	%[old],%[new],%[ptr]"
-		: [old] "+d" (old), [ptr] "+Q" (*ptr)
-		: [new] "d" (new)
-		: "cc", "memory");
-	return old;
-}
-
-static __always_inline long __atomic64_cmpxchg(long *ptr, long old, long new)
-{
-	asm volatile(
-		"	csg	%[old],%[new],%[ptr]"
-		: [old] "+d" (old), [ptr] "+QS" (*ptr)
-		: [new] "d" (new)
-		: "cc", "memory");
-	return old;
-}
-
-/* GCC versions before 14.2.0 may die with an ICE in some configurations. */
-#if defined(__GCC_ASM_FLAG_OUTPUTS__) && !(IS_ENABLED(CONFIG_CC_IS_GCC) && (GCC_VERSION < 140200))
-
-static __always_inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
-{
-	int cc;
-
-	asm volatile(
-		"	cs	%[old],%[new],%[ptr]"
-		: [old] "+d" (old), [ptr] "+Q" (*ptr), "=@cc" (cc)
-		: [new] "d" (new)
-		: "memory");
-	return cc == 0;
-}
-
-static __always_inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
-{
-	int cc;
-
-	asm volatile(
-		"	csg	%[old],%[new],%[ptr]"
-		: [old] "+d" (old), [ptr] "+QS" (*ptr), "=@cc" (cc)
-		: [new] "d" (new)
-		: "memory");
-	return cc == 0;
-}
-
-#else /* __GCC_ASM_FLAG_OUTPUTS__ */
-
-static __always_inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
-{
-	int old_expected = old;
-
-	asm volatile(
-		"	cs	%[old],%[new],%[ptr]"
-		: [old] "+d" (old), [ptr] "+Q" (*ptr)
-		: [new] "d" (new)
-		: "cc", "memory");
-	return old == old_expected;
-}
-
-static __always_inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
-{
-	long old_expected = old;
-
-	asm volatile(
-		"	csg	%[old],%[new],%[ptr]"
-		: [old] "+d" (old), [ptr] "+QS" (*ptr)
-		: [new] "d" (new)
-		: "cc", "memory");
-	return old == old_expected;
-}
-
-#endif /* __GCC_ASM_FLAG_OUTPUTS__ */
-
 #endif /* __ARCH_S390_ATOMIC_OPS__  */
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index aae0315374de..a9e2006033b7 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -11,185 +11,231 @@
 #include <linux/mmdebug.h>
 #include <linux/types.h>
 #include <linux/bug.h>
+#include <asm/asm.h>
+
+void __cmpxchg_called_with_bad_pointer(void);
+
+static __always_inline u32 __cs_asm(u64 ptr, u32 old, u32 new)
+{
+	asm volatile(
+		"	cs	%[old],%[new],%[ptr]\n"
+		: [old] "+d" (old), [ptr] "+Q" (*(u32 *)ptr)
+		: [new] "d" (new)
+		: "memory", "cc");
+	return old;
+}
+
+static __always_inline u64 __csg_asm(u64 ptr, u64 old, u64 new)
+{
+	asm volatile(
+		"	csg	%[old],%[new],%[ptr]\n"
+		: [old] "+d" (old), [ptr] "+QS" (*(u64 *)ptr)
+		: [new] "d" (new)
+		: "memory", "cc");
+	return old;
+}
+
+static inline u8 __arch_cmpxchg1(u64 ptr, u8 old, u8 new)
+{
+	union {
+		u8 b[4];
+		u32 w;
+	} old32, new32;
+	u32 prev;
+	int i;
+
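+	/*
+	 * There is no byte-sized compare-and-swap instruction; emulate it
+	 * with a 32-bit CS on the aligned word that contains the byte. The
+	 * loop retries if one of the other three bytes changed concurrently.
+	 * __arch_cmpxchg2() below uses the same technique for halfwords.
+	 */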
+	i = ptr & 3;
+	ptr &= ~0x3;
+	prev = READ_ONCE(*(u32 *)ptr);
+	do {
+		old32.w = prev;
+		if (old32.b[i] != old)
+			return old32.b[i];
+		new32.w = old32.w;
+		new32.b[i] = new;
+		prev = __cs_asm(ptr, old32.w, new32.w);
+	} while (prev != old32.w);
+	return old;
+}
+
+static inline u16 __arch_cmpxchg2(u64 ptr, u16 old, u16 new)
+{
+	union {
+		u16 b[2];
+		u32 w;
+	} old32, new32;
+	u32 prev;
+	int i;
+
+	i = (ptr & 3) >> 1;
+	ptr &= ~0x3;
+	prev = READ_ONCE(*(u32 *)ptr);
+	do {
+		old32.w = prev;
+		if (old32.b[i] != old)
+			return old32.b[i];
+		new32.w = old32.w;
+		new32.b[i] = new;
+		prev = __cs_asm(ptr, old32.w, new32.w);
+	} while (prev != old32.w);
+	return old;
+}
+
+static __always_inline u64 __arch_cmpxchg(u64 ptr, u64 old, u64 new, int size)
+{
+	switch (size) {
+	case 1:	 return __arch_cmpxchg1(ptr, old & 0xff, new & 0xff);
+	case 2:  return __arch_cmpxchg2(ptr, old & 0xffff, new & 0xffff);
+	case 4:  return __cs_asm(ptr, old & 0xffffffff, new & 0xffffffff);
+	case 8:  return __csg_asm(ptr, old, new);
+	default: __cmpxchg_called_with_bad_pointer();
+	}
+	return old;
+}
+
+#define arch_cmpxchg(ptr, o, n)						\
+({									\
+	(__typeof__(*(ptr)))__arch_cmpxchg((unsigned long)(ptr),	\
+					   (unsigned long)(o),		\
+					   (unsigned long)(n),		\
+					   sizeof(*(ptr)));		\
+})
+
+#define arch_cmpxchg64		arch_cmpxchg
+#define arch_cmpxchg_local	arch_cmpxchg
+#define arch_cmpxchg64_local	arch_cmpxchg
+
+#ifdef __HAVE_ASM_FLAG_OUTPUTS__
+
+#define arch_try_cmpxchg(ptr, oldp, new)				\
+({									\
+	__typeof__(ptr) __oldp = (__typeof__(ptr))(oldp);		\
+	__typeof__(*(ptr)) __old = *__oldp;				\
+	__typeof__(*(ptr)) __new = (new);				\
+	__typeof__(*(ptr)) __prev;					\
+	int __cc;							\
+									\
+	switch (sizeof(*(ptr))) {					\
+	case 1:								\
+	case 2: {							\
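+		/* no 1/2-byte CS instruction; emulate via arch_cmpxchg() */ \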
+		__prev = arch_cmpxchg((ptr), (__old), (__new));		\
+		__cc = (__prev != __old);				\
+		if (unlikely(__cc))					\
+			*__oldp = __prev;				\
+		break;							\
+	}								\
+	case 4:	{							\
+		asm volatile(						\
+			"	cs	%[__old],%[__new],%[__ptr]\n"	\
+			: [__old] "+d" (*__oldp),			\
+			  [__ptr] "+Q" (*(ptr)),			\
+			  "=@cc" (__cc)					\
+			: [__new] "d" (__new)				\
+			: "memory");					\
+		break;							\
+	}								\
+	case 8:	{							\
+		asm volatile(						\
+			"	csg	%[__old],%[__new],%[__ptr]\n"	\
+			: [__old] "+d" (*__oldp),			\
+			  [__ptr] "+QS" (*(ptr)),			\
+			  "=@cc" (__cc)					\
+			: [__new] "d" (__new)				\
+			: "memory");					\
+		break;							\
+	}								\
+	default:							\
+		__cmpxchg_called_with_bad_pointer();			\
+	}								\
+	likely(__cc == 0);						\
+})
+
+#else /* __HAVE_ASM_FLAG_OUTPUTS__ */
+
+#define arch_try_cmpxchg(ptr, oldp, new)				\
+({									\
+	__typeof__(ptr) __oldp = (__typeof__(ptr))(oldp);		\
+	__typeof__(*(ptr)) __old = *__oldp;				\
+	__typeof__(*(ptr)) __new = (new);				\
+	__typeof__(*(ptr)) __prev;					\
+									\
+	__prev = arch_cmpxchg((ptr), (__old), (__new));			\
+	if (unlikely(__prev != __old))					\
+		*__oldp = __prev;					\
+	likely(__prev == __old);					\
+})
+
+#endif /* __HAVE_ASM_FLAG_OUTPUTS__ */
+
+#define arch_try_cmpxchg64		arch_try_cmpxchg
+#define arch_try_cmpxchg_local		arch_try_cmpxchg
+#define arch_try_cmpxchg64_local	arch_try_cmpxchg
 
 void __xchg_called_with_bad_pointer(void);
 
-static __always_inline unsigned long
-__arch_xchg(unsigned long x, unsigned long address, int size)
+static inline u8 __arch_xchg1(u64 ptr, u8 x)
 {
-	unsigned long old;
-	int shift;
+	int shift = (3 ^ (ptr & 3)) << 3;
+	u32 mask, old, new;
 
+	ptr &= ~0x3;
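+	/*
+	 * Byte-sized xchg is emulated with a cmpxchg loop on the aligned
+	 * word containing the byte; s390 is big-endian, so byte offset
+	 * (ptr & 3) corresponds to bit shift (3 ^ (ptr & 3)) * 8 within
+	 * that word.
+	 */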
+	mask = ~(0xff << shift);
+	old = READ_ONCE(*(u32 *)ptr);
+	do {
+		new = old & mask;
+		new |= x << shift;
+	} while (!arch_try_cmpxchg((u32 *)ptr, &old, new));
+	return old >> shift;
+}
+
+static inline u16 __arch_xchg2(u64 ptr, u16 x)
+{
+	int shift = (2 ^ (ptr & 2)) << 3;
+	u32 mask, old, new;
+
+	ptr &= ~0x3;
+	mask = ~(0xffff << shift);
+	old = READ_ONCE(*(u32 *)ptr);
+	do {
+		new = old & mask;
+		new |= x << shift;
+	} while (!arch_try_cmpxchg((u32 *)ptr, &old, new));
+	return old >> shift;
+}
+
+static __always_inline u64 __arch_xchg(u64 ptr, u64 x, int size)
+{
 	switch (size) {
 	case 1:
-		shift = (3 ^ (address & 3)) << 3;
-		address ^= address & 3;
-		asm volatile(
-			"       l       %0,%1\n"
-			"0:     lr      0,%0\n"
-			"       nr      0,%3\n"
-			"       or      0,%2\n"
-			"       cs      %0,0,%1\n"
-			"       jl      0b\n"
-			: "=&d" (old), "+Q" (*(int *) address)
-			: "d" ((x & 0xff) << shift), "d" (~(0xff << shift))
-			: "memory", "cc", "0");
-		return old >> shift;
+		return __arch_xchg1(ptr, x & 0xff);
 	case 2:
-		shift = (2 ^ (address & 2)) << 3;
-		address ^= address & 2;
-		asm volatile(
-			"       l       %0,%1\n"
-			"0:     lr      0,%0\n"
-			"       nr      0,%3\n"
-			"       or      0,%2\n"
-			"       cs      %0,0,%1\n"
-			"       jl      0b\n"
-			: "=&d" (old), "+Q" (*(int *) address)
-			: "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift))
-			: "memory", "cc", "0");
-		return old >> shift;
-	case 4:
-		asm volatile(
-			"       l       %0,%1\n"
-			"0:     cs      %0,%2,%1\n"
-			"       jl      0b\n"
-			: "=&d" (old), "+Q" (*(int *) address)
-			: "d" (x)
-			: "memory", "cc");
+		return __arch_xchg2(ptr, x & 0xffff);
+	case 4: {
+		u32 old = READ_ONCE(*(u32 *)ptr);
+
+		do {
+		} while (!arch_try_cmpxchg((u32 *)ptr, &old, x & 0xffffffff));
 		return old;
-	case 8:
-		asm volatile(
-			"       lg      %0,%1\n"
-			"0:     csg     %0,%2,%1\n"
-			"       jl      0b\n"
-			: "=&d" (old), "+QS" (*(long *) address)
-			: "d" (x)
-			: "memory", "cc");
+	}
+	case 8: {
+		u64 old = READ_ONCE(*(u64 *)ptr);
+
+		do {
+		} while (!arch_try_cmpxchg((u64 *)ptr, &old, x));
 		return old;
 	}
+	}
 	__xchg_called_with_bad_pointer();
 	return x;
 }
 
 #define arch_xchg(ptr, x)						\
 ({									\
-	__typeof__(*(ptr)) __ret;					\
-									\
-	__ret = (__typeof__(*(ptr)))					\
-		__arch_xchg((unsigned long)(x), (unsigned long)(ptr),	\
-			    sizeof(*(ptr)));				\
-	__ret;								\
+	(__typeof__(*(ptr)))__arch_xchg((unsigned long)(ptr),		\
+					(unsigned long)(x),		\
+					sizeof(*(ptr)));		\
 })
 
-void __cmpxchg_called_with_bad_pointer(void);
-
-static __always_inline unsigned long __cmpxchg(unsigned long address,
-					       unsigned long old,
-					       unsigned long new, int size)
-{
-	switch (size) {
-	case 1: {
-		unsigned int prev, shift, mask;
-
-		shift = (3 ^ (address & 3)) << 3;
-		address ^= address & 3;
-		old = (old & 0xff) << shift;
-		new = (new & 0xff) << shift;
-		mask = ~(0xff << shift);
-		asm volatile(
-			"	l	%[prev],%[address]\n"
-			"	nr	%[prev],%[mask]\n"
-			"	xilf	%[mask],0xffffffff\n"
-			"	or	%[new],%[prev]\n"
-			"	or	%[prev],%[tmp]\n"
-			"0:	lr	%[tmp],%[prev]\n"
-			"	cs	%[prev],%[new],%[address]\n"
-			"	jnl	1f\n"
-			"	xr	%[tmp],%[prev]\n"
-			"	xr	%[new],%[tmp]\n"
-			"	nr	%[tmp],%[mask]\n"
-			"	jz	0b\n"
-			"1:"
-			: [prev] "=&d" (prev),
-			  [address] "+Q" (*(int *)address),
-			  [tmp] "+&d" (old),
-			  [new] "+&d" (new),
-			  [mask] "+&d" (mask)
-			:: "memory", "cc");
-		return prev >> shift;
-	}
-	case 2: {
-		unsigned int prev, shift, mask;
-
-		shift = (2 ^ (address & 2)) << 3;
-		address ^= address & 2;
-		old = (old & 0xffff) << shift;
-		new = (new & 0xffff) << shift;
-		mask = ~(0xffff << shift);
-		asm volatile(
-			"	l	%[prev],%[address]\n"
-			"	nr	%[prev],%[mask]\n"
-			"	xilf	%[mask],0xffffffff\n"
-			"	or	%[new],%[prev]\n"
-			"	or	%[prev],%[tmp]\n"
-			"0:	lr	%[tmp],%[prev]\n"
-			"	cs	%[prev],%[new],%[address]\n"
-			"	jnl	1f\n"
-			"	xr	%[tmp],%[prev]\n"
-			"	xr	%[new],%[tmp]\n"
-			"	nr	%[tmp],%[mask]\n"
-			"	jz	0b\n"
-			"1:"
-			: [prev] "=&d" (prev),
-			  [address] "+Q" (*(int *)address),
-			  [tmp] "+&d" (old),
-			  [new] "+&d" (new),
-			  [mask] "+&d" (mask)
-			:: "memory", "cc");
-		return prev >> shift;
-	}
-	case 4: {
-		unsigned int prev = old;
-
-		asm volatile(
-			"	cs	%[prev],%[new],%[address]\n"
-			: [prev] "+&d" (prev),
-			  [address] "+Q" (*(int *)address)
-			: [new] "d" (new)
-			: "memory", "cc");
-		return prev;
-	}
-	case 8: {
-		unsigned long prev = old;
-
-		asm volatile(
-			"	csg	%[prev],%[new],%[address]\n"
-			: [prev] "+&d" (prev),
-			  [address] "+QS" (*(long *)address)
-			: [new] "d" (new)
-			: "memory", "cc");
-		return prev;
-	}
-	}
-	__cmpxchg_called_with_bad_pointer();
-	return old;
-}
-
-#define arch_cmpxchg(ptr, o, n)						\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-									\
-	__ret = (__typeof__(*(ptr)))					\
-		__cmpxchg((unsigned long)(ptr), (unsigned long)(o),	\
-			  (unsigned long)(n), sizeof(*(ptr)));		\
-	__ret;								\
-})
-
-#define arch_cmpxchg64		arch_cmpxchg
-#define arch_cmpxchg_local	arch_cmpxchg
-#define arch_cmpxchg64_local	arch_cmpxchg
-
 #define system_has_cmpxchg128()		1
 
 static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 new)
@@ -203,5 +249,25 @@ static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 n
 }
 
 #define arch_cmpxchg128		arch_cmpxchg128
+#define arch_cmpxchg128_local	arch_cmpxchg128
+
+#ifdef __HAVE_ASM_FLAG_OUTPUTS__
+
+static __always_inline bool arch_try_cmpxchg128(volatile u128 *ptr, u128 *oldp, u128 new)
+{
+	int cc;
+
+	asm volatile(
+		"	cdsg	%[old],%[new],%[ptr]\n"
+		: [old] "+d" (*oldp), [ptr] "+QS" (*ptr), "=@cc" (cc)
+		: [new] "d" (new)
+		: "memory");
+	return likely(cc == 0);
+}
+
+#define arch_try_cmpxchg128		arch_try_cmpxchg128
+#define arch_try_cmpxchg128_local	arch_try_cmpxchg128
+
+#endif /* __HAVE_ASM_FLAG_OUTPUTS__ */
 
 #endif /* __ASM_CMPXCHG_H */
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index 1d3a4b0c650f..59ab1192e2d5 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -56,6 +56,8 @@
 #define CPACF_KM_PXTS_256	0x3c
 #define CPACF_KM_XTS_128_FULL	0x52
 #define CPACF_KM_XTS_256_FULL	0x54
+#define CPACF_KM_PXTS_128_FULL	0x5a
+#define CPACF_KM_PXTS_256_FULL	0x5c
 
 /*
  * Function codes for the KMC (CIPHER MESSAGE WITH CHAINING)
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 9e4bbc3e53f8..e1a279e0d6a6 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -13,6 +13,7 @@
 #include <linux/kmsan-checks.h>
 #include <asm/asm-extable.h>
 #include <asm/facility.h>
+#include <asm/asm.h>
 
 asm(".include \"asm/cpu_mf-insn.h\"\n");
 
@@ -185,11 +186,12 @@ static inline int lcctl(u64 ctl)
 	int cc;
 
 	asm volatile (
-		"	lcctl	%1\n"
-		"	ipm	%0\n"
-		"	srl	%0,28\n"
-		: "=d" (cc) : "Q" (ctl) : "cc");
-	return cc;
+		"	lcctl	%[ctl]\n"
+		CC_IPM(cc)
+		: CC_OUT(cc, cc)
+		: [ctl] "Q" (ctl)
+		: CC_CLOBBER);
+	return CC_TRANSFORM(cc);
 }
 
 /* Extract CPU counter */
@@ -199,12 +201,13 @@ static inline int __ecctr(u64 ctr, u64 *content)
 	int cc;
 
 	asm volatile (
-		"	ecctr	%0,%2\n"
-		"	ipm	%1\n"
-		"	srl	%1,28\n"
-		: "=d" (_content), "=d" (cc) : "d" (ctr) : "cc");
+		"	ecctr	%[_content],%[ctr]\n"
+		CC_IPM(cc)
+		: CC_OUT(cc, cc), [_content] "=d" (_content)
+		: [ctr] "d" (ctr)
+		: CC_CLOBBER);
 	*content = _content;
-	return cc;
+	return CC_TRANSFORM(cc);
 }
 
 /* Extract CPU counter */
@@ -234,18 +237,17 @@ static __always_inline int stcctm(enum stcctm_ctr_set set, u64 range, u64 *dest)
 	int cc;
 
 	asm volatile (
-		"	STCCTM	%2,%3,%1\n"
-		"	ipm	%0\n"
-		"	srl	%0,28\n"
-		: "=d" (cc)
-		: "Q" (*dest), "d" (range), "i" (set)
-		: "cc", "memory");
+		"	STCCTM	%[range],%[set],%[dest]\n"
+		CC_IPM(cc)
+		: CC_OUT(cc, cc)
+		: [dest] "Q" (*dest), [range] "d" (range), [set] "i" (set)
+		: CC_CLOBBER_LIST("memory"));
 	/*
 	 * If cc == 2, less than RANGE counters are stored, but it's not easy
 	 * to tell how many. Always unpoison the whole range for simplicity.
 	 */
 	kmsan_unpoison_memory(dest, range * sizeof(u64));
-	return cc;
+	return CC_TRANSFORM(cc);
 }
 
 /* Query sampling information */
@@ -265,19 +267,20 @@ static inline int qsi(struct hws_qsi_info_block *info)
 /* Load sampling controls */
 static inline int lsctl(struct hws_lsctl_request_block *req)
 {
-	int cc;
+	int cc, exception;
 
-	cc = 1;
+	exception = 1;
 	asm volatile(
-		"0:	lsctl	0(%1)\n"
-		"1:	ipm	%0\n"
-		"	srl	%0,28\n"
+		"0:	lsctl	%[req]\n"
+		"1:	lhi	%[exc],0\n"
 		"2:\n"
+		CC_IPM(cc)
 		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
-		: "+d" (cc), "+a" (req)
-		: "m" (*req)
-		: "cc", "memory");
-
-	return cc ? -EINVAL : 0;
+		: CC_OUT(cc, cc), [exc] "+d" (exception)
+		: [req] "Q" (*req)
+		: CC_CLOBBER);
+	if (exception || CC_TRANSFORM(cc))
+		return -EINVAL;
+	return 0;
 }
 #endif /* _ASM_S390_CPU_MF_H */
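
The conversions above all follow the same scheme: CC_OUT() declares the condition-code output operand, CC_IPM() emits an extraction instruction only when one is needed, and CC_TRANSFORM() normalizes the raw value to 0..3. A simplified reconstruction of what asm/asm.h provides; the in-tree definitions differ in detail, this only illustrates the two cases:

	/* simplified sketch, assuming <linux/stringify.h> */
	#ifdef __HAVE_ASM_FLAG_OUTPUTS__
	#define CC_IPM(cc)			/* cc comes from the flag output */
	#define CC_OUT(name, value)	"=@cc" (value)
	#define CC_TRANSFORM(cc)	(cc)	/* already 0..3 */
	#define CC_CLOBBER			/* condition code is an output here */
	#define CC_CLOBBER_LIST(...)	__VA_ARGS__
	#else
	#define CC_IPM(cc)		"	ipm	%[" __stringify(cc) "]\n"
	#define CC_OUT(name, value)	[name] "=d" (value)
	#define CC_TRANSFORM(cc)	((cc) >> 28)
	#define CC_CLOBBER		"cc"
	#define CC_CLOBBER_LIST(...)	"cc", __VA_ARGS__
	#endif
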
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index 715bcf8fb69a..5f5b1aa6c233 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -88,7 +88,7 @@ static __always_inline bool test_facility(unsigned long nr)
 	return __test_facility(nr, &stfle_fac_list);
 }
 
-static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
+static inline unsigned long __stfle_asm(u64 *fac_list, int size)
 {
 	unsigned long reg0 = size - 1;
 
@@ -96,7 +96,7 @@ static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
 		"	lgr	0,%[reg0]\n"
 		"	.insn	s,0xb2b00000,%[list]\n" /* stfle */
 		"	lgr	%[reg0],0\n"
-		: [reg0] "+&d" (reg0), [list] "+Q" (*stfle_fac_list)
+		: [reg0] "+&d" (reg0), [list] "+Q" (*fac_list)
 		:
 		: "memory", "cc", "0");
 	return reg0;
@@ -104,10 +104,10 @@ static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
 
 /**
  * stfle - Store facility list extended
- * @stfle_fac_list: array where facility list can be stored
+ * @fac_list: array where facility list can be stored
  * @size: size of passed in array in double words
  */
-static inline void __stfle(u64 *stfle_fac_list, int size)
+static inline void __stfle(u64 *fac_list, int size)
 {
 	unsigned long nr;
 	u32 stfl_fac_list;
@@ -116,20 +116,20 @@ static inline void __stfle(u64 *stfle_fac_list, int size)
 		"	stfl	0(0)\n"
 		: "=m" (get_lowcore()->stfl_fac_list));
 	stfl_fac_list = get_lowcore()->stfl_fac_list;
-	memcpy(stfle_fac_list, &stfl_fac_list, 4);
+	memcpy(fac_list, &stfl_fac_list, 4);
 	nr = 4; /* bytes stored by stfl */
 	if (stfl_fac_list & 0x01000000) {
 		/* More facility bits available with stfle */
-		nr = __stfle_asm(stfle_fac_list, size);
+		nr = __stfle_asm(fac_list, size);
 		nr = min_t(unsigned long, (nr + 1) * 8, size * 8);
 	}
-	memset((char *) stfle_fac_list + nr, 0, size * 8 - nr);
+	memset((char *)fac_list + nr, 0, size * 8 - nr);
 }
 
-static inline void stfle(u64 *stfle_fac_list, int size)
+static inline void stfle(u64 *fac_list, int size)
 {
 	preempt_disable();
-	__stfle(stfle_fac_list, size);
+	__stfle(fac_list, size);
 	preempt_enable();
 }
 
diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index 9725586f4259..64761c78f774 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -107,9 +107,6 @@ void gmap_remove(struct gmap *gmap);
 struct gmap *gmap_get(struct gmap *gmap);
 void gmap_put(struct gmap *gmap);
 
-void gmap_enable(struct gmap *gmap);
-void gmap_disable(struct gmap *gmap);
-struct gmap *gmap_get_enabled(void);
 int gmap_map_segment(struct gmap *gmap, unsigned long from,
 		     unsigned long to, unsigned long len);
 int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index 1bd08eb56d5f..9084b750350d 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -94,6 +94,9 @@ void arch_kexec_protect_crashkres(void);
 
 void arch_kexec_unprotect_crashkres(void);
 #define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres
+
+bool is_kdump_kernel(void);
+#define is_kdump_kernel is_kdump_kernel
 #endif
 
 #ifdef CONFIG_KEXEC_FILE
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 8e77afbed58e..51201b4ac93a 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -527,6 +527,9 @@ struct kvm_vcpu_stat {
 #define PGM_REGION_FIRST_TRANS		0x39
 #define PGM_REGION_SECOND_TRANS		0x3a
 #define PGM_REGION_THIRD_TRANS		0x3b
+#define PGM_SECURE_STORAGE_ACCESS	0x3d
+#define PGM_NON_SECURE_STORAGE_ACCESS	0x3e
+#define PGM_SECURE_STORAGE_VIOLATION	0x3f
 #define PGM_MONITOR			0x40
 #define PGM_PER				0x80
 #define PGM_CRYPTO_OPERATION		0x119
@@ -747,8 +750,6 @@ struct kvm_vcpu_arch {
 	struct hrtimer    ckc_timer;
 	struct kvm_s390_pgm_info pgm;
 	struct gmap *gmap;
-	/* backup location for the currently enabled gmap when scheduled out */
-	struct gmap *enabled_gmap;
 	struct kvm_guestdbg_info_arch guestdbg;
 	unsigned long pfault_token;
 	unsigned long pfault_select;
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 48c64716d1f2..42a092fa1029 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -165,8 +165,7 @@ struct lowcore {
 	__u64	percpu_offset;			/* 0x03b8 */
 	__u8	pad_0x03c0[0x03c8-0x03c0];	/* 0x03c0 */
 	__u64	machine_flags;			/* 0x03c8 */
-	__u64	gmap;				/* 0x03d0 */
-	__u8	pad_0x03d8[0x0400-0x03d8];	/* 0x03d8 */
+	__u8	pad_0x03d0[0x0400-0x03d0];	/* 0x03d0 */
 
 	__u32	return_lpswe;			/* 0x0400 */
 	__u32	return_mcck_lpswe;		/* 0x0404 */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 73e1e03317b4..4405084d55a4 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -10,6 +10,7 @@
 
 #include <linux/const.h>
 #include <asm/types.h>
+#include <asm/asm.h>
 
 #define _PAGE_SHIFT	CONFIG_PAGE_SHIFT
 #define _PAGE_SIZE	(_AC(1, UL) << _PAGE_SHIFT)
@@ -148,11 +149,12 @@ static inline int page_reset_referenced(unsigned long addr)
 	int cc;
 
 	asm volatile(
-		"	rrbe	0,%1\n"
-		"	ipm	%0\n"
-		"	srl	%0,28\n"
-		: "=d" (cc) : "a" (addr) : "cc");
-	return cc;
+		"	rrbe	0,%[addr]\n"
+		CC_IPM(cc)
+		: CC_OUT(cc, cc)
+		: [addr] "a" (addr)
+		: CC_CLOBBER);
+	return CC_TRANSFORM(cc);
 }
 
 /* Bits in the storage key */
diff --git a/arch/s390/include/asm/pai.h b/arch/s390/include/asm/pai.h
index 25f2077ba3c9..ebeabd0aaa51 100644
--- a/arch/s390/include/asm/pai.h
+++ b/arch/s390/include/asm/pai.h
@@ -11,6 +11,7 @@
 #include <linux/jump_label.h>
 #include <asm/lowcore.h>
 #include <asm/ptrace.h>
+#include <asm/asm.h>
 
 struct qpaci_info_block {
 	u64 header;
@@ -33,12 +34,11 @@ static inline int qpaci(struct qpaci_info_block *info)
 		"	lgr	0,%[size]\n"
 		"	.insn	s,0xb28f0000,%[info]\n"
 		"	lgr	%[size],0\n"
-		"	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
-		: [cc] "=d" (cc), [info] "=Q" (*info), [size] "+&d" (size)
+		CC_IPM(cc)
+		: CC_OUT(cc, cc), [info] "=Q" (*info), [size] "+&d" (size)
 		:
-		: "0", "cc", "memory");
-	return cc ? (size + 1) * sizeof(u64) : 0;
+		: CC_CLOBBER_LIST("0", "memory"));
+	return CC_TRANSFORM(cc) ? (size + 1) * sizeof(u64) : 0;
 }
 
 #define PAI_CRYPTO_BASE			0x1000	/* First event number */
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 9d920ced6047..5013a690837e 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -107,9 +107,10 @@ struct zpci_bus {
 	struct list_head	resources;
 	struct list_head	bus_next;
 	struct resource		bus_resource;
-	int			pchid;
+	int			topo;		/* TID if topo_is_tid, PCHID otherwise */
 	int			domain_nr;
-	bool			multifunction;
+	u8			multifunction	: 1;
+	u8			topo_is_tid	: 1;
 	enum pci_bus_speed	max_bus_speed;
 };
 
@@ -130,9 +131,12 @@ struct zpci_dev {
 	u16		vfn;		/* virtual function number */
 	u16		pchid;		/* physical channel ID */
 	u16		maxstbl;	/* Maximum store block size */
+	u16		rid;		/* RID as supplied by firmware */
+	u16		tid;		/* Topology for which RID is valid */
 	u8		pfgid;		/* function group ID */
 	u8		pft;		/* pci function type */
 	u8		port;
+	u8		fidparm;
 	u8		dtsm;		/* Supported DT mask */
 	u8		rid_available	: 1;
 	u8		has_hp_slot	: 1;
@@ -140,7 +144,8 @@ struct zpci_dev {
 	u8		is_physfn	: 1;
 	u8		util_str_avail	: 1;
 	u8		irqs_registered	: 1;
-	u8		reserved	: 2;
+	u8		tid_avail	: 1;
+	u8		reserved	: 1;
 	unsigned int	devfn;		/* DEVFN part of the RID*/
 
 	u8 pfip[CLP_PFIP_NR_SEGMENTS];	/* pci function internal path */
@@ -210,12 +215,14 @@ extern struct airq_iv *zpci_aif_sbv;
 ----------------------------------------------------------------------------- */
 /* Base stuff */
 struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state);
+int zpci_add_device(struct zpci_dev *zdev);
 int zpci_enable_device(struct zpci_dev *);
 int zpci_disable_device(struct zpci_dev *);
 int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh);
 int zpci_deconfigure_device(struct zpci_dev *zdev);
 void zpci_device_reserved(struct zpci_dev *zdev);
 bool zpci_is_device_configured(struct zpci_dev *zdev);
+int zpci_scan_devices(void);
 
 int zpci_hot_reset_device(struct zpci_dev *zdev);
 int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64, u8 *);
@@ -225,7 +232,7 @@ void zpci_update_fh(struct zpci_dev *zdev, u32 fh);
 
 /* CLP */
 int clp_setup_writeback_mio(void);
-int clp_scan_pci_devices(void);
+int clp_scan_pci_devices(struct list_head *scan_list);
 int clp_query_pci_fn(struct zpci_dev *zdev);
 int clp_enable_fh(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as);
 int clp_disable_fh(struct zpci_dev *zdev, u32 *fh);
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
index f0c677ddd270..3fff2f7095c8 100644
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -110,7 +110,8 @@ struct clp_req_query_pci {
 struct clp_rsp_query_pci {
 	struct clp_rsp_hdr hdr;
 	u16 vfn;			/* virtual fn number */
-	u16			:  3;
+	u16			:  2;
+	u16 tid_avail		:  1;
 	u16 rid_avail		:  1;
 	u16 is_physfn		:  1;
 	u16 reserved1		:  1;
@@ -122,16 +123,18 @@ struct clp_rsp_query_pci {
 	u16 pchid;
 	__le32 bar[PCI_STD_NUM_BARS];
 	u8 pfip[CLP_PFIP_NR_SEGMENTS];	/* pci function internal path */
-	u16			: 12;
-	u16 port		:  4;
+	u8 fidparm;
+	u8 reserved3		:  4;
+	u8 port			:  4;
 	u8 fmb_len;
 	u8 pft;				/* pci function type */
 	u64 sdma;			/* start dma as */
 	u64 edma;			/* end dma as */
 #define ZPCI_RID_MASK_DEVFN 0x00ff
 	u16 rid;			/* BUS/DEVFN PCI address */
-	u16 reserved0;
-	u32 reserved[10];
+	u32 reserved0;
+	u16 tid;
+	u32 reserved[9];
 	u32 uid;			/* user defined id */
 	u8 util_str[CLP_UTIL_STR_LEN];	/* utility string */
 	u32 reserved2[16];
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index 2686bee800e3..43a5ea4ee20f 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -143,7 +143,7 @@ static inline int zpci_get_max_io_size(u64 src, u64 dst, int len, int max)
 
 static inline int zpci_memcpy_fromio(void *dst,
 				     const volatile void __iomem *src,
-				     unsigned long n)
+				     size_t n)
 {
 	int size, rc = 0;
 
@@ -162,7 +162,7 @@ static inline int zpci_memcpy_fromio(void *dst,
 }
 
 static inline int zpci_memcpy_toio(volatile void __iomem *dst,
-				   const void *src, unsigned long n)
+				   const void *src, size_t n)
 {
 	int size, rc = 0;
 
@@ -187,7 +187,7 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
 }
 
 static inline int zpci_memset_io(volatile void __iomem *dst,
-				 unsigned char val, size_t count)
+				 int val, size_t count)
 {
 	u8 *src = kmalloc(count, GFP_KERNEL);
 	int rc;
diff --git a/arch/s390/include/asm/physmem_info.h b/arch/s390/include/asm/physmem_info.h
index f45cfc8bc233..51b68a43e195 100644
--- a/arch/s390/include/asm/physmem_info.h
+++ b/arch/s390/include/asm/physmem_info.h
@@ -9,6 +9,7 @@ enum physmem_info_source {
 	MEM_DETECT_NONE = 0,
 	MEM_DETECT_SCLP_STOR_INFO,
 	MEM_DETECT_DIAG260,
+	MEM_DETECT_DIAG500_STOR_LIMIT,
 	MEM_DETECT_SCLP_READ_INFO,
 	MEM_DETECT_BIN_SEARCH
 };
@@ -107,6 +108,8 @@ static inline const char *get_physmem_info_source(void)
 		return "sclp storage info";
 	case MEM_DETECT_DIAG260:
 		return "diag260";
+	case MEM_DETECT_DIAG500_STOR_LIMIT:
+		return "diag500 storage limit";
 	case MEM_DETECT_SCLP_READ_INFO:
 		return "sclp read info";
 	case MEM_DETECT_BIN_SEARCH:
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index deca3f221836..0cde7e240373 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -5,6 +5,7 @@
 #include <asm/current.h>
 #include <linux/thread_info.h>
 #include <asm/atomic_ops.h>
+#include <asm/cmpxchg.h>
 #include <asm/march.h>
 
 #ifdef MARCH_HAS_Z196_FEATURES
@@ -22,12 +23,10 @@ static __always_inline void preempt_count_set(int pc)
 {
 	int old, new;
 
+	old = READ_ONCE(get_lowcore()->preempt_count);
 	do {
-		old = READ_ONCE(get_lowcore()->preempt_count);
-		new = (old & PREEMPT_NEED_RESCHED) |
-			(pc & ~PREEMPT_NEED_RESCHED);
-	} while (__atomic_cmpxchg(&get_lowcore()->preempt_count,
-				  old, new) != old);
+		new = (old & PREEMPT_NEED_RESCHED) | (pc & ~PREEMPT_NEED_RESCHED);
+	} while (!arch_try_cmpxchg(&get_lowcore()->preempt_count, &old, new));
 }
 
 static __always_inline void set_preempt_need_resched(void)
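
preempt_count_set() switches from an open-coded compare-and-swap loop to the arch_try_cmpxchg() idiom, which folds the reload of the expected value into the primitive itself. Roughly, the transformation looks like this, with compute() standing in as a placeholder for the new-value calculation:

	/* before: reload and compare the returned value each iteration */
	do {
		old = READ_ONCE(*ptr);
		new = compute(old);
	} while (cmpxchg(ptr, old, new) != old);

	/* after: on failure 'old' is updated in place, saving one load */
	old = READ_ONCE(*ptr);
	do {
		new = compute(old);
	} while (!try_cmpxchg(ptr, &old, new));
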
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 9a5236acc0a8..8761fd01a9f0 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -39,6 +39,7 @@
 #include <asm/runtime_instr.h>
 #include <asm/irqflags.h>
 #include <asm/alternative.h>
+#include <asm/fault.h>
 
 struct pcpu {
 	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
@@ -187,10 +188,8 @@ struct thread_struct {
 	unsigned long hardirq_timer;		/* task cputime in hardirq context */
 	unsigned long softirq_timer;		/* task cputime in softirq context */
 	const sys_call_ptr_t *sys_call_table;	/* system call table address */
-	unsigned long gmap_addr;		/* address of last gmap fault. */
-	unsigned int gmap_write_flag;		/* gmap fault write indication */
+	union teid gmap_teid;			/* address and flags of last gmap fault */
 	unsigned int gmap_int_code;		/* int code of last gmap fault */
-	unsigned int gmap_pfault;		/* signal of a pending guest pfault */
 	int ufpu_flags;				/* user fpu flags */
 	int kfpu_flags;				/* kernel fpu flags */
 
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 2ad9324f6338..788bc4467445 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -14,11 +14,13 @@
 #define PIF_SYSCALL			0	/* inside a system call */
 #define PIF_EXECVE_PGSTE_RESTART	1	/* restart execve for PGSTE binaries */
 #define PIF_SYSCALL_RET_SET		2	/* return value was set via ptrace */
+#define PIF_GUEST_FAULT			3	/* indicates program check in sie64a */
 #define PIF_FTRACE_FULL_REGS		4	/* all register contents valid (ftrace) */
 
 #define _PIF_SYSCALL			BIT(PIF_SYSCALL)
 #define _PIF_EXECVE_PGSTE_RESTART	BIT(PIF_EXECVE_PGSTE_RESTART)
 #define _PIF_SYSCALL_RET_SET		BIT(PIF_SYSCALL_RET_SET)
+#define _PIF_GUEST_FAULT		BIT(PIF_GUEST_FAULT)
 #define _PIF_FTRACE_FULL_REGS		BIT(PIF_FTRACE_FULL_REGS)
 
 #define PSW32_MASK_PER		_AC(0x40000000, UL)
diff --git a/arch/s390/include/asm/set_memory.h b/arch/s390/include/asm/set_memory.h
index 06fbabe2f66c..cb4cc0f59012 100644
--- a/arch/s390/include/asm/set_memory.h
+++ b/arch/s390/include/asm/set_memory.h
@@ -62,5 +62,6 @@ __SET_MEMORY_FUNC(set_memory_4k, SET_MEMORY_4K)
 
 int set_direct_map_invalid_noflush(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
+bool kernel_page_present(struct page *page);
 
 #endif
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index edee63da08e7..472943b77066 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -38,6 +38,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/asm.h>
+
 static inline int ____pcpu_sigp(u16 addr, u8 order, unsigned long parm,
 				u32 *status)
 {
@@ -46,13 +48,12 @@ static inline int ____pcpu_sigp(u16 addr, u8 order, unsigned long parm,
 
 	asm volatile(
 		"	sigp	%[r1],%[addr],0(%[order])\n"
-		"	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
-		: [cc] "=&d" (cc), [r1] "+&d" (r1.pair)
+		CC_IPM(cc)
+		: CC_OUT(cc, cc), [r1] "+d" (r1.pair)
 		: [addr] "d" (addr), [order] "a" (order)
-		: "cc");
+		: CC_CLOBBER);
 	*status = r1.even;
-	return cc;
+	return CC_TRANSFORM(cc);
 }
 
 static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm,
diff --git a/arch/s390/include/asm/sparsemem.h b/arch/s390/include/asm/sparsemem.h
index c549893602ea..668dfc5de538 100644
--- a/arch/s390/include/asm/sparsemem.h
+++ b/arch/s390/include/asm/sparsemem.h
@@ -2,7 +2,23 @@
 #ifndef _ASM_S390_SPARSEMEM_H
 #define _ASM_S390_SPARSEMEM_H
 
-#define SECTION_SIZE_BITS	28
+#define SECTION_SIZE_BITS	27
 #define MAX_PHYSMEM_BITS	CONFIG_MAX_PHYSMEM_BITS
 
+#ifdef CONFIG_NUMA
+
+static inline int memory_add_physaddr_to_nid(u64 addr)
+{
+	return 0;
+}
+#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
+
+static inline int phys_to_target_node(u64 start)
+{
+	return 0;
+}
+#define phys_to_target_node phys_to_target_node
+
+#endif /* CONFIG_NUMA */
+
 #endif /* _ASM_S390_SPARSEMEM_H */
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 77d5e804af93..ac868a9bb0d1 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -57,8 +57,10 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lp)
 
 static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
 {
+	int old = 0;
+
 	barrier();
-	return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
+	return likely(arch_try_cmpxchg(&lp->lock, &old, SPINLOCK_LOCKVAL));
 }
 
 static inline void arch_spin_lock(arch_spinlock_t *lp)
@@ -118,7 +120,9 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	if (!__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000))
+	int old = 0;
+
+	if (!arch_try_cmpxchg(&rw->cnts, &old, 0x30000))
 		arch_write_lock_wait(rw);
 }
 
@@ -133,8 +137,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 	int old;
 
 	old = READ_ONCE(rw->cnts);
-	return (!(old & 0xffff0000) &&
-		__atomic_cmpxchg_bool(&rw->cnts, old, old + 1));
+	return (!(old & 0xffff0000) && arch_try_cmpxchg(&rw->cnts, &old, old + 1));
 }
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -142,7 +145,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 	int old;
 
 	old = READ_ONCE(rw->cnts);
-	return !old && __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000);
+	return !old && arch_try_cmpxchg(&rw->cnts, &old, 0x30000);
 }
 
 #endif /* __ASM_SPINLOCK_H */
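
Note why the lock-side callers gain a local variable: __atomic_cmpxchg_bool() took the expected value by value, while arch_try_cmpxchg() takes a pointer to it and writes the observed value back on failure. In the trylock paths that write-back is simply discarded, since they do not loop. An illustrative sketch of the API difference:

	/* old API: expected value passed by value */
	__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000);

	/* new API: expected value passed by reference, updated on failure,
	 * hence the local 'old = 0' the patch introduces
	 */
	int old = 0;
	arch_try_cmpxchg(&rw->cnts, &old, 0x30000);
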
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 640901f2fbc3..8fe56456feab 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -13,6 +13,7 @@
 #include <linux/preempt.h>
 #include <linux/time64.h>
 #include <asm/lowcore.h>
+#include <asm/asm.h>
 
 /* The value of the TOD clock for 1.1.1970. */
 #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
@@ -44,11 +45,12 @@ static inline int set_tod_clock(__u64 time)
 	int cc;
 
 	asm volatile(
-		"   sck   %1\n"
-		"   ipm   %0\n"
-		"   srl   %0,28\n"
-		: "=d" (cc) : "Q" (time) : "cc");
-	return cc;
+		"	sck	%[time]\n"
+		CC_IPM(cc)
+		: CC_OUT(cc, cc)
+		: [time] "Q" (time)
+		: CC_CLOBBER);
+	return CC_TRANSFORM(cc);
 }
 
 static inline int store_tod_clock_ext_cc(union tod_clock *clk)
@@ -56,11 +58,12 @@ static inline int store_tod_clock_ext_cc(union tod_clock *clk)
 	int cc;
 
 	asm volatile(
-		"   stcke  %1\n"
-		"   ipm   %0\n"
-		"   srl   %0,28\n"
-		: "=d" (cc), "=Q" (*clk) : : "cc");
-	return cc;
+		"	stcke	%[clk]\n"
+		CC_IPM(cc)
+		: CC_OUT(cc, cc), [clk] "=Q" (*clk)
+		:
+		: CC_CLOBBER);
+	return CC_TRANSFORM(cc);
 }
 
 static __always_inline void store_tod_clock_ext(union tod_clock *tod)
@@ -149,12 +152,11 @@ struct ptff_qui {
 		"	lgr	0,%[reg0]\n"				\
 		"	lgr	1,%[reg1]\n"				\
 		"	ptff\n"						\
-		"	ipm	%[rc]\n"				\
-		"	srl	%[rc],28\n"				\
-		: [rc] "=&d" (rc), "+m" (*(struct addrtype *)reg1)	\
+		CC_IPM(rc)						\
+		: CC_OUT(rc, rc), "+m" (*(struct addrtype *)reg1)	\
 		: [reg0] "d" (reg0), [reg1] "d" (reg1)			\
-		: "cc", "0", "1");					\
-	rc;								\
+		: CC_CLOBBER_LIST("0", "1"));				\
+	CC_TRANSFORM(rc);						\
 })
 
 static inline unsigned long local_tick_disable(void)
diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index 153d93468b77..dc332609f2c3 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -2,7 +2,7 @@
 /*
  * Ultravisor Interfaces
  *
- * Copyright IBM Corp. 2019, 2022
+ * Copyright IBM Corp. 2019, 2024
  *
  * Author(s):
  *	Vasily Gorbik <gor@linux.ibm.com>
@@ -17,6 +17,7 @@
 #include <linux/sched.h>
 #include <asm/page.h>
 #include <asm/gmap.h>
+#include <asm/asm.h>
 
 #define UVC_CC_OK	0
 #define UVC_CC_ERROR	1
@@ -28,9 +29,11 @@
 #define UVC_RC_INV_STATE	0x0003
 #define UVC_RC_INV_LEN		0x0005
 #define UVC_RC_NO_RESUME	0x0007
+#define UVC_RC_MORE_DATA	0x0100
 #define UVC_RC_NEED_DESTROY	0x8000
 
 #define UVC_CMD_QUI			0x0001
+#define UVC_CMD_QUERY_KEYS		0x0002
 #define UVC_CMD_INIT_UV			0x000f
 #define UVC_CMD_CREATE_SEC_CONF		0x0100
 #define UVC_CMD_DESTROY_SEC_CONF	0x0101
@@ -61,6 +64,7 @@
 #define UVC_CMD_ADD_SECRET		0x1031
 #define UVC_CMD_LIST_SECRETS		0x1033
 #define UVC_CMD_LOCK_SECRETS		0x1034
+#define UVC_CMD_RETR_SECRET		0x1035
 
 /* Bits in installed uv calls */
 enum uv_cmds_inst {
@@ -94,6 +98,8 @@ enum uv_cmds_inst {
 	BIT_UVC_CMD_ADD_SECRET = 29,
 	BIT_UVC_CMD_LIST_SECRETS = 30,
 	BIT_UVC_CMD_LOCK_SECRETS = 31,
+	BIT_UVC_CMD_RETR_SECRET = 33,
+	BIT_UVC_CMD_QUERY_KEYS = 34,
 };
 
 enum uv_feat_ind {
@@ -140,11 +146,27 @@ struct uv_cb_qui {
 	u64 reservedf0;				/* 0x00f0 */
 	u64 supp_add_secret_req_ver;		/* 0x00f8 */
 	u64 supp_add_secret_pcf;		/* 0x0100 */
-	u64 supp_secret_types;			/* 0x0180 */
-	u16 max_secrets;			/* 0x0110 */
-	u8 reserved112[0x120 - 0x112];		/* 0x0112 */
+	u64 supp_secret_types;			/* 0x0108 */
+	u16 max_assoc_secrets;			/* 0x0110 */
+	u16 max_retr_secrets;			/* 0x0112 */
+	u8 reserved114[0x120 - 0x114];		/* 0x0114 */
 } __packed __aligned(8);
 
+struct uv_key_hash {
+	u64 dword[4];
+} __packed __aligned(8);
+
+#define UVC_QUERY_KEYS_IDX_HK		0
+#define UVC_QUERY_KEYS_IDX_BACK_HK	1
+
+/* Query Ultravisor Keys */
+struct uv_cb_query_keys {
+	struct uv_cb_header header;		/* 0x0000 */
+	u64 reserved08[3];			/* 0x0008 */
+	struct uv_key_hash key_hashes[15];	/* 0x0020 */
+} __packed __aligned(8);
+static_assert(sizeof(struct uv_cb_query_keys) == 0x200);
+
 /* Initialize Ultravisor */
 struct uv_cb_init {
 	struct uv_cb_header header;
@@ -317,7 +339,6 @@ struct uv_cb_dump_complete {
  * A common UV call struct for pv guests that contains a single address
  * Examples:
  * Add Secret
- * List Secrets
  */
 struct uv_cb_guest_addr {
 	struct uv_cb_header header;
@@ -326,18 +347,102 @@ struct uv_cb_guest_addr {
 	u64 reserved28[4];
 } __packed __aligned(8);
 
+#define UVC_RC_RETR_SECR_BUF_SMALL	0x0109
+#define UVC_RC_RETR_SECR_STORE_EMPTY	0x010f
+#define UVC_RC_RETR_SECR_INV_IDX	0x0110
+#define UVC_RC_RETR_SECR_INV_SECRET	0x0111
+
+struct uv_cb_retr_secr {
+	struct uv_cb_header header;
+	u64 reserved08[2];
+	u16 secret_idx;
+	u16 reserved1a;
+	u32 buf_size;
+	u64 buf_addr;
+	u64 reserved28[4];
+}  __packed __aligned(8);
+
+struct uv_cb_list_secrets {
+	struct uv_cb_header header;
+	u64 reserved08[2];
+	u8  reserved18[6];
+	u16 start_idx;
+	u64 list_addr;
+	u64 reserved28[4];
+} __packed __aligned(8);
+
+enum uv_secret_types {
+	UV_SECRET_INVAL = 0x0,
+	UV_SECRET_NULL = 0x1,
+	UV_SECRET_ASSOCIATION = 0x2,
+	UV_SECRET_PLAIN = 0x3,
+	UV_SECRET_AES_128 = 0x4,
+	UV_SECRET_AES_192 = 0x5,
+	UV_SECRET_AES_256 = 0x6,
+	UV_SECRET_AES_XTS_128 = 0x7,
+	UV_SECRET_AES_XTS_256 = 0x8,
+	UV_SECRET_HMAC_SHA_256 = 0x9,
+	UV_SECRET_HMAC_SHA_512 = 0xa,
+	/* 0x0b - 0x10 reserved */
+	UV_SECRET_ECDSA_P256 = 0x11,
+	UV_SECRET_ECDSA_P384 = 0x12,
+	UV_SECRET_ECDSA_P521 = 0x13,
+	UV_SECRET_ECDSA_ED25519 = 0x14,
+	UV_SECRET_ECDSA_ED448 = 0x15,
+};
+
+/**
+ * struct uv_secret_list_item_hdr - UV secret metadata.
+ * @index: Index of the secret in the secret list.
+ * @type: Type of the secret. See `enum uv_secret_types`.
+ * @length: Length of the stored secret.
+ */
+struct uv_secret_list_item_hdr {
+	u16 index;
+	u16 type;
+	u32 length;
+} __packed __aligned(8);
+
+#define UV_SECRET_ID_LEN 32
+/**
+ * struct uv_secret_list_item - UV secret entry.
+ * @hdr: The metadata of this secret.
+ * @id: The ID of this secret, not the secret itself.
+ */
+struct uv_secret_list_item {
+	struct uv_secret_list_item_hdr hdr;
+	u64 reserved08;
+	u8 id[UV_SECRET_ID_LEN];
+} __packed __aligned(8);
+
+/**
+ * struct uv_secret_list - UV secret-metadata list.
+ * @num_secr_stored: Number of secrets stored in this list.
+ * @total_num_secrets: Number of secrets stored in the UV for this guest.
+ * @next_secret_idx: Index of the next secret if more are available, zero otherwise.
+ * @secrets: Up to 85 UV-secret metadata entries.
+ */
+struct uv_secret_list {
+	u16 num_secr_stored;
+	u16 total_num_secrets;
+	u16 next_secret_idx;
+	u16 reserved_06;
+	u64 reserved_08;
+	struct uv_secret_list_item secrets[85];
+} __packed __aligned(8);
+static_assert(sizeof(struct uv_secret_list) == PAGE_SIZE);
+
 static inline int __uv_call(unsigned long r1, unsigned long r2)
 {
 	int cc;
 
 	asm volatile(
-		"	.insn rrf,0xB9A40000,%[r1],%[r2],0,0\n"
-		"	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
-		: [cc] "=d" (cc)
+		"	.insn	 rrf,0xb9a40000,%[r1],%[r2],0,0\n"
+		CC_IPM(cc)
+		: CC_OUT(cc, cc)
 		: [r1] "a" (r1), [r2] "a" (r2)
-		: "memory", "cc");
-	return cc;
+		: CC_CLOBBER_LIST("memory"));
+	return CC_TRANSFORM(cc);
 }
 
 static inline int uv_call(unsigned long r1, unsigned long r2)
@@ -382,6 +487,48 @@ static inline int uv_cmd_nodata(u64 handle, u16 cmd, u16 *rc, u16 *rrc)
 	return cc ? -EINVAL : 0;
 }
 
+/**
+ * uv_list_secrets() - Do a List Secrets UVC.
+ *
+ * @buf: Buffer to write list into; size of one page.
+ * @start_idx: The smallest index that should be included in the list.
+ *		For the fist invocation use 0.
+ *		For the first invocation use 0.
+ * @rrc: Pointer to store the return reason code or NULL.
+ *
+ * This function calls the List Secrets UVC. The result is written into `buf`,
+ * which needs to be at least one page of writable memory.
+ * `buf` is filled with a %struct uv_secret_list:
+ * * the list header fields
+ * * up to 85 %struct uv_secret_list_item entries
+ *
+ * For `start_idx` use _0_ for the first call. If more secrets are available
+ * than fit into the page, `rc` is `UVC_RC_MORE_DATA`.
+ * In this case use `uv_secret_list.next_secret_idx` for `start_idx`.
+ *
+ * Context: might sleep.
+ *
+ * Return: The UVC condition code.
+ */
+static inline int uv_list_secrets(struct uv_secret_list *buf, u16 start_idx,
+				  u16 *rc, u16 *rrc)
+{
+	struct uv_cb_list_secrets uvcb = {
+		.header.len = sizeof(uvcb),
+		.header.cmd = UVC_CMD_LIST_SECRETS,
+		.start_idx = start_idx,
+		.list_addr = (u64)buf,
+	};
+	int cc = uv_call_sched(0, (u64)&uvcb);
+
+	if (rc)
+		*rc = uvcb.header.rc;
+	if (rrc)
+		*rrc = uvcb.header.rrc;
+
+	return cc;
+}
+
 struct uv_info {
 	unsigned long inst_calls_list[4];
 	unsigned long uv_base_stor_len;
@@ -402,7 +549,8 @@ struct uv_info {
 	unsigned long supp_add_secret_req_ver;
 	unsigned long supp_add_secret_pcf;
 	unsigned long supp_secret_types;
-	unsigned short max_secrets;
+	unsigned short max_assoc_secrets;
+	unsigned short max_retr_secrets;
 };
 
 extern struct uv_info uv_info;
@@ -468,6 +616,10 @@ static inline int uv_remove_shared(unsigned long addr)
 	return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS);
 }
 
+int uv_get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN],
+			   struct uv_secret_list_item_hdr *secret);
+int uv_retrieve_secret(u16 secret_idx, u8 *buf, size_t buf_size);
+
 extern int prot_virt_host;
 
 static inline int is_prot_virt_host(void)
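
The kernel-doc above describes a simple pagination protocol. A hedged sketch of a caller that walks all secret metadata, with a hypothetical handle_secret() consumer and the condition-code and allocation error handling trimmed for brevity:

	struct uv_secret_list *list = (void *)__get_free_page(GFP_KERNEL);
	u16 rc, rrc, idx = 0;

	do {
		uv_list_secrets(list, idx, &rc, &rrc);
		for (u16 i = 0; i < list->num_secr_stored; i++)
			handle_secret(&list->secrets[i]);	/* hypothetical */
		idx = list->next_secret_idx;
	} while (rc == UVC_RC_MORE_DATA);

	free_page((unsigned long)list);
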
diff --git a/arch/s390/include/uapi/asm/dasd.h b/arch/s390/include/uapi/asm/dasd.h
index b11d98800458..7c364b33c84d 100644
--- a/arch/s390/include/uapi/asm/dasd.h
+++ b/arch/s390/include/uapi/asm/dasd.h
@@ -294,7 +294,7 @@ struct dasd_snid_ioctl_data {
 /********************************************************************************
  * SECTION: Definition of IOCTLs
  *
- * Here ist how the ioctl-nr should be used:
+ * Here is how the ioctl-nr should be used:
  *    0 -   31   DASD driver itself
  *   32 -  239   still open
  *  240 -  255	 reserved for EMC
diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h
index 60431d00e6bd..ca42e941675d 100644
--- a/arch/s390/include/uapi/asm/pkey.h
+++ b/arch/s390/include/uapi/asm/pkey.h
@@ -48,21 +48,22 @@
 
 /* the newer ioctls use a pkey_key_type enum for type information */
 enum pkey_key_type {
-	PKEY_TYPE_CCA_DATA   = (__u32) 1,
-	PKEY_TYPE_CCA_CIPHER = (__u32) 2,
-	PKEY_TYPE_EP11	     = (__u32) 3,
-	PKEY_TYPE_CCA_ECC    = (__u32) 0x1f,
-	PKEY_TYPE_EP11_AES   = (__u32) 6,
-	PKEY_TYPE_EP11_ECC   = (__u32) 7,
-	PKEY_TYPE_PROTKEY    = (__u32) 8,
+	PKEY_TYPE_CCA_DATA   = (__u32)1,
+	PKEY_TYPE_CCA_CIPHER = (__u32)2,
+	PKEY_TYPE_EP11	     = (__u32)3,
+	PKEY_TYPE_CCA_ECC    = (__u32)0x1f,
+	PKEY_TYPE_EP11_AES   = (__u32)6,
+	PKEY_TYPE_EP11_ECC   = (__u32)7,
+	PKEY_TYPE_PROTKEY    = (__u32)8,
+	PKEY_TYPE_UVSECRET   = (__u32)9,
 };
 
 /* the newer ioctls use a pkey_key_size enum for key size information */
 enum pkey_key_size {
-	PKEY_SIZE_AES_128 = (__u32) 128,
-	PKEY_SIZE_AES_192 = (__u32) 192,
-	PKEY_SIZE_AES_256 = (__u32) 256,
-	PKEY_SIZE_UNKNOWN = (__u32) 0xFFFFFFFF,
+	PKEY_SIZE_AES_128 = (__u32)128,
+	PKEY_SIZE_AES_192 = (__u32)192,
+	PKEY_SIZE_AES_256 = (__u32)256,
+	PKEY_SIZE_UNKNOWN = (__u32)0xFFFFFFFF,
 };
 
 /* some of the newer ioctls use these flags */
@@ -125,6 +126,7 @@ struct pkey_genseck {
 	__u32 keytype;		    /* in: key type to generate		 */
 	struct pkey_seckey seckey;  /* out: the secure key blob		 */
 };
+
 #define PKEY_GENSECK _IOWR(PKEY_IOCTL_MAGIC, 0x01, struct pkey_genseck)
 
 /*
@@ -137,6 +139,7 @@ struct pkey_clr2seck {
 	struct pkey_clrkey clrkey;  /* in: the clear key value		 */
 	struct pkey_seckey seckey;  /* out: the secure key blob		 */
 };
+
 #define PKEY_CLR2SECK _IOWR(PKEY_IOCTL_MAGIC, 0x02, struct pkey_clr2seck)
 
 /*
@@ -148,6 +151,7 @@ struct pkey_sec2protk {
 	struct pkey_seckey seckey;   /* in: the secure key blob		  */
 	struct pkey_protkey protkey; /* out: the protected key		  */
 };
+
 #define PKEY_SEC2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x03, struct pkey_sec2protk)
 
 /*
@@ -158,6 +162,7 @@ struct pkey_clr2protk {
 	struct pkey_clrkey clrkey;   /* in: the clear key value		  */
 	struct pkey_protkey protkey; /* out: the protected key		  */
 };
+
 #define PKEY_CLR2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x04, struct pkey_clr2protk)
 
 /*
@@ -169,6 +174,7 @@ struct pkey_findcard {
 	__u16  cardnr;			       /* out: card number	  */
 	__u16  domain;			       /* out: domain number	  */
 };
+
 #define PKEY_FINDCARD _IOWR(PKEY_IOCTL_MAGIC, 0x05, struct pkey_findcard)
 
 /*
@@ -178,6 +184,7 @@ struct pkey_skey2pkey {
 	struct pkey_seckey seckey;   /* in: the secure key blob		  */
 	struct pkey_protkey protkey; /* out: the protected key		  */
 };
+
 #define PKEY_SKEY2PKEY _IOWR(PKEY_IOCTL_MAGIC, 0x06, struct pkey_skey2pkey)
 
 /*
@@ -195,6 +202,7 @@ struct pkey_verifykey {
 	__u16  keysize;			       /* out: key size in bits   */
 	__u32  attributes;		       /* out: attribute bits	  */
 };
+
 #define PKEY_VERIFYKEY _IOWR(PKEY_IOCTL_MAGIC, 0x07, struct pkey_verifykey)
 #define PKEY_VERIFY_ATTR_AES	   0x00000001  /* key is an AES key */
 #define PKEY_VERIFY_ATTR_OLD_MKVP  0x00000100  /* key has old MKVP value */
@@ -226,6 +234,7 @@ struct pkey_kblob2pkey {
 	__u32 keylen;			/* in: the key blob length */
 	struct pkey_protkey protkey;	/* out: the protected key  */
 };
+
 #define PKEY_KBLOB2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x0A, struct pkey_kblob2pkey)
 
 /*
@@ -258,6 +267,7 @@ struct pkey_genseck2 {
 	__u32 keylen;		    /* in: available key blob buffer size */
 				    /* out: actual key blob size	  */
 };
+
 #define PKEY_GENSECK2 _IOWR(PKEY_IOCTL_MAGIC, 0x11, struct pkey_genseck2)
 
 /*
@@ -292,6 +302,7 @@ struct pkey_clr2seck2 {
 	__u32 keylen;		    /* in: available key blob buffer size  */
 				    /* out: actual key blob size	   */
 };
+
 #define PKEY_CLR2SECK2 _IOWR(PKEY_IOCTL_MAGIC, 0x12, struct pkey_clr2seck2)
 
 /*
@@ -329,6 +340,7 @@ struct pkey_verifykey2 {
 	enum pkey_key_size size;    /* out: the key size		 */
 	__u32 flags;		    /* out: additional key info flags	 */
 };
+
 #define PKEY_VERIFYKEY2 _IOWR(PKEY_IOCTL_MAGIC, 0x17, struct pkey_verifykey2)
 
 /*
@@ -351,6 +363,7 @@ struct pkey_kblob2pkey2 {
 	__u32 apqn_entries;	     /* in: # of apqn target list entries  */
 	struct pkey_protkey protkey; /* out: the protected key		   */
 };
+
 #define PKEY_KBLOB2PROTK2 _IOWR(PKEY_IOCTL_MAGIC, 0x1A, struct pkey_kblob2pkey2)
 
 /*
@@ -387,6 +400,7 @@ struct pkey_apqns4key {
 	__u32 apqn_entries;	   /* in: max # of apqn entries in the list   */
 				   /* out: # apqns stored into the list	      */
 };
+
 #define PKEY_APQNS4K _IOWR(PKEY_IOCTL_MAGIC, 0x1B, struct pkey_apqns4key)
 
 /*
@@ -426,6 +440,7 @@ struct pkey_apqns4keytype {
 	__u32 apqn_entries;	   /* in: max # of apqn entries in the list   */
 				   /* out: # apqns stored into the list	      */
 };
+
 #define PKEY_APQNS4KT _IOWR(PKEY_IOCTL_MAGIC, 0x1C, struct pkey_apqns4keytype)
 
 /*
@@ -452,6 +467,7 @@ struct pkey_kblob2pkey3 {
 	__u32 pkeylen;	 /* in/out: size of pkey buffer/actual len of pkey */
 	__u8 __user *pkey;		 /* in: pkey blob buffer space ptr */
 };
+
 #define PKEY_KBLOB2PROTK3 _IOWR(PKEY_IOCTL_MAGIC, 0x1D, struct pkey_kblob2pkey3)
 
 #endif /* _UAPI_PKEY_H */
diff --git a/arch/s390/include/uapi/asm/uvdevice.h b/arch/s390/include/uapi/asm/uvdevice.h
index b9c2f14a6af3..4947f26ad9fb 100644
--- a/arch/s390/include/uapi/asm/uvdevice.h
+++ b/arch/s390/include/uapi/asm/uvdevice.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /*
- *  Copyright IBM Corp. 2022
+ *  Copyright IBM Corp. 2022, 2024
  *  Author(s): Steffen Eiden <seiden@linux.ibm.com>
  */
 #ifndef __S390_ASM_UVDEVICE_H
@@ -52,7 +52,7 @@ struct uvio_uvdev_info {
 	__u64 supp_uvio_cmds;
 	/*
 	 * If bit `n` is set, the Ultravisor(UV) supports the UV-call
-	 * corresponding to the IOCTL with nr `n` in the calling contextx (host
+	 * corresponding to the IOCTL with nr `n` in the calling context (host
 	 * or guest).  The value is only valid if the corresponding bit in
 	 * @supp_uvio_cmds is set as well.
 	 */
@@ -71,6 +71,7 @@ struct uvio_uvdev_info {
 #define UVIO_ATT_ADDITIONAL_MAX_LEN	0x8000
 #define UVIO_ADD_SECRET_MAX_LEN		0x100000
 #define UVIO_LIST_SECRETS_LEN		0x1000
+#define UVIO_RETR_SECRET_MAX_LEN	0x2000
 
 #define UVIO_DEVICE_NAME "uv"
 #define UVIO_TYPE_UVC 'u'
@@ -81,22 +82,25 @@ enum UVIO_IOCTL_NR {
 	UVIO_IOCTL_ADD_SECRET_NR,
 	UVIO_IOCTL_LIST_SECRETS_NR,
 	UVIO_IOCTL_LOCK_SECRETS_NR,
+	UVIO_IOCTL_RETR_SECRET_NR,
 	/* must be the last entry */
 	UVIO_IOCTL_NUM_IOCTLS
 };
 
-#define UVIO_IOCTL(nr)		_IOWR(UVIO_TYPE_UVC, nr, struct uvio_ioctl_cb)
-#define UVIO_IOCTL_UVDEV_INFO	UVIO_IOCTL(UVIO_IOCTL_UVDEV_INFO_NR)
-#define UVIO_IOCTL_ATT		UVIO_IOCTL(UVIO_IOCTL_ATT_NR)
-#define UVIO_IOCTL_ADD_SECRET	UVIO_IOCTL(UVIO_IOCTL_ADD_SECRET_NR)
-#define UVIO_IOCTL_LIST_SECRETS	UVIO_IOCTL(UVIO_IOCTL_LIST_SECRETS_NR)
-#define UVIO_IOCTL_LOCK_SECRETS	UVIO_IOCTL(UVIO_IOCTL_LOCK_SECRETS_NR)
+#define UVIO_IOCTL(nr)			_IOWR(UVIO_TYPE_UVC, nr, struct uvio_ioctl_cb)
+#define UVIO_IOCTL_UVDEV_INFO		UVIO_IOCTL(UVIO_IOCTL_UVDEV_INFO_NR)
+#define UVIO_IOCTL_ATT			UVIO_IOCTL(UVIO_IOCTL_ATT_NR)
+#define UVIO_IOCTL_ADD_SECRET		UVIO_IOCTL(UVIO_IOCTL_ADD_SECRET_NR)
+#define UVIO_IOCTL_LIST_SECRETS		UVIO_IOCTL(UVIO_IOCTL_LIST_SECRETS_NR)
+#define UVIO_IOCTL_LOCK_SECRETS		UVIO_IOCTL(UVIO_IOCTL_LOCK_SECRETS_NR)
+#define UVIO_IOCTL_RETR_SECRET		UVIO_IOCTL(UVIO_IOCTL_RETR_SECRET_NR)
 
-#define UVIO_SUPP_CALL(nr)	(1ULL << (nr))
-#define UVIO_SUPP_UDEV_INFO	UVIO_SUPP_CALL(UVIO_IOCTL_UDEV_INFO_NR)
-#define UVIO_SUPP_ATT		UVIO_SUPP_CALL(UVIO_IOCTL_ATT_NR)
-#define UVIO_SUPP_ADD_SECRET	UVIO_SUPP_CALL(UVIO_IOCTL_ADD_SECRET_NR)
-#define UVIO_SUPP_LIST_SECRETS	UVIO_SUPP_CALL(UVIO_IOCTL_LIST_SECRETS_NR)
-#define UVIO_SUPP_LOCK_SECRETS	UVIO_SUPP_CALL(UVIO_IOCTL_LOCK_SECRETS_NR)
+#define UVIO_SUPP_CALL(nr)		(1ULL << (nr))
+#define UVIO_SUPP_UDEV_INFO		UVIO_SUPP_CALL(UVIO_IOCTL_UVDEV_INFO_NR)
+#define UVIO_SUPP_ATT			UVIO_SUPP_CALL(UVIO_IOCTL_ATT_NR)
+#define UVIO_SUPP_ADD_SECRET		UVIO_SUPP_CALL(UVIO_IOCTL_ADD_SECRET_NR)
+#define UVIO_SUPP_LIST_SECRETS		UVIO_SUPP_CALL(UVIO_IOCTL_LIST_SECRETS_NR)
+#define UVIO_SUPP_LOCK_SECRETS		UVIO_SUPP_CALL(UVIO_IOCTL_LOCK_SECRETS_NR)
+#define UVIO_SUPP_RETR_SECRET		UVIO_SUPP_CALL(UVIO_IOCTL_RETR_SECRET_NR)
 
 #endif /* __S390_ASM_UVDEVICE_H */
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 5529248d84fb..1d7ed0faff8b 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -13,7 +13,6 @@
 #include <linux/purgatory.h>
 #include <linux/pgtable.h>
 #include <linux/ftrace.h>
-#include <asm/gmap.h>
 #include <asm/stacktrace.h>
 
 int main(void)
@@ -138,7 +137,6 @@ int main(void)
 	OFFSET(__LC_USER_ASCE, lowcore, user_asce);
 	OFFSET(__LC_LPP, lowcore, lpp);
 	OFFSET(__LC_CURRENT_PID, lowcore, current_pid);
-	OFFSET(__LC_GMAP, lowcore, gmap);
 	OFFSET(__LC_LAST_BREAK, lowcore, last_break);
 	/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
 	OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
@@ -161,7 +159,6 @@ int main(void)
 	OFFSET(__LC_PGM_TDB, lowcore, pgm_tdb);
 	BLANK();
 	/* gmap/sie offsets */
-	OFFSET(__GMAP_ASCE, gmap, asce);
 	OFFSET(__SIE_PROG0C, kvm_s390_sie_block, prog0c);
 	OFFSET(__SIE_PROG20, kvm_s390_sie_block, prog20);
 	/* kexec_sha_region */
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index b210a29d3ee9..2f4174b961de 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -20,6 +20,7 @@
 #include <asm/diag.h>
 #include <asm/ebcdic.h>
 #include <asm/cpcmd.h>
+#include <asm/asm.h>
 
 static DEFINE_SPINLOCK(cpcmd_lock);
 static char cpcmd_buf[241];
@@ -45,12 +46,11 @@ static int diag8_response(int cmdlen, char *response, int *rlen)
 	ry.odd	= *rlen;
 	asm volatile(
 		"	diag	%[rx],%[ry],0x8\n"
-		"	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
-		: [cc] "=&d" (cc), [ry] "+&d" (ry.pair)
+		CC_IPM(cc)
+		: CC_OUT(cc, cc), [ry] "+d" (ry.pair)
 		: [rx] "d" (rx.pair)
-		: "cc");
-	if (cc)
+		: CC_CLOBBER);
+	if (CC_TRANSFORM(cc))
 		*rlen += ry.odd;
 	else
 		*rlen = ry.odd;
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index edae13416196..cd0c93a8fb8b 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -237,6 +237,17 @@ int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
 						       prot);
 }
 
+/*
+ * Return true only when in a kdump or stand-alone kdump environment.
+ * Note that /proc/vmcore might also be available in "standard zfcp/nvme dump"
+ * environments, where this function returns false; see dump_available().
+ */
+bool is_kdump_kernel(void)
+{
+	return oldmem_data.start;
+}
+EXPORT_SYMBOL_GPL(is_kdump_kernel);
+
 static const char *nt_name(Elf64_Word type)
 {
 	const char *name = "LINUX";
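
is_kdump_kernel() keys off oldmem_data.start, which is only set when running as a kdump (or stand-alone kdump) kernel. The #define added to asm/kexec.h is what makes this override take effect; a simplified view of the generic fallback it replaces, not the literal include/linux/crash_dump.h code:

	#ifndef is_kdump_kernel
	static inline bool is_kdump_kernel(void)
	{
		/* generic default: an ELF core header was passed in */
		return elfcorehdr_addr != ELFCORE_ADDR_MAX;
	}
	#endif
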
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index e62bea9ab21e..b3f2103694e4 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -38,13 +38,13 @@
 
 typedef struct file_private_info {
 	loff_t offset;			/* offset of last read in file */
-	int    act_area;		/* number of last formated area */
+	int    act_area;		/* number of last formatted area */
 	int    act_page;		/* act page in given area */
-	int    act_entry;		/* last formated entry (offset */
+	int    act_entry;		/* last formatted entry (offset */
 					/* relative to beginning of last */
-					/* formated page) */
+					/* formatted page) */
 	size_t act_entry_offset;	/* up to this offset we copied */
-					/* in last read the last formated */
+					/* in last read the last formatted */
 					/* entry to userland */
 	char   temp_buf[2048];		/* buffer for output */
 	debug_info_t *debug_info_org;	/* original debug information */
@@ -63,7 +63,7 @@ typedef struct {
 	long args[];
 } debug_sprintf_entry_t;
 
-/* internal function prototyes */
+/* internal function prototypes */
 
 static int debug_init(void);
 static ssize_t debug_output(struct file *file, char __user *user_buf,
@@ -380,7 +380,7 @@ static void debug_info_put(debug_info_t *db_info)
 
 /*
  * debug_format_entry:
- * - format one debug entry and return size of formated data
+ * - format one debug entry and return size of formatted data
  */
 static int debug_format_entry(file_private_info_t *p_info)
 {
@@ -449,7 +449,7 @@ out:
 /*
  * debug_output:
  * - called for user read()
- * - copies formated debug entries to the user buffer
+ * - copies formatted debug entries to the user buffer
  */
 static ssize_t debug_output(struct file *file,		/* file descriptor */
 			    char __user *user_buf,	/* user buffer */
@@ -523,7 +523,7 @@ static ssize_t debug_input(struct file *file, const char __user *user_buf,
 /*
  * debug_open:
  * - called for user open()
- * - copies formated output to private_data area of the file
+ * - copies formatted output to private_data area of the file
  *   handle
  */
 static int debug_open(struct inode *inode, struct file *file)
@@ -1513,7 +1513,7 @@ int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view,
 EXPORT_SYMBOL(debug_dflt_header_fn);
 
 /*
- * prints debug data sprintf-formated:
+ * prints debug data sprintf-formatted:
  * debug_sprinf_event/exception calls must be used together with this view
  */
 
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index 007e1795670e..cdd6e31344fa 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -16,6 +16,7 @@
 #include <asm/diag.h>
 #include <asm/trace/diag.h>
 #include <asm/sections.h>
+#include <asm/asm.h>
 #include "entry.h"
 
 struct diag_stat {
@@ -307,16 +308,15 @@ EXPORT_SYMBOL(diag26c);
 
 int diag49c(unsigned long subcode)
 {
-	int rc;
+	int cc;
 
 	diag_stat_inc(DIAG_STAT_X49C);
 	asm volatile(
 		"	diag	%[subcode],0,0x49c\n"
-		"	ipm	%[rc]\n"
-		"	srl	%[rc],28\n"
-		: [rc] "=d" (rc)
+		CC_IPM(cc)
+		: CC_OUT(cc, cc)
 		: [subcode] "d" (subcode)
-		: "cc");
-	return rc;
+		: CC_CLOBBER);
+	return CC_TRANSFORM(cc);
 }
 EXPORT_SYMBOL(diag49c);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index d6d5317f768e..1ff13239d4e5 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -222,17 +222,6 @@ SYM_FUNC_START(__sie64a)
 	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r14)	# load primary asce
 	lg	%r14,__LC_CURRENT(%r14)
 	mvi	__TI_sie(%r14),0
-# some program checks are suppressing. C code (e.g. do_protection_exception)
-# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
-# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
-# Other instructions between __sie64a and .Lsie_done should not cause program
-# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
-.Lrewind_pad6:
-	nopr	7
-.Lrewind_pad4:
-	nopr	7
-.Lrewind_pad2:
-	nopr	7
 SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
 	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
 	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
@@ -244,15 +233,6 @@ SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
 	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
 	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
 	BR_EX	%r14
-.Lsie_fault:
-	lghi	%r14,-EFAULT
-	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
-	j	sie_exit
-
-	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
-	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
-	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
-	EX_TABLE(sie_exit,.Lsie_fault)
 SYM_FUNC_END(__sie64a)
 EXPORT_SYMBOL(__sie64a)
 EXPORT_SYMBOL(sie_exit)
@@ -327,13 +307,21 @@ SYM_CODE_START(pgm_check_handler)
 	GET_LC	%r13
 	stpt	__LC_SYS_ENTER_TIMER(%r13)
 	BPOFF
-	lgr	%r10,%r15
 	lmg	%r8,%r9,__LC_PGM_OLD_PSW(%r13)
+	xgr	%r10,%r10
 	tmhh	%r8,0x0001		# coming from user space?
 	jno	.Lpgm_skip_asce
 	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)
 	j	3f			# -> fault in user space
 .Lpgm_skip_asce:
+#if IS_ENABLED(CONFIG_KVM)
+	lg	%r11,__LC_CURRENT(%r13)
+	tm	__TI_sie(%r11),0xff
+	jz	1f
+	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
+	SIEEXIT __SF_SIE_CONTROL(%r15),%r13
+	lghi	%r10,_PIF_GUEST_FAULT
+#endif
 1:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
 	jnz	2f			# -> enabled, can't be a double fault
 	tm	__LC_PGM_ILC+3(%r13),0x80	# check for per exception
@@ -344,21 +332,12 @@ SYM_CODE_START(pgm_check_handler)
 	CHECK_VMAP_STACK __LC_SAVE_AREA,%r13,4f
 3:	lg	%r15,__LC_KERNEL_STACK(%r13)
 4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
-	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+	stg	%r10,__PT_FLAGS(%r11)
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 	stmg	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
 	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK(%r13)
-	stctg	%c1,%c1,__PT_CR1(%r11)
-#if IS_ENABLED(CONFIG_KVM)
-	ltg	%r12,__LC_GMAP(%r13)
-	jz	5f
-	clc	__GMAP_ASCE(8,%r12), __PT_CR1(%r11)
-	jne	5f
-	BPENTER	__SF_SIE_FLAGS(%r10),_TIF_ISOLATE_BP_GUEST
-	SIEEXIT __SF_SIE_CONTROL(%r10),%r13
-#endif
-5:	stmg	%r8,%r9,__PT_PSW(%r11)
+	stmg	%r8,%r9,__PT_PSW(%r11)
 	# clear user controlled registers to prevent speculative use
 	xgr	%r0,%r0
 	xgr	%r1,%r1
@@ -367,6 +346,7 @@ SYM_CODE_START(pgm_check_handler)
 	xgr	%r5,%r5
 	xgr	%r6,%r6
 	xgr	%r7,%r7
+	xgr	%r12,%r12
 	lgr	%r2,%r11
 	brasl	%r14,__do_pgm_check
 	tmhh	%r8,0x0001		# returning to user space?
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index f17bb7bf9392..edbb52ce3f1e 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -209,7 +209,7 @@ static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj,	\
 		struct kobj_attribute *attr,				\
 		char *page)						\
 {									\
-	return scnprintf(page, PAGE_SIZE, _format, ##args);		\
+	return sysfs_emit(page, _format, ##args);			\
 }
 
 #define IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk)			\
@@ -372,7 +372,7 @@ EXPORT_SYMBOL_GPL(ipl_info);
 static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
 			     char *page)
 {
-	return sprintf(page, "%s\n", ipl_type_str(ipl_info.type));
+	return sysfs_emit(page, "%s\n", ipl_type_str(ipl_info.type));
 }
 
 static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
@@ -380,7 +380,7 @@ static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
 static ssize_t ipl_secure_show(struct kobject *kobj,
 			       struct kobj_attribute *attr, char *page)
 {
-	return sprintf(page, "%i\n", !!ipl_secure_flag);
+	return sysfs_emit(page, "%i\n", !!ipl_secure_flag);
 }
 
 static struct kobj_attribute sys_ipl_secure_attr =
@@ -389,7 +389,7 @@ static struct kobj_attribute sys_ipl_secure_attr =
 static ssize_t ipl_has_secure_show(struct kobject *kobj,
 				   struct kobj_attribute *attr, char *page)
 {
-	return sprintf(page, "%i\n", !!sclp.has_sipl);
+	return sysfs_emit(page, "%i\n", !!sclp.has_sipl);
 }
 
 static struct kobj_attribute sys_ipl_has_secure_attr =
@@ -402,7 +402,7 @@ static ssize_t ipl_vm_parm_show(struct kobject *kobj,
 
 	if (ipl_block_valid && (ipl_block.pb0_hdr.pbt == IPL_PBT_CCW))
 		ipl_block_get_ascii_vmparm(parm, sizeof(parm), &ipl_block);
-	return sprintf(page, "%s\n", parm);
+	return sysfs_emit(page, "%s\n", parm);
 }
 
 static struct kobj_attribute sys_ipl_vm_parm_attr =
@@ -413,18 +413,18 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj,
 {
 	switch (ipl_info.type) {
 	case IPL_TYPE_CCW:
-		return sprintf(page, "0.%x.%04x\n", ipl_block.ccw.ssid,
-			       ipl_block.ccw.devno);
+		return sysfs_emit(page, "0.%x.%04x\n", ipl_block.ccw.ssid,
+				  ipl_block.ccw.devno);
 	case IPL_TYPE_ECKD:
 	case IPL_TYPE_ECKD_DUMP:
-		return sprintf(page, "0.%x.%04x\n", ipl_block.eckd.ssid,
-			       ipl_block.eckd.devno);
+		return sysfs_emit(page, "0.%x.%04x\n", ipl_block.eckd.ssid,
+				  ipl_block.eckd.devno);
 	case IPL_TYPE_FCP:
 	case IPL_TYPE_FCP_DUMP:
-		return sprintf(page, "0.0.%04x\n", ipl_block.fcp.devno);
+		return sysfs_emit(page, "0.0.%04x\n", ipl_block.fcp.devno);
 	case IPL_TYPE_NVME:
 	case IPL_TYPE_NVME_DUMP:
-		return sprintf(page, "%08ux\n", ipl_block.nvme.fid);
+		return sysfs_emit(page, "%08ux\n", ipl_block.nvme.fid);
 	default:
 		return 0;
 	}
@@ -503,12 +503,12 @@ static ssize_t eckd_##_name##_br_chr_show(struct kobject *kobj,		\
 	if (!ipb->br_chr.cyl &&						\
 	    !ipb->br_chr.head &&					\
 	    !ipb->br_chr.record)					\
-		return sprintf(buf, "auto\n");				\
+		return sysfs_emit(buf, "auto\n");			\
 									\
-	return sprintf(buf, "0x%x,0x%x,0x%x\n",				\
-			ipb->br_chr.cyl,				\
-			ipb->br_chr.head,				\
-			ipb->br_chr.record);				\
+	return sysfs_emit(buf, "0x%x,0x%x,0x%x\n",			\
+			  ipb->br_chr.cyl,				\
+			  ipb->br_chr.head,				\
+			  ipb->br_chr.record);				\
 }
 
 #define IPL_ATTR_BR_CHR_STORE_FN(_name, _ipb)				\
@@ -573,11 +573,11 @@ static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
 	char loadparm[LOADPARM_LEN + 1] = {};
 
 	if (!sclp_ipl_info.is_valid)
-		return sprintf(page, "#unknown#\n");
+		return sysfs_emit(page, "#unknown#\n");
 	memcpy(loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN);
 	EBCASC(loadparm, LOADPARM_LEN);
 	strim(loadparm);
-	return sprintf(page, "%s\n", loadparm);
+	return sysfs_emit(page, "%s\n", loadparm);
 }
 
 static struct kobj_attribute sys_ipl_ccw_loadparm_attr =
@@ -731,7 +731,7 @@ static ssize_t reipl_generic_vmparm_show(struct ipl_parameter_block *ipb,
 	char vmparm[DIAG308_VMPARM_SIZE + 1] = {};
 
 	ipl_block_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb);
-	return sprintf(page, "%s\n", vmparm);
+	return sysfs_emit(page, "%s\n", vmparm);
 }
 
 static ssize_t reipl_generic_vmparm_store(struct ipl_parameter_block *ipb,
@@ -839,7 +839,7 @@ static ssize_t reipl_generic_loadparm_show(struct ipl_parameter_block *ipb,
 	char buf[LOADPARM_LEN + 1];
 
 	reipl_get_ascii_loadparm(buf, ipb);
-	return sprintf(page, "%s\n", buf);
+	return sysfs_emit(page, "%s\n", buf);
 }
 
 static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
@@ -895,7 +895,7 @@ DEFINE_GENERIC_LOADPARM(eckd);
 static ssize_t reipl_fcp_clear_show(struct kobject *kobj,
 				    struct kobj_attribute *attr, char *page)
 {
-	return sprintf(page, "%u\n", reipl_fcp_clear);
+	return sysfs_emit(page, "%u\n", reipl_fcp_clear);
 }
 
 static ssize_t reipl_fcp_clear_store(struct kobject *kobj,
@@ -963,7 +963,7 @@ static struct attribute_group reipl_nvme_attr_group = {
 static ssize_t reipl_nvme_clear_show(struct kobject *kobj,
 				     struct kobj_attribute *attr, char *page)
 {
-	return sprintf(page, "%u\n", reipl_nvme_clear);
+	return sysfs_emit(page, "%u\n", reipl_nvme_clear);
 }
 
 static ssize_t reipl_nvme_clear_store(struct kobject *kobj,
@@ -984,7 +984,7 @@ DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ccw);
 static ssize_t reipl_ccw_clear_show(struct kobject *kobj,
 				    struct kobj_attribute *attr, char *page)
 {
-	return sprintf(page, "%u\n", reipl_ccw_clear);
+	return sysfs_emit(page, "%u\n", reipl_ccw_clear);
 }
 
 static ssize_t reipl_ccw_clear_store(struct kobject *kobj,
@@ -1056,7 +1056,7 @@ static struct attribute_group reipl_eckd_attr_group = {
 static ssize_t reipl_eckd_clear_show(struct kobject *kobj,
 				     struct kobj_attribute *attr, char *page)
 {
-	return sprintf(page, "%u\n", reipl_eckd_clear);
+	return sysfs_emit(page, "%u\n", reipl_eckd_clear);
 }
 
 static ssize_t reipl_eckd_clear_store(struct kobject *kobj,
@@ -1086,7 +1086,7 @@ static ssize_t reipl_nss_name_show(struct kobject *kobj,
 	char nss_name[NSS_NAME_SIZE + 1] = {};
 
 	reipl_get_ascii_nss_name(nss_name, reipl_block_nss);
-	return sprintf(page, "%s\n", nss_name);
+	return sysfs_emit(page, "%s\n", nss_name);
 }
 
 static ssize_t reipl_nss_name_store(struct kobject *kobj,
@@ -1171,7 +1171,7 @@ static int reipl_set_type(enum ipl_type type)
 static ssize_t reipl_type_show(struct kobject *kobj,
 			       struct kobj_attribute *attr, char *page)
 {
-	return sprintf(page, "%s\n", ipl_type_str(reipl_type));
+	return sysfs_emit(page, "%s\n", ipl_type_str(reipl_type));
 }
 
 static ssize_t reipl_type_store(struct kobject *kobj,
@@ -1692,7 +1692,7 @@ static int dump_set_type(enum dump_type type)
 static ssize_t dump_type_show(struct kobject *kobj,
 			      struct kobj_attribute *attr, char *page)
 {
-	return sprintf(page, "%s\n", dump_type_str(dump_type));
+	return sysfs_emit(page, "%s\n", dump_type_str(dump_type));
 }
 
 static ssize_t dump_type_store(struct kobject *kobj,
@@ -1717,6 +1717,24 @@ static ssize_t dump_type_store(struct kobject *kobj,
 static struct kobj_attribute dump_type_attr =
 	__ATTR(dump_type, 0644, dump_type_show, dump_type_store);
 
+static ssize_t dump_area_size_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *page)
+{
+	return sysfs_emit(page, "%lu\n", sclp.hsa_size);
+}
+
+static struct kobj_attribute dump_area_size_attr = __ATTR_RO(dump_area_size);
+
+static struct attribute *dump_attrs[] = {
+	&dump_type_attr.attr,
+	&dump_area_size_attr.attr,
+	NULL,
+};
+
+static struct attribute_group dump_attr_group = {
+	.attrs = dump_attrs,
+};
+
 static struct kset *dump_kset;
 
 static void diag308_dump(void *dump_block)
@@ -1853,7 +1871,7 @@ static int __init dump_init(void)
 	dump_kset = kset_create_and_add("dump", NULL, firmware_kobj);
 	if (!dump_kset)
 		return -ENOMEM;
-	rc = sysfs_create_file(&dump_kset->kobj, &dump_type_attr.attr);
+	rc = sysfs_create_group(&dump_kset->kobj, &dump_attr_group);
 	if (rc) {
 		kset_unregister(dump_kset);
 		return rc;
@@ -2034,7 +2052,7 @@ static struct shutdown_trigger on_reboot_trigger = {ON_REIPL_STR,
 static ssize_t on_reboot_show(struct kobject *kobj,
 			      struct kobj_attribute *attr, char *page)
 {
-	return sprintf(page, "%s\n", on_reboot_trigger.action->name);
+	return sysfs_emit(page, "%s\n", on_reboot_trigger.action->name);
 }
 
 static ssize_t on_reboot_store(struct kobject *kobj,
@@ -2060,7 +2078,7 @@ static struct shutdown_trigger on_panic_trigger = {ON_PANIC_STR, &stop_action};
 static ssize_t on_panic_show(struct kobject *kobj,
 			     struct kobj_attribute *attr, char *page)
 {
-	return sprintf(page, "%s\n", on_panic_trigger.action->name);
+	return sysfs_emit(page, "%s\n", on_panic_trigger.action->name);
 }
 
 static ssize_t on_panic_store(struct kobject *kobj,
@@ -2086,7 +2104,7 @@ static struct shutdown_trigger on_restart_trigger = {ON_RESTART_STR,
 static ssize_t on_restart_show(struct kobject *kobj,
 			       struct kobj_attribute *attr, char *page)
 {
-	return sprintf(page, "%s\n", on_restart_trigger.action->name);
+	return sysfs_emit(page, "%s\n", on_restart_trigger.action->name);
 }
 
 static ssize_t on_restart_store(struct kobject *kobj,
@@ -2122,7 +2140,7 @@ static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action};
 static ssize_t on_halt_show(struct kobject *kobj,
 			    struct kobj_attribute *attr, char *page)
 {
-	return sprintf(page, "%s\n", on_halt_trigger.action->name);
+	return sysfs_emit(page, "%s\n", on_halt_trigger.action->name);
 }
 
 static ssize_t on_halt_store(struct kobject *kobj,
@@ -2148,7 +2166,7 @@ static struct shutdown_trigger on_poff_trigger = {ON_POFF_STR, &stop_action};
 static ssize_t on_poff_show(struct kobject *kobj,
 			    struct kobj_attribute *attr, char *page)
 {
-	return sprintf(page, "%s\n", on_poff_trigger.action->name);
+	return sysfs_emit(page, "%s\n", on_poff_trigger.action->name);
 }
 
 static ssize_t on_poff_store(struct kobject *kobj,
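
The sprintf() to sysfs_emit() conversions in this file are mechanical: sysfs_emit() knows the buffer handed to a sysfs ->show() callback is a full page and refuses to write beyond PAGE_SIZE. Sketch of the resulting pattern, with foo_show()/foo_value as hypothetical stand-ins:

	static ssize_t foo_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *page)
	{
		/* bounded to PAGE_SIZE; warns if 'page' is not page-aligned */
		return sysfs_emit(page, "%u\n", foo_value);
	}
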
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 2639a3d12736..24b625c1d35b 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -30,6 +30,7 @@
 #include <asm/stacktrace.h>
 #include <asm/softirq_stack.h>
 #include <asm/vtime.h>
+#include <asm/asm.h>
 #include "entry.h"
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
@@ -129,9 +130,13 @@ static int irq_pending(struct pt_regs *regs)
 {
 	int cc;
 
-	asm volatile("tpi 0\n"
-		     "ipm %0" : "=d" (cc) : : "cc");
-	return cc >> 28;
+	asm volatile(
+		"	tpi	 0\n"
+		CC_IPM(cc)
+		: CC_OUT(cc, cc)
+		:
+		: CC_CLOBBER);
+	return CC_TRANSFORM(cc);
 }
 
 void noinstr do_io_irq(struct pt_regs *regs)
diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c
index a95188818637..5970dd3ee7c5 100644
--- a/arch/s390/kernel/nospec-sysfs.c
+++ b/arch/s390/kernel/nospec-sysfs.c
@@ -7,17 +7,17 @@
 ssize_t cpu_show_spectre_v1(struct device *dev,
 			    struct device_attribute *attr, char *buf)
 {
-	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+	return sysfs_emit(buf, "Mitigation: __user pointer sanitization\n");
 }
 
 ssize_t cpu_show_spectre_v2(struct device *dev,
 			    struct device_attribute *attr, char *buf)
 {
 	if (test_facility(156))
-		return sprintf(buf, "Mitigation: etokens\n");
+		return sysfs_emit(buf, "Mitigation: etokens\n");
 	if (nospec_uses_trampoline())
-		return sprintf(buf, "Mitigation: execute trampolines\n");
+		return sysfs_emit(buf, "Mitigation: execute trampolines\n");
 	if (nobp_enabled())
-		return sprintf(buf, "Mitigation: limited branch prediction\n");
-	return sprintf(buf, "Vulnerable\n");
+		return sysfs_emit(buf, "Mitigation: limited branch prediction\n");
+	return sysfs_emit(buf, "Vulnerable\n");
 }
diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c
index b695f980bbde..29080d6d5d8d 100644
--- a/arch/s390/kernel/os_info.c
+++ b/arch/s390/kernel/os_info.c
@@ -180,7 +180,7 @@ fail:
 }
 
 /*
- * Return pointer to os infor entry and its size
+ * Return pointer to os info entry and its size
  */
 void *os_info_old_entry(int nr, unsigned long *size)
 {
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index e2e0aa463fbd..b0bc68da6a11 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -835,7 +835,7 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
 	return validate_ctr_version(hwc->config, set);
 }
 
-/* Events CPU_CYLCES and INSTRUCTIONS can be submitted with two different
+/* Events CPU_CYCLES and INSTRUCTIONS can be submitted with two different
  * attribute::type values:
  * - PERF_TYPE_HARDWARE:
  * - pmu->type:
@@ -879,8 +879,8 @@ static int hw_perf_event_reset(struct perf_event *event)
 	u64 prev, new;
 	int err;
 
+	prev = local64_read(&event->hw.prev_count);
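+	/* local64_try_cmpxchg() updates prev on failure, so read it only once */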
 	do {
-		prev = local64_read(&event->hw.prev_count);
 		err = ecctr(event->hw.config, &new);
 		if (err) {
 			if (err != 3)
@@ -892,7 +892,7 @@ static int hw_perf_event_reset(struct perf_event *event)
 			 */
 			new = 0;
 		}
-	} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);
+	} while (!local64_try_cmpxchg(&event->hw.prev_count, &prev, new));
 
 	return err;
 }
@@ -902,12 +902,12 @@ static void hw_perf_event_update(struct perf_event *event)
 	u64 prev, new, delta;
 	int err;
 
+	prev = local64_read(&event->hw.prev_count);
 	do {
-		prev = local64_read(&event->hw.prev_count);
 		err = ecctr(event->hw.config, &new);
 		if (err)
 			return;
-	} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);
+	} while (!local64_try_cmpxchg(&event->hw.prev_count, &prev, new));
 
 	delta = (prev <= new) ? new - prev
 			      : (-1ULL - prev) + new + 1;	 /* overflow */
@@ -1054,7 +1054,7 @@ static void cpumf_pmu_del(struct perf_event *event, int flags)
 	 *
 	 * When a new perf event has been added but not yet started, this can
 	 * clear enable control and resets all counters in a set.  Therefore,
-	 * cpumf_pmu_start() always has to reenable a counter set.
+	 * cpumf_pmu_start() always has to re-enable a counter set.
 	 */
 	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
 		if (!atomic_read(&cpuhw->ctr_set[i]))
@@ -1863,7 +1863,7 @@ static const struct attribute_group *cfdiag_attr_groups[] = {
 /* Performance monitoring unit for event CF_DIAG. Since this event
  * is also started and stopped via the perf_event_open() system call, use
  * the same event enable/disable call back functions. They do not
- * have a pointer to the perf_event strcture as first parameter.
+ * have a pointer to the perf_event structure as first parameter.
  *
  * The functions XXX_add, XXX_del, XXX_start and XXX_stop are also common.
  * Reuse them and distinguish the event (always first parameter) via
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 5b765e3ccf0c..0cde42f8af6e 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -404,7 +404,7 @@ static void sfb_init_allocs(unsigned long num, struct hw_perf_event *hwc)
 
 static void deallocate_buffers(struct cpu_hw_sf *cpuhw)
 {
-	if (cpuhw->sfb.sdbt)
+	if (sf_buffer_available(cpuhw))
 		free_sampling_buffer(&cpuhw->sfb);
 }
 
@@ -559,16 +559,15 @@ static void setup_pmc_cpu(void *flags)
 {
 	struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
 
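+	/* both PMC_INIT and PMC_RELEASE need the sampling facility disabled */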
+	sf_disable();
 	switch (*((int *)flags)) {
 	case PMC_INIT:
 		memset(cpuhw, 0, sizeof(*cpuhw));
 		qsi(&cpuhw->qsi);
 		cpuhw->flags |= PMU_F_RESERVED;
-		sf_disable();
 		break;
 	case PMC_RELEASE:
 		cpuhw->flags &= ~PMU_F_RESERVED;
-		sf_disable();
 		deallocate_buffers(cpuhw);
 		break;
 	}
@@ -759,7 +758,6 @@ static int __hw_perf_event_init(struct perf_event *event)
 		reserve_pmc_hardware();
 		refcount_set(&num_events, 1);
 	}
-	mutex_unlock(&pmc_reserve_mutex);
 	event->destroy = hw_perf_event_destroy;
 
 	/* Access per-CPU sampling information (query sampling info) */
@@ -818,7 +816,7 @@ static int __hw_perf_event_init(struct perf_event *event)
 
 	/* Use AUX buffer. No need to allocate it by ourself */
 	if (attr->config == PERF_EVENT_CPUM_SF_DIAG)
-		return 0;
+		goto out;
 
 	/* Allocate the per-CPU sampling buffer using the CPU information
 	 * from the event.  If the event is not pinned to a particular
@@ -848,6 +846,7 @@ static int __hw_perf_event_init(struct perf_event *event)
 		if (is_default_overflow_handler(event))
 			event->overflow_handler = cpumsf_output_event_pid;
 out:
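+	/* pmc_reserve_mutex has been held since the event reservation check */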
+	mutex_unlock(&pmc_reserve_mutex);
 	return err;
 }
 
@@ -910,10 +909,14 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
 	struct hw_perf_event *hwc;
 	int err;
 
-	if (cpuhw->flags & PMU_F_ENABLED)
-		return;
-
-	if (cpuhw->flags & PMU_F_ERR_MASK)
+	/*
+	 * All of the following must hold:
+	 * - an event was added/started on this CPU (PMU_F_IN_USE set)
+	 * - the CPU is available (PMU_F_RESERVED set)
+	 * - the PMU is not yet enabled (PMU_F_ENABLED not set)
+	 * - no error condition is pending (PMU_F_ERR_MASK not set)
+	 */
+	if (cpuhw->flags != (PMU_F_IN_USE | PMU_F_RESERVED))
 		return;
 
 	/* Check whether to extend the sampling buffer.
@@ -927,33 +930,27 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
 	 * facility, but it can be fully re-enabled using sampling controls that
 	 * have been saved in cpumsf_pmu_disable().
 	 */
-	if (cpuhw->event) {
-		hwc = &cpuhw->event->hw;
-		if (!(SAMPL_DIAG_MODE(hwc))) {
-			/*
-			 * Account number of overflow-designated
-			 * buffer extents
-			 */
-			sfb_account_overflows(cpuhw, hwc);
-			extend_sampling_buffer(&cpuhw->sfb, hwc);
-		}
-		/* Rate may be adjusted with ioctl() */
-		cpuhw->lsctl.interval = SAMPL_RATE(hwc);
+	hwc = &cpuhw->event->hw;
+	if (!(SAMPL_DIAG_MODE(hwc))) {
+		/*
+		 * Account number of overflow-designated buffer extents
+		 */
+		sfb_account_overflows(cpuhw, hwc);
+		extend_sampling_buffer(&cpuhw->sfb, hwc);
 	}
+	/* Rate may be adjusted with ioctl() */
+	cpuhw->lsctl.interval = SAMPL_RATE(hwc);
 
 	/* (Re)enable the PMU and sampling facility */
-	cpuhw->flags |= PMU_F_ENABLED;
-	barrier();
-
 	err = lsctl(&cpuhw->lsctl);
 	if (err) {
-		cpuhw->flags &= ~PMU_F_ENABLED;
 		pr_err("Loading sampling controls failed: op 1 err %i\n", err);
 		return;
 	}
 
 	/* Load current program parameter */
 	lpp(&get_lowcore()->lpp);
+	cpuhw->flags |= PMU_F_ENABLED;
 }
 
 static void cpumsf_pmu_disable(struct pmu *pmu)
@@ -1191,8 +1188,8 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
 static void hw_perf_event_update(struct perf_event *event, int flush_all)
 {
 	unsigned long long event_overflow, sampl_overflow, num_sdb;
-	union hws_trailer_header old, prev, new;
 	struct hw_perf_event *hwc = &event->hw;
+	union hws_trailer_header prev, new;
 	struct hws_trailer_entry *te;
 	unsigned long *sdbt, sdb;
 	int done;
@@ -1236,13 +1233,11 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
 		/* Reset trailer (using compare-double-and-swap) */
 		prev.val = READ_ONCE_ALIGNED_128(te->header.val);
 		do {
-			old.val = prev.val;
 			new.val = prev.val;
 			new.f = 0;
 			new.a = 1;
 			new.overflow = 0;
-			prev.val = cmpxchg128(&te->header.val, old.val, new.val);
-		} while (prev.val != old.val);
+		} while (!try_cmpxchg128(&te->header.val, &prev.val, new.val));
 
 		/* Advance to next sample-data-block */
 		sdbt++;
@@ -1408,16 +1403,15 @@ static int aux_output_begin(struct perf_output_handle *handle,
 static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
 			  unsigned long long *overflow)
 {
-	union hws_trailer_header old, prev, new;
+	union hws_trailer_header prev, new;
 	struct hws_trailer_entry *te;
 
 	te = aux_sdb_trailer(aux, alert_index);
 	prev.val = READ_ONCE_ALIGNED_128(te->header.val);
 	do {
-		old.val = prev.val;
 		new.val = prev.val;
-		*overflow = old.overflow;
-		if (old.f) {
+		*overflow = prev.overflow;
+		if (prev.f) {
 			/*
 			 * SDB is already set by hardware.
 			 * Abort and try to set somewhere
@@ -1427,8 +1421,7 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
 		}
 		new.a = 1;
 		new.overflow = 0;
-		prev.val = cmpxchg128(&te->header.val, old.val, new.val);
-	} while (prev.val != old.val);
+	} while (!try_cmpxchg128(&te->header.val, &prev.val, new.val));
 	return true;
 }
 
@@ -1457,7 +1450,7 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
 static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
 			     unsigned long long *overflow)
 {
-	union hws_trailer_header old, prev, new;
+	union hws_trailer_header prev, new;
 	unsigned long i, range_scan, idx;
 	unsigned long long orig_overflow;
 	struct hws_trailer_entry *te;
@@ -1489,17 +1482,15 @@ static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
 		te = aux_sdb_trailer(aux, idx);
 		prev.val = READ_ONCE_ALIGNED_128(te->header.val);
 		do {
-			old.val = prev.val;
 			new.val = prev.val;
-			orig_overflow = old.overflow;
+			orig_overflow = prev.overflow;
 			new.f = 0;
 			new.overflow = 0;
 			if (idx == aux->alert_mark)
 				new.a = 1;
 			else
 				new.a = 0;
-			prev.val = cmpxchg128(&te->header.val, old.val, new.val);
-		} while (prev.val != old.val);
+		} while (!try_cmpxchg128(&te->header.val, &prev.val, new.val));
 		*overflow += orig_overflow;
 	}
 
@@ -1780,7 +1771,9 @@ static void cpumsf_pmu_stop(struct perf_event *event, int flags)
 	event->hw.state |= PERF_HES_STOPPED;
 
 	if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
-		hw_perf_event_update(event, 1);
+		/* CPU hotplug off removes SDBs. No samples to extract. */
+		if (cpuhw->flags & PMU_F_RESERVED)
+			hw_perf_event_update(event, 1);
 		event->hw.state |= PERF_HES_UPTODATE;
 	}
 	perf_pmu_enable(event->pmu);
@@ -1795,7 +1788,7 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags)
 	if (cpuhw->flags & PMU_F_IN_USE)
 		return -EAGAIN;
 
-	if (!SAMPL_DIAG_MODE(&event->hw) && !cpuhw->sfb.sdbt)
+	if (!SAMPL_DIAG_MODE(&event->hw) && !sf_buffer_available(cpuhw))
 		return -EINVAL;
 
 	perf_pmu_disable(event->pmu);
@@ -1957,13 +1950,12 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
 
 	/* Program alert request */
 	if (alert & CPU_MF_INT_SF_PRA) {
-		if (cpuhw->flags & PMU_F_IN_USE)
+		if (cpuhw->flags & PMU_F_IN_USE) {
 			if (SAMPL_DIAG_MODE(&cpuhw->event->hw))
 				hw_collect_aux(cpuhw);
 			else
 				hw_perf_event_update(cpuhw->event, 0);
-		else
-			WARN_ON_ONCE(!(cpuhw->flags & PMU_F_IN_USE));
+		}
 	}
 
 	/* Report measurement alerts only for non-PRA codes */
@@ -1984,7 +1976,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
 
 	/* Invalid sampling buffer entry */
 	if (alert & (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE)) {
-		pr_err("A sampling buffer entry is incorrect (alert=0x%x)\n",
+		pr_err("A sampling buffer entry is incorrect (alert=%#x)\n",
 		       alert);
 		cpuhw->flags |= PMU_F_ERR_IBE;
 		sf_disable();
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 5fff629b1a89..0c65eaf099f9 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -228,5 +228,5 @@ ssize_t cpumf_events_sysfs_show(struct device *dev,
 	struct perf_pmu_events_attr *pmu_attr;
 
 	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
-	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
+	return sysfs_emit(page, "event=0x%04llx\n", pmu_attr->id);
 }
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 4df56fdb2488..822d8e6f8717 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -574,7 +574,7 @@ int smp_store_status(int cpu)
 
 /*
  * Collect CPU state of the previous, crashed system.
- * There are four cases:
+ * There are three cases:
  * 1) standard zfcp/nvme dump
  *    condition: OLDMEM_BASE == NULL && is_ipl_type_dump() == true
  *    The state for all CPUs except the boot CPU needs to be collected
@@ -587,16 +587,16 @@ int smp_store_status(int cpu)
  *    with sigp stop-and-store-status. The firmware or the boot-loader
  *    stored the registers of the boot CPU in the absolute lowcore in the
  *    memory of the old system.
- * 3) kdump and the old kernel did not store the CPU state,
- *    or stand-alone kdump for DASD
- *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
+ * 3) kdump or stand-alone kdump for DASD
+ *    condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == false
  *    The state for all CPUs except the boot CPU needs to be collected
  *    with sigp stop-and-store-status. The kexec code or the boot-loader
  *    stored the registers of the boot CPU in the memory of the old system.
- * 4) kdump and the old kernel stored the CPU state
- *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
- *    This case does not exist for s390 anymore, setup_arch explicitly
- *    deactivates the elfcorehdr= kernel parameter
+ *
+ * Note that the legacy kdump mode where the old kernel stored the CPU states
+ * no longer exists: setup_arch() explicitly deactivates the elfcorehdr=
+ * kernel parameter. The is_kdump_kernel() implementation on s390 is independent
+ * of the elfcorehdr= parameter.
  */
 static bool dump_available(void)
 {
@@ -1011,7 +1011,7 @@ static ssize_t cpu_configure_show(struct device *dev,
 	ssize_t count;
 
 	mutex_lock(&smp_cpu_state_mutex);
-	count = sprintf(buf, "%d\n", per_cpu(pcpu_devices, dev->id).state);
+	count = sysfs_emit(buf, "%d\n", per_cpu(pcpu_devices, dev->id).state);
 	mutex_unlock(&smp_cpu_state_mutex);
 	return count;
 }
@@ -1083,7 +1083,7 @@ static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
 static ssize_t show_cpu_address(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%d\n", per_cpu(pcpu_devices, dev->id).address);
+	return sysfs_emit(buf, "%d\n", per_cpu(pcpu_devices, dev->id).address);
 }
 static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);
 
diff --git a/arch/s390/kernel/sthyi.c b/arch/s390/kernel/sthyi.c
index 1cf2ad04f8e9..d40f0b983e74 100644
--- a/arch/s390/kernel/sthyi.c
+++ b/arch/s390/kernel/sthyi.c
@@ -17,6 +17,7 @@
 #include <asm/ebcdic.h>
 #include <asm/facility.h>
 #include <asm/sthyi.h>
+#include <asm/asm.h>
 #include "entry.h"
 
 #define DED_WEIGHT 0xffff
@@ -425,13 +426,12 @@ static int sthyi(u64 vaddr, u64 *rc)
 
 	asm volatile(
 		".insn   rre,0xB2560000,%[r1],%[r2]\n"
-		"ipm     %[cc]\n"
-		"srl     %[cc],28\n"
-		: [cc] "=&d" (cc), [r2] "+&d" (r2.pair)
+		CC_IPM(cc)
+		: CC_OUT(cc, cc), [r2] "+&d" (r2.pair)
 		: [r1] "d" (r1.pair)
-		: "memory", "cc");
+		: CC_CLOBBER_LIST("memory"));
 	*rc = r2.odd;
-	return cc;
+	return CC_TRANSFORM(cc);
 }
 
 static int fill_dst(void *dst, u64 *rc)
diff --git a/arch/s390/kernel/syscalls/Makefile b/arch/s390/kernel/syscalls/Makefile
index 1bb78b9468e8..c5d958a09ff4 100644
--- a/arch/s390/kernel/syscalls/Makefile
+++ b/arch/s390/kernel/syscalls/Makefile
@@ -12,7 +12,7 @@ kapi-hdrs-y := $(kapi)/unistd_nr.h
 uapi-hdrs-y := $(uapi)/unistd_32.h
 uapi-hdrs-y += $(uapi)/unistd_64.h
 
-targets += $(addprefix ../../../,$(gen-y) $(kapi-hdrs-y) $(uapi-hdrs-y))
+targets += $(addprefix ../../../../,$(gen-y) $(kapi-hdrs-y) $(uapi-hdrs-y))
 
 PHONY += kapi uapi
 
@@ -23,23 +23,26 @@ uapi:	$(uapi-hdrs-y)
 # Create output directory if not already present
 $(shell mkdir -p $(uapi) $(kapi))
 
-filechk_syshdr = $(CONFIG_SHELL) '$(systbl)' -H -a $(syshdr_abi_$(basetarget)) -f "$2" < $<
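+# if_changed reruns these commands whenever the command line itself changes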
+quiet_cmd_syshdr = SYSHDR  $@
+      cmd_syshdr = $(CONFIG_SHELL) '$(systbl)' -H -a $(syshdr_abi_$(basetarget)) -f "$@" < $< > $@
 
-filechk_sysnr = $(CONFIG_SHELL) '$(systbl)' -N -a $(sysnr_abi_$(basetarget)) < $<
+quiet_cmd_sysnr = SYSNR   $@
+      cmd_sysnr = $(CONFIG_SHELL) '$(systbl)' -N -a $(sysnr_abi_$(basetarget)) < $< > $@
 
-filechk_syscalls = $(CONFIG_SHELL) '$(systbl)' -S < $<
+quiet_cmd_syscalls = SYSTBL  $@
+      cmd_syscalls = $(CONFIG_SHELL) '$(systbl)' -S < $< > $@
 
 syshdr_abi_unistd_32 := common,32
-$(uapi)/unistd_32.h: $(syscall) FORCE
-	$(call filechk,syshdr,$@)
+$(uapi)/unistd_32.h: $(syscall) $(systbl) FORCE
+	$(call if_changed,syshdr)
 
 syshdr_abi_unistd_64 := common,64
-$(uapi)/unistd_64.h: $(syscall) FORCE
-	$(call filechk,syshdr,$@)
+$(uapi)/unistd_64.h: $(syscall) $(systbl) FORCE
+	$(call if_changed,syshdr)
 
-$(kapi)/syscall_table.h: $(syscall) FORCE
-	$(call filechk,syscalls)
+$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE
+	$(call if_changed,syscalls)
 
 sysnr_abi_unistd_nr := common,32,64
-$(kapi)/unistd_nr.h: $(syscall) FORCE
-	$(call filechk,sysnr)
+$(kapi)/unistd_nr.h: $(syscall) $(systbl) FORCE
+	$(call if_changed,sysnr)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index b713effe0579..cd02ed7931b7 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -729,8 +729,8 @@ static ssize_t ctn_id_show(struct device *dev,
 
 	mutex_lock(&stp_mutex);
 	if (stpinfo_valid())
-		ret = sprintf(buf, "%016lx\n",
-			      *(unsigned long *) stp_info.ctnid);
+		ret = sysfs_emit(buf, "%016lx\n",
+				 *(unsigned long *)stp_info.ctnid);
 	mutex_unlock(&stp_mutex);
 	return ret;
 }
@@ -745,7 +745,7 @@ static ssize_t ctn_type_show(struct device *dev,
 
 	mutex_lock(&stp_mutex);
 	if (stpinfo_valid())
-		ret = sprintf(buf, "%i\n", stp_info.ctn);
+		ret = sysfs_emit(buf, "%i\n", stp_info.ctn);
 	mutex_unlock(&stp_mutex);
 	return ret;
 }
@@ -760,7 +760,7 @@ static ssize_t dst_offset_show(struct device *dev,
 
 	mutex_lock(&stp_mutex);
 	if (stpinfo_valid() && (stp_info.vbits & 0x2000))
-		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
+		ret = sysfs_emit(buf, "%i\n", (int)(s16)stp_info.dsto);
 	mutex_unlock(&stp_mutex);
 	return ret;
 }
@@ -775,7 +775,7 @@ static ssize_t leap_seconds_show(struct device *dev,
 
 	mutex_lock(&stp_mutex);
 	if (stpinfo_valid() && (stp_info.vbits & 0x8000))
-		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
+		ret = sysfs_emit(buf, "%i\n", (int)(s16)stp_info.leaps);
 	mutex_unlock(&stp_mutex);
 	return ret;
 }
@@ -801,11 +801,11 @@ static ssize_t leap_seconds_scheduled_show(struct device *dev,
 		return ret;
 
 	if (!stzi.lsoib.p)
-		return sprintf(buf, "0,0\n");
+		return sysfs_emit(buf, "0,0\n");
 
-	return sprintf(buf, "%lu,%d\n",
-		       tod_to_ns(stzi.lsoib.nlsout - TOD_UNIX_EPOCH) / NSEC_PER_SEC,
-		       stzi.lsoib.nlso - stzi.lsoib.also);
+	return sysfs_emit(buf, "%lu,%d\n",
+			  tod_to_ns(stzi.lsoib.nlsout - TOD_UNIX_EPOCH) / NSEC_PER_SEC,
+			  stzi.lsoib.nlso - stzi.lsoib.also);
 }
 
 static DEVICE_ATTR_RO(leap_seconds_scheduled);
@@ -818,7 +818,7 @@ static ssize_t stratum_show(struct device *dev,
 
 	mutex_lock(&stp_mutex);
 	if (stpinfo_valid())
-		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
+		ret = sysfs_emit(buf, "%i\n", (int)(s16)stp_info.stratum);
 	mutex_unlock(&stp_mutex);
 	return ret;
 }
@@ -833,7 +833,7 @@ static ssize_t time_offset_show(struct device *dev,
 
 	mutex_lock(&stp_mutex);
 	if (stpinfo_valid() && (stp_info.vbits & 0x0800))
-		ret = sprintf(buf, "%i\n", (int) stp_info.tto);
+		ret = sysfs_emit(buf, "%i\n", (int)stp_info.tto);
 	mutex_unlock(&stp_mutex);
 	return ret;
 }
@@ -848,7 +848,7 @@ static ssize_t time_zone_offset_show(struct device *dev,
 
 	mutex_lock(&stp_mutex);
 	if (stpinfo_valid() && (stp_info.vbits & 0x4000))
-		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
+		ret = sysfs_emit(buf, "%i\n", (int)(s16)stp_info.tzo);
 	mutex_unlock(&stp_mutex);
 	return ret;
 }
@@ -863,7 +863,7 @@ static ssize_t timing_mode_show(struct device *dev,
 
 	mutex_lock(&stp_mutex);
 	if (stpinfo_valid())
-		ret = sprintf(buf, "%i\n", stp_info.tmd);
+		ret = sysfs_emit(buf, "%i\n", stp_info.tmd);
 	mutex_unlock(&stp_mutex);
 	return ret;
 }
@@ -878,7 +878,7 @@ static ssize_t timing_state_show(struct device *dev,
 
 	mutex_lock(&stp_mutex);
 	if (stpinfo_valid())
-		ret = sprintf(buf, "%i\n", stp_info.tst);
+		ret = sysfs_emit(buf, "%i\n", stp_info.tst);
 	mutex_unlock(&stp_mutex);
 	return ret;
 }
@@ -889,7 +889,7 @@ static ssize_t online_show(struct device *dev,
 				struct device_attribute *attr,
 				char *buf)
 {
-	return sprintf(buf, "%i\n", stp_online);
+	return sysfs_emit(buf, "%i\n", stp_online);
 }
 
 static ssize_t online_store(struct device *dev,
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 813e5da9a973..4f9c301a705b 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -26,6 +26,7 @@
 #include <linux/node.h>
 #include <asm/hiperdispatch.h>
 #include <asm/sysinfo.h>
+#include <asm/asm.h>
 
 #define PTF_HORIZONTAL	(0UL)
 #define PTF_VERTICAL	(1UL)
@@ -224,15 +225,15 @@ static void topology_update_polarization_simple(void)
 
 static int ptf(unsigned long fc)
 {
-	int rc;
+	int cc;
 
 	asm volatile(
-		"	.insn	rre,0xb9a20000,%1,%1\n"
-		"	ipm	%0\n"
-		"	srl	%0,28\n"
-		: "=d" (rc)
-		: "d" (fc)  : "cc");
-	return rc;
+		"	.insn	rre,0xb9a20000,%[fc],%[fc]\n"
+		CC_IPM(cc)
+		: CC_OUT(cc, cc)
+		: [fc] "d" (fc)
+		: CC_CLOBBER);
+	return CC_TRANSFORM(cc);
 }
 
 int topology_set_cpu_management(int fc)
@@ -412,7 +413,7 @@ static ssize_t dispatching_show(struct device *dev,
 	ssize_t count;
 
 	mutex_lock(&smp_cpu_state_mutex);
-	count = sprintf(buf, "%d\n", cpu_management);
+	count = sysfs_emit(buf, "%d\n", cpu_management);
 	mutex_unlock(&smp_cpu_state_mutex);
 	return count;
 }
@@ -443,19 +444,19 @@ static ssize_t cpu_polarization_show(struct device *dev,
 	mutex_lock(&smp_cpu_state_mutex);
 	switch (smp_cpu_get_polarization(cpu)) {
 	case POLARIZATION_HRZ:
-		count = sprintf(buf, "horizontal\n");
+		count = sysfs_emit(buf, "horizontal\n");
 		break;
 	case POLARIZATION_VL:
-		count = sprintf(buf, "vertical:low\n");
+		count = sysfs_emit(buf, "vertical:low\n");
 		break;
 	case POLARIZATION_VM:
-		count = sprintf(buf, "vertical:medium\n");
+		count = sysfs_emit(buf, "vertical:medium\n");
 		break;
 	case POLARIZATION_VH:
-		count = sprintf(buf, "vertical:high\n");
+		count = sysfs_emit(buf, "vertical:high\n");
 		break;
 	default:
-		count = sprintf(buf, "unknown\n");
+		count = sysfs_emit(buf, "unknown\n");
 		break;
 	}
 	mutex_unlock(&smp_cpu_state_mutex);
@@ -479,7 +480,7 @@ static ssize_t cpu_dedicated_show(struct device *dev,
 	ssize_t count;
 
 	mutex_lock(&smp_cpu_state_mutex);
-	count = sprintf(buf, "%d\n", topology_cpu_dedicated(cpu));
+	count = sysfs_emit(buf, "%d\n", topology_cpu_dedicated(cpu));
 	mutex_unlock(&smp_cpu_state_mutex);
 	return count;
 }
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 160b2acba8db..24fee11b030d 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -31,6 +31,7 @@
 #include <asm/asm-extable.h>
 #include <asm/vtime.h>
 #include <asm/fpu.h>
+#include <asm/fault.h>
 #include "entry.h"
 
 static inline void __user *get_trap_ip(struct pt_regs *regs)
@@ -317,9 +318,24 @@ void noinstr __do_pgm_check(struct pt_regs *regs)
 	struct lowcore *lc = get_lowcore();
 	irqentry_state_t state;
 	unsigned int trapnr;
+	union teid teid;
 
+	teid.val = lc->trans_exc_code;
 	regs->int_code = lc->pgm_int_code;
-	regs->int_parm_long = lc->trans_exc_code;
+	regs->int_parm_long = teid.val;
+
+	/*
+	 * In case of a guest fault, short-circuit the fault handler and return.
+	 * This way the sie64a() function will return 0; fault address and
+	 * other relevant bits are saved in current->thread.gmap_teid, and
+	 * the fault number in current->thread.gmap_int_code. KVM will be
+	 * able to use this information to handle the fault.
+	 */
+	if (test_pt_regs_flag(regs, PIF_GUEST_FAULT)) {
+		current->thread.gmap_teid.val = regs->int_parm_long;
+		current->thread.gmap_int_code = regs->int_code & 0xffff;
+		return;
+	}
 
 	state = irqentry_enter(regs);
 
@@ -408,8 +424,8 @@ static void (*pgm_check_table[128])(struct pt_regs *regs) = {
 	[0x3b]		= do_dat_exception,
 	[0x3c]		= default_trap_handler,
 	[0x3d]		= do_secure_storage_access,
-	[0x3e]		= do_non_secure_storage_access,
-	[0x3f]		= do_secure_storage_violation,
+	[0x3e]		= default_trap_handler,
+	[0x3f]		= default_trap_handler,
 	[0x40]		= monitor_event_exception,
 	[0x41 ... 0x7f] = default_trap_handler,
 };
@@ -420,5 +436,3 @@ static void (*pgm_check_table[128])(struct pt_regs *regs) = {
 	__stringify(default_trap_handler))
 
 COND_TRAP(do_secure_storage_access);
-COND_TRAP(do_non_secure_storage_access);
-COND_TRAP(do_secure_storage_violation);
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 9646f773208a..6f9654a191ad 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -2,7 +2,7 @@
 /*
  * Common Ultravisor functions and initialization
  *
- * Copyright IBM Corp. 2019, 2020
+ * Copyright IBM Corp. 2019, 2024
  */
 #define KMSG_COMPONENT "prot_virt"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
@@ -696,12 +696,32 @@ static struct kobj_attribute uv_query_supp_secret_types_attr =
 static ssize_t uv_query_max_secrets(struct kobject *kobj,
 				    struct kobj_attribute *attr, char *buf)
 {
-	return sysfs_emit(buf, "%d\n", uv_info.max_secrets);
+	return sysfs_emit(buf, "%d\n",
+			  uv_info.max_assoc_secrets + uv_info.max_retr_secrets);
 }
 
 static struct kobj_attribute uv_query_max_secrets_attr =
 	__ATTR(max_secrets, 0444, uv_query_max_secrets, NULL);
 
+static ssize_t uv_query_max_retr_secrets(struct kobject *kobj,
+					 struct kobj_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "%d\n", uv_info.max_retr_secrets);
+}
+
+static struct kobj_attribute uv_query_max_retr_secrets_attr =
+	__ATTR(max_retr_secrets, 0444, uv_query_max_retr_secrets, NULL);
+
+static ssize_t uv_query_max_assoc_secrets(struct kobject *kobj,
+					  struct kobj_attribute *attr,
+					  char *buf)
+{
+	return sysfs_emit(buf, "%d\n", uv_info.max_assoc_secrets);
+}
+
+static struct kobj_attribute uv_query_max_assoc_secrets_attr =
+	__ATTR(max_assoc_secrets, 0444, uv_query_max_assoc_secrets, NULL);
+
 static struct attribute *uv_query_attrs[] = {
 	&uv_query_facilities_attr.attr,
 	&uv_query_feature_indications_attr.attr,
@@ -719,13 +739,81 @@ static struct attribute *uv_query_attrs[] = {
 	&uv_query_supp_add_secret_pcf_attr.attr,
 	&uv_query_supp_secret_types_attr.attr,
 	&uv_query_max_secrets_attr.attr,
+	&uv_query_max_assoc_secrets_attr.attr,
+	&uv_query_max_retr_secrets_attr.attr,
 	NULL,
 };
 
+static inline struct uv_cb_query_keys uv_query_keys(void)
+{
+	struct uv_cb_query_keys uvcb = {
+		.header.cmd = UVC_CMD_QUERY_KEYS,
+		.header.len = sizeof(uvcb)
+	};
+
+	uv_call(0, (uint64_t)&uvcb);
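+	/* on failure the zero-initialized hashes are reported as-is */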
+	return uvcb;
+}
+
+static inline ssize_t emit_hash(struct uv_key_hash *hash, char *buf, int at)
+{
+	return sysfs_emit_at(buf, at, "%016llx%016llx%016llx%016llx\n",
+			     hash->dword[0], hash->dword[1], hash->dword[2], hash->dword[3]);
+}
+
+static ssize_t uv_keys_host_key(struct kobject *kobj,
+				struct kobj_attribute *attr, char *buf)
+{
+	struct uv_cb_query_keys uvcb = uv_query_keys();
+
+	return emit_hash(&uvcb.key_hashes[UVC_QUERY_KEYS_IDX_HK], buf, 0);
+}
+
+static struct kobj_attribute uv_keys_host_key_attr =
+	__ATTR(host_key, 0444, uv_keys_host_key, NULL);
+
+static ssize_t uv_keys_backup_host_key(struct kobject *kobj,
+				       struct kobj_attribute *attr, char *buf)
+{
+	struct uv_cb_query_keys uvcb = uv_query_keys();
+
+	return emit_hash(&uvcb.key_hashes[UVC_QUERY_KEYS_IDX_BACK_HK], buf, 0);
+}
+
+static struct kobj_attribute uv_keys_backup_host_key_attr =
+	__ATTR(backup_host_key, 0444, uv_keys_backup_host_key, NULL);
+
+static ssize_t uv_keys_all(struct kobject *kobj,
+			   struct kobj_attribute *attr, char *buf)
+{
+	struct uv_cb_query_keys uvcb = uv_query_keys();
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(uvcb.key_hashes); i++)
+		len += emit_hash(uvcb.key_hashes + i, buf, len);
+
+	return len;
+}
+
+static struct kobj_attribute uv_keys_all_attr =
+	__ATTR(all, 0444, uv_keys_all, NULL);
+
 static struct attribute_group uv_query_attr_group = {
 	.attrs = uv_query_attrs,
 };
 
+static struct attribute *uv_keys_attrs[] = {
+	&uv_keys_host_key_attr.attr,
+	&uv_keys_backup_host_key_attr.attr,
+	&uv_keys_all_attr.attr,
+	NULL,
+};
+
+static struct attribute_group uv_keys_attr_group = {
+	.attrs = uv_keys_attrs,
+};
+
 static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
 				     struct kobj_attribute *attr, char *buf)
 {
@@ -751,9 +839,27 @@ static const struct attribute *uv_prot_virt_attrs[] = {
 };
 
 static struct kset *uv_query_kset;
+static struct kset *uv_keys_kset;
 static struct kobject *uv_kobj;
 
-static int __init uv_info_init(void)
+static int __init uv_sysfs_dir_init(const struct attribute_group *grp,
+				    struct kset **uv_dir_kset, const char *name)
+{
+	struct kset *kset;
+	int rc;
+
+	kset = kset_create_and_add(name, NULL, uv_kobj);
+	if (!kset)
+		return -ENOMEM;
+	*uv_dir_kset = kset;
+
+	rc = sysfs_create_group(&kset->kobj, grp);
+	if (rc)
+		kset_unregister(kset);
+	return rc;
+}
+
+static int __init uv_sysfs_init(void)
 {
 	int rc = -ENOMEM;
 
@@ -768,17 +874,16 @@ static int __init uv_info_init(void)
 	if (rc)
 		goto out_kobj;
 
-	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
-	if (!uv_query_kset) {
-		rc = -ENOMEM;
+	rc = uv_sysfs_dir_init(&uv_query_attr_group, &uv_query_kset, "query");
+	if (rc)
 		goto out_ind_files;
-	}
 
-	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
-	if (!rc)
-		return 0;
+	/* Get installed key hashes if available; ignore any errors */
+	if (test_bit_inv(BIT_UVC_CMD_QUERY_KEYS, uv_info.inst_calls_list))
+		uv_sysfs_dir_init(&uv_keys_attr_group, &uv_keys_kset, "keys");
+
+	return 0;
 
-	kset_unregister(uv_query_kset);
 out_ind_files:
 	sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
 out_kobj:
@@ -786,4 +891,131 @@ out_kobj:
 	kobject_put(uv_kobj);
 	return rc;
 }
-device_initcall(uv_info_init);
+device_initcall(uv_sysfs_init);
+
+/*
+ * Find the secret with the secret_id in the provided list.
+ *
+ * Context: might sleep.
+ */
+static int find_secret_in_page(const u8 secret_id[UV_SECRET_ID_LEN],
+			       const struct uv_secret_list *list,
+			       struct uv_secret_list_item_hdr *secret)
+{
+	u16 i;
+
+	for (i = 0; i < list->total_num_secrets; i++) {
+		if (memcmp(secret_id, list->secrets[i].id, UV_SECRET_ID_LEN) == 0) {
+			*secret = list->secrets[i].hdr;
+			return 0;
+		}
+	}
+	return -ENOENT;
+}
+
+/*
+ * Do the actual search for uv_get_secret_metadata().
+ *
+ * Context: might sleep.
+ */
+static int find_secret(const u8 secret_id[UV_SECRET_ID_LEN],
+		       struct uv_secret_list *list,
+		       struct uv_secret_list_item_hdr *secret)
+{
+	u16 start_idx = 0;
+	u16 list_rc;
+	int ret;
+
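+	/* the secret list is paged; UVC_RC_MORE_DATA means another page follows */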
+	do {
+		uv_list_secrets(list, start_idx, &list_rc, NULL);
+		if (list_rc != UVC_RC_EXECUTED && list_rc != UVC_RC_MORE_DATA) {
+			if (list_rc == UVC_RC_INV_CMD)
+				return -ENODEV;
+			return -EIO;
+		}
+		ret = find_secret_in_page(secret_id, list, secret);
+		if (ret == 0)
+			return ret;
+		start_idx = list->next_secret_idx;
+	} while (list_rc == UVC_RC_MORE_DATA && start_idx < list->next_secret_idx);
+
+	return -ENOENT;
+}
+
+/**
+ * uv_get_secret_metadata() - get secret metadata for a given secret id.
+ * @secret_id: search pattern.
+ * @secret: output data, containing the secret's metadata.
+ *
+ * Search for a secret with the given secret_id in the Ultravisor secret store.
+ *
+ * Context: might sleep.
+ *
+ * Return:
+ * * %0		- Found entry; secret->idx and secret->type are valid.
+ * * %ENOENT	- No entry found.
+ * * %ENODEV	- Not supported: UV not available or command not available.
+ * * %EIO	- Other unexpected UV error.
+ */
+int uv_get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN],
+			   struct uv_secret_list_item_hdr *secret)
+{
+	struct uv_secret_list *buf;
+	int rc;
+
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	rc = find_secret(secret_id, buf, secret);
+	kfree(buf);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(uv_get_secret_metadata);
+
+/**
+ * uv_retrieve_secret() - get the secret value for the secret index.
+ * @secret_idx: Secret index for which the secret should be retrieved.
+ * @buf: Buffer to store retrieved secret.
+ * @buf_size: Size of the buffer. The correct buffer size is reported as part
+ *	of the result from uv_get_secret_metadata().
+ *
+ * Calls the Retrieve Secret UVC and translates the UV return code into an errno.
+ *
+ * Context: might sleep.
+ *
+ * Return:
+ * * %0		- Entry found; buffer contains a valid secret.
+ * * %ENOENT	- No entry found or secret at the index is non-retrievable.
+ * * %ENODEV	- Not supported: UV not available or command not available.
+ * * %EINVAL	- Buffer too small for content.
+ * * %EIO	- Other unexpected UV error.
+ */
+int uv_retrieve_secret(u16 secret_idx, u8 *buf, size_t buf_size)
+{
+	struct uv_cb_retr_secr uvcb = {
+		.header.len = sizeof(uvcb),
+		.header.cmd = UVC_CMD_RETR_SECRET,
+		.secret_idx = secret_idx,
+		.buf_addr = (u64)buf,
+		.buf_size = buf_size,
+	};
+
+	uv_call_sched(0, (u64)&uvcb);
+
+	switch (uvcb.header.rc) {
+	case UVC_RC_EXECUTED:
+		return 0;
+	case UVC_RC_INV_CMD:
+		return -ENODEV;
+	case UVC_RC_RETR_SECR_STORE_EMPTY:
+	case UVC_RC_RETR_SECR_INV_SECRET:
+	case UVC_RC_RETR_SECR_INV_IDX:
+		return -ENOENT;
+	case UVC_RC_RETR_SECR_BUF_SMALL:
+		return -EINVAL;
+	default:
+		return -EIO;
+	}
+}
+EXPORT_SYMBOL_GPL(uv_retrieve_secret);
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index b16352083ff9..5bbaadf75dc6 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -367,7 +367,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 					      reg2, &srcaddr, GACC_FETCH, 0);
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
-	rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
+	rc = gmap_fault(vcpu->arch.gmap, srcaddr, 0);
 	if (rc != 0)
 		return rc;
 
@@ -376,7 +376,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 					      reg1, &dstaddr, GACC_STORE, 0);
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
-	rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
+	rc = gmap_fault(vcpu->arch.gmap, dstaddr, FAULT_FLAG_WRITE);
 	if (rc != 0)
 		return rc;
 
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index bb7134faaebf..deeb32034ad5 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -43,6 +43,7 @@
 #include <asm/sclp.h>
 #include <asm/cpacf.h>
 #include <asm/timex.h>
+#include <asm/asm.h>
 #include <asm/fpu.h>
 #include <asm/ap.h>
 #include <asm/uv.h>
@@ -340,12 +341,11 @@ static inline int plo_test_bit(unsigned char nr)
 		"	lgr	0,%[function]\n"
 		/* Parameter registers are ignored for "test bit" */
 		"	plo	0,0,0,0(0)\n"
-		"	ipm	%0\n"
-		"	srl	%0,28\n"
-		: "=d" (cc)
+		CC_IPM(cc)
+		: CC_OUT(cc, cc)
 		: [function] "d" (function)
-		: "cc", "0");
-	return cc == 0;
+		: CC_CLOBBER_LIST("0"));
+	return CC_TRANSFORM(cc) == 0;
 }
 
 static __always_inline void __sortl_query(u8 (*query)[32])
@@ -3719,7 +3719,6 @@ __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 
-	gmap_enable(vcpu->arch.enabled_gmap);
 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
 		__start_cpu_timer_accounting(vcpu);
@@ -3732,8 +3731,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
 		__stop_cpu_timer_accounting(vcpu);
 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
-	vcpu->arch.enabled_gmap = gmap_get_enabled();
-	gmap_disable(vcpu->arch.enabled_gmap);
 
 }
 
@@ -3751,8 +3748,6 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 	}
 	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
-	/* make vcpu_load load the right gmap on the first trigger */
-	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
 }
 
 static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
@@ -4579,22 +4574,6 @@ int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clo
 	return 1;
 }
 
-/**
- * kvm_arch_fault_in_page - fault-in guest page if necessary
- * @vcpu: The corresponding virtual cpu
- * @gpa: Guest physical address
- * @writable: Whether the page should be writable or not
- *
- * Make sure that a guest page has been faulted-in on the host.
- *
- * Return: Zero on success, negative error code otherwise.
- */
-long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
-{
-	return gmap_fault(vcpu->arch.gmap, gpa,
-			  writable ? FAULT_FLAG_WRITE : 0);
-}
-
 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
 				      unsigned long token)
 {
@@ -4662,12 +4641,11 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
 	if (!vcpu->arch.gmap->pfault_enabled)
 		return false;
 
-	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
-	hva += current->thread.gmap_addr & ~PAGE_MASK;
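+	/* gmap_teid.addr is a page index, i.e. already a gfn */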
+	hva = gfn_to_hva(vcpu->kvm, current->thread.gmap_teid.addr);
 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
 		return false;
 
-	return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
+	return kvm_setup_async_pf(vcpu, current->thread.gmap_teid.addr * PAGE_SIZE, hva, &arch);
 }
 
 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
@@ -4705,6 +4683,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
 
 	vcpu->arch.sie_block->icptcode = 0;
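+	/* __do_pgm_check() sets a nonzero gmap_int_code on guest faults */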
+	current->thread.gmap_int_code = 0;
 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
@@ -4712,7 +4691,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
+static int vcpu_post_run_addressing_exception(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_pgm_info pgm_info = {
 		.code = PGM_ADDRESSING,
@@ -4748,10 +4727,106 @@ static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
 	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
 }
 
+static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
+{
+	unsigned int flags = 0;
+	unsigned long gaddr;
+	int rc = 0;
+
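+	/* the TEID contains the page index of the faulting guest address */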
+	gaddr = current->thread.gmap_teid.addr * PAGE_SIZE;
+	if (kvm_s390_cur_gmap_fault_is_write())
+		flags = FAULT_FLAG_WRITE;
+
+	switch (current->thread.gmap_int_code & PGM_INT_CODE_MASK) {
+	case 0:
+		vcpu->stat.exit_null++;
+		break;
+	case PGM_NON_SECURE_STORAGE_ACCESS:
+		KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
+			"Unexpected program interrupt 0x%x, TEID 0x%016lx",
+			current->thread.gmap_int_code, current->thread.gmap_teid.val);
+		/*
+		 * This is normal operation; a page belonging to a protected
+		 * guest has not been imported yet. Try to import the page into
+		 * the protected guest.
+		 */
+		if (gmap_convert_to_secure(vcpu->arch.gmap, gaddr) == -EINVAL)
+			send_sig(SIGSEGV, current, 0);
+		break;
+	case PGM_SECURE_STORAGE_ACCESS:
+	case PGM_SECURE_STORAGE_VIOLATION:
+		KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
+			"Unexpected program interrupt 0x%x, TEID 0x%016lx",
+			current->thread.gmap_int_code, current->thread.gmap_teid.val);
+		/*
+		 * This can happen after a reboot with asynchronous teardown;
+		 * the new guest (normal or protected) will run on top of the
+		 * previous protected guest. The old pages need to be destroyed
+		 * so the new guest can use them.
+		 */
+		if (gmap_destroy_page(vcpu->arch.gmap, gaddr)) {
+			/*
+			 * Either KVM messed up the secure guest mapping or the
+			 * same page is mapped into multiple secure guests.
+			 *
+			 * This exception is only triggered when a guest 2 is
+			 * running and can therefore never occur in kernel
+			 * context.
+			 */
+			pr_warn_ratelimited("Secure storage violation (%x) in task: %s, pid %d\n",
+					    current->thread.gmap_int_code, current->comm,
+					    current->pid);
+			send_sig(SIGSEGV, current, 0);
+		}
+		break;
+	case PGM_PROTECTION:
+	case PGM_SEGMENT_TRANSLATION:
+	case PGM_PAGE_TRANSLATION:
+	case PGM_ASCE_TYPE:
+	case PGM_REGION_FIRST_TRANS:
+	case PGM_REGION_SECOND_TRANS:
+	case PGM_REGION_THIRD_TRANS:
+		KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
+			"Unexpected program interrupt 0x%x, TEID 0x%016lx",
+			current->thread.gmap_int_code, current->thread.gmap_teid.val);
+		if (vcpu->arch.gmap->pfault_enabled) {
+			rc = gmap_fault(vcpu->arch.gmap, gaddr, flags | FAULT_FLAG_RETRY_NOWAIT);
+			if (rc == -EFAULT)
+				return vcpu_post_run_addressing_exception(vcpu);
+			if (rc == -EAGAIN) {
+				trace_kvm_s390_major_guest_pfault(vcpu);
+				if (kvm_arch_setup_async_pf(vcpu))
+					return 0;
+				vcpu->stat.pfault_sync++;
+			} else {
+				return rc;
+			}
+		}
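+		/* synchronous fault-in, also the fallback for a failed async pf */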
+		rc = gmap_fault(vcpu->arch.gmap, gaddr, flags);
+		if (rc == -EFAULT) {
+			if (kvm_is_ucontrol(vcpu->kvm)) {
+				vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
+				vcpu->run->s390_ucontrol.trans_exc_code = gaddr;
+				vcpu->run->s390_ucontrol.pgm_code = 0x10;
+				return -EREMOTE;
+			}
+			return vcpu_post_run_addressing_exception(vcpu);
+		}
+		break;
+	default:
+		KVM_BUG(1, vcpu->kvm, "Unexpected program interrupt 0x%x, TEID 0x%016lx",
+			current->thread.gmap_int_code, current->thread.gmap_teid.val);
+		send_sig(SIGSEGV, current, 0);
+		break;
+	}
+	return rc;
+}
+
 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
 {
 	struct mcck_volatile_info *mcck_info;
 	struct sie_page *sie_page;
+	int rc;
 
 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
 		   vcpu->arch.sie_block->icptcode);
@@ -4773,7 +4848,7 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
 	}
 
 	if (vcpu->arch.sie_block->icptcode > 0) {
-		int rc = kvm_handle_sie_intercept(vcpu);
+		rc = kvm_handle_sie_intercept(vcpu);
 
 		if (rc != -EOPNOTSUPP)
 			return rc;
@@ -4782,24 +4857,9 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
 		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
 		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
 		return -EREMOTE;
-	} else if (exit_reason != -EFAULT) {
-		vcpu->stat.exit_null++;
-		return 0;
-	} else if (kvm_is_ucontrol(vcpu->kvm)) {
-		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
-		vcpu->run->s390_ucontrol.trans_exc_code =
-						current->thread.gmap_addr;
-		vcpu->run->s390_ucontrol.pgm_code = 0x10;
-		return -EREMOTE;
-	} else if (current->thread.gmap_pfault) {
-		trace_kvm_s390_major_guest_pfault(vcpu);
-		current->thread.gmap_pfault = 0;
-		if (kvm_arch_setup_async_pf(vcpu))
-			return 0;
-		vcpu->stat.pfault_sync++;
-		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
 	}
-	return vcpu_post_run_fault_in_sie(vcpu);
+
+	return vcpu_post_run_handle_fault(vcpu);
 }
 
 #define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
@@ -4835,7 +4895,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 		}
 		exit_reason = sie64a(vcpu->arch.sie_block,
 				     vcpu->run->s.regs.gprs,
-				     gmap_get_enabled()->asce);
+				     vcpu->arch.gmap->asce);
 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
 			memcpy(vcpu->run->s.regs.gprs,
 			       sie_page->pv_grregs,
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index e680c6bf0c9d..597d7a71deeb 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -394,7 +394,6 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
 
 /* implemented in kvm-s390.c */
 int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
-long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
@@ -529,6 +528,13 @@ static inline int kvm_s390_use_sca_entries(void)
 void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
 				     struct mcck_volatile_info *mcck_info);
 
+static inline bool kvm_s390_cur_gmap_fault_is_write(void)
+{
+	if (current->thread.gmap_int_code == PGM_PROTECTION)
+		return true;
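+	/* facility 75: the fetch/store indication (fsi) in the TEID is valid */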
+	return test_facility(75) && (current->thread.gmap_teid.fsi == TEID_FSI_STORE);
+}
+
 /**
  * kvm_s390_vcpu_crypto_reset_all
  *
diff --git a/arch/s390/kvm/pci.c b/arch/s390/kvm/pci.c
index ffa7739c7a28..a61518b549f0 100644
--- a/arch/s390/kvm/pci.c
+++ b/arch/s390/kvm/pci.c
@@ -103,7 +103,7 @@ static int zpci_reset_aipb(u8 nisc)
 	/*
 	 * AEN registration can only happen once per system boot.  If
 	 * an aipb already exists then AEN was already registered and
-	 * we can re-use the aipb contents.  This can only happen if
+	 * we can reuse the aipb contents.  This can only happen if
 	 * the KVM module was removed and re-inserted.  However, we must
 	 * ensure that the same forwarding ISC is used as this is assigned
 	 * during KVM module load.
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 89cafea4c41f..d3cdde1b18e5 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -922,19 +922,19 @@ static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 {
 	int rc;
 
-	if (current->thread.gmap_int_code == PGM_PROTECTION)
+	if ((current->thread.gmap_int_code & PGM_INT_CODE_MASK) == PGM_PROTECTION)
 		/* we can directly forward all protection exceptions */
 		return inject_fault(vcpu, PGM_PROTECTION,
-				    current->thread.gmap_addr, 1);
+				    current->thread.gmap_teid.addr * PAGE_SIZE, 1);
 
 	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
-				   current->thread.gmap_addr, NULL);
+				   current->thread.gmap_teid.addr * PAGE_SIZE, NULL);
 	if (rc > 0) {
 		rc = inject_fault(vcpu, rc,
-				  current->thread.gmap_addr,
-				  current->thread.gmap_write_flag);
+				  current->thread.gmap_teid.addr * PAGE_SIZE,
+				  kvm_s390_cur_gmap_fault_is_write());
 		if (rc >= 0)
-			vsie_page->fault_addr = current->thread.gmap_addr;
+			vsie_page->fault_addr = current->thread.gmap_teid.addr * PAGE_SIZE;
 	}
 	return rc;
 }
@@ -1148,9 +1148,10 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	 * also kick the vSIE.
 	 */
 	vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
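+	/* cleared so that a guest fault can be detected after sie64a() */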
+	current->thread.gmap_int_code = 0;
 	barrier();
 	if (!kvm_s390_vcpu_sie_inhibited(vcpu))
-		rc = sie64a(scb_s, vcpu->run->s.regs.gprs, gmap_get_enabled()->asce);
+		rc = sie64a(scb_s, vcpu->run->s.regs.gprs, vsie_page->gmap->asce);
 	barrier();
 	vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;
 
@@ -1172,7 +1173,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 
 	if (rc > 0)
 		rc = 0; /* we could still have an icpt */
-	else if (rc == -EFAULT)
+	else if (current->thread.gmap_int_code)
 		return handle_fault(vcpu, vsie_page);
 
 	switch (scb_s->icptcode) {
@@ -1295,10 +1296,8 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 		if (!rc)
 			rc = map_prefix(vcpu, vsie_page);
 		if (!rc) {
-			gmap_enable(vsie_page->gmap);
 			update_intervention_requests(vsie_page);
 			rc = do_vsie_run(vcpu, vsie_page);
-			gmap_enable(vcpu->arch.gmap);
 		}
 		atomic_andnot(PROG_BLOCK_SIE, &scb_s->prog20);
 
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 9f86ad8fa8b4..09d735010ee1 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -127,8 +127,8 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
 	node_id = node->node_id;
 
 	/* Enqueue the node for this CPU in the spinlock wait queue */
+	old = READ_ONCE(lp->lock);
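+	/* no re-read in the loop: arch_try_cmpxchg() reloads old on failure */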
 	while (1) {
-		old = READ_ONCE(lp->lock);
 		if ((old & _Q_LOCK_CPU_MASK) == 0 &&
 		    (old & _Q_LOCK_STEAL_MASK) != _Q_LOCK_STEAL_MASK) {
 			/*
@@ -139,7 +139,7 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
 			 * waiter will get the lock.
 			 */
 			new = (old ? (old + _Q_LOCK_STEAL_ADD) : 0) | lockval;
-			if (__atomic_cmpxchg_bool(&lp->lock, old, new))
+			if (arch_try_cmpxchg(&lp->lock, &old, new))
 				/* Got the lock */
 				goto out;
 			/* lock passing in progress */
@@ -147,7 +147,7 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
 		}
 		/* Make the node of this CPU the new tail. */
 		new = node_id | (old & _Q_LOCK_MASK);
-		if (__atomic_cmpxchg_bool(&lp->lock, old, new))
+		if (arch_try_cmpxchg(&lp->lock, &old, new))
 			break;
 	}
 	/* Set the 'next' pointer of the tail node in the queue */
@@ -184,7 +184,7 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
 		if (!owner) {
 			tail_id = old & _Q_TAIL_MASK;
 			new = ((tail_id != node_id) ? tail_id : 0) | lockval;
-			if (__atomic_cmpxchg_bool(&lp->lock, old, new))
+			if (arch_try_cmpxchg(&lp->lock, &old, new))
 				/* Got the lock */
 				break;
 			continue;
@@ -258,7 +258,7 @@ int arch_spin_trylock_retry(arch_spinlock_t *lp)
 		owner = READ_ONCE(lp->lock);
 		/* Try to get the lock if it is free. */
 		if (!owner) {
-			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
+			if (arch_try_cmpxchg(&lp->lock, &owner, cpu))
 				return 1;
 		}
 	}
@@ -300,7 +300,7 @@ void arch_write_lock_wait(arch_rwlock_t *rw)
 	while (1) {
 		old = READ_ONCE(rw->cnts);
 		if ((old & 0x1ffff) == 0 &&
-		    __atomic_cmpxchg_bool(&rw->cnts, old, old | 0x10000))
+		    arch_try_cmpxchg(&rw->cnts, &old, old | 0x10000))
 			/* Got the lock */
 			break;
 		barrier();
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index 7d8741818239..373fa1f01937 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -15,6 +15,7 @@
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/export.h>
+#include <asm/asm.h>
 
 /*
  * Helper functions to find the end of a string
@@ -238,12 +239,11 @@ static inline int clcle(const char *s1, unsigned long l1,
 	asm volatile(
 		"0:	clcle	%[r1],%[r3],0\n"
 		"	jo	0b\n"
-		"	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
-		: [cc] "=&d" (cc), [r1] "+&d" (r1.pair), [r3] "+&d" (r3.pair)
+		CC_IPM(cc)
+		: CC_OUT(cc, cc), [r1] "+d" (r1.pair), [r3] "+d" (r3.pair)
 		:
-		: "cc", "memory");
-	return cc;
+		: CC_CLOBBER_LIST("memory"));
+	return CC_TRANSFORM(cc);
 }
 
 /**
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 282fefe107a2..4692136c0af1 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -28,6 +28,7 @@
 #include <asm/extmem.h>
 #include <asm/cpcmd.h>
 #include <asm/setup.h>
+#include <asm/asm.h>
 
 #define DCSS_PURGESEG   0x08
 #define DCSS_LOADSHRX	0x20
@@ -134,20 +135,21 @@ dcss_diag(int *func, void *parameter,
            unsigned long *ret1, unsigned long *ret2)
 {
 	unsigned long rx, ry;
-	int rc;
+	int cc;
 
 	rx = virt_to_phys(parameter);
 	ry = (unsigned long) *func;
 
 	diag_stat_inc(DIAG_STAT_X064);
 	asm volatile(
-		"	diag	%0,%1,0x64\n"
-		"	ipm	%2\n"
-		"	srl	%2,28\n"
-		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
+		"	diag	%[rx],%[ry],0x64\n"
+		CC_IPM(cc)
+		: CC_OUT(cc, cc), [rx] "+d" (rx), [ry] "+d" (ry)
+		:
+		: CC_CLOBBER);
 	*ret1 = rx;
 	*ret2 = ry;
-	return rc;
+	return CC_TRANSFORM(cc);
 }
 
 static inline int
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index ad8b0d6b77ea..94cb2e092075 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -46,12 +46,6 @@
 #include <asm/uv.h>
 #include "../kernel/entry.h"
 
-enum fault_type {
-	KERNEL_FAULT,
-	USER_FAULT,
-	GMAP_FAULT,
-};
-
 static DEFINE_STATIC_KEY_FALSE(have_store_indication);
 
 static int __init fault_init(void)
@@ -65,28 +59,15 @@ early_initcall(fault_init);
 /*
  * Find out which address space caused the exception.
  */
-static enum fault_type get_fault_type(struct pt_regs *regs)
+static bool is_kernel_fault(struct pt_regs *regs)
 {
 	union teid teid = { .val = regs->int_parm_long };
-	struct gmap *gmap;
 
-	if (likely(teid.as == PSW_BITS_AS_PRIMARY)) {
-		if (user_mode(regs))
-			return USER_FAULT;
-		if (!IS_ENABLED(CONFIG_PGSTE))
-			return KERNEL_FAULT;
-		gmap = (struct gmap *)get_lowcore()->gmap;
-		if (gmap && gmap->asce == regs->cr1)
-			return GMAP_FAULT;
-		return KERNEL_FAULT;
-	}
+	if (user_mode(regs))
+		return false;
 	if (teid.as == PSW_BITS_AS_SECONDARY)
-		return USER_FAULT;
-	/* Access register mode, not used in the kernel */
-	if (teid.as == PSW_BITS_AS_ACCREG)
-		return USER_FAULT;
-	/* Home space -> access via kernel ASCE */
-	return KERNEL_FAULT;
+		return false;
+	return true;
 }
 
 static unsigned long get_fault_address(struct pt_regs *regs)
@@ -181,21 +162,12 @@ static void dump_fault_info(struct pt_regs *regs)
 		break;
 	}
 	pr_cont("mode while using ");
-	switch (get_fault_type(regs)) {
-	case USER_FAULT:
-		asce = get_lowcore()->user_asce.val;
-		pr_cont("user ");
-		break;
-	case GMAP_FAULT:
-		asce = ((struct gmap *)get_lowcore()->gmap)->asce;
-		pr_cont("gmap ");
-		break;
-	case KERNEL_FAULT:
+	if (is_kernel_fault(regs)) {
 		asce = get_lowcore()->kernel_asce.val;
 		pr_cont("kernel ");
-		break;
-	default:
-		unreachable();
+	} else {
+		asce = get_lowcore()->user_asce.val;
+		pr_cont("user ");
 	}
 	pr_cont("ASCE.\n");
 	dump_pagetable(asce, get_fault_address(regs));
@@ -230,7 +202,6 @@ static void do_sigsegv(struct pt_regs *regs, int si_code)
 
 static void handle_fault_error_nolock(struct pt_regs *regs, int si_code)
 {
-	enum fault_type fault_type;
 	unsigned long address;
 	bool is_write;
 
@@ -241,17 +212,15 @@ static void handle_fault_error_nolock(struct pt_regs *regs, int si_code)
 	}
 	if (fixup_exception(regs))
 		return;
-	fault_type = get_fault_type(regs);
-	if (fault_type == KERNEL_FAULT) {
+	if (is_kernel_fault(regs)) {
 		address = get_fault_address(regs);
 		is_write = fault_is_write(regs);
 		if (kfence_handle_page_fault(address, is_write, regs))
 			return;
-	}
-	if (fault_type == KERNEL_FAULT)
 		pr_alert("Unable to handle kernel pointer dereference in virtual kernel address space\n");
-	else
+	} else {
 		pr_alert("Unable to handle kernel paging request in virtual user address space\n");
+	}
 	dump_fault_info(regs);
 	die(regs, "Oops");
 }
@@ -285,9 +254,7 @@ static void do_exception(struct pt_regs *regs, int access)
 	struct vm_area_struct *vma;
 	unsigned long address;
 	struct mm_struct *mm;
-	enum fault_type type;
 	unsigned int flags;
-	struct gmap *gmap;
 	vm_fault_t fault;
 	bool is_write;
 
@@ -301,16 +268,8 @@ static void do_exception(struct pt_regs *regs, int access)
 	mm = current->mm;
 	address = get_fault_address(regs);
 	is_write = fault_is_write(regs);
-	type = get_fault_type(regs);
-	switch (type) {
-	case KERNEL_FAULT:
+	if (is_kernel_fault(regs) || faulthandler_disabled() || !mm)
 		return handle_fault_error_nolock(regs, 0);
-	case USER_FAULT:
-	case GMAP_FAULT:
-		if (faulthandler_disabled() || !mm)
-			return handle_fault_error_nolock(regs, 0);
-		break;
-	}
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 	flags = FAULT_FLAG_DEFAULT;
 	if (user_mode(regs))
@@ -334,14 +293,11 @@ static void do_exception(struct pt_regs *regs, int access)
 		vma_end_read(vma);
 	if (!(fault & VM_FAULT_RETRY)) {
 		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
-		if (unlikely(fault & VM_FAULT_ERROR))
-			goto error;
-		return;
+		goto done;
 	}
 	count_vm_vma_lock_event(VMA_LOCK_RETRY);
 	if (fault & VM_FAULT_MAJOR)
 		flags |= FAULT_FLAG_TRIED;
-
 	/* Quick path to respond to signals */
 	if (fault_signal_pending(fault, regs)) {
 		if (!user_mode(regs))
@@ -349,81 +305,29 @@ static void do_exception(struct pt_regs *regs, int access)
 		return;
 	}
 lock_mmap:
-	mmap_read_lock(mm);
-	gmap = NULL;
-	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
-		gmap = (struct gmap *)get_lowcore()->gmap;
-		current->thread.gmap_addr = address;
-		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
-		current->thread.gmap_int_code = regs->int_code & 0xffff;
-		address = __gmap_translate(gmap, address);
-		if (address == -EFAULT)
-			return handle_fault_error(regs, SEGV_MAPERR);
-		if (gmap->pfault_enabled)
-			flags |= FAULT_FLAG_RETRY_NOWAIT;
-	}
 retry:
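+	/* takes the mmap lock, finds the vma, and expands the stack if needed */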
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (!vma)
-		return handle_fault_error(regs, SEGV_MAPERR);
-	if (unlikely(vma->vm_start > address)) {
-		if (!(vma->vm_flags & VM_GROWSDOWN))
-			return handle_fault_error(regs, SEGV_MAPERR);
-		vma = expand_stack(mm, address);
-		if (!vma)
-			return handle_fault_error_nolock(regs, SEGV_MAPERR);
-	}
+		return handle_fault_error_nolock(regs, SEGV_MAPERR);
 	if (unlikely(!(vma->vm_flags & access)))
 		return handle_fault_error(regs, SEGV_ACCERR);
 	fault = handle_mm_fault(vma, address, flags, regs);
 	if (fault_signal_pending(fault, regs)) {
-		if (flags & FAULT_FLAG_RETRY_NOWAIT)
-			mmap_read_unlock(mm);
 		if (!user_mode(regs))
 			handle_fault_error_nolock(regs, 0);
 		return;
 	}
 	/* The fault is fully completed (including releasing mmap lock) */
-	if (fault & VM_FAULT_COMPLETED) {
-		if (gmap) {
-			mmap_read_lock(mm);
-			goto gmap;
-		}
+	if (fault & VM_FAULT_COMPLETED)
 		return;
-	}
-	if (unlikely(fault & VM_FAULT_ERROR)) {
-		mmap_read_unlock(mm);
-		goto error;
-	}
 	if (fault & VM_FAULT_RETRY) {
-		if (IS_ENABLED(CONFIG_PGSTE) && gmap &&	(flags & FAULT_FLAG_RETRY_NOWAIT)) {
-			/*
-			 * FAULT_FLAG_RETRY_NOWAIT has been set,
-			 * mmap_lock has not been released
-			 */
-			current->thread.gmap_pfault = 1;
-			return handle_fault_error(regs, 0);
-		}
-		flags &= ~FAULT_FLAG_RETRY_NOWAIT;
 		flags |= FAULT_FLAG_TRIED;
-		mmap_read_lock(mm);
 		goto retry;
 	}
-gmap:
-	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
-		address =  __gmap_link(gmap, current->thread.gmap_addr,
-				       address);
-		if (address == -EFAULT)
-			return handle_fault_error(regs, SEGV_MAPERR);
-		if (address == -ENOMEM) {
-			fault = VM_FAULT_OOM;
-			mmap_read_unlock(mm);
-			goto error;
-		}
-	}
 	mmap_read_unlock(mm);
-	return;
-error:
+done:
+	if (!(fault & VM_FAULT_ERROR))
+		return;
 	if (fault & VM_FAULT_OOM) {
 		if (!user_mode(regs))
 			handle_fault_error_nolock(regs, 0);
@@ -496,7 +400,6 @@ void do_secure_storage_access(struct pt_regs *regs)
 	struct folio_walk fw;
 	struct mm_struct *mm;
 	struct folio *folio;
-	struct gmap *gmap;
 	int rc;
 
 	/*
@@ -521,17 +424,15 @@ void do_secure_storage_access(struct pt_regs *regs)
 		 */
 		panic("Unexpected PGM 0x3d with TEID bit 61=0");
 	}
-	switch (get_fault_type(regs)) {
-	case GMAP_FAULT:
-		mm = current->mm;
-		gmap = (struct gmap *)get_lowcore()->gmap;
-		mmap_read_lock(mm);
-		addr = __gmap_translate(gmap, addr);
-		mmap_read_unlock(mm);
-		if (IS_ERR_VALUE(addr))
-			return handle_fault_error_nolock(regs, SEGV_MAPERR);
-		fallthrough;
-	case USER_FAULT:
+	if (is_kernel_fault(regs)) {
+		folio = phys_to_folio(addr);
+		if (unlikely(!folio_try_get(folio)))
+			return;
+		rc = arch_make_folio_accessible(folio);
+		folio_put(folio);
+		if (rc)
+			BUG();
+	} else {
 		mm = current->mm;
 		mmap_read_lock(mm);
 		vma = find_vma(mm, addr);
@@ -540,7 +441,7 @@ void do_secure_storage_access(struct pt_regs *regs)
 		folio = folio_walk_start(&fw, vma, addr, 0);
 		if (!folio) {
 			mmap_read_unlock(mm);
-			break;
+			return;
 		}
 		/* arch_make_folio_accessible() needs a raised refcount. */
 		folio_get(folio);
@@ -550,56 +451,8 @@ void do_secure_storage_access(struct pt_regs *regs)
 		if (rc)
 			send_sig(SIGSEGV, current, 0);
 		mmap_read_unlock(mm);
-		break;
-	case KERNEL_FAULT:
-		folio = phys_to_folio(addr);
-		if (unlikely(!folio_try_get(folio)))
-			break;
-		rc = arch_make_folio_accessible(folio);
-		folio_put(folio);
-		if (rc)
-			BUG();
-		break;
-	default:
-		unreachable();
 	}
 }
 NOKPROBE_SYMBOL(do_secure_storage_access);
 
-void do_non_secure_storage_access(struct pt_regs *regs)
-{
-	struct gmap *gmap = (struct gmap *)get_lowcore()->gmap;
-	unsigned long gaddr = get_fault_address(regs);
-
-	if (WARN_ON_ONCE(get_fault_type(regs) != GMAP_FAULT))
-		return handle_fault_error_nolock(regs, SEGV_MAPERR);
-	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
-		send_sig(SIGSEGV, current, 0);
-}
-NOKPROBE_SYMBOL(do_non_secure_storage_access);
-
-void do_secure_storage_violation(struct pt_regs *regs)
-{
-	struct gmap *gmap = (struct gmap *)get_lowcore()->gmap;
-	unsigned long gaddr = get_fault_address(regs);
-
-	/*
-	 * If the VM has been rebooted, its address space might still contain
-	 * secure pages from the previous boot.
-	 * Clear the page so it can be reused.
-	 */
-	if (!gmap_destroy_page(gmap, gaddr))
-		return;
-	/*
-	 * Either KVM messed up the secure guest mapping or the same
-	 * page is mapped into multiple secure guests.
-	 *
-	 * This exception is only triggered when a guest 2 is running
-	 * and can therefore never occur in kernel context.
-	 */
-	pr_warn_ratelimited("Secure storage violation in task: %s, pid %d\n",
-			    current->comm, current->pid);
-	send_sig(SIGSEGV, current, 0);
-}
-
 #endif /* CONFIG_PGSTE */
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index eb0b51a36be0..329682655af2 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -281,37 +281,6 @@ void gmap_remove(struct gmap *gmap)
 }
 EXPORT_SYMBOL_GPL(gmap_remove);
 
-/**
- * gmap_enable - switch primary space to the guest address space
- * @gmap: pointer to the guest address space structure
- */
-void gmap_enable(struct gmap *gmap)
-{
-	get_lowcore()->gmap = (unsigned long)gmap;
-}
-EXPORT_SYMBOL_GPL(gmap_enable);
-
-/**
- * gmap_disable - switch back to the standard primary address space
- * @gmap: pointer to the guest address space structure
- */
-void gmap_disable(struct gmap *gmap)
-{
-	get_lowcore()->gmap = 0UL;
-}
-EXPORT_SYMBOL_GPL(gmap_disable);
-
-/**
- * gmap_get_enabled - get a pointer to the currently enabled gmap
- *
- * Returns a pointer to the currently enabled gmap. 0 if none is enabled.
- */
-struct gmap *gmap_get_enabled(void)
-{
-	return (struct gmap *)get_lowcore()->gmap;
-}
-EXPORT_SYMBOL_GPL(gmap_get_enabled);
-
 /*
  * gmap_alloc_table is assumed to be called with mmap_lock held
  */
@@ -636,45 +605,125 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
 	return rc;
 }
 
+/**
+ * fixup_user_fault_nowait - manually resolve a user page fault without waiting
+ * @mm:		mm_struct of target mm
+ * @address:	user address
+ * @fault_flags: flags to pass down to handle_mm_fault()
+ * @unlocked:	did we unlock the mmap_lock while retrying
+ *
+ * This function behaves similarly to fixup_user_fault(), but it guarantees
+ * that the fault will be resolved without waiting. The function might drop
+ * and re-acquire the mm lock, in which case @unlocked will be set to true.
+ *
+ * The guarantee is that the fault is handled without waiting, but the
+ * function itself might sleep while acquiring the mmap_lock.
+ *
+ * Context: Needs to be called with mm->mmap_lock held in read mode, and will
+ * return with the lock held in read mode; @unlocked will indicate whether
+ * the lock has been dropped and re-acquired. This is the same behaviour as
+ * fixup_user_fault().
+ *
+ * Return: 0 on success, -EAGAIN if the fault cannot be resolved without
+ * waiting, -EFAULT if the fault cannot be resolved, -ENOMEM if out of
+ * memory.
+ */
+static int fixup_user_fault_nowait(struct mm_struct *mm, unsigned long address,
+				   unsigned int fault_flags, bool *unlocked)
+{
+	struct vm_area_struct *vma;
+	unsigned int test_flags;
+	vm_fault_t fault;
+	int rc;
+
+	fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
+	test_flags = fault_flags & FAULT_FLAG_WRITE ? VM_WRITE : VM_READ;
+
+	vma = find_vma(mm, address);
+	if (unlikely(!vma || address < vma->vm_start))
+		return -EFAULT;
+	if (unlikely(!(vma->vm_flags & test_flags)))
+		return -EFAULT;
+
+	fault = handle_mm_fault(vma, address, fault_flags, NULL);
+	/* the mm lock has been dropped, take it again */
+	if (fault & VM_FAULT_COMPLETED) {
+		*unlocked = true;
+		mmap_read_lock(mm);
+		return 0;
+	}
+	/* the mm lock has not been dropped */
+	if (fault & VM_FAULT_ERROR) {
+		rc = vm_fault_to_errno(fault, 0);
+		BUG_ON(!rc);
+		return rc;
+	}
+	/* the mm lock has not been dropped because of FAULT_FLAG_RETRY_NOWAIT */
+	if (fault & VM_FAULT_RETRY)
+		return -EAGAIN;
+	/* nothing needed to be done and the mm lock has not been dropped */
+	return 0;
+}
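
A caller that can tolerate sleeping would typically fall back to the
waiting variant when -EAGAIN comes back. A minimal sketch under that
assumption (hypothetical caller, not part of this series):

	static int resolve_fault(struct mm_struct *mm, unsigned long addr,
				 unsigned int fault_flags)
	{
		bool unlocked = false;
		int rc;

		mmap_read_lock(mm);
		/* First try to resolve the fault without waiting. */
		rc = fixup_user_fault_nowait(mm, addr, fault_flags, &unlocked);
		if (rc == -EAGAIN)
			/* Could not be resolved immediately: wait this time. */
			rc = fixup_user_fault(mm, addr, fault_flags, &unlocked);
		mmap_read_unlock(mm);
		return rc;
	}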
+
+/**
+ * __gmap_fault - resolve a fault on a guest address
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: guest address
+ * @fault_flags: flags to pass down to handle_mm_fault()
+ *
+ * Context: Needs to be called with mm->mmap_lock held in read mode. Might
+ * drop and re-acquire the lock. Will always return with the lock held.
+ */
+static int __gmap_fault(struct gmap *gmap, unsigned long gaddr, unsigned int fault_flags)
+{
+	unsigned long vmaddr;
+	bool unlocked;
+	int rc = 0;
+
+retry:
+	unlocked = false;
+
+	vmaddr = __gmap_translate(gmap, gaddr);
+	if (IS_ERR_VALUE(vmaddr))
+		return vmaddr;
+
+	if (fault_flags & FAULT_FLAG_RETRY_NOWAIT)
+		rc = fixup_user_fault_nowait(gmap->mm, vmaddr, fault_flags, &unlocked);
+	else
+		rc = fixup_user_fault(gmap->mm, vmaddr, fault_flags, &unlocked);
+	if (rc)
+		return rc;
+	/*
+	 * In the case that fixup_user_fault unlocked the mmap_lock during
+	 * fault-in, redo __gmap_translate() to avoid racing with a
+	 * map/unmap_segment.
+	 * In particular, __gmap_translate(), fixup_user_fault{,_nowait}(),
+	 * and __gmap_link() must all be called atomically in one go; if the
+	 * lock had been dropped in between, a retry is needed.
+	 */
+	if (unlocked)
+		goto retry;
+
+	return __gmap_link(gmap, gaddr, vmaddr);
+}
+
 /**
  * gmap_fault - resolve a fault on a guest address
  * @gmap: pointer to guest mapping meta data structure
  * @gaddr: guest address
  * @fault_flags: flags to pass down to handle_mm_fault()
  *
- * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
- * if the vm address is already mapped to a different guest segment.
+ * Returns 0 on success, -ENOMEM for out of memory conditions, -EFAULT if the
+ * vm address is already mapped to a different guest segment, and -EAGAIN if
+ * FAULT_FLAG_RETRY_NOWAIT was specified and the fault could not be processed
+ * immediately.
  */
-int gmap_fault(struct gmap *gmap, unsigned long gaddr,
-	       unsigned int fault_flags)
+int gmap_fault(struct gmap *gmap, unsigned long gaddr, unsigned int fault_flags)
 {
-	unsigned long vmaddr;
 	int rc;
-	bool unlocked;
 
 	mmap_read_lock(gmap->mm);
-
-retry:
-	unlocked = false;
-	vmaddr = __gmap_translate(gmap, gaddr);
-	if (IS_ERR_VALUE(vmaddr)) {
-		rc = vmaddr;
-		goto out_up;
-	}
-	if (fixup_user_fault(gmap->mm, vmaddr, fault_flags,
-			     &unlocked)) {
-		rc = -EFAULT;
-		goto out_up;
-	}
-	/*
-	 * In the case that fixup_user_fault unlocked the mmap_lock during
-	 * faultin redo __gmap_translate to not race with a map/unmap_segment.
-	 */
-	if (unlocked)
-		goto retry;
-
-	rc = __gmap_link(gmap, gaddr, vmaddr);
-out_up:
+	rc = __gmap_fault(gmap, gaddr, fault_flags);
 	mmap_read_unlock(gmap->mm);
 	return rc;
 }
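
For illustration, a KVM-side consumer would now resolve a guest-absolute
address roughly as sketched below; the function name and the pfault
handling are assumptions, the real wiring lives in KVM's interception
code:

	static int kvm_resolve_guest_fault(struct gmap *gmap, unsigned long gaddr,
					   bool write)
	{
		unsigned int flags = write ? FAULT_FLAG_WRITE : 0;
		int rc;

		/* Request a non-waiting resolution when pfault is enabled. */
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
		rc = gmap_fault(gmap, gaddr, flags);
		if (rc == -EAGAIN) {
			/* ... inject pfault-init and resolve asynchronously ... */
		}
		return rc;
	}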
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 5f805ad42d4c..4a0f422cfeb6 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -12,6 +12,7 @@
 #include <asm/pgalloc.h>
 #include <asm/kfence.h>
 #include <asm/page.h>
+#include <asm/asm.h>
 #include <asm/set_memory.h>
 
 static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
@@ -406,6 +407,21 @@ int set_direct_map_default_noflush(struct page *page)
 	return __set_memory((unsigned long)page_to_virt(page), 1, SET_MEMORY_DEF);
 }
 
+bool kernel_page_present(struct page *page)
+{
+	unsigned long addr;
+	unsigned int cc;
+
+	addr = (unsigned long)page_address(page);
+	asm volatile(
+		"	lra	%[addr],0(%[addr])\n"
+		CC_IPM(cc)
+		: CC_OUT(cc, cc), [addr] "+a" (addr)
+		:
+		: CC_CLOBBER);
+	return CC_TRANSFORM(cc) == 0;
+}
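
kernel_page_present() relies on LOAD REAL ADDRESS setting a nonzero
condition code when no translation exists for the address. The CC_*
helpers come from the new <asm/asm.h>; as a rough sketch, the fallback
variants (used when the compiler cannot return the condition code via
flag outputs) plausibly look like this -- the authoritative definitions
are in arch/s390/include/asm/asm.h:

	/* Sketch of the non-flag-output fallback (assumption): */
	#define CC_IPM(cc)		"	ipm	%[" __stringify(cc) "]\n"
	#define CC_OUT(name, value)	[name] "=d" (value)
	#define CC_TRANSFORM(cc)	({ (cc) >> 28; })	/* cc ends up in the two low bits */
	#define CC_CLOBBER		"cc"
	#define CC_CLOBBER_LIST(...)	"cc", __VA_ARGS__

	/*
	 * With __GCC_ASM_FLAG_OUTPUTS__ the macros use "=@cc" outputs instead,
	 * CC_IPM() expands to nothing and CC_TRANSFORM() is the identity.
	 */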
+
 #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 
 static void ipte_range(pte_t *pte, unsigned long address, int nr)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 2c944bafb030..cea5dba80468 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -525,7 +525,7 @@ static inline void pudp_idte_global(struct mm_struct *mm,
 	else
 		/*
 		 * Invalid bit position is the same for pmd and pud, so we can
-		 * re-use _pmd_csp() here
+		 * reuse _pmd_csp() here
 		 */
 		__pmdp_csp((pmd_t *) pudp);
 }
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index bd9624c20b80..b7efa96776ea 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -29,6 +29,7 @@
 #include <linux/pci.h>
 #include <linux/printk.h>
 #include <linux/lockdep.h>
+#include <linux/list_sort.h>
 
 #include <asm/isc.h>
 #include <asm/airq.h>
@@ -785,7 +786,6 @@ struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
 	struct zpci_dev *zdev;
 	int rc;
 
-	zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
 	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
 	if (!zdev)
 		return ERR_PTR(-ENOMEM);
@@ -805,6 +805,19 @@ struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
 	mutex_init(&zdev->fmb_lock);
 	mutex_init(&zdev->kzdev_lock);
 
+	return zdev;
+
+error:
+	zpci_dbg(0, "crt fid:%x, rc:%d\n", fid, rc);
+	kfree(zdev);
+	return ERR_PTR(rc);
+}
+
+int zpci_add_device(struct zpci_dev *zdev)
+{
+	int rc;
+
+	zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", zdev->fid, zdev->fh, zdev->state);
 	rc = zpci_init_iommu(zdev);
 	if (rc)
 		goto error;
@@ -816,15 +829,13 @@ struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
 	spin_lock(&zpci_list_lock);
 	list_add_tail(&zdev->entry, &zpci_list);
 	spin_unlock(&zpci_list_lock);
-
-	return zdev;
+	return 0;
 
 error_destroy_iommu:
 	zpci_destroy_iommu(zdev);
 error:
-	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
-	kfree(zdev);
-	return ERR_PTR(rc);
+	zpci_dbg(0, "add fid:%x, rc:%d\n", zdev->fid, rc);
+	return rc;
 }
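
With this split, discovery follows a two-step pattern; condensed, the
new calling convention (as used by the availability-event handler
further down) is:

	/* Step 1: allocate and initialize the zdev, no global visibility yet. */
	zdev = zpci_create_device(fid, fh, state);
	if (IS_ERR(zdev))
		return PTR_ERR(zdev);
	/* Step 2: set up the IOMMU and make the device globally visible. */
	rc = zpci_add_device(zdev);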
 
 bool zpci_is_device_configured(struct zpci_dev *zdev)
@@ -1082,6 +1093,49 @@ bool zpci_is_enabled(void)
 	return s390_pci_initialized;
 }
 
+static int zpci_cmp_rid(void *priv, const struct list_head *a,
+			const struct list_head *b)
+{
+	struct zpci_dev *za = container_of(a, struct zpci_dev, entry);
+	struct zpci_dev *zb = container_of(b, struct zpci_dev, entry);
+
+	/*
+	 * PCI functions without RID available maintain original order
+	 * between themselves but sort before those with RID.
+	 */
+	if (za->rid == zb->rid)
+		return za->rid_available > zb->rid_available;
+	/*
+	 * PCI functions with RID sort by RID ascending.
+	 */
+	return za->rid > zb->rid;
+}
+
+static void zpci_add_devices(struct list_head *scan_list)
+{
+	struct zpci_dev *zdev, *tmp;
+
+	list_sort(NULL, scan_list, &zpci_cmp_rid);
+	list_for_each_entry_safe(zdev, tmp, scan_list, entry) {
+		list_del_init(&zdev->entry);
+		zpci_add_device(zdev);
+	}
+}
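
To illustrate the comparator with hypothetical values: list_sort() is
stable and treats a non-positive return as "a stays before b", so a scan
list sorts as in this sketch:

	/*
	 * Hypothetical input, as (rid_available, rid) pairs:
	 *   A(0, 0x0000)  B(1, 0x0010)  C(1, 0x0008)  D(0, 0x0000)
	 * Sorted output: A, D, C, B
	 *   - A and D have no RID: they keep their relative order and
	 *     sort before the others
	 *   - B and C have RIDs and are ordered by RID, ascending
	 */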
+
+int zpci_scan_devices(void)
+{
+	LIST_HEAD(scan_list);
+	int rc;
+
+	rc = clp_scan_pci_devices(&scan_list);
+	if (rc)
+		return rc;
+
+	zpci_add_devices(&scan_list);
+	zpci_bus_scan_busses();
+	return 0;
+}
+
 static int __init pci_base_init(void)
 {
 	int rc;
@@ -1111,10 +1165,9 @@ static int __init pci_base_init(void)
 	if (rc)
 		goto out_irq;
 
-	rc = clp_scan_pci_devices();
+	rc = zpci_scan_devices();
 	if (rc)
 		goto out_find;
-	zpci_bus_scan_busses();
 
 	s390_pci_initialized = 1;
 	return 0;
diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
index daa5d7450c7d..1b74a000ff64 100644
--- a/arch/s390/pci/pci_bus.c
+++ b/arch/s390/pci/pci_bus.c
@@ -168,9 +168,16 @@ void zpci_bus_scan_busses(void)
 	mutex_unlock(&zbus_list_lock);
 }
 
+static bool zpci_bus_is_multifunction_root(struct zpci_dev *zdev)
+{
+	return !s390_pci_no_rid && zdev->rid_available &&
+		zpci_is_device_configured(zdev) &&
+		!zdev->vfn;
+}
+
 /* zpci_bus_create_pci_bus - Create the PCI bus associated with this zbus
  * @zbus: the zbus holding the zdevices
- * @fr: PCI root function that will determine the bus's domain, and bus speeed
+ * @fr: PCI root function that will determine the bus's domain, and bus speed
  * @ops: the pci operations
  *
  * The PCI function @fr determines the domain (its UID), multifunction property
@@ -188,7 +195,7 @@ static int zpci_bus_create_pci_bus(struct zpci_bus *zbus, struct zpci_dev *fr, s
 		return domain;
 
 	zbus->domain_nr = domain;
-	zbus->multifunction = fr->rid_available;
+	zbus->multifunction = zpci_bus_is_multifunction_root(fr);
 	zbus->max_bus_speed = fr->max_bus_speed;
 
 	/*
@@ -232,13 +239,15 @@ static void zpci_bus_put(struct zpci_bus *zbus)
 	kref_put(&zbus->kref, zpci_bus_release);
 }
 
-static struct zpci_bus *zpci_bus_get(int pchid)
+static struct zpci_bus *zpci_bus_get(int topo, bool topo_is_tid)
 {
 	struct zpci_bus *zbus;
 
 	mutex_lock(&zbus_list_lock);
 	list_for_each_entry(zbus, &zbus_list, bus_next) {
-		if (pchid == zbus->pchid) {
+		if (!zbus->multifunction)
+			continue;
+		if (topo_is_tid == zbus->topo_is_tid && topo == zbus->topo) {
 			kref_get(&zbus->kref);
 			goto out_unlock;
 		}
@@ -249,7 +258,7 @@ out_unlock:
 	return zbus;
 }
 
-static struct zpci_bus *zpci_bus_alloc(int pchid)
+static struct zpci_bus *zpci_bus_alloc(int topo, bool topo_is_tid)
 {
 	struct zpci_bus *zbus;
 
@@ -257,7 +266,8 @@ static struct zpci_bus *zpci_bus_alloc(int pchid)
 	if (!zbus)
 		return NULL;
 
-	zbus->pchid = pchid;
+	zbus->topo = topo;
+	zbus->topo_is_tid = topo_is_tid;
 	INIT_LIST_HEAD(&zbus->bus_next);
 	mutex_lock(&zbus_list_lock);
 	list_add_tail(&zbus->bus_next, &zbus_list);
@@ -292,19 +302,22 @@ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
 {
 	int rc = -EINVAL;
 
+	if (zbus->multifunction) {
+		if (!zdev->rid_available) {
+			WARN_ONCE(1, "rid_available not set for multifunction\n");
+			return rc;
+		}
+		zdev->devfn = zdev->rid & ZPCI_RID_MASK_DEVFN;
+	}
+
 	if (zbus->function[zdev->devfn]) {
 		pr_err("devfn %04x is already assigned\n", zdev->devfn);
 		return rc;
 	}
-
 	zdev->zbus = zbus;
 	zbus->function[zdev->devfn] = zdev;
 	zpci_nb_devices++;
 
-	if (zbus->multifunction && !zdev->rid_available) {
-		WARN_ONCE(1, "rid_available not set for multifunction\n");
-		goto error;
-	}
 	rc = zpci_init_slot(zdev);
 	if (rc)
 		goto error;
@@ -321,8 +334,9 @@ error:
 
 int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
 {
+	bool topo_is_tid = zdev->tid_avail;
 	struct zpci_bus *zbus = NULL;
-	int rc = -EBADF;
+	int topo, rc = -EBADF;
 
 	if (zpci_nb_devices == ZPCI_NR_DEVICES) {
 		pr_warn("Adding PCI function %08x failed because the configured limit of %d is reached\n",
@@ -330,14 +344,10 @@ int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
 		return -ENOSPC;
 	}
 
-	if (zdev->devfn >= ZPCI_FUNCTIONS_PER_BUS)
-		return -EINVAL;
-
-	if (!s390_pci_no_rid && zdev->rid_available)
-		zbus = zpci_bus_get(zdev->pchid);
-
+	topo = topo_is_tid ? zdev->tid : zdev->pchid;
+	zbus = zpci_bus_get(topo, topo_is_tid);
 	if (!zbus) {
-		zbus = zpci_bus_alloc(zdev->pchid);
+		zbus = zpci_bus_alloc(topo, topo_is_tid);
 		if (!zbus)
 			return -ENOMEM;
 	}
diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h
index af9f0ac79a1b..e86a9419d233 100644
--- a/arch/s390/pci/pci_bus.h
+++ b/arch/s390/pci/pci_bus.h
@@ -6,6 +6,10 @@
  *   Pierre Morel <pmorel@linux.ibm.com>
  *
  */
+#ifndef __S390_PCI_BUS_H
+#define __S390_PCI_BUS_H
+
+#include <linux/pci.h>
 
 int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops);
 void zpci_bus_device_unregister(struct zpci_dev *zdev);
@@ -40,3 +44,4 @@ static inline struct zpci_dev *zdev_from_bus(struct pci_bus *bus,
 	return (devfn >= ZPCI_FUNCTIONS_PER_BUS) ? NULL : zbus->function[devfn];
 }
 
+#endif /* __S390_PCI_BUS_H */
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 6f55a59a0871..14bf7e8d06b7 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -20,6 +20,7 @@
 #include <asm/asm-extable.h>
 #include <asm/pci_debug.h>
 #include <asm/pci_clp.h>
+#include <asm/asm.h>
 #include <asm/clp.h>
 #include <uapi/asm/clp.h>
 
@@ -52,18 +53,20 @@ static inline void zpci_err_clp(unsigned int rsp, int rc)
 static inline int clp_get_ilp(unsigned long *ilp)
 {
 	unsigned long mask;
-	int cc = 3;
+	int cc, exception;
 
+	exception = 1;
 	asm volatile (
 		"	.insn	rrf,0xb9a00000,%[mask],%[cmd],8,0\n"
-		"0:	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
+		"0:	lhi	%[exc],0\n"
 		"1:\n"
+		CC_IPM(cc)
 		EX_TABLE(0b, 1b)
-		: [cc] "+d" (cc), [mask] "=d" (mask) : [cmd] "a" (1)
-		: "cc");
+		: CC_OUT(cc, cc), [mask] "=d" (mask), [exc] "+d" (exception)
+		: [cmd] "a" (1)
+		: CC_CLOBBER);
 	*ilp = mask;
-	return cc;
+	return exception ? 3 : CC_TRANSFORM(cc);
 }
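
This conversion recurs throughout the series, so the shape is worth
spelling out once. The extable entry is placed on the instruction
*after* the one that may fault, because that is where the PSW points
when the program check is delivered; branching past the lhi leaves the
pre-set exception marker intact. As a sketch (the first asm line is a
placeholder, not a real instruction):

	int cc, exception;

	exception = 1;			/* assume a program check upfront */
	asm volatile(
		"	<insn that may program-check>\n"
		"0:	lhi	%[exc],0\n"	/* reached only if the insn completed */
		"1:\n"
		CC_IPM(cc)
		EX_TABLE(0b, 1b)	/* on a fault, resume at 1: with exc still 1 */
		: CC_OUT(cc, cc), [exc] "+d" (exception)
		:
		: CC_CLOBBER);
	return exception ? 3 : CC_TRANSFORM(cc);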
 
 /*
@@ -72,19 +75,20 @@ static inline int clp_get_ilp(unsigned long *ilp)
 static __always_inline int clp_req(void *data, unsigned int lps)
 {
 	struct { u8 _[CLP_BLK_SIZE]; } *req = data;
+	int cc, exception;
 	u64 ignored;
-	int cc = 3;
 
+	exception = 1;
 	asm volatile (
 		"	.insn	rrf,0xb9a00000,%[ign],%[req],0,%[lps]\n"
-		"0:	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
+		"0:	lhi	%[exc],0\n"
 		"1:\n"
+		CC_IPM(cc)
 		EX_TABLE(0b, 1b)
-		: [cc] "+d" (cc), [ign] "=d" (ignored), "+m" (*req)
+		: CC_OUT(cc, cc), [ign] "=d" (ignored), "+m" (*req), [exc] "+d" (exception)
 		: [req] "a" (req), [lps] "i" (lps)
-		: "cc");
-	return cc;
+		: CC_CLOBBER);
+	return exception ? 3 : CC_TRANSFORM(cc);
 }
 
 static void *clp_alloc_block(gfp_t gfp_mask)
@@ -162,12 +166,16 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
 	zdev->pft = response->pft;
 	zdev->vfn = response->vfn;
 	zdev->port = response->port;
+	zdev->fidparm = response->fidparm;
 	zdev->uid = response->uid;
 	zdev->fmb_length = sizeof(u32) * response->fmb_len;
-	zdev->rid_available = response->rid_avail;
 	zdev->is_physfn = response->is_physfn;
-	if (!s390_pci_no_rid && zdev->rid_available)
-		zdev->devfn = response->rid & ZPCI_RID_MASK_DEVFN;
+	zdev->rid_available = response->rid_avail;
+	if (zdev->rid_available)
+		zdev->rid = response->rid;
+	zdev->tid_avail = response->tid_avail;
+	if (zdev->tid_avail)
+		zdev->tid = response->tid;
 
 	memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
 	if (response->util_str_avail) {
@@ -407,6 +415,7 @@ static int clp_find_pci(struct clp_req_rsp_list_pci *rrb, u32 fid,
 
 static void __clp_add(struct clp_fh_list_entry *entry, void *data)
 {
+	struct list_head *scan_list = data;
 	struct zpci_dev *zdev;
 
 	if (!entry->vendor_id)
@@ -417,10 +426,11 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
 		zpci_zdev_put(zdev);
 		return;
 	}
-	zpci_create_device(entry->fid, entry->fh, entry->config_state);
+	zdev = zpci_create_device(entry->fid, entry->fh, entry->config_state);
+	if (IS_ERR(zdev))
+		return;
+	list_add_tail(&zdev->entry, scan_list);
 }
 
-int clp_scan_pci_devices(void)
+int clp_scan_pci_devices(struct list_head *scan_list)
 {
 	struct clp_req_rsp_list_pci *rrb;
 	int rc;
@@ -429,7 +439,7 @@ int clp_scan_pci_devices(void)
 	if (!rrb)
 		return -ENOMEM;
 
-	rc = clp_list_pci(rrb, NULL, __clp_add);
+	rc = clp_list_pci(rrb, scan_list, __clp_add);
 
 	clp_free_block(rrb);
 	return rc;
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index d4f19d33914c..47f934f4e828 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -340,6 +340,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 			zdev = zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_CONFIGURED);
 			if (IS_ERR(zdev))
 				break;
+			zpci_add_device(zdev);
 		} else {
 			/* the configuration request may be stale */
 			if (zdev->state != ZPCI_FN_STATE_STANDBY)
@@ -349,10 +350,14 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 		zpci_scan_configured_device(zdev, ccdf->fh);
 		break;
 	case 0x0302: /* Reserved -> Standby */
-		if (!zdev)
-			zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_STANDBY);
-		else
+		if (!zdev) {
+			zdev = zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_STANDBY);
+			if (IS_ERR(zdev))
+				break;
+			zpci_add_device(zdev);
+		} else {
 			zpci_update_fh(zdev, ccdf->fh);
+		}
 		break;
 	case 0x0303: /* Deconfiguration requested */
 		if (zdev) {
@@ -381,7 +386,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 		break;
 	case 0x0306: /* 0x308 or 0x302 for multiple devices */
 		zpci_remove_reserved_devices();
-		clp_scan_pci_devices();
+		zpci_scan_devices();
 		break;
 	case 0x0308: /* Standby -> Reserved */
 		if (!zdev)
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index 56480be48244..f5a75ea7629a 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -15,6 +15,7 @@
 #include <asm/pci_debug.h>
 #include <asm/pci_io.h>
 #include <asm/processor.h>
+#include <asm/asm.h>
 
 #define ZPCI_INSN_BUSY_DELAY	1	/* 1 microsecond */
 
@@ -57,16 +58,16 @@ static inline void zpci_err_insn_addr(int lvl, u8 insn, u8 cc, u8 status,
 /* Modify PCI Function Controls */
 static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
 {
-	u8 cc;
+	int cc;
 
 	asm volatile (
 		"	.insn	rxy,0xe300000000d0,%[req],%[fib]\n"
-		"	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
-		: [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
-		: : "cc");
+		CC_IPM(cc)
+		: CC_OUT(cc, cc), [req] "+d" (req), [fib] "+Q" (*fib)
+		:
+		: CC_CLOBBER);
 	*status = req >> 24 & 0xff;
-	return cc;
+	return CC_TRANSFORM(cc);
 }
 
 u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status)
@@ -98,17 +99,16 @@ EXPORT_SYMBOL_GPL(zpci_mod_fc);
 static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
 {
 	union register_pair addr_range = {.even = addr, .odd = range};
-	u8 cc;
+	int cc;
 
 	asm volatile (
 		"	.insn	rre,0xb9d30000,%[fn],%[addr_range]\n"
-		"	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
-		: [cc] "=d" (cc), [fn] "+d" (fn)
+		CC_IPM(cc)
+		: CC_OUT(cc, cc), [fn] "+d" (fn)
 		: [addr_range] "d" (addr_range.pair)
-		: "cc");
+		: CC_CLOBBER);
 	*status = fn >> 24 & 0xff;
-	return cc;
+	return CC_TRANSFORM(cc);
 }
 
 int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
@@ -156,20 +156,23 @@ EXPORT_SYMBOL_GPL(zpci_set_irq_ctrl);
 static inline int ____pcilg(u64 *data, u64 req, u64 offset, u8 *status)
 {
 	union register_pair req_off = {.even = req, .odd = offset};
-	int cc = -ENXIO;
+	int cc, exception;
 	u64 __data;
 
+	exception = 1;
 	asm volatile (
 		"	.insn	rre,0xb9d20000,%[data],%[req_off]\n"
-		"0:	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
+		"0:	lhi	%[exc],0\n"
 		"1:\n"
+		CC_IPM(cc)
 		EX_TABLE(0b, 1b)
-		: [cc] "+d" (cc), [data] "=d" (__data),
-		  [req_off] "+&d" (req_off.pair) :: "cc");
+		: CC_OUT(cc, cc), [data] "=d" (__data),
+		  [req_off] "+d" (req_off.pair), [exc] "+d" (exception)
+		:
+		: CC_CLOBBER);
 	*status = req_off.even >> 24 & 0xff;
 	*data = __data;
-	return cc;
+	return exception ? -ENXIO : CC_TRANSFORM(cc);
 }
 
 static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
@@ -222,20 +225,23 @@ static inline int zpci_load_fh(u64 *data, const volatile void __iomem *addr,
 static inline int __pcilg_mio(u64 *data, u64 ioaddr, u64 len, u8 *status)
 {
 	union register_pair ioaddr_len = {.even = ioaddr, .odd = len};
-	int cc = -ENXIO;
+	int cc, exception;
 	u64 __data;
 
+	exception = 1;
 	asm volatile (
 		"       .insn   rre,0xb9d60000,%[data],%[ioaddr_len]\n"
-		"0:     ipm     %[cc]\n"
-		"       srl     %[cc],28\n"
+		"0:	lhi	%[exc],0\n"
 		"1:\n"
+		CC_IPM(cc)
 		EX_TABLE(0b, 1b)
-		: [cc] "+d" (cc), [data] "=d" (__data),
-		  [ioaddr_len] "+&d" (ioaddr_len.pair) :: "cc");
+		: CC_OUT(cc, cc), [data] "=d" (__data),
+		  [ioaddr_len] "+d" (ioaddr_len.pair), [exc] "+d" (exception)
+		:
+		: CC_CLOBBER);
 	*status = ioaddr_len.odd >> 24 & 0xff;
 	*data = __data;
-	return cc;
+	return exception ? -ENXIO : CC_TRANSFORM(cc);
 }
 
 int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len)
@@ -258,19 +264,20 @@ EXPORT_SYMBOL_GPL(zpci_load);
 static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
 {
 	union register_pair req_off = {.even = req, .odd = offset};
-	int cc = -ENXIO;
+	int cc, exception;
 
+	exception = 1;
 	asm volatile (
 		"	.insn	rre,0xb9d00000,%[data],%[req_off]\n"
-		"0:	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
+		"0:	lhi	%[exc],0\n"
 		"1:\n"
+		CC_IPM(cc)
 		EX_TABLE(0b, 1b)
-		: [cc] "+d" (cc), [req_off] "+&d" (req_off.pair)
+		: CC_OUT(cc, cc), [req_off] "+d" (req_off.pair), [exc] "+d" (exception)
 		: [data] "d" (data)
-		: "cc");
+		: CC_CLOBBER);
 	*status = req_off.even >> 24 & 0xff;
-	return cc;
+	return exception ? -ENXIO : CC_TRANSFORM(cc);
 }
 
 int __zpci_store(u64 data, u64 req, u64 offset)
@@ -311,19 +318,20 @@ static inline int zpci_store_fh(const volatile void __iomem *addr, u64 data,
 static inline int __pcistg_mio(u64 data, u64 ioaddr, u64 len, u8 *status)
 {
 	union register_pair ioaddr_len = {.even = ioaddr, .odd = len};
-	int cc = -ENXIO;
+	int cc, exception;
 
+	exception = 1;
 	asm volatile (
 		"       .insn   rre,0xb9d40000,%[data],%[ioaddr_len]\n"
-		"0:     ipm     %[cc]\n"
-		"       srl     %[cc],28\n"
+		"0:	lhi	%[exc],0\n"
 		"1:\n"
+		CC_IPM(cc)
 		EX_TABLE(0b, 1b)
-		: [cc] "+d" (cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
+		: CC_OUT(cc, cc), [ioaddr_len] "+d" (ioaddr_len.pair), [exc] "+d" (exception)
 		: [data] "d" (data)
-		: "cc", "memory");
+		: CC_CLOBBER_LIST("memory"));
 	*status = ioaddr_len.odd >> 24 & 0xff;
-	return cc;
+	return exception ? -ENXIO : CC_TRANSFORM(cc);
 }
 
 int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len)
@@ -345,19 +353,20 @@ EXPORT_SYMBOL_GPL(zpci_store);
 /* PCI Store Block */
 static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
 {
-	int cc = -ENXIO;
+	int cc, exception;
 
+	exception = 1;
 	asm volatile (
 		"	.insn	rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
-		"0:	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
+		"0:	lhi	%[exc],0\n"
 		"1:\n"
+		CC_IPM(cc)
 		EX_TABLE(0b, 1b)
-		: [cc] "+d" (cc), [req] "+d" (req)
+		: CC_OUT(cc, cc), [req] "+d" (req), [exc] "+d" (exception)
 		: [offset] "d" (offset), [data] "Q" (*data)
-		: "cc");
+		: CC_CLOBBER);
 	*status = req >> 24 & 0xff;
-	return cc;
+	return exception ? -ENXIO : CC_TRANSFORM(cc);
 }
 
 int __zpci_store_block(const u64 *data, u64 req, u64 offset)
@@ -398,19 +407,20 @@ static inline int zpci_write_block_fh(volatile void __iomem *dst,
 
 static inline int __pcistb_mio(const u64 *data, u64 ioaddr, u64 len, u8 *status)
 {
-	int cc = -ENXIO;
+	int cc, exception;
 
+	exception = 1;
 	asm volatile (
 		"       .insn   rsy,0xeb00000000d4,%[len],%[ioaddr],%[data]\n"
-		"0:     ipm     %[cc]\n"
-		"       srl     %[cc],28\n"
+		"0:	lhi	%[exc],0\n"
 		"1:\n"
+		CC_IPM(cc)
 		EX_TABLE(0b, 1b)
-		: [cc] "+d" (cc), [len] "+d" (len)
+		: CC_OUT(cc, cc), [len] "+d" (len), [exc] "+d" (exception)
 		: [ioaddr] "d" (ioaddr), [data] "Q" (*data)
-		: "cc");
+		: CC_CLOBBER);
 	*status = len >> 24 & 0xff;
-	return cc;
+	return exception ? -ENXIO : CC_TRANSFORM(cc);
 }
 
 int zpci_write_block(volatile void __iomem *dst,
diff --git a/arch/s390/pci/pci_iov.h b/arch/s390/pci/pci_iov.h
index b2c828003bad..e3fa4e77fc86 100644
--- a/arch/s390/pci/pci_iov.h
+++ b/arch/s390/pci/pci_iov.h
@@ -10,6 +10,8 @@
 #ifndef __S390_PCI_IOV_H
 #define __S390_PCI_IOV_H
 
+#include <linux/pci.h>
+
 #ifdef CONFIG_PCI_IOV
 void zpci_iov_remove_virtfn(struct pci_dev *pdev, int vfn);
 
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index de5c0b389a3e..46f99dc164ad 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -14,6 +14,7 @@
 #include <asm/asm-extable.h>
 #include <asm/pci_io.h>
 #include <asm/pci_debug.h>
+#include <asm/asm.h>
 
 static inline void zpci_err_mmio(u8 cc, u8 status, u64 offset)
 {
@@ -30,20 +31,21 @@ static inline int __pcistb_mio_inuser(
 		void __iomem *ioaddr, const void __user *src,
 		u64 len, u8 *status)
 {
-	int cc = -ENXIO;
+	int cc, exception;
 
+	exception = 1;
 	asm volatile (
-		"       sacf 256\n"
-		"0:     .insn   rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n"
-		"1:     ipm     %[cc]\n"
-		"       srl     %[cc],28\n"
-		"2:     sacf 768\n"
+		"	sacf	256\n"
+		"0:	.insn	rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n"
+		"1:	lhi	%[exc],0\n"
+		"2:	sacf	768\n"
+		CC_IPM(cc)
 		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
-		: [cc] "+d" (cc), [len] "+d" (len)
+		: CC_OUT(cc, cc), [len] "+d" (len), [exc] "+d" (exception)
 		: [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src))
-		: "cc", "memory");
+		: CC_CLOBBER_LIST("memory"));
 	*status = len >> 24 & 0xff;
-	return cc;
+	return exception ? -ENXIO : CC_TRANSFORM(cc);
 }
 
 static inline int __pcistg_mio_inuser(
@@ -51,7 +53,7 @@ static inline int __pcistg_mio_inuser(
 		u64 ulen, u8 *status)
 {
 	union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
-	int cc = -ENXIO;
+	int cc, exception;
 	u64 val = 0;
 	u64 cnt = ulen;
 	u8 tmp;
@@ -61,25 +63,27 @@ static inline int __pcistg_mio_inuser(
 	 * a register, then store it to PCI at @ioaddr while in secondary
 	 * address space. pcistg then uses the user mappings.
 	 */
+	exception = 1;
 	asm volatile (
-		"       sacf    256\n"
-		"0:     llgc    %[tmp],0(%[src])\n"
+		"	sacf	256\n"
+		"0:	llgc	%[tmp],0(%[src])\n"
 		"4:	sllg	%[val],%[val],8\n"
-		"       aghi    %[src],1\n"
-		"       ogr     %[val],%[tmp]\n"
-		"       brctg   %[cnt],0b\n"
-		"1:     .insn   rre,0xb9d40000,%[val],%[ioaddr_len]\n"
-		"2:     ipm     %[cc]\n"
-		"       srl     %[cc],28\n"
-		"3:     sacf    768\n"
+		"	aghi	%[src],1\n"
+		"	ogr	%[val],%[tmp]\n"
+		"	brctg	%[cnt],0b\n"
+		"1:	.insn	rre,0xb9d40000,%[val],%[ioaddr_len]\n"
+		"2:	lhi	%[exc],0\n"
+		"3:	sacf	768\n"
+		CC_IPM(cc)
 		EX_TABLE(0b, 3b) EX_TABLE(4b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
+		: [src] "+a" (src), [cnt] "+d" (cnt),
+		  [val] "+d" (val), [tmp] "=d" (tmp), [exc] "+d" (exception),
+		  CC_OUT(cc, cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
 		:
-		[src] "+a" (src), [cnt] "+d" (cnt),
-		[val] "+d" (val), [tmp] "=d" (tmp),
-		[cc] "+d" (cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
-		:: "cc", "memory");
+		: CC_CLOBBER_LIST("memory"));
 	*status = ioaddr_len.odd >> 24 & 0xff;
 
+	cc = exception ? -ENXIO : CC_TRANSFORM(cc);
 	/* did we read everything from user memory? */
 	if (!cc && cnt != 0)
 		cc = -EFAULT;
@@ -198,7 +202,7 @@ static inline int __pcilg_mio_inuser(
 	union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
 	u64 cnt = ulen;
 	int shift = ulen * 8;
-	int cc = -ENXIO;
+	int cc, exception;
 	u64 val, tmp;
 
 	/*
@@ -206,27 +210,33 @@ static inline int __pcilg_mio_inuser(
 	 * user space) into a register using pcilg then store these bytes at
 	 * user address @dst
 	 */
+	exception = 1;
 	asm volatile (
-		"       sacf    256\n"
-		"0:     .insn   rre,0xb9d60000,%[val],%[ioaddr_len]\n"
-		"1:     ipm     %[cc]\n"
-		"       srl     %[cc],28\n"
-		"       ltr     %[cc],%[cc]\n"
-		"       jne     4f\n"
-		"2:     ahi     %[shift],-8\n"
-		"       srlg    %[tmp],%[val],0(%[shift])\n"
-		"3:     stc     %[tmp],0(%[dst])\n"
+		"	sacf	256\n"
+		"0:	.insn	rre,0xb9d60000,%[val],%[ioaddr_len]\n"
+		"1:	lhi	%[exc],0\n"
+		"	jne	4f\n"
+		"2:	ahi	%[shift],-8\n"
+		"	srlg	%[tmp],%[val],0(%[shift])\n"
+		"3:	stc	%[tmp],0(%[dst])\n"
 		"5:	aghi	%[dst],1\n"
-		"       brctg   %[cnt],2b\n"
-		"4:     sacf    768\n"
+		"	brctg	%[cnt],2b\n"
+		/*
+		 * Use xr to clear exc and set condition code to zero
+		 * to ensure flag output is correct for this branch.
+		 */
+		"	xr	%[exc],%[exc]\n"
+		"4:	sacf	768\n"
+		CC_IPM(cc)
 		EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) EX_TABLE(5b, 4b)
+		: [ioaddr_len] "+&d" (ioaddr_len.pair), [exc] "+d" (exception),
+		  CC_OUT(cc, cc), [val] "=d" (val),
+		  [dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
+		  [shift] "+d" (shift)
 		:
-		[ioaddr_len] "+&d" (ioaddr_len.pair),
-		[cc] "+d" (cc), [val] "=d" (val),
-		[dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
-		[shift] "+d" (shift)
-		:: "cc", "memory");
+		: CC_CLOBBER_LIST("memory"));
 
+	cc = exception ? -ENXIO : CC_TRANSFORM(cc);
 	/* did we write everything to the user space buffer? */
 	if (!cc && cnt != 0)
 		cc = -EFAULT;
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
index 1f81f6ff7b95..5f46ad58dcd1 100644
--- a/arch/s390/pci/pci_sysfs.c
+++ b/arch/s390/pci/pci_sysfs.c
@@ -23,7 +23,7 @@ static ssize_t name##_show(struct device *dev,				\
 {									\
 	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));		\
 									\
-	return sprintf(buf, fmt, zdev->member);				\
+	return sysfs_emit(buf, fmt, zdev->member);				\
 }									\
 static DEVICE_ATTR_RO(name)
 
@@ -34,6 +34,7 @@ zpci_attr(pfgid, "0x%02x\n", pfgid);
 zpci_attr(vfn, "0x%04x\n", vfn);
 zpci_attr(pft, "0x%02x\n", pft);
 zpci_attr(port, "%d\n", port);
+zpci_attr(fidparm, "0x%02x\n", fidparm);
 zpci_attr(uid, "0x%x\n", uid);
 zpci_attr(segment0, "0x%02x\n", pfip[0]);
 zpci_attr(segment1, "0x%02x\n", pfip[1]);
@@ -45,7 +46,7 @@ static ssize_t mio_enabled_show(struct device *dev,
 {
 	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 
-	return sprintf(buf, zpci_use_mio(zdev) ? "1\n" : "0\n");
+	return sysfs_emit(buf, zpci_use_mio(zdev) ? "1\n" : "0\n");
 }
 static DEVICE_ATTR_RO(mio_enabled);
 
@@ -215,6 +216,7 @@ static struct attribute *zpci_dev_attrs[] = {
 	&dev_attr_pfgid.attr,
 	&dev_attr_pft.attr,
 	&dev_attr_port.attr,
+	&dev_attr_fidparm.attr,
 	&dev_attr_vfn.attr,
 	&dev_attr_uid.attr,
 	&dev_attr_recover.attr,
diff --git a/arch/s390/purgatory/head.S b/arch/s390/purgatory/head.S
index 0f93f2e72eba..db3ab2402621 100644
--- a/arch/s390/purgatory/head.S
+++ b/arch/s390/purgatory/head.S
@@ -156,7 +156,7 @@ SYM_CODE_START(purgatory_start)
 	agr	%r10,%r9
 
 	/* Buffer location (in crash memory) and size. As the purgatory is
-	 * behind the point of no return it can re-use the stack as buffer.
+	 * behind the point of no return it can reuse the stack as buffer.
 	 */
 	larl	%r11,purgatory_end
 	larl	%r12,stack
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 08b1238bcd7b..0a9cdd31cbd9 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -95,6 +95,9 @@ config PKEY
 	    loaded when a CEX crypto card is available.
 	  - A pkey EP11 kernel module (pkey-ep11.ko) which is automatically
 	    loaded when a CEX crypto card is available.
+	  - A pkey UV kernel module (pkey-uv.ko) which is automatically
+	    loaded when the Ultravisor feature is available within a
+	    protected execution environment.
 
 	  Select this option if you want to enable the kernel and userspace
 	  API for protected key handling.
@@ -152,6 +155,24 @@ config PKEY_PCKMO
 	  this option unless you are sure you never need to derive protected
 	  keys from clear key values directly via PCKMO.
 
+config PKEY_UV
+	tristate "PKEY UV support handler"
+	depends on PKEY
+	depends on S390_UV_UAPI
+	help
+	  This is the PKEY Ultravisor support handler for deriving protected
+	  keys from secrets stored within the Ultravisor (UV).
+
+	  This module works together with the UV device and supports the
+	  retrieval of protected keys from secrets stored within the
+	  UV firmware layer. This service is only available within
+	  a protected execution guest and thus this module will fail upon
+	  modprobe if no protected execution environment is detected.
+
+	  Enable this option if you intend to run this kernel in a KVM
+	  guest with protected execution and you want to use UV retrievable
+	  secrets via the PKEY API.
+
 config CRYPTO_PAES_S390
 	tristate "PAES cipher algorithms"
 	depends on S390
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 8245b742e4a2..26812abddef1 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -25,6 +25,7 @@
 #include <linux/io.h>
 #include <asm/irq.h>
 #include <asm/vtoc.h>
+#include <asm/asm.h>
 
 #include "dasd_int.h"
 #include "dasd_diag.h"
@@ -67,22 +68,24 @@ static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
 static inline int __dia250(void *iob, int cmd)
 {
 	union register_pair rx = { .even = (unsigned long)iob, };
+	int cc, exception;
 	typedef union {
 		struct dasd_diag_init_io init_io;
 		struct dasd_diag_rw_io rw_io;
 	} addr_type;
-	int cc;
 
-	cc = 3;
+	exception = 1;
 	asm volatile(
 		"	diag	%[rx],%[cmd],0x250\n"
-		"0:	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
+		"0:	lhi	%[exc],0\n"
 		"1:\n"
+		CC_IPM(cc)
 		EX_TABLE(0b,1b)
-		: [cc] "+&d" (cc), [rx] "+&d" (rx.pair), "+m" (*(addr_type *)iob)
+		: CC_OUT(cc, cc), [rx] "+d" (rx.pair),
+		  "+m" (*(addr_type *)iob), [exc] "+d" (exception)
 		: [cmd] "d" (cmd)
-		: "cc");
+		: CC_CLOBBER);
+	cc = exception ? 3 : CC_TRANSFORM(cc);
 	return cc | rx.odd;
 }
 
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 02a4a51da1b7..0f14d279d30b 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -339,7 +339,7 @@ dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf
 	struct dcssblk_dev_info *dev_info;
 
 	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
-	return sprintf(buf, dev_info->is_shared ? "1\n" : "0\n");
+	return sysfs_emit(buf, dev_info->is_shared ? "1\n" : "0\n");
 }
 
 static ssize_t
@@ -444,7 +444,7 @@ dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf)
 	struct dcssblk_dev_info *dev_info;
 
 	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
-	return sprintf(buf, dev_info->save_pending ? "1\n" : "0\n");
+	return sysfs_emit(buf, dev_info->save_pending ? "1\n" : "0\n");
 }
 
 static ssize_t
@@ -506,21 +506,15 @@ static ssize_t
 dcssblk_seglist_show(struct device *dev, struct device_attribute *attr,
 		char *buf)
 {
-	int i;
-
 	struct dcssblk_dev_info *dev_info;
 	struct segment_info *entry;
+	int i;
 
+	i = 0;
 	down_read(&dcssblk_devices_sem);
 	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
-	i = 0;
-	buf[0] = '\0';
-	list_for_each_entry(entry, &dev_info->seg_list, lh) {
-		strcpy(&buf[i], entry->segment_name);
-		i += strlen(entry->segment_name);
-		buf[i] = '\n';
-		i++;
-	}
+	list_for_each_entry(entry, &dev_info->seg_list, lh)
+		i += sysfs_emit_at(buf, i, "%s\n", entry->segment_name);
 	up_read(&dcssblk_devices_sem);
 	return i;
 }
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 053102d0fcd2..ae1b9aa3a2b5 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -528,7 +528,7 @@ static void tty3270_update(struct timer_list *t)
 	u8 cmd = TC_WRITE;
 	int rc, len;
 
-	wrq = xchg(&tp->write, 0);
+	wrq = xchg(&tp->write, NULL);
 	if (!wrq) {
 		tty3270_set_timer(tp, 1);
 		return;
@@ -746,7 +746,7 @@ static void tty3270_issue_read(struct tty3270 *tp, int lock)
 	struct raw3270_request *rrq;
 	int rc;
 
-	rrq = xchg(&tp->read, 0);
+	rrq = xchg(&tp->read, NULL);
 	if (!rrq)
 		/* Read already scheduled. */
 		return;
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 6a23ec286c70..6c91e422927f 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -14,6 +14,7 @@
 #include <asm/asm-extable.h>
 #include <asm/sclp.h>
 #include <asm/ebcdic.h>
+#include <asm/asm.h>
 
 /* maximum number of pages concerning our own memory management */
 #define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
@@ -325,19 +326,22 @@ struct read_info_sccb * __init sclp_early_get_info(void);
 /* Perform service call. Return 0 on success, non-zero otherwise. */
 static inline int sclp_service_call(sclp_cmdw_t command, void *sccb)
 {
-	int cc = 4; /* Initialize for program check handling */
+	int cc, exception;
 
+	exception = 1;
 	asm volatile(
-		"0:	.insn	rre,0xb2200000,%1,%2\n"	 /* servc %1,%2 */
-		"1:	ipm	%0\n"
-		"	srl	%0,28\n"
+		"0:	.insn	rre,0xb2200000,%[cmd],%[sccb]\n" /* servc */
+		"1:	lhi	%[exc],0\n"
 		"2:\n"
+		CC_IPM(cc)
 		EX_TABLE(0b, 2b)
 		EX_TABLE(1b, 2b)
-		: "+&d" (cc) : "d" (command), "a" (__pa(sccb))
-		: "cc", "memory");
-	if (cc == 4)
+		: CC_OUT(cc, cc), [exc] "+d" (exception)
+		: [cmd] "d" (command), [sccb] "a" (__pa(sccb))
+		: CC_CLOBBER_LIST("memory"));
+	if (exception)
 		return -EINVAL;
+	cc = CC_TRANSFORM(cc);
 	if (cc == 3)
 		return -EIO;
 	if (cc == 2)
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
index f60d7ea8268d..d8f91aab11e8 100644
--- a/drivers/s390/char/sclp_cpi_sys.c
+++ b/drivers/s390/char/sclp_cpi_sys.c
@@ -223,7 +223,7 @@ static ssize_t system_name_show(struct kobject *kobj,
 	int rc;
 
 	mutex_lock(&sclp_cpi_mutex);
-	rc = snprintf(page, PAGE_SIZE, "%s\n", system_name);
+	rc = sysfs_emit(page, "%s\n", system_name);
 	mutex_unlock(&sclp_cpi_mutex);
 	return rc;
 }
@@ -255,7 +255,7 @@ static ssize_t sysplex_name_show(struct kobject *kobj,
 	int rc;
 
 	mutex_lock(&sclp_cpi_mutex);
-	rc = snprintf(page, PAGE_SIZE, "%s\n", sysplex_name);
+	rc = sysfs_emit(page, "%s\n", sysplex_name);
 	mutex_unlock(&sclp_cpi_mutex);
 	return rc;
 }
@@ -287,7 +287,7 @@ static ssize_t system_type_show(struct kobject *kobj,
 	int rc;
 
 	mutex_lock(&sclp_cpi_mutex);
-	rc = snprintf(page, PAGE_SIZE, "%s\n", system_type);
+	rc = sysfs_emit(page, "%s\n", system_type);
 	mutex_unlock(&sclp_cpi_mutex);
 	return rc;
 }
@@ -321,7 +321,7 @@ static ssize_t system_level_show(struct kobject *kobj,
 	mutex_lock(&sclp_cpi_mutex);
 	level = system_level;
 	mutex_unlock(&sclp_cpi_mutex);
-	return snprintf(page, PAGE_SIZE, "%#018llx\n", level);
+	return sysfs_emit(page, "%#018llx\n", level);
 }
 
 static ssize_t system_level_store(struct kobject *kobj,
diff --git a/drivers/s390/char/sclp_ocf.c b/drivers/s390/char/sclp_ocf.c
index d35f10ea5b52..ca6c5260dc53 100644
--- a/drivers/s390/char/sclp_ocf.c
+++ b/drivers/s390/char/sclp_ocf.c
@@ -101,7 +101,7 @@ static ssize_t cpc_name_show(struct kobject *kobj,
 	sclp_ocf_cpc_name_copy(name);
 	name[OCF_LENGTH_CPC_NAME] = 0;
 	EBCASC(name, OCF_LENGTH_CPC_NAME);
-	return snprintf(page, PAGE_SIZE, "%s\n", name);
+	return sysfs_emit(page, "%s\n", name);
 }
 
 static struct kobj_attribute cpc_name_attr =
@@ -113,7 +113,7 @@ static ssize_t hmc_network_show(struct kobject *kobj,
 	int rc;
 
 	spin_lock_irq(&sclp_ocf_lock);
-	rc = snprintf(page, PAGE_SIZE, "%s\n", hmc_network);
+	rc = sysfs_emit(page, "%s\n", hmc_network);
 	spin_unlock_irq(&sclp_ocf_lock);
 	return rc;
 }
diff --git a/drivers/s390/char/sclp_pci.c b/drivers/s390/char/sclp_pci.c
index a3e5a5fb0c1e..c3466a8c56bb 100644
--- a/drivers/s390/char/sclp_pci.c
+++ b/drivers/s390/char/sclp_pci.c
@@ -27,6 +27,7 @@
 #define SCLP_ERRNOTIFY_AQ_RESET			0
 #define SCLP_ERRNOTIFY_AQ_REPAIR		1
 #define SCLP_ERRNOTIFY_AQ_INFO_LOG		2
+#define SCLP_ERRNOTIFY_AQ_OPTICS_DATA		3
 
 static DEFINE_MUTEX(sclp_pci_mutex);
 static struct sclp_register sclp_pci_event = {
@@ -116,6 +117,7 @@ static int sclp_pci_check_report(struct zpci_report_error_header *report)
 	case SCLP_ERRNOTIFY_AQ_RESET:
 	case SCLP_ERRNOTIFY_AQ_REPAIR:
 	case SCLP_ERRNOTIFY_AQ_INFO_LOG:
+	case SCLP_ERRNOTIFY_AQ_OPTICS_DATA:
 		break;
 	default:
 		return -EINVAL;
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index a6d2a4792185..ce8a440598a8 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -96,7 +96,7 @@ tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *
 	struct tape_device *tdev;
 
 	tdev = dev_get_drvdata(dev);
-	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
+	return sysfs_emit(buf, "%i\n", tdev->medium_state);
 }
 
 static
@@ -108,7 +108,7 @@ tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *b
 	struct tape_device *tdev;
 
 	tdev = dev_get_drvdata(dev);
-	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
+	return sysfs_emit(buf, "%i\n", tdev->first_minor);
 }
 
 static
@@ -120,8 +120,8 @@ tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
 	struct tape_device *tdev;
 
 	tdev = dev_get_drvdata(dev);
-	return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
-		"OFFLINE" : tape_state_verbose[tdev->tape_state]);
+	return sysfs_emit(buf, "%s\n", (tdev->first_minor < 0) ?
+			  "OFFLINE" : tape_state_verbose[tdev->tape_state]);
 }
 
 static
@@ -135,17 +135,17 @@ tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf
 
 	tdev = dev_get_drvdata(dev);
 	if (tdev->first_minor < 0)
-		return scnprintf(buf, PAGE_SIZE, "N/A\n");
+		return sysfs_emit(buf, "N/A\n");
 
 	spin_lock_irq(get_ccwdev_lock(tdev->cdev));
 	if (list_empty(&tdev->req_queue))
-		rc = scnprintf(buf, PAGE_SIZE, "---\n");
+		rc = sysfs_emit(buf, "---\n");
 	else {
 		struct tape_request *req;
 
 		req = list_entry(tdev->req_queue.next, struct tape_request,
 			list);
-		rc = scnprintf(buf,PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
+		rc = sysfs_emit(buf, "%s\n", tape_op_verbose[req->op]);
 	}
 	spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
 	return rc;
@@ -161,7 +161,7 @@ tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf
 
 	tdev = dev_get_drvdata(dev);
 
-	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
+	return sysfs_emit(buf, "%i\n", tdev->char_data.block_size);
 }
 
 static
diff --git a/drivers/s390/char/uvdevice.c b/drivers/s390/char/uvdevice.c
index f598edc5f251..2b83fb6dc1d7 100644
--- a/drivers/s390/char/uvdevice.c
+++ b/drivers/s390/char/uvdevice.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- *  Copyright IBM Corp. 2022
+ *  Copyright IBM Corp. 2022, 2024
  *  Author(s): Steffen Eiden <seiden@linux.ibm.com>
  *
  *  This file provides a Linux misc device to give userspace access to some
@@ -40,6 +40,7 @@ static const u32 ioctl_nr_to_uvc_bit[] __initconst = {
 	[UVIO_IOCTL_ADD_SECRET_NR] = BIT_UVC_CMD_ADD_SECRET,
 	[UVIO_IOCTL_LIST_SECRETS_NR] = BIT_UVC_CMD_LIST_SECRETS,
 	[UVIO_IOCTL_LOCK_SECRETS_NR] = BIT_UVC_CMD_LOCK_SECRETS,
+	[UVIO_IOCTL_RETR_SECRET_NR] = BIT_UVC_CMD_RETR_SECRET,
 };
 
 static_assert(ARRAY_SIZE(ioctl_nr_to_uvc_bit) == UVIO_IOCTL_NUM_IOCTLS);
@@ -62,11 +63,13 @@ static void __init set_supp_uv_cmds(unsigned long *supp_uv_cmds)
 }
 
 /**
- * uvio_uvdev_info() - get information about the uvdevice
+ * uvio_uvdev_info() - Get information about the uvdevice
  *
  * @uv_ioctl: ioctl control block
  *
  * Lists all IOCTLs that are supported by this uvdevice
+ *
+ * Return: 0 on success or a negative error code on error
  */
 static int uvio_uvdev_info(struct uvio_ioctl_cb *uv_ioctl)
 {
@@ -177,7 +180,7 @@ static int get_uvio_attest(struct uvio_ioctl_cb *uv_ioctl, struct uvio_attest *u
  *
  * Context: might sleep
  *
- * Return: 0 on success or a negative error code on error.
+ * Return: 0 on success or a negative error code on error
  */
 static int uvio_attestation(struct uvio_ioctl_cb *uv_ioctl)
 {
@@ -237,7 +240,8 @@ out:
 	return ret;
 }
 
-/** uvio_add_secret() - perform an Add Secret UVC
+/**
+ * uvio_add_secret() - Perform an Add Secret UVC
  *
  * @uv_ioctl: ioctl control block
  *
@@ -260,7 +264,7 @@ out:
  *
  * Context: might sleep
  *
- * Return: 0 on success or a negative error code on error.
+ * Return: 0 on success or a negative error code on error
  */
 static int uvio_add_secret(struct uvio_ioctl_cb *uv_ioctl)
 {
@@ -296,7 +300,44 @@ out:
 	return ret;
 }
 
-/** uvio_list_secrets() - perform a List Secret UVC
+/*
+ * Do the actual secret list creation. Calls the list secrets UVC until there
+ * is no more space in the user buffer, or the list ends.
+ */
+static int uvio_get_list(void *zpage, struct uvio_ioctl_cb *uv_ioctl)
+{
+	const size_t data_off = offsetof(struct uv_secret_list, secrets);
+	u8 __user *user_buf = (u8 __user *)uv_ioctl->argument_addr;
+	struct uv_secret_list *list = zpage;
+	u16 num_secrets_stored = 0;
+	size_t user_off = data_off;
+	size_t copy_len;
+
+	do {
+		uv_list_secrets(list, list->next_secret_idx, &uv_ioctl->uv_rc,
+				&uv_ioctl->uv_rrc);
+		if (uv_ioctl->uv_rc != UVC_RC_EXECUTED &&
+		    uv_ioctl->uv_rc != UVC_RC_MORE_DATA)
+			break;
+
+		copy_len = sizeof(list->secrets[0]) * list->num_secr_stored;
+		if (copy_to_user(user_buf + user_off, list->secrets, copy_len))
+			return -EFAULT;
+
+		user_off += copy_len;
+		num_secrets_stored += list->num_secr_stored;
+	} while (uv_ioctl->uv_rc == UVC_RC_MORE_DATA &&
+		 user_off + sizeof(*list) <= uv_ioctl->argument_len);
+
+	list->num_secr_stored = num_secrets_stored;
+	if (copy_to_user(user_buf, list, data_off))
+		return -EFAULT;
+	return 0;
+}
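
The user buffer thus ends up holding one merged header followed by the
concatenated entries; schematically (offsets follow from the struct
layout):

	/*
	 * User buffer after uvio_get_list() (sketch):
	 *
	 *   [0, data_off)    merged uv_secret_list header, with
	 *                    num_secr_stored summed over all UVCs
	 *   [data_off, ...)  secrets[] entries from UVC #1, #2, ...
	 */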
+
+/**
+ * uvio_list_secrets() - Perform a List Secret UVC
+ *
  * @uv_ioctl: ioctl control block
  *
  * uvio_list_secrets() performs the List Secret Ultravisor Call. It verifies
@@ -307,45 +348,43 @@ out:
  *
  * The argument specifies the location for the result of the UV-Call.
  *
+ * Argument length must be a multiple of the page size.
+ * The list secrets IOCTL will call the list UVC multiple times and fill
+ * the provided user-buffer with list elements until either the list ends or
+ * the buffer is full. The returned list header is merged from the list
+ * headers of the individual UVCs.
+ *
  * If the List Secrets UV facility is not present, UV will return invalid
  * command rc. This won't be fenced in the driver and does not result in a
  * negative return value.
  *
  * Context: might sleep
  *
- * Return: 0 on success or a negative error code on error.
+ * Return: 0 on success or a negative error code on error
  */
 static int uvio_list_secrets(struct uvio_ioctl_cb *uv_ioctl)
 {
-	void __user *user_buf_arg = (void __user *)uv_ioctl->argument_addr;
-	struct uv_cb_guest_addr uvcb = {
-		.header.len = sizeof(uvcb),
-		.header.cmd = UVC_CMD_LIST_SECRETS,
-	};
-	void *secrets = NULL;
-	int ret = 0;
+	void *zpage;
+	int rc;
 
-	if (uv_ioctl->argument_len != UVIO_LIST_SECRETS_LEN)
+	if (uv_ioctl->argument_len == 0 ||
+	    uv_ioctl->argument_len % UVIO_LIST_SECRETS_LEN != 0)
 		return -EINVAL;
 
-	secrets = kvzalloc(UVIO_LIST_SECRETS_LEN, GFP_KERNEL);
-	if (!secrets)
+	zpage = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!zpage)
 		return -ENOMEM;
 
-	uvcb.addr = (u64)secrets;
-	uv_call_sched(0, (u64)&uvcb);
-	uv_ioctl->uv_rc = uvcb.header.rc;
-	uv_ioctl->uv_rrc = uvcb.header.rrc;
+	rc = uvio_get_list(zpage, uv_ioctl);
 
-	if (copy_to_user(user_buf_arg, secrets, UVIO_LIST_SECRETS_LEN))
-		ret = -EFAULT;
-
-	kvfree(secrets);
-	return ret;
+	free_page((unsigned long)zpage);
+	return rc;
 }
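
From userspace the new semantics can be exercised as sketched below;
the device node name /dev/uv and the uapi names from <asm/uvdevice.h>
are assumptions, and error handling is trimmed:

	#include <fcntl.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <asm/uvdevice.h>

	#define LIST_BUF_LEN	(2 * 4096)	/* any multiple of the page size */

	static int list_secrets(void *buf)
	{
		struct uvio_ioctl_cb cb = {
			.argument_addr = (__u64)(unsigned long)buf,
			.argument_len = LIST_BUF_LEN,
		};
		int fd, rc;

		fd = open("/dev/uv", O_RDWR);
		if (fd < 0)
			return -1;
		rc = ioctl(fd, UVIO_IOCTL_LIST_SECRETS, &cb);
		close(fd);
		/* on rc == 0, check cb.uv_rc and parse the merged list in buf */
		return rc;
	}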
 
-/** uvio_lock_secrets() - perform a Lock Secret Store UVC
- * @uv_ioctl: ioctl control block
+/**
+ * uvio_lock_secrets() - Perform a Lock Secret Store UVC
+ *
+ * @ioctl: ioctl control block
  *
  * uvio_lock_secrets() performs the Lock Secret Store Ultravisor Call. It
  * performs the UV-call and copies the return codes to the ioctl control block.
@@ -360,7 +399,7 @@ static int uvio_list_secrets(struct uvio_ioctl_cb *uv_ioctl)
  *
  * Context: might sleep
  *
- * Return: 0 on success or a negative error code on error.
+ * Return: 0 on success or a negative error code on error
  */
 static int uvio_lock_secrets(struct uvio_ioctl_cb *ioctl)
 {
@@ -379,6 +418,59 @@ static int uvio_lock_secrets(struct uvio_ioctl_cb *ioctl)
 	return 0;
 }
 
+/**
+ * uvio_retr_secret() - Perform a retrieve secret UVC
+ *
+ * @uv_ioctl: ioctl control block
+ *
+ * uvio_retr_secret() performs the Retrieve Secret Ultravisor Call.
+ * The first two bytes of the argument specify the index of the secret to be
+ * retrieved. The retrieved secret is copied into the argument buffer if there
+ * is enough space.
+ * The argument length must be at least two bytes and at most 8192 bytes.
+ *
+ * Context: might sleep
+ *
+ * Return: 0 on success or a negative error code on error
+ */
+static int uvio_retr_secret(struct uvio_ioctl_cb *uv_ioctl)
+{
+	u16 __user *user_index = (u16 __user *)uv_ioctl->argument_addr;
+	struct uv_cb_retr_secr uvcb = {
+		.header.len = sizeof(uvcb),
+		.header.cmd = UVC_CMD_RETR_SECRET,
+	};
+	u32 buf_len = uv_ioctl->argument_len;
+	void *buf = NULL;
+	int ret;
+
+	if (buf_len > UVIO_RETR_SECRET_MAX_LEN || buf_len < sizeof(*user_index))
+		return -EINVAL;
+
+	buf = kvzalloc(buf_len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = -EFAULT;
+	if (get_user(uvcb.secret_idx, user_index))
+		goto err;
+
+	uvcb.buf_addr = (u64)buf;
+	uvcb.buf_size = buf_len;
+	uv_call_sched(0, (u64)&uvcb);
+
+	if (copy_to_user((void __user *)uv_ioctl->argument_addr, buf, buf_len))
+		goto err;
+
+	ret = 0;
+	uv_ioctl->uv_rc = uvcb.header.rc;
+	uv_ioctl->uv_rrc = uvcb.header.rrc;
+
+err:
+	kvfree_sensitive(buf, buf_len);
+	return ret;
+}
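
Retrieval mirrors this from userspace: the caller places the two-byte
index at the start of the buffer and receives the secret in the same
buffer (same assumptions and includes as the sketch above):

	static int retrieve_secret(int fd, __u16 idx, void *buf, __u32 len)
	{
		struct uvio_ioctl_cb cb = {
			.argument_addr = (__u64)(unsigned long)buf,
			.argument_len = len,	/* >= 2, <= UVIO_RETR_SECRET_MAX_LEN */
		};

		memcpy(buf, &idx, sizeof(idx));	/* index in the first two bytes */
		return ioctl(fd, UVIO_IOCTL_RETR_SECRET, &cb);
	}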
+
 static int uvio_copy_and_check_ioctl(struct uvio_ioctl_cb *ioctl, void __user *argp,
 				     unsigned long cmd)
 {
@@ -432,6 +524,9 @@ static long uvio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	case UVIO_IOCTL_LOCK_SECRETS_NR:
 		ret = uvio_lock_secrets(&uv_ioctl);
 		break;
+	case UVIO_IOCTL_RETR_SECRET_NR:
+		ret = uvio_retr_secret(&uv_ioctl);
+		break;
 	default:
 		ret = -ENOIOCTLCMD;
 		break;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index bd5cecc44123..3dd50ac9c5b0 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -531,7 +531,7 @@ static ssize_t vmlogrdr_autopurge_show(struct device *dev,
 				       char *buf)
 {
 	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
-	return sprintf(buf, "%u\n", priv->autopurge);
+	return sysfs_emit(buf, "%u\n", priv->autopurge);
 }
 
 
@@ -605,7 +605,7 @@ static ssize_t vmlogrdr_autorecording_show(struct device *dev,
 					   char *buf)
 {
 	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
-	return sprintf(buf, "%u\n", priv->autorecording);
+	return sysfs_emit(buf, "%u\n", priv->autorecording);
 }
 
 
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index fe94dec427b6..90ba7a2b9cb4 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -345,7 +345,7 @@ static ssize_t ur_attr_reclen_show(struct device *dev,
 	urd = urdev_get_from_cdev(to_ccwdev(dev));
 	if (!urd)
 		return -ENODEV;
-	rc = sprintf(buf, "%zu\n", urd->reclen);
+	rc = sysfs_emit(buf, "%zu\n", urd->reclen);
 	urdev_put(urd);
 	return rc;
 }
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 66b1bdc63284..7bcf8b98b8dd 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -147,7 +147,7 @@ static ssize_t ccwgroup_online_show(struct device *dev,
 
 	online = (gdev->state == CCWGROUP_ONLINE) ? 1 : 0;
 
-	return scnprintf(buf, PAGE_SIZE, "%d\n", online);
+	return sysfs_emit(buf, "%d\n", online);
 }
 
 /*
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index a07bbecba61c..cba2d048a96b 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -144,6 +144,18 @@ static ssize_t measurement_chars_read(struct file *filp, struct kobject *kobj,
 }
 static BIN_ATTR_ADMIN_RO(measurement_chars, sizeof(struct cmg_chars));
 
+static ssize_t measurement_chars_full_read(struct file *filp,
+					   struct kobject *kobj,
+					   struct bin_attribute *bin_attr,
+					   char *buf, loff_t off, size_t count)
+{
+	struct channel_path *chp = to_channelpath(kobj_to_dev(kobj));
+
+	return memory_read_from_buffer(buf, count, &off, &chp->cmcb,
+				       sizeof(chp->cmcb));
+}
+static BIN_ATTR_ADMIN_RO(measurement_chars_full, sizeof(struct cmg_cmcb));
+
 static ssize_t chp_measurement_copy_block(void *buf, loff_t off, size_t count,
 					  struct kobject *kobj, bool extended)
 {
@@ -201,6 +213,7 @@ static BIN_ATTR_ADMIN_RO(ext_measurement, sizeof(struct cmg_ext_entry));
 
 static struct bin_attribute *measurement_attrs[] = {
 	&bin_attr_measurement_chars,
+	&bin_attr_measurement_chars_full,
 	&bin_attr_measurement,
 	&bin_attr_ext_measurement,
 	NULL,
@@ -230,7 +243,7 @@ static ssize_t chp_status_show(struct device *dev,
 	status = chp->state;
 	mutex_unlock(&chp->lock);
 
-	return status ? sprintf(buf, "online\n") : sprintf(buf, "offline\n");
+	return status ? sysfs_emit(buf, "online\n") : sysfs_emit(buf, "offline\n");
 }
 
 static ssize_t chp_status_write(struct device *dev,
@@ -311,7 +324,7 @@ static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
 	mutex_lock(&chp->lock);
 	type = chp->desc.desc;
 	mutex_unlock(&chp->lock);
-	return sprintf(buf, "%x\n", type);
+	return sysfs_emit(buf, "%x\n", type);
 }
 
 static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
@@ -324,8 +337,8 @@ static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr,
 	if (!chp)
 		return 0;
 	if (chp->cmg == -1) /* channel measurements not available */
-		return sprintf(buf, "unknown\n");
-	return sprintf(buf, "%d\n", chp->cmg);
+		return sysfs_emit(buf, "unknown\n");
+	return sysfs_emit(buf, "%d\n", chp->cmg);
 }
 
 static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
@@ -338,8 +351,8 @@ static ssize_t chp_shared_show(struct device *dev,
 	if (!chp)
 		return 0;
 	if (chp->shared == -1) /* channel measurements not available */
-		return sprintf(buf, "unknown\n");
-	return sprintf(buf, "%x\n", chp->shared);
+		return sysfs_emit(buf, "unknown\n");
+	return sysfs_emit(buf, "%x\n", chp->shared);
 }
 
 static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
@@ -352,7 +365,7 @@ static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr,
 
 	mutex_lock(&chp->lock);
 	if (chp->desc_fmt1.flags & 0x10)
-		rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid);
+		rc = sysfs_emit(buf, "%04x\n", chp->desc_fmt1.chid);
 	else
 		rc = 0;
 	mutex_unlock(&chp->lock);
@@ -369,7 +382,7 @@ static ssize_t chp_chid_external_show(struct device *dev,
 
 	mutex_lock(&chp->lock);
 	if (chp->desc_fmt1.flags & 0x10)
-		rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0);
+		rc = sysfs_emit(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0);
 	else
 		rc = 0;
 	mutex_unlock(&chp->lock);
@@ -385,7 +398,7 @@ static ssize_t chp_esc_show(struct device *dev,
 	ssize_t rc;
 
 	mutex_lock(&chp->lock);
-	rc = sprintf(buf, "%x\n", chp->desc_fmt1.esc);
+	rc = sysfs_emit(buf, "%x\n", chp->desc_fmt1.esc);
 	mutex_unlock(&chp->lock);
 
 	return rc;
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index a15324a43aa3..391b52a7474c 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -54,6 +54,7 @@ struct channel_path {
 	int extended;
 	unsigned long speed;
 	struct cmg_chars cmg_chars;
+	struct cmg_cmcb cmcb;
 };
 
 /* Return channel_path struct for given chpid. */
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index dcc1e1c34ca2..e6462317abd0 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -376,7 +376,7 @@ struct lir {
 #define PARAMS_LEN	10	/* PARAMS=xx,xxxxxx */
 #define NODEID_LEN	35	/* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */
 
-/* Copy EBCIDC text, convert to ASCII and optionally add delimiter. */
+/* Copy EBCDIC text, convert to ASCII and optionally add delimiter. */
 static char *store_ebcdic(char *dest, const char *src, unsigned long len,
 			  char delim)
 {
@@ -1092,19 +1092,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
 		u32 zeroes1;
 		struct chsc_header response;
 		u32 zeroes2;
-		u32 not_valid : 1;
-		u32 shared : 1;
-		u32 extended : 1;
-		u32 : 21;
-		u32 chpid : 8;
-		u32 cmcv : 5;
-		u32 : 7;
-		u32 cmgp : 4;
-		u32 cmgq : 8;
-		u32 cmg : 8;
-		u32 : 16;
-		u32 cmgs : 16;
-		u32 data[NR_MEASUREMENT_CHARS];
+		struct cmg_cmcb cmcb;
 	} *scmc_area;
 
 	chp->shared = -1;
@@ -1135,15 +1123,16 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
 			      scmc_area->response.code);
 		goto out;
 	}
-	if (scmc_area->not_valid)
+	chp->cmcb = scmc_area->cmcb;
+	if (scmc_area->cmcb.not_valid)
 		goto out;
 
-	chp->cmg = scmc_area->cmg;
-	chp->shared = scmc_area->shared;
-	chp->extended = scmc_area->extended;
-	chp->speed = scmc_get_speed(scmc_area->cmgs, scmc_area->cmgp);
-	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
-				  (struct cmg_chars *) &scmc_area->data);
+	chp->cmg = scmc_area->cmcb.cmg;
+	chp->shared = scmc_area->cmcb.shared;
+	chp->extended = scmc_area->cmcb.extended;
+	chp->speed = scmc_get_speed(scmc_area->cmcb.cmgs, scmc_area->cmcb.cmgp);
+	chsc_initialize_cmg_chars(chp, scmc_area->cmcb.cmcv,
+				  (struct cmg_chars *)&scmc_area->cmcb.data);
 out:
 	spin_unlock_irqrestore(&chsc_page_lock, flags);
 	return ret;
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 24cd65dbc5a7..6fe983ebf4b3 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -17,6 +17,22 @@ struct cmg_chars {
 	u32 values[NR_MEASUREMENT_CHARS];
 };
 
+struct cmg_cmcb {
+	u32 not_valid : 1;
+	u32 shared    : 1;
+	u32 extended  : 1;
+	u32	      : 21;
+	u32 chpid     : 8;
+	u32 cmcv      : 5;
+	u32	      : 7;
+	u32 cmgp      : 4;
+	u32 cmgq      : 8;
+	u32 cmg       : 8;
+	u32	      : 16;
+	u32 cmgs      : 16;
+	u32 data[NR_MEASUREMENT_CHARS];
+};
+
 #define NR_MEASUREMENT_ENTRIES 8
 struct cmg_entry {
 	u32 values[NR_MEASUREMENT_ENTRIES];
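
The new measurement_chars_full attribute exposes the raw CMCB above to userspace. A minimal read sketch, assuming channel path 0.40 (the sysfs path is an assumption; struct cmg_cmcb is three header words plus NR_MEASUREMENT_CHARS data words, 32 bytes with NR_MEASUREMENT_CHARS == 5 in this tree):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char cmcb[64];	/* >= sizeof(struct cmg_cmcb) */
	ssize_t n;
	int fd;

	fd = open("/sys/devices/css0/chp0.40/measurement_chars_full", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, cmcb, sizeof(cmcb));
	if (n > 0)
		printf("read %zd CMCB bytes\n", n);
	close(fd);
	return 0;
}
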
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index c32e818f06db..ad17ab0a9314 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -459,10 +459,14 @@ int cio_update_schib(struct subchannel *sch)
 {
 	struct schib schib;
 
-	if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
+	if (stsch(sch->schid, &schib))
 		return -ENODEV;
 
 	memcpy(&sch->schib, &schib, sizeof(schib));
+
+	if (!css_sch_is_valid(&schib))
+		return -EACCES;
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(cio_update_schib);
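
The subtle part of this change is the new tri-state contract: the schib is now stored even when the subchannel is reported invalid, and the two failure modes are distinguished for callers such as sch_get_action() below. A simplified sketch of how a caller consumes the result (illustrative only, not part of the patch):

/* Illustrative helper mirroring the cases handled in sch_get_action() */
static const char *schib_state(struct subchannel *sch)
{
	switch (cio_update_schib(sch)) {
	case 0:
		return "valid";		/* stsch ok, schib stored */
	case -EACCES:
		return "invalid";	/* stsch ok, schib stored, device currently unusable */
	default:
		return "gone";		/* -ENODEV: stsch failed, subchannel gone */
	}
}
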
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index a9057a5b670a..08a5e9380e75 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -19,7 +19,7 @@ struct pmcw {
 	u32 intparm;		/* interruption parameter */
 	u32 qf	 : 1;		/* qdio facility */
 	u32 w	 : 1;
-	u32 isc  : 3;		/* interruption sublass */
+	u32 isc  : 3;		/* interruption subclass */
 	u32 res5 : 3;		/* reserved zeros */
 	u32 ena  : 1;		/* enabled */
 	u32 lm	 : 2;		/* limit mode */
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index f80dc18e2a76..fdab760f1f28 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -46,7 +46,7 @@
 /* indices for READCMB */
 enum cmb_index {
 	avg_utilization = -1,
- /* basic and exended format: */
+ /* basic and extended format: */
 	cmb_ssch_rsch_count = 0,
 	cmb_sample_count,
 	cmb_device_connect_time,
@@ -135,7 +135,7 @@ static inline u64 time_to_nsec(u32 value)
  * Users are usually interested in average times,
  * not accumulated time.
  * This also helps us with atomicity problems
- * when reading sinlge values.
+ * when reading single values.
  */
 static inline u64 time_to_avg_nsec(u32 value, u32 count)
 {
@@ -977,8 +977,7 @@ static struct cmb_operations cmbops_extended = {
 
 static ssize_t cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
 {
-	return sprintf(buf, "%lld\n",
-		(unsigned long long) cmf_read(to_ccwdev(dev), idx));
+	return sysfs_emit(buf, "%lld\n", cmf_read(to_ccwdev(dev), idx));
 }
 
 static ssize_t cmb_show_avg_sample_interval(struct device *dev,
@@ -998,7 +997,7 @@ static ssize_t cmb_show_avg_sample_interval(struct device *dev,
 	} else
 		interval = -1;
 	spin_unlock_irq(cdev->ccwlock);
-	return sprintf(buf, "%ld\n", interval);
+	return sysfs_emit(buf, "%ld\n", interval);
 }
 
 static ssize_t cmb_show_avg_utilization(struct device *dev,
@@ -1007,7 +1006,7 @@ static ssize_t cmb_show_avg_utilization(struct device *dev,
 {
 	unsigned long u = cmf_read(to_ccwdev(dev), avg_utilization);
 
-	return sprintf(buf, "%02lu.%01lu%%\n", u / 10, u % 10);
+	return sysfs_emit(buf, "%02lu.%01lu%%\n", u / 10, u % 10);
 }
 
 #define cmf_attr(name) \
@@ -1080,7 +1079,7 @@ static ssize_t cmb_enable_show(struct device *dev,
 {
 	struct ccw_device *cdev = to_ccwdev(dev);
 
-	return sprintf(buf, "%d\n", cmf_enabled(cdev));
+	return sysfs_emit(buf, "%d\n", cmf_enabled(cdev));
 }
 
 static ssize_t cmb_enable_store(struct device *dev,
@@ -1227,7 +1226,7 @@ int cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
 	return cmbops->readall(cdev, data);
 }
 
-/* Reenable cmf when a disconnected device becomes available again. */
+/* Re-enable cmf when a disconnected device becomes available again. */
 int cmf_reenable(struct ccw_device *cdev)
 {
 	cmbops->reset(cdev);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 7b59d20bf785..be78a57f9bfd 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -380,11 +380,11 @@ static ssize_t chpids_show(struct device *dev,
 	for (chp = 0; chp < 8; chp++) {
 		mask = 0x80 >> chp;
 		if (ssd->path_mask & mask)
-			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
+			ret += sysfs_emit_at(buf, ret, "%02x ", ssd->chpid[chp].id);
 		else
-			ret += sprintf(buf + ret, "00 ");
+			ret += sysfs_emit_at(buf, ret, "00 ");
 	}
-	ret += sprintf(buf + ret, "\n");
+	ret += sysfs_emit_at(buf, ret, "\n");
 	return ret;
 }
 static DEVICE_ATTR_RO(chpids);
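
sysfs_emit_at() bounds the output to PAGE_SIZE and takes the running offset as an argument, which is what replaces the open-coded bufpos/sprintf arithmetic throughout this series. A minimal sketch of the accumulation pattern used in chpids_show() above (example_show is a hypothetical attribute):

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	int i, len = 0;

	for (i = 0; i < 8; i++)
		len += sysfs_emit_at(buf, len, "%02x ", i);
	len += sysfs_emit_at(buf, len, "\n");
	return len;
}
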
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index b0f23242e171..fb2c07cb4d3d 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -201,10 +201,9 @@ devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
 	struct ccw_device_id *id = &(cdev->id);
 
 	if (id->dev_type != 0)
-		return sprintf(buf, "%04x/%02x\n",
-				id->dev_type, id->dev_model);
+		return sysfs_emit(buf, "%04x/%02x\n", id->dev_type, id->dev_model);
 	else
-		return sprintf(buf, "n/a\n");
+		return sysfs_emit(buf, "n/a\n");
 }
 
 static ssize_t
@@ -213,8 +212,7 @@ cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
 	struct ccw_device *cdev = to_ccwdev(dev);
 	struct ccw_device_id *id = &(cdev->id);
 
-	return sprintf(buf, "%04x/%02x\n",
-		       id->cu_type, id->cu_model);
+	return sysfs_emit(buf, "%04x/%02x\n", id->cu_type, id->cu_model);
 }
 
 static ssize_t
@@ -234,7 +232,7 @@ online_show (struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct ccw_device *cdev = to_ccwdev(dev);
 
-	return sprintf(buf, cdev->online ? "1\n" : "0\n");
+	return sysfs_emit(buf, cdev->online ? "1\n" : "0\n");
 }
 
 int ccw_device_is_orphan(struct ccw_device *cdev)
@@ -546,21 +544,21 @@ available_show (struct device *dev, struct device_attribute *attr, char *buf)
 	struct subchannel *sch;
 
 	if (ccw_device_is_orphan(cdev))
-		return sprintf(buf, "no device\n");
+		return sysfs_emit(buf, "no device\n");
 	switch (cdev->private->state) {
 	case DEV_STATE_BOXED:
-		return sprintf(buf, "boxed\n");
+		return sysfs_emit(buf, "boxed\n");
 	case DEV_STATE_DISCONNECTED:
 	case DEV_STATE_DISCONNECTED_SENSE_ID:
 	case DEV_STATE_NOT_OPER:
 		sch = to_subchannel(dev->parent);
 		if (!sch->lpm)
-			return sprintf(buf, "no path\n");
+			return sysfs_emit(buf, "no path\n");
 		else
-			return sprintf(buf, "no device\n");
+			return sysfs_emit(buf, "no device\n");
 	default:
 		/* All other states considered fine. */
-		return sprintf(buf, "good\n");
+		return sysfs_emit(buf, "good\n");
 	}
 }
 
@@ -587,7 +585,7 @@ static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
 {
 	struct subchannel *sch = to_subchannel(dev);
 
-	return sprintf(buf, "%02x\n", sch->vpm);
+	return sysfs_emit(buf, "%02x\n", sch->vpm);
 }
 
 static DEVICE_ATTR_RO(devtype);
@@ -1387,14 +1385,18 @@ enum io_sch_action {
 	IO_SCH_VERIFY,
 	IO_SCH_DISC,
 	IO_SCH_NOP,
+	IO_SCH_ORPH_CDEV,
 };
 
 static enum io_sch_action sch_get_action(struct subchannel *sch)
 {
 	struct ccw_device *cdev;
+	int rc;
 
 	cdev = sch_get_cdev(sch);
-	if (cio_update_schib(sch)) {
+	rc = cio_update_schib(sch);
+
+	if (rc == -ENODEV) {
 		/* Not operational. */
 		if (!cdev)
 			return IO_SCH_UNREG;
@@ -1402,6 +1404,16 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
 			return IO_SCH_UNREG;
 		return IO_SCH_ORPH_UNREG;
 	}
+
+	/* Avoid unregistering subchannels without a working device. */
+	if (rc == -EACCES) {
+		if (!cdev)
+			return IO_SCH_NOP;
+		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+			return IO_SCH_UNREG_CDEV;
+		return IO_SCH_ORPH_CDEV;
+	}
+
 	/* Operational. */
 	if (!cdev)
 		return IO_SCH_ATTACH;
@@ -1471,6 +1483,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
 		rc = 0;
 		goto out_unlock;
 	case IO_SCH_ORPH_UNREG:
+	case IO_SCH_ORPH_CDEV:
 	case IO_SCH_ORPH_ATTACH:
 		ccw_device_set_disconnected(cdev);
 		break;
@@ -1502,6 +1515,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
 	/* Handle attached ccw device. */
 	switch (action) {
 	case IO_SCH_ORPH_UNREG:
+	case IO_SCH_ORPH_CDEV:
 	case IO_SCH_ORPH_ATTACH:
 		/* Move ccw device to orphanage. */
 		rc = ccw_device_move_to_orph(cdev);
diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
index acf1edd36549..5ff1e51cddf3 100644
--- a/drivers/s390/cio/ioasm.c
+++ b/drivers/s390/cio/ioasm.c
@@ -8,6 +8,7 @@
 #include <asm/asm-extable.h>
 #include <asm/chpid.h>
 #include <asm/schid.h>
+#include <asm/asm.h>
 #include <asm/crw.h>
 
 #include "ioasm.h"
@@ -18,19 +19,20 @@
 static inline int __stsch(struct subchannel_id schid, struct schib *addr)
 {
 	unsigned long r1 = *(unsigned int *)&schid;
-	int ccode = -EIO;
+	int ccode, exception;
 
+	exception = 1;
 	asm volatile(
 		"	lgr	1,%[r1]\n"
 		"	stsch	%[addr]\n"
-		"0:	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
+		"0:	lhi	%[exc],0\n"
 		"1:\n"
+		CC_IPM(cc)
 		EX_TABLE(0b, 1b)
-		: [cc] "+&d" (ccode), [addr] "=Q" (*addr)
+		: CC_OUT(cc, ccode), [addr] "=Q" (*addr), [exc] "+d" (exception)
 		: [r1] "d" (r1)
-		: "cc", "1");
-	return ccode;
+		: CC_CLOBBER_LIST("1"));
+	return exception ? -EIO : CC_TRANSFORM(ccode);
 }
 
 int stsch(struct subchannel_id schid, struct schib *addr)
@@ -47,19 +49,20 @@ EXPORT_SYMBOL(stsch);
 static inline int __msch(struct subchannel_id schid, struct schib *addr)
 {
 	unsigned long r1 = *(unsigned int *)&schid;
-	int ccode = -EIO;
+	int ccode, exception;
 
+	exception = 1;
 	asm volatile(
 		"	lgr	1,%[r1]\n"
 		"	msch	%[addr]\n"
-		"0:	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
+		"0:	lhi	%[exc],0\n"
 		"1:\n"
+		CC_IPM(cc)
 		EX_TABLE(0b, 1b)
-		: [cc] "+&d" (ccode)
+		: CC_OUT(cc, ccode), [exc] "+d" (exception)
 		: [r1] "d" (r1), [addr] "Q" (*addr)
-		: "cc", "1");
-	return ccode;
+		: CC_CLOBBER_LIST("1"));
+	return exception ? -EIO : CC_TRANSFORM(ccode);
 }
 
 int msch(struct subchannel_id schid, struct schib *addr)
@@ -80,12 +83,11 @@ static inline int __tsch(struct subchannel_id schid, struct irb *addr)
 	asm volatile(
 		"	lgr	1,%[r1]\n"
 		"	tsch	%[addr]\n"
-		"	ipm	%[cc]\n"
-		"	srl	%[cc],28"
-		: [cc] "=&d" (ccode), [addr] "=Q" (*addr)
+		CC_IPM(cc)
+		: CC_OUT(cc, ccode), [addr] "=Q" (*addr)
 		: [r1] "d" (r1)
-		: "cc", "1");
-	return ccode;
+		: CC_CLOBBER_LIST("1"));
+	return CC_TRANSFORM(ccode);
 }
 
 int tsch(struct subchannel_id schid, struct irb *addr)
@@ -101,19 +103,20 @@ int tsch(struct subchannel_id schid, struct irb *addr)
 static inline int __ssch(struct subchannel_id schid, union orb *addr)
 {
 	unsigned long r1 = *(unsigned int *)&schid;
-	int ccode = -EIO;
+	int ccode, exception;
 
+	exception = 1;
 	asm volatile(
 		"	lgr	1,%[r1]\n"
 		"	ssch	%[addr]\n"
-		"0:	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
+		"0:	lhi	%[exc],0\n"
 		"1:\n"
+		CC_IPM(cc)
 		EX_TABLE(0b, 1b)
-		: [cc] "+&d" (ccode)
+		: CC_OUT(cc, ccode), [exc] "+d" (exception)
 		: [r1] "d" (r1), [addr] "Q" (*addr)
-		: "cc", "memory", "1");
-	return ccode;
+		: CC_CLOBBER_LIST("memory", "1"));
+	return exception ? -EIO : CC_TRANSFORM(ccode);
 }
 
 int ssch(struct subchannel_id schid, union orb *addr)
@@ -135,12 +138,11 @@ static inline int __csch(struct subchannel_id schid)
 	asm volatile(
 		"	lgr	1,%[r1]\n"
 		"	csch\n"
-		"	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
-		: [cc] "=&d" (ccode)
+		CC_IPM(cc)
+		: CC_OUT(cc, ccode)
 		: [r1] "d" (r1)
-		: "cc", "1");
-	return ccode;
+		: CC_CLOBBER_LIST("1"));
+	return CC_TRANSFORM(ccode);
 }
 
 int csch(struct subchannel_id schid)
@@ -160,11 +162,11 @@ int tpi(struct tpi_info *addr)
 
 	asm volatile(
 		"	tpi	%[addr]\n"
-		"	ipm	%[cc]\n"
-		"	srl	%[cc],28"
-		: [cc] "=&d" (ccode), [addr] "=Q" (*addr)
+		CC_IPM(cc)
+		: CC_OUT(cc, ccode), [addr] "=Q" (*addr)
 		:
-		: "cc");
+		: CC_CLOBBER);
+	ccode = CC_TRANSFORM(ccode);
 	trace_s390_cio_tpi(addr, ccode);
 
 	return ccode;
@@ -173,17 +175,19 @@ int tpi(struct tpi_info *addr)
 int chsc(void *chsc_area)
 {
 	typedef struct { char _[4096]; } addr_type;
-	int cc = -EIO;
+	int cc, exception;
 
+	exception = 1;
 	asm volatile(
 		"	.insn	rre,0xb25f0000,%[chsc_area],0\n"
-		"0:	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
+		"0:	lhi	%[exc],0\n"
 		"1:\n"
+		CC_IPM(cc)
 		EX_TABLE(0b, 1b)
-		: [cc] "+&d" (cc), "+m" (*(addr_type *)chsc_area)
+		: CC_OUT(cc, cc), "+m" (*(addr_type *)chsc_area), [exc] "+d" (exception)
 		: [chsc_area] "d" (chsc_area)
-		: "cc");
+		: CC_CLOBBER);
+	cc = exception ? -EIO : CC_TRANSFORM(cc);
 	trace_s390_cio_chsc(chsc_area, cc);
 
 	return cc;
@@ -198,12 +202,11 @@ static inline int __rsch(struct subchannel_id schid)
 	asm volatile(
 		"	lgr	1,%[r1]\n"
 		"	rsch\n"
-		"	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
-		: [cc] "=&d" (ccode)
+		CC_IPM(cc)
+		: CC_OUT(cc, ccode)
 		: [r1] "d" (r1)
-		: "cc", "memory", "1");
-	return ccode;
+		: CC_CLOBBER_LIST("memory", "1"));
+	return CC_TRANSFORM(ccode);
 }
 
 int rsch(struct subchannel_id schid)
@@ -224,12 +227,11 @@ static inline int __hsch(struct subchannel_id schid)
 	asm volatile(
 		"	lgr	1,%[r1]\n"
 		"	hsch\n"
-		"	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
-		: [cc] "=&d" (ccode)
+		CC_IPM(cc)
+		: CC_OUT(cc, ccode)
 		: [r1] "d" (r1)
-		: "cc", "1");
-	return ccode;
+		: CC_CLOBBER_LIST("1"));
+	return CC_TRANSFORM(ccode);
 }
 
 int hsch(struct subchannel_id schid)
@@ -256,7 +258,7 @@ static inline int __xsch(struct subchannel_id schid)
 		: [cc] "=&d" (ccode)
 		: [r1] "d" (r1)
 		: "cc", "1");
-	return ccode;
+	return CC_TRANSFORM(ccode);
 }
 
 int xsch(struct subchannel_id schid)
@@ -275,12 +277,11 @@ static inline int __stcrw(struct crw *crw)
 
 	asm volatile(
 		"	stcrw	%[crw]\n"
-		"	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
-		: [cc] "=&d" (ccode), [crw] "=Q" (*crw)
+		CC_IPM(cc)
+		: CC_OUT(cc, ccode), [crw] "=Q" (*crw)
 		:
-		: "cc");
-	return ccode;
+		: CC_CLOBBER);
+	return CC_TRANSFORM(ccode);
 }
 
 static inline int _stcrw(struct crw *crw)
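
The CC_IPM/CC_OUT/CC_CLOBBER/CC_TRANSFORM helpers come from the newly included <asm/asm.h>. A sketch of the non-flag-output variant, inferred from the ipm/srl sequences this patch removes rather than from the authoritative header; with __GCC_ASM_FLAG_OUTPUTS__, CC_OUT() instead binds the condition code as a compiler flag output and CC_TRANSFORM() maps that value to the usual 0-3 range:

#include <linux/stringify.h>

#define CC_IPM(sym)	"	ipm	%[" __stringify(sym) "]\n"	\
			"	srl	%[" __stringify(sym) "],28\n"
#define CC_OUT(sym, var)	[sym] "=d" (var)
#define CC_CLOBBER		"cc"
#define CC_CLOBBER_LIST(...)	"cc", __VA_ARGS__
#define CC_TRANSFORM(cc)	(cc)
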
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index b711bb17f9da..07e82816b77a 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -17,6 +17,7 @@
 #include <linux/atomic.h>
 #include <asm/debug.h>
 #include <asm/qdio.h>
+#include <asm/asm.h>
 #include <asm/ipl.h>
 
 #include "cio.h"
@@ -42,13 +43,12 @@ static inline int do_siga_sync(unsigned long schid,
 		"	lgr	2,%[out]\n"
 		"	lgr	3,%[in]\n"
 		"	siga	0\n"
-		"	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
-		: [cc] "=&d" (cc)
+		CC_IPM(cc)
+		: CC_OUT(cc, cc)
 		: [fc] "d" (fc), [schid] "d" (schid),
 		  [out] "d" (out_mask), [in] "d" (in_mask)
-		: "cc", "0", "1", "2", "3");
-	return cc;
+		: CC_CLOBBER_LIST("0", "1", "2", "3"));
+	return CC_TRANSFORM(cc);
 }
 
 static inline int do_siga_input(unsigned long schid, unsigned long mask,
@@ -61,12 +61,11 @@ static inline int do_siga_input(unsigned long schid, unsigned long mask,
 		"	lgr	1,%[schid]\n"
 		"	lgr	2,%[mask]\n"
 		"	siga	0\n"
-		"	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
-		: [cc] "=&d" (cc)
+		CC_IPM(cc)
+		: CC_OUT(cc, cc)
 		: [fc] "d" (fc), [schid] "d" (schid), [mask] "d" (mask)
-		: "cc", "0", "1", "2");
-	return cc;
+		: CC_CLOBBER_LIST("0", "1", "2"));
+	return CC_TRANSFORM(cc);
 }
 
 /**
@@ -93,13 +92,12 @@ static inline int do_siga_output(unsigned long schid, unsigned long mask,
 		"	lgr	3,%[aob]\n"
 		"	siga	0\n"
 		"	lgr	%[fc],0\n"
-		"	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
-		: [cc] "=&d" (cc), [fc] "+&d" (fc)
+		CC_IPM(cc)
+		: CC_OUT(cc, cc), [fc] "+&d" (fc)
 		: [schid] "d" (schid), [mask] "d" (mask), [aob] "d" (aob)
-		: "cc", "0", "1", "2", "3");
+		: CC_CLOBBER_LIST("0", "1", "2", "3"));
 	*bb = fc >> 31;
-	return cc;
+	return CC_TRANSFORM(cc);
 }
 
 /**
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
index c7894d61306d..a0825e372d42 100644
--- a/drivers/s390/cio/scm.c
+++ b/drivers/s390/cio/scm.c
@@ -91,7 +91,7 @@ static ssize_t show_##name(struct device *dev,				\
 	int ret;							\
 									\
 	device_lock(dev);						\
-	ret = sprintf(buf, "%u\n", scmdev->attrs.name);			\
+	ret = sysfs_emit(buf, "%u\n", scmdev->attrs.name);		\
 	device_unlock(dev);						\
 									\
 	return ret;							\
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index c88b6e071847..e83c6603c858 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -29,6 +29,10 @@ obj-$(CONFIG_PKEY_EP11) += pkey-ep11.o
 pkey-pckmo-objs := pkey_pckmo.o
 obj-$(CONFIG_PKEY_PCKMO) += pkey-pckmo.o
 
+# pkey uv handler module
+pkey-uv-objs := pkey_uv.o
+obj-$(CONFIG_PKEY_UV) += pkey-uv.o
+
 # adjunct processor matrix
 vfio_ap-objs := vfio_ap_drv.o vfio_ap_ops.o
 obj-$(CONFIG_VFIO_AP) += vfio_ap.o
diff --git a/drivers/s390/crypto/pkey_base.c b/drivers/s390/crypto/pkey_base.c
index fea243322838..64a376501d26 100644
--- a/drivers/s390/crypto/pkey_base.c
+++ b/drivers/s390/crypto/pkey_base.c
@@ -304,7 +304,19 @@ void pkey_handler_request_modules(void)
 {
 #ifdef CONFIG_MODULES
 	static const char * const pkey_handler_modules[] = {
-		"pkey_cca", "pkey_ep11", "pkey_pckmo" };
+#if IS_MODULE(CONFIG_PKEY_CCA)
+		"pkey_cca",
+#endif
+#if IS_MODULE(CONFIG_PKEY_EP11)
+		"pkey_ep11",
+#endif
+#if IS_MODULE(CONFIG_PKEY_PCKMO)
+		"pkey_pckmo",
+#endif
+#if IS_MODULE(CONFIG_PKEY_UV)
+		"pkey_uv",
+#endif
+	};
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(pkey_handler_modules); i++) {
diff --git a/drivers/s390/crypto/pkey_base.h b/drivers/s390/crypto/pkey_base.h
index 7a1a5ce192d8..7347647dfaa7 100644
--- a/drivers/s390/crypto/pkey_base.h
+++ b/drivers/s390/crypto/pkey_base.h
@@ -96,6 +96,42 @@ static inline u32 pkey_aes_bitsize_to_keytype(u32 keybitsize)
 	}
 }
 
+/*
+ * Helper function which translates a PKEY_KEYTYPE_* value
+ * into the protected key size minus the WK VP length.
+ */
+static inline u32 pkey_keytype_to_size(u32 keytype)
+{
+	switch (keytype) {
+	case PKEY_KEYTYPE_AES_128:
+		return 16;
+	case PKEY_KEYTYPE_AES_192:
+		return 24;
+	case PKEY_KEYTYPE_AES_256:
+		return 32;
+	case PKEY_KEYTYPE_ECC_P256:
+		return 32;
+	case PKEY_KEYTYPE_ECC_P384:
+		return 48;
+	case PKEY_KEYTYPE_ECC_P521:
+		return 80;
+	case PKEY_KEYTYPE_ECC_ED25519:
+		return 32;
+	case PKEY_KEYTYPE_ECC_ED448:
+		return 64;
+	case PKEY_KEYTYPE_AES_XTS_128:
+		return 32;
+	case PKEY_KEYTYPE_AES_XTS_256:
+		return 64;
+	case PKEY_KEYTYPE_HMAC_512:
+		return 64;
+	case PKEY_KEYTYPE_HMAC_1024:
+		return 128;
+	default:
+		return 0;
+	}
+}
+
 /*
  * pkey_api.c:
  */
diff --git a/drivers/s390/crypto/pkey_cca.c b/drivers/s390/crypto/pkey_cca.c
index 937051381720..cda22db31f6c 100644
--- a/drivers/s390/crypto/pkey_cca.c
+++ b/drivers/s390/crypto/pkey_cca.c
@@ -12,7 +12,6 @@
 #include <linux/module.h>
 #include <linux/cpufeature.h>
 
-#include "zcrypt_api.h"
 #include "zcrypt_ccamisc.h"
 #include "pkey_base.h"
 
@@ -225,14 +224,14 @@ static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
 	if (hdr->type == TOKTYPE_CCA_INTERNAL &&
 	    hdr->version == TOKVER_CCA_AES) {
 		/* CCA AES data key */
-		if (keylen != sizeof(struct secaeskeytoken))
+		if (keylen < sizeof(struct secaeskeytoken))
 			return -EINVAL;
 		if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0))
 			return -EINVAL;
 	} else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
 		   hdr->version == TOKVER_CCA_VLSC) {
 		/* CCA AES cipher key */
-		if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
+		if (keylen < hdr->len)
 			return -EINVAL;
 		if (cca_check_secaescipherkey(pkey_dbf_info,
 					      3, key, 0, 1))
diff --git a/drivers/s390/crypto/pkey_ep11.c b/drivers/s390/crypto/pkey_ep11.c
index f42d397a9cb6..5b033ca3e828 100644
--- a/drivers/s390/crypto/pkey_ep11.c
+++ b/drivers/s390/crypto/pkey_ep11.c
@@ -12,7 +12,6 @@
 #include <linux/module.h>
 #include <linux/cpufeature.h>
 
-#include "zcrypt_api.h"
 #include "zcrypt_ccamisc.h"
 #include "zcrypt_ep11misc.h"
 #include "pkey_base.h"
diff --git a/drivers/s390/crypto/pkey_pckmo.c b/drivers/s390/crypto/pkey_pckmo.c
index beeca8827c46..835d59f4fbc5 100644
--- a/drivers/s390/crypto/pkey_pckmo.c
+++ b/drivers/s390/crypto/pkey_pckmo.c
@@ -15,7 +15,6 @@
 #include <crypto/aes.h>
 #include <linux/random.h>
 
-#include "zcrypt_api.h"
 #include "zcrypt_ccamisc.h"
 #include "pkey_base.h"
 
@@ -38,23 +37,9 @@ static bool is_pckmo_key(const u8 *key, u32 keylen)
 	case TOKTYPE_NON_CCA:
 		switch (hdr->version) {
 		case TOKVER_CLEAR_KEY:
-			switch (t->keytype) {
-			case PKEY_KEYTYPE_AES_128:
-			case PKEY_KEYTYPE_AES_192:
-			case PKEY_KEYTYPE_AES_256:
-			case PKEY_KEYTYPE_ECC_P256:
-			case PKEY_KEYTYPE_ECC_P384:
-			case PKEY_KEYTYPE_ECC_P521:
-			case PKEY_KEYTYPE_ECC_ED25519:
-			case PKEY_KEYTYPE_ECC_ED448:
-			case PKEY_KEYTYPE_AES_XTS_128:
-			case PKEY_KEYTYPE_AES_XTS_256:
-			case PKEY_KEYTYPE_HMAC_512:
-			case PKEY_KEYTYPE_HMAC_1024:
+			if (pkey_keytype_to_size(t->keytype))
 				return true;
-			default:
-				return false;
-			}
+			return false;
 		case TOKVER_PROTECTED_KEY:
 			return true;
 		default:
@@ -86,80 +71,49 @@ static int pckmo_clr2protkey(u32 keytype, const u8 *clrkey, u32 clrkeylen,
 
 	int keysize, rc = -EINVAL;
 	u8 paramblock[160];
-	u32 pkeytype;
-	long fc;
+	u32 pkeytype = 0;
+	unsigned int fc;
 
 	switch (keytype) {
 	case PKEY_KEYTYPE_AES_128:
-		/* 16 byte key, 32 byte aes wkvp, total 48 bytes */
-		keysize = 16;
-		pkeytype = keytype;
 		fc = CPACF_PCKMO_ENC_AES_128_KEY;
 		break;
 	case PKEY_KEYTYPE_AES_192:
-		/* 24 byte key, 32 byte aes wkvp, total 56 bytes */
-		keysize = 24;
-		pkeytype = keytype;
 		fc = CPACF_PCKMO_ENC_AES_192_KEY;
 		break;
 	case PKEY_KEYTYPE_AES_256:
-		/* 32 byte key, 32 byte aes wkvp, total 64 bytes */
-		keysize = 32;
-		pkeytype = keytype;
 		fc = CPACF_PCKMO_ENC_AES_256_KEY;
 		break;
 	case PKEY_KEYTYPE_ECC_P256:
-		/* 32 byte key, 32 byte aes wkvp, total 64 bytes */
-		keysize = 32;
 		pkeytype = PKEY_KEYTYPE_ECC;
 		fc = CPACF_PCKMO_ENC_ECC_P256_KEY;
 		break;
 	case PKEY_KEYTYPE_ECC_P384:
-		/* 48 byte key, 32 byte aes wkvp, total 80 bytes */
-		keysize = 48;
 		pkeytype = PKEY_KEYTYPE_ECC;
 		fc = CPACF_PCKMO_ENC_ECC_P384_KEY;
 		break;
 	case PKEY_KEYTYPE_ECC_P521:
-		/* 80 byte key, 32 byte aes wkvp, total 112 bytes */
-		keysize = 80;
 		pkeytype = PKEY_KEYTYPE_ECC;
 		fc = CPACF_PCKMO_ENC_ECC_P521_KEY;
 		break;
 	case PKEY_KEYTYPE_ECC_ED25519:
-		/* 32 byte key, 32 byte aes wkvp, total 64 bytes */
-		keysize = 32;
 		pkeytype = PKEY_KEYTYPE_ECC;
 		fc = CPACF_PCKMO_ENC_ECC_ED25519_KEY;
 		break;
 	case PKEY_KEYTYPE_ECC_ED448:
-		/* 64 byte key, 32 byte aes wkvp, total 96 bytes */
-		keysize = 64;
 		pkeytype = PKEY_KEYTYPE_ECC;
 		fc = CPACF_PCKMO_ENC_ECC_ED448_KEY;
 		break;
 	case PKEY_KEYTYPE_AES_XTS_128:
-		/* 2x16 byte keys, 32 byte aes wkvp, total 64 bytes */
-		keysize = 32;
-		pkeytype = PKEY_KEYTYPE_AES_XTS_128;
 		fc = CPACF_PCKMO_ENC_AES_XTS_128_DOUBLE_KEY;
 		break;
 	case PKEY_KEYTYPE_AES_XTS_256:
-		/* 2x32 byte keys, 32 byte aes wkvp, total 96 bytes */
-		keysize = 64;
-		pkeytype = PKEY_KEYTYPE_AES_XTS_256;
 		fc = CPACF_PCKMO_ENC_AES_XTS_256_DOUBLE_KEY;
 		break;
 	case PKEY_KEYTYPE_HMAC_512:
-		/* 64 byte key, 32 byte aes wkvp, total 96 bytes */
-		keysize = 64;
-		pkeytype = PKEY_KEYTYPE_HMAC_512;
 		fc = CPACF_PCKMO_ENC_HMAC_512_KEY;
 		break;
 	case PKEY_KEYTYPE_HMAC_1024:
-		/* 128 byte key, 32 byte aes wkvp, total 160 bytes */
-		keysize = 128;
-		pkeytype = PKEY_KEYTYPE_HMAC_1024;
 		fc = CPACF_PCKMO_ENC_HMAC_1024_KEY;
 		break;
 	default:
@@ -168,6 +122,9 @@ static int pckmo_clr2protkey(u32 keytype, const u8 *clrkey, u32 clrkeylen,
 		goto out;
 	}
 
+	keysize = pkey_keytype_to_size(keytype);
+	pkeytype = pkeytype ?: keytype;
+
 	if (clrkeylen && clrkeylen < keysize) {
 		PKEY_DBF_ERR("%s clear key size too small: %u < %d\n",
 			     __func__, clrkeylen, keysize);
@@ -190,7 +147,8 @@ static int pckmo_clr2protkey(u32 keytype, const u8 *clrkey, u32 clrkeylen,
 	}
 	/* check for the pckmo subfunction we need now */
 	if (!cpacf_test_func(&pckmo_functions, fc)) {
-		PKEY_DBF_ERR("%s pckmo functions not available\n", __func__);
+		PKEY_DBF_ERR("%s pckmo fc 0x%02x not available\n",
+			     __func__, fc);
 		rc = -ENODEV;
 		goto out;
 	}
@@ -216,60 +174,42 @@ out:
 
 /*
  * Verify a raw protected key blob.
- * Currently only AES protected keys are supported.
  */
 static int pckmo_verify_protkey(const u8 *protkey, u32 protkeylen,
 				u32 protkeytype)
 {
-	struct {
-		u8 iv[AES_BLOCK_SIZE];
-		u8 key[MAXPROTKEYSIZE];
-	} param;
-	u8 null_msg[AES_BLOCK_SIZE];
-	u8 dest_buf[AES_BLOCK_SIZE];
-	unsigned int k, pkeylen;
-	unsigned long fc;
-	int rc = -EINVAL;
+	u8 clrkey[16] = { 0 }, tmpkeybuf[16 + AES_WK_VP_SIZE];
+	u32 tmpkeybuflen, tmpkeytype;
+	int keysize, rc = -EINVAL;
+	u8 *wkvp;
 
-	switch (protkeytype) {
-	case PKEY_KEYTYPE_AES_128:
-		pkeylen = 16 + AES_WK_VP_SIZE;
-		fc = CPACF_KMC_PAES_128;
-		break;
-	case PKEY_KEYTYPE_AES_192:
-		pkeylen = 24 + AES_WK_VP_SIZE;
-		fc = CPACF_KMC_PAES_192;
-		break;
-	case PKEY_KEYTYPE_AES_256:
-		pkeylen = 32 + AES_WK_VP_SIZE;
-		fc = CPACF_KMC_PAES_256;
-		break;
-	default:
+	/* check protkey type and size */
+	keysize = pkey_keytype_to_size(protkeytype);
+	if (!keysize) {
 		PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", __func__,
 			     protkeytype);
 		goto out;
 	}
-	if (protkeylen != pkeylen) {
-		PKEY_DBF_ERR("%s invalid protected key size %u for keytype %u\n",
-			     __func__, protkeylen, protkeytype);
+	if (protkeylen < keysize + AES_WK_VP_SIZE)
 		goto out;
-	}
 
-	memset(null_msg, 0, sizeof(null_msg));
+	/* generate a dummy AES 128 protected key */
+	tmpkeybuflen = sizeof(tmpkeybuf);
+	rc = pckmo_clr2protkey(PKEY_KEYTYPE_AES_128,
+			       clrkey, sizeof(clrkey),
+			       tmpkeybuf, &tmpkeybuflen, &tmpkeytype);
+	if (rc)
+		goto out;
+	memzero_explicit(tmpkeybuf, 16);
+	wkvp = tmpkeybuf + 16;
 
-	memset(param.iv, 0, sizeof(param.iv));
-	memcpy(param.key, protkey, protkeylen);
-
-	k = cpacf_kmc(fc | CPACF_ENCRYPT, &param, null_msg, dest_buf,
-		      sizeof(null_msg));
-	if (k != sizeof(null_msg)) {
-		PKEY_DBF_ERR("%s protected key is not valid\n", __func__);
+	/* compare WK VP from the temp key with that of the given prot key */
+	if (memcmp(wkvp, protkey + keysize, AES_WK_VP_SIZE)) {
+		PKEY_DBF_ERR("%s protected key WK VP mismatch\n", __func__);
 		rc = -EKEYREJECTED;
 		goto out;
 	}
 
-	rc = 0;
-
 out:
 	pr_debug("rc=%d\n", rc);
 	return rc;
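
Every protected key ends in the 32-byte wrapping-key verification pattern (WK VP) of the CPACF wrapping key that sealed it, so comparing that trailer against the WK VP of a freshly generated dummy key proves the blob was wrapped by the current wrapping key. As a conceptual sketch (plain C, names hypothetical):

#include <string.h>

#define WK_VP_SIZE 32	/* matches AES_WK_VP_SIZE above */

/* protkey layout: [ key material (keysize bytes) | WK VP (32 bytes) ] */
static int wkvp_is_current(const unsigned char *protkey, unsigned int keysize,
			   const unsigned char *cur_wkvp)
{
	return memcmp(protkey + keysize, cur_wkvp, WK_VP_SIZE) == 0;
}
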
@@ -289,37 +229,33 @@ static int pckmo_key2protkey(const u8 *key, u32 keylen,
 	switch (hdr->version) {
 	case TOKVER_PROTECTED_KEY: {
 		struct protkeytoken *t = (struct protkeytoken *)key;
+		u32 keysize;
 
 		if (keylen < sizeof(*t))
 			goto out;
+		keysize = pkey_keytype_to_size(t->keytype);
+		if (!keysize) {
+			PKEY_DBF_ERR("%s protected key token: unknown keytype %u\n",
+				     __func__, t->keytype);
+			goto out;
+		}
 		switch (t->keytype) {
 		case PKEY_KEYTYPE_AES_128:
 		case PKEY_KEYTYPE_AES_192:
 		case PKEY_KEYTYPE_AES_256:
-			if (keylen != sizeof(struct protaeskeytoken))
+			if (t->len != keysize + AES_WK_VP_SIZE ||
+			    keylen < sizeof(struct protaeskeytoken))
 				goto out;
 			rc = pckmo_verify_protkey(t->protkey, t->len,
 						  t->keytype);
 			if (rc)
 				goto out;
 			break;
-		case PKEY_KEYTYPE_AES_XTS_128:
-			if (t->len != 64 || keylen != sizeof(*t) + t->len)
-				goto out;
-			break;
-		case PKEY_KEYTYPE_AES_XTS_256:
-		case PKEY_KEYTYPE_HMAC_512:
-			if (t->len != 96 || keylen != sizeof(*t) + t->len)
-				goto out;
-			break;
-		case PKEY_KEYTYPE_HMAC_1024:
-			if (t->len != 160 || keylen != sizeof(*t) + t->len)
-				goto out;
-			break;
 		default:
-			PKEY_DBF_ERR("%s protected key token: unknown keytype %u\n",
-				     __func__, t->keytype);
-			goto out;
+			if (t->len != keysize + AES_WK_VP_SIZE ||
+			    keylen < sizeof(*t) + keysize + AES_WK_VP_SIZE)
+				goto out;
+			break;
 		}
 		memcpy(protkey, t->protkey, t->len);
 		*protkeylen = t->len;
@@ -329,47 +265,12 @@ static int pckmo_key2protkey(const u8 *key, u32 keylen,
 	}
 	case TOKVER_CLEAR_KEY: {
 		struct clearkeytoken *t = (struct clearkeytoken *)key;
-		u32 keysize = 0;
+		u32 keysize;
 
-		if (keylen < sizeof(struct clearkeytoken) ||
-		    keylen != sizeof(*t) + t->len)
+		if (keylen < sizeof(*t) ||
+		    keylen < sizeof(*t) + t->len)
 			goto out;
-		switch (t->keytype) {
-		case PKEY_KEYTYPE_AES_128:
-		case PKEY_KEYTYPE_AES_192:
-		case PKEY_KEYTYPE_AES_256:
-			keysize = pkey_keytype_aes_to_size(t->keytype);
-			break;
-		case PKEY_KEYTYPE_ECC_P256:
-			keysize = 32;
-			break;
-		case PKEY_KEYTYPE_ECC_P384:
-			keysize = 48;
-			break;
-		case PKEY_KEYTYPE_ECC_P521:
-			keysize = 80;
-			break;
-		case PKEY_KEYTYPE_ECC_ED25519:
-			keysize = 32;
-			break;
-		case PKEY_KEYTYPE_ECC_ED448:
-			keysize = 64;
-			break;
-		case PKEY_KEYTYPE_AES_XTS_128:
-			keysize = 32;
-			break;
-		case PKEY_KEYTYPE_AES_XTS_256:
-			keysize = 64;
-			break;
-		case PKEY_KEYTYPE_HMAC_512:
-			keysize = 64;
-			break;
-		case PKEY_KEYTYPE_HMAC_1024:
-			keysize = 128;
-			break;
-		default:
-			break;
-		}
+		keysize = pkey_keytype_to_size(t->keytype);
 		if (!keysize) {
 			PKEY_DBF_ERR("%s clear key token: unknown keytype %u\n",
 				     __func__, t->keytype);
@@ -397,8 +298,6 @@ out:
 
 /*
  * Generate a random protected key.
- * Currently only the generation of AES protected keys
- * is supported.
  */
 static int pckmo_gen_protkey(u32 keytype, u32 subtype,
 			     u8 *protkey, u32 *protkeylen, u32 *protkeytype)
@@ -407,23 +306,8 @@ static int pckmo_gen_protkey(u32 keytype, u32 subtype,
 	int keysize;
 	int rc;
 
-	switch (keytype) {
-	case PKEY_KEYTYPE_AES_128:
-	case PKEY_KEYTYPE_AES_192:
-	case PKEY_KEYTYPE_AES_256:
-		keysize = pkey_keytype_aes_to_size(keytype);
-		break;
-	case PKEY_KEYTYPE_AES_XTS_128:
-		keysize = 32;
-		break;
-	case PKEY_KEYTYPE_AES_XTS_256:
-	case PKEY_KEYTYPE_HMAC_512:
-		keysize = 64;
-		break;
-	case PKEY_KEYTYPE_HMAC_1024:
-		keysize = 128;
-		break;
-	default:
+	keysize = pkey_keytype_to_size(keytype);
+	if (!keysize) {
 		PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
 			     __func__, keytype);
 		return -EINVAL;
@@ -434,6 +318,21 @@ static int pckmo_gen_protkey(u32 keytype, u32 subtype,
 		return -EINVAL;
 	}
 
+	switch (keytype) {
+	case PKEY_KEYTYPE_AES_128:
+	case PKEY_KEYTYPE_AES_192:
+	case PKEY_KEYTYPE_AES_256:
+	case PKEY_KEYTYPE_AES_XTS_128:
+	case PKEY_KEYTYPE_AES_XTS_256:
+	case PKEY_KEYTYPE_HMAC_512:
+	case PKEY_KEYTYPE_HMAC_1024:
+		break;
+	default:
+		PKEY_DBF_ERR("%s unsupported keytype %d\n",
+			     __func__, keytype);
+		return -EINVAL;
+	}
+
 	/* generate a dummy random clear key */
 	get_random_bytes(clrkey, keysize);
 
@@ -453,7 +352,6 @@ out:
 
 /*
  * Verify a protected key token blob.
- * Currently only AES protected keys are supported.
  */
 static int pckmo_verify_key(const u8 *key, u32 keylen)
 {
@@ -467,11 +365,26 @@ static int pckmo_verify_key(const u8 *key, u32 keylen)
 
 	switch (hdr->version) {
 	case TOKVER_PROTECTED_KEY: {
-		struct protaeskeytoken *t;
+		struct protkeytoken *t = (struct protkeytoken *)key;
+		u32 keysize;
 
-		if (keylen != sizeof(struct protaeskeytoken))
+		if (keylen < sizeof(*t))
 			goto out;
-		t = (struct protaeskeytoken *)key;
+		keysize = pkey_keytype_to_size(t->keytype);
+		if (!keysize || t->len != keysize + AES_WK_VP_SIZE)
+			goto out;
+		switch (t->keytype) {
+		case PKEY_KEYTYPE_AES_128:
+		case PKEY_KEYTYPE_AES_192:
+		case PKEY_KEYTYPE_AES_256:
+			if (keylen < sizeof(struct protaeskeytoken))
+				goto out;
+			break;
+		default:
+			if (keylen < sizeof(*t) + keysize + AES_WK_VP_SIZE)
+				goto out;
+			break;
+		}
 		rc = pckmo_verify_protkey(t->protkey, t->len, t->keytype);
 		break;
 	}
diff --git a/drivers/s390/crypto/pkey_sysfs.c b/drivers/s390/crypto/pkey_sysfs.c
index cc0fc1e264bd..a4eb45803f5e 100644
--- a/drivers/s390/crypto/pkey_sysfs.c
+++ b/drivers/s390/crypto/pkey_sysfs.c
@@ -10,7 +10,6 @@
 
 #include <linux/sysfs.h>
 
-#include "zcrypt_api.h"
 #include "zcrypt_ccamisc.h"
 #include "zcrypt_ep11misc.h"
 
diff --git a/drivers/s390/crypto/pkey_uv.c b/drivers/s390/crypto/pkey_uv.c
new file mode 100644
index 000000000000..805817b14354
--- /dev/null
+++ b/drivers/s390/crypto/pkey_uv.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  pkey uv specific code
+ *
+ *  Copyright IBM Corp. 2024
+ */
+
+#define KMSG_COMPONENT "pkey"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/cpufeature.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <asm/uv.h>
+
+#include "zcrypt_ccamisc.h"
+#include "pkey_base.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 protected key UV handler");
+
+/*
+ * UV secret token struct and defines.
+ */
+
+#define TOKVER_UV_SECRET 0x09
+
+struct uvsecrettoken {
+	u8  type;		/* 0x00 = TOKTYPE_NON_CCA */
+	u8  res0[3];
+	u8  version;		/* 0x09 = TOKVER_UV_SECRET */
+	u8  res1[3];
+	u16 secret_type;	/* one of enum uv_secret_types from uv.h */
+	u16 secret_len;		/* length in bytes of the secret */
+	u8  secret_id[UV_SECRET_ID_LEN]; /* the secret id for this secret */
+} __packed;
+
+/*
+ * Check key blob for known and supported UV key.
+ */
+static bool is_uv_key(const u8 *key, u32 keylen)
+{
+	struct uvsecrettoken *t = (struct uvsecrettoken *)key;
+
+	if (keylen < sizeof(*t))
+		return false;
+
+	switch (t->type) {
+	case TOKTYPE_NON_CCA:
+		switch (t->version) {
+		case TOKVER_UV_SECRET:
+			switch (t->secret_type) {
+			case UV_SECRET_AES_128:
+			case UV_SECRET_AES_192:
+			case UV_SECRET_AES_256:
+			case UV_SECRET_AES_XTS_128:
+			case UV_SECRET_AES_XTS_256:
+			case UV_SECRET_HMAC_SHA_256:
+			case UV_SECRET_HMAC_SHA_512:
+			case UV_SECRET_ECDSA_P256:
+			case UV_SECRET_ECDSA_P384:
+			case UV_SECRET_ECDSA_P521:
+			case UV_SECRET_ECDSA_ED25519:
+			case UV_SECRET_ECDSA_ED448:
+				return true;
+			default:
+				return false;
+			}
+		default:
+			return false;
+		}
+	default:
+		return false;
+	}
+}
+
+static bool is_uv_keytype(enum pkey_key_type keytype)
+{
+	switch (keytype) {
+	case PKEY_TYPE_UVSECRET:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static int retrieve_secret(const u8 secret_id[UV_SECRET_ID_LEN],
+			   u16 *secret_type, u8 *buf, u32 *buflen)
+{
+	struct uv_secret_list_item_hdr secret_meta_data;
+	int rc;
+
+	rc = uv_get_secret_metadata(secret_id, &secret_meta_data);
+	if (rc)
+		return rc;
+
+	if (*buflen < secret_meta_data.length)
+		return -EINVAL;
+
+	rc = uv_retrieve_secret(secret_meta_data.index,
+				buf, secret_meta_data.length);
+	if (rc)
+		return rc;
+
+	*secret_type = secret_meta_data.type;
+	*buflen = secret_meta_data.length;
+
+	return 0;
+}
+
+static int uv_get_size_and_type(u16 secret_type, u32 *pkeysize, u32 *pkeytype)
+{
+	int rc = 0;
+
+	switch (secret_type) {
+	case UV_SECRET_AES_128:
+		*pkeysize = 16 + AES_WK_VP_SIZE;
+		*pkeytype = PKEY_KEYTYPE_AES_128;
+		break;
+	case UV_SECRET_AES_192:
+		*pkeysize = 24 + AES_WK_VP_SIZE;
+		*pkeytype = PKEY_KEYTYPE_AES_192;
+		break;
+	case UV_SECRET_AES_256:
+		*pkeysize = 32 + AES_WK_VP_SIZE;
+		*pkeytype = PKEY_KEYTYPE_AES_256;
+		break;
+	case UV_SECRET_AES_XTS_128:
+		*pkeysize = 16 + 16 + AES_WK_VP_SIZE;
+		*pkeytype = PKEY_KEYTYPE_AES_XTS_128;
+		break;
+	case UV_SECRET_AES_XTS_256:
+		*pkeysize = 32 + 32 + AES_WK_VP_SIZE;
+		*pkeytype = PKEY_KEYTYPE_AES_XTS_256;
+		break;
+	case UV_SECRET_HMAC_SHA_256:
+		*pkeysize = 64 + AES_WK_VP_SIZE;
+		*pkeytype = PKEY_KEYTYPE_HMAC_512;
+		break;
+	case UV_SECRET_HMAC_SHA_512:
+		*pkeysize = 128 + AES_WK_VP_SIZE;
+		*pkeytype = PKEY_KEYTYPE_HMAC_1024;
+		break;
+	case UV_SECRET_ECDSA_P256:
+		*pkeysize = 32 + AES_WK_VP_SIZE;
+		*pkeytype = PKEY_KEYTYPE_ECC_P256;
+		break;
+	case UV_SECRET_ECDSA_P384:
+		*pkeysize = 48 + AES_WK_VP_SIZE;
+		*pkeytype = PKEY_KEYTYPE_ECC_P384;
+		break;
+	case UV_SECRET_ECDSA_P521:
+		*pkeysize = 80 + AES_WK_VP_SIZE;
+		*pkeytype = PKEY_KEYTYPE_ECC_P521;
+		break;
+	case UV_SECRET_ECDSA_ED25519:
+		*pkeysize = 32 + AES_WK_VP_SIZE;
+		*pkeytype = PKEY_KEYTYPE_ECC_ED25519;
+		break;
+	case UV_SECRET_ECDSA_ED448:
+		*pkeysize = 64 + AES_WK_VP_SIZE;
+		*pkeytype = PKEY_KEYTYPE_ECC_ED448;
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int uv_key2protkey(const struct pkey_apqn *_apqns __always_unused,
+			  size_t _nr_apqns __always_unused,
+			  const u8 *key, u32 keylen,
+			  u8 *protkey, u32 *protkeylen, u32 *keyinfo)
+{
+	struct uvsecrettoken *t = (struct uvsecrettoken *)key;
+	u32 pkeysize, pkeytype;
+	u16 secret_type;
+	int rc;
+
+	rc = uv_get_size_and_type(t->secret_type, &pkeysize, &pkeytype);
+	if (rc)
+		goto out;
+
+	if (*protkeylen < pkeysize) {
+		PKEY_DBF_ERR("%s prot key buffer size too small: %u < %u\n",
+			     __func__, *protkeylen, pkeysize);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	rc = retrieve_secret(t->secret_id, &secret_type, protkey, protkeylen);
+	if (rc) {
+		PKEY_DBF_ERR("%s retrieve_secret() failed with %d\n",
+			     __func__, rc);
+		goto out;
+	}
+	if (secret_type != t->secret_type) {
+		PKEY_DBF_ERR("%s retrieved secret type %u != expected type %u\n",
+			     __func__, secret_type, t->secret_type);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (keyinfo)
+		*keyinfo = pkeytype;
+
+out:
+	pr_debug("rc=%d\n", rc);
+	return rc;
+}
+
+static int uv_verifykey(const u8 *key, u32 keylen,
+			u16 *_card __always_unused,
+			u16 *_dom __always_unused,
+			u32 *keytype, u32 *keybitsize, u32 *flags)
+{
+	struct uvsecrettoken *t = (struct uvsecrettoken *)key;
+	struct uv_secret_list_item_hdr secret_meta_data;
+	u32 pkeysize, pkeytype, bitsize;
+	int rc;
+
+	rc = uv_get_size_and_type(t->secret_type, &pkeysize, &pkeytype);
+	if (rc)
+		goto out;
+
+	rc = uv_get_secret_metadata(t->secret_id, &secret_meta_data);
+	if (rc)
+		goto out;
+
+	if (secret_meta_data.type != t->secret_type) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* set keytype; derive keybitsize; report the pkey keytype via flags */
+	if (keytype)
+		*keytype = PKEY_TYPE_UVSECRET;
+	if (keybitsize) {
+		bitsize = 8 * pkey_keytype_to_size(pkeytype);
+		*keybitsize = bitsize ?: PKEY_SIZE_UNKNOWN;
+	}
+	if (flags)
+		*flags = pkeytype;
+
+out:
+	pr_debug("rc=%d\n", rc);
+	return rc;
+}
+
+static struct pkey_handler uv_handler = {
+	.module			 = THIS_MODULE,
+	.name			 = "PKEY UV handler",
+	.is_supported_key	 = is_uv_key,
+	.is_supported_keytype	 = is_uv_keytype,
+	.key_to_protkey		 = uv_key2protkey,
+	.verify_key		 = uv_verifykey,
+};
+
+/*
+ * Module init
+ */
+static int __init pkey_uv_init(void)
+{
+	if (!is_prot_virt_guest())
+		return -ENODEV;
+
+	if (!test_bit_inv(BIT_UVC_CMD_RETR_SECRET, uv_info.inst_calls_list))
+		return -ENODEV;
+
+	return pkey_handler_register(&uv_handler);
+}
+
+/*
+ * Module exit
+ */
+static void __exit pkey_uv_exit(void)
+{
+	pkey_handler_unregister(&uv_handler);
+}
+
+module_cpu_feature_match(S390_CPU_FEATURE_UV, pkey_uv_init);
+module_exit(pkey_uv_exit);
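
From userspace, such a secret is addressed by handing the pkey layer a key blob in the uvsecrettoken layout defined above. A hedged sketch of building one (the enum value comes from asm/uv.h; routing the blob through PKEY_KBLOB2PROTK3 is an assumption):

#include <string.h>
#include <asm/uv.h>	/* UV_SECRET_ID_LEN, enum uv_secret_types */

/* Hypothetical helper: wrap a UV secret id into a pkey key blob */
static void build_uv_token(struct uvsecrettoken *tok,
			   const unsigned char id[UV_SECRET_ID_LEN])
{
	memset(tok, 0, sizeof(*tok));
	tok->type = 0x00;		/* TOKTYPE_NON_CCA */
	tok->version = 0x09;		/* TOKVER_UV_SECRET */
	tok->secret_type = UV_SECRET_AES_256;
	tok->secret_len = 32;
	memcpy(tok->secret_id, id, UV_SECRET_ID_LEN);
	/* pass tok as the key blob to a pkey ioctl such as
	 * PKEY_KBLOB2PROTK3 (assumption) to obtain the protected key
	 */
}
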
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 9f76f2d7b66e..8c0b40d8eb39 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -1521,18 +1521,13 @@ static ssize_t control_domains_show(struct device *dev,
 				    char *buf)
 {
 	unsigned long id;
-	int nchars = 0;
-	int n;
-	char *bufpos = buf;
 	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
 	unsigned long max_domid = matrix_mdev->matrix.adm_max;
+	int nchars = 0;
 
 	mutex_lock(&matrix_dev->mdevs_lock);
-	for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
-		n = sprintf(bufpos, "%04lx\n", id);
-		bufpos += n;
-		nchars += n;
-	}
+	for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1)
+		nchars += sysfs_emit_at(buf, nchars, "%04lx\n", id);
 	mutex_unlock(&matrix_dev->mdevs_lock);
 
 	return nchars;
@@ -1541,7 +1536,6 @@ static DEVICE_ATTR_RO(control_domains);
 
 static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf)
 {
-	char *bufpos = buf;
 	unsigned long apid;
 	unsigned long apqi;
 	unsigned long apid1;
@@ -1549,33 +1543,21 @@ static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf)
 	unsigned long napm_bits = matrix->apm_max + 1;
 	unsigned long naqm_bits = matrix->aqm_max + 1;
 	int nchars = 0;
-	int n;
 
 	apid1 = find_first_bit_inv(matrix->apm, napm_bits);
 	apqi1 = find_first_bit_inv(matrix->aqm, naqm_bits);
 
 	if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
 		for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
-			for_each_set_bit_inv(apqi, matrix->aqm,
-					     naqm_bits) {
-				n = sprintf(bufpos, "%02lx.%04lx\n", apid,
-					    apqi);
-				bufpos += n;
-				nchars += n;
-			}
+			for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits)
+				nchars += sysfs_emit_at(buf, nchars, "%02lx.%04lx\n", apid, apqi);
 		}
 	} else if (apid1 < napm_bits) {
-		for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
-			n = sprintf(bufpos, "%02lx.\n", apid);
-			bufpos += n;
-			nchars += n;
-		}
+		for_each_set_bit_inv(apid, matrix->apm, napm_bits)
+			nchars += sysfs_emit_at(buf, nchars, "%02lx.\n", apid);
 	} else if (apqi1 < naqm_bits) {
-		for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits) {
-			n = sprintf(bufpos, ".%04lx\n", apqi);
-			bufpos += n;
-			nchars += n;
-		}
+		for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits)
+			nchars += sysfs_emit_at(buf, nchars, ".%04lx\n", apqi);
 	}
 
 	return nchars;
@@ -2263,14 +2245,11 @@ static ssize_t status_show(struct device *dev,
 		if (matrix_mdev->kvm &&
 		    test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
 		    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
-			nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
-					   AP_QUEUE_IN_USE);
+			nchars = sysfs_emit(buf, "%s\n", AP_QUEUE_IN_USE);
 		else
-			nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
-					   AP_QUEUE_ASSIGNED);
+			nchars = sysfs_emit(buf, "%s\n", AP_QUEUE_ASSIGNED);
 	} else {
-		nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
-				   AP_QUEUE_UNASSIGNED);
+		nchars = sysfs_emit(buf, "%s\n", AP_QUEUE_UNASSIGNED);
 	}
 
 	mutex_unlock(&matrix_dev->mdevs_lock);
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h
index aed7e8384542..26bdca702523 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.h
+++ b/drivers/s390/crypto/zcrypt_ccamisc.h
@@ -12,6 +12,7 @@
 
 #include <asm/zcrypt.h>
 #include <asm/pkey.h>
+#include "zcrypt_api.h"
 
 /* Key token types */
 #define TOKTYPE_NON_CCA		 0x00 /* Non-CCA key token */
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 039e18d46f76..31c9f95d809d 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1319,7 +1319,7 @@ static ssize_t user_show(struct device *dev, struct device_attribute *attr,
 	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
-	return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
+	return sysfs_emit(buf, "%s\n", netiucv_printuser(priv->conn));
 }
 
 static int netiucv_check_user(const char *buf, size_t count, char *username,
@@ -1415,7 +1415,7 @@ static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
 	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
-	return sprintf(buf, "%d\n", priv->conn->max_buffsize);
+	return sysfs_emit(buf, "%d\n", priv->conn->max_buffsize);
 }
 
 static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
@@ -1473,7 +1473,7 @@ static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
 	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
-	return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
+	return sysfs_emit(buf, "%s\n", fsm_getstate_str(priv->fsm));
 }
 
 static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
@@ -1484,7 +1484,7 @@ static ssize_t conn_fsm_show (struct device *dev,
 	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
-	return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
+	return sysfs_emit(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
 }
 
 static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
@@ -1495,7 +1495,7 @@ static ssize_t maxmulti_show (struct device *dev,
 	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
-	return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
+	return sysfs_emit(buf, "%ld\n", priv->conn->prof.maxmulti);
 }
 
 static ssize_t maxmulti_write (struct device *dev,
@@ -1517,7 +1517,7 @@ static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
 	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
-	return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
+	return sysfs_emit(buf, "%ld\n", priv->conn->prof.maxcqueue);
 }
 
 static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
@@ -1538,7 +1538,7 @@ static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
 	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
-	return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
+	return sysfs_emit(buf, "%ld\n", priv->conn->prof.doios_single);
 }
 
 static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
@@ -1559,7 +1559,7 @@ static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
 	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
-	return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
+	return sysfs_emit(buf, "%ld\n", priv->conn->prof.doios_multi);
 }
 
 static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
@@ -1580,7 +1580,7 @@ static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
 	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
-	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
+	return sysfs_emit(buf, "%ld\n", priv->conn->prof.txlen);
 }
 
 static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
@@ -1601,7 +1601,7 @@ static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
 	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
-	return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
+	return sysfs_emit(buf, "%ld\n", priv->conn->prof.tx_time);
 }
 
 static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
@@ -1622,7 +1622,7 @@ static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
 	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
-	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
+	return sysfs_emit(buf, "%ld\n", priv->conn->prof.tx_pending);
 }
 
 static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
@@ -1643,7 +1643,7 @@ static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
 	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
-	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
+	return sysfs_emit(buf, "%ld\n", priv->conn->prof.tx_max_pending);
 }
 
 static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index cb67fa80fb12..304b81bb5f90 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -24,7 +24,7 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev,	       \
 {									       \
 	struct _feat_def *_feat = container_of(dev, struct _feat_def, dev);    \
 									       \
-	return sprintf(buf, _format, _value);				       \
+	return sysfs_emit(buf, _format, _value);			       \
 }									       \
 static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO,				       \
 		     zfcp_sysfs_##_feat##_##_name##_show, NULL);
@@ -34,7 +34,7 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev,	       \
 						   struct device_attribute *at,\
 						   char *buf)		       \
 {									       \
-	return sprintf(buf, _format, _value);				       \
+	return sysfs_emit(buf, _format, _value);			       \
 }									       \
 static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO,				       \
 		     zfcp_sysfs_##_feat##_##_name##_show, NULL);
@@ -51,7 +51,7 @@ static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev,	     \
 	if (!adapter)							     \
 		return -ENODEV;						     \
 									     \
-	i = sprintf(buf, _format, _value);				     \
+	i = sysfs_emit(buf, _format, _value);				     \
 	zfcp_ccw_adapter_put(adapter);					     \
 	return i;							     \
 }									     \
@@ -95,9 +95,9 @@ static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
 	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
 
 	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
-		return sprintf(buf, "1\n");
+		return sysfs_emit(buf, "1\n");
 
-	return sprintf(buf, "0\n");
+	return sysfs_emit(buf, "0\n");
 }
 
 static ssize_t zfcp_sysfs_port_failed_store(struct device *dev,
@@ -135,7 +135,7 @@ static ssize_t zfcp_sysfs_unit_failed_show(struct device *dev,
 		scsi_device_put(sdev);
 	}
 
-	return sprintf(buf, "%d\n", failed);
+	return sysfs_emit(buf, "%d\n", failed);
 }
 
 static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev,
@@ -176,9 +176,9 @@ static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev,
 		return -ENODEV;
 
 	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
-		i = sprintf(buf, "1\n");
+		i = sysfs_emit(buf, "1\n");
 	else
-		i = sprintf(buf, "0\n");
+		i = sysfs_emit(buf, "0\n");
 
 	zfcp_ccw_adapter_put(adapter);
 	return i;
@@ -348,8 +348,7 @@ zfcp_sysfs_adapter_diag_max_age_show(struct device *dev,
 	if (!adapter)
 		return -ENODEV;
 
-	/* ceil(log(2^64 - 1) / log(10)) = 20 */
-	rc = scnprintf(buf, 20 + 2, "%lu\n", adapter->diagnostics->max_age);
+	rc = sysfs_emit(buf, "%lu\n", adapter->diagnostics->max_age);
 
 	zfcp_ccw_adapter_put(adapter);
 	return rc;
@@ -401,14 +400,14 @@ static ssize_t zfcp_sysfs_adapter_fc_security_show(
 	 */
 	status = atomic_read(&adapter->status);
 	if (0 == (status & ZFCP_STATUS_COMMON_OPEN))
-		i = sprintf(buf, "unknown\n");
+		i = sysfs_emit(buf, "unknown\n");
 	else if (!(adapter->adapter_features & FSF_FEATURE_FC_SECURITY))
-		i = sprintf(buf, "unsupported\n");
+		i = sysfs_emit(buf, "unsupported\n");
 	else {
 		i = zfcp_fsf_scnprint_fc_security(
 			buf, PAGE_SIZE - 1, adapter->fc_security_algorithms,
 			ZFCP_FSF_PRINT_FMT_LIST);
-		i += scnprintf(buf + i, PAGE_SIZE - i, "\n");
+		i += sysfs_emit_at(buf, i, "\n");
 	}
 
 	zfcp_ccw_adapter_put(adapter);
@@ -490,14 +489,14 @@ static ssize_t zfcp_sysfs_port_fc_security_show(struct device *dev,
 	    0 != (status & ZFCP_STATUS_PORT_LINK_TEST) ||
 	    0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED) ||
 	    0 != (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
-		i = sprintf(buf, "unknown\n");
+		i = sysfs_emit(buf, "unknown\n");
 	else if (!(adapter->adapter_features & FSF_FEATURE_FC_SECURITY))
-		i = sprintf(buf, "unsupported\n");
+		i = sysfs_emit(buf, "unsupported\n");
 	else {
 		i = zfcp_fsf_scnprint_fc_security(
 			buf, PAGE_SIZE - 1, port->connection_info,
 			ZFCP_FSF_PRINT_FMT_SINGLEITEM);
-		i += scnprintf(buf + i, PAGE_SIZE - i, "\n");
+		i += sysfs_emit_at(buf, i, "\n");
 	}
 
 	return i;
@@ -569,8 +568,8 @@ zfcp_sysfs_unit_##_name##_latency_show(struct device *dev,		\
 	do_div(cmin, 1000);						\
 	do_div(cmax, 1000);						\
 									\
-	return sprintf(buf, "%llu %llu %llu %llu %llu %llu %llu\n",	\
-		       fmin, fmax, fsum, cmin, cmax, csum, cc); 	\
+	return sysfs_emit(buf, "%llu %llu %llu %llu %llu %llu %llu\n",	\
+			  fmin, fmax, fsum, cmin, cmax, csum, cc);	\
 }									\
 static ssize_t								\
 zfcp_sysfs_unit_##_name##_latency_store(struct device *dev,		\
@@ -610,8 +609,8 @@ static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev,	\
 	struct scsi_device *sdev = to_scsi_device(dev);			 \
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);		 \
 									 \
-	return sprintf(buf, _format, _value);                            \
-}                                                                        \
+	return sysfs_emit(buf, _format, _value);			 \
+}									 \
 static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
 
 ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
@@ -625,7 +624,7 @@ static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev,
 {
 	struct scsi_device *sdev = to_scsi_device(dev);
 
-	return sprintf(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev));
+	return sysfs_emit(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev));
 }
 static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL);
 
@@ -641,7 +640,7 @@ static ssize_t zfcp_sysfs_scsi_zfcp_failed_show(struct device *dev,
 	unsigned int status = atomic_read(&sdev_to_zfcp(sdev)->status);
 	unsigned int failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0;
 
-	return sprintf(buf, "%d\n", failed);
+	return sysfs_emit(buf, "%d\n", failed);
 }
 
 static ssize_t zfcp_sysfs_scsi_zfcp_failed_store(struct device *dev,
@@ -714,8 +713,8 @@ static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
 
 	retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port);
 	if (retval == 0 || retval == -EAGAIN)
-		retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
-				 qtcb_port->cb_util, qtcb_port->a_util);
+		retval = sysfs_emit(buf, "%u %u %u\n", qtcb_port->cp_util,
+				    qtcb_port->cb_util, qtcb_port->a_util);
 	kfree(qtcb_port);
 	return retval;
 }
@@ -758,7 +757,7 @@ static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev,	\
 	if (retval)							\
 		return retval;						\
 									\
-	return sprintf(buf, _format, ## _arg);				\
+	return sysfs_emit(buf, _format, ## _arg);			\
 }									\
 static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);
 
@@ -787,8 +786,8 @@ static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
 	util = qdio->req_q_util;
 	spin_unlock_bh(&qdio->stat_lock);
 
-	return sprintf(buf, "%d %llu\n", atomic_read(&qdio->req_q_full),
-		       (unsigned long long)util);
+	return sysfs_emit(buf, "%d %llu\n", atomic_read(&qdio->req_q_full),
+			  (unsigned long long)util);
 }
 static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
 
@@ -843,8 +842,7 @@ static ssize_t zfcp_sysfs_adapter_diag_b2b_credit_show(
 						      .data.nport_serv_param -
 				      sizeof(u32));
 
-	rc = scnprintf(buf, 5 + 2, "%hu\n",
-		       be16_to_cpu(nsp->fl_csp.sp_bb_cred));
+	rc = sysfs_emit(buf, "%hu\n", be16_to_cpu(nsp->fl_csp.sp_bb_cred));
 	spin_unlock_irqrestore(&diag_hdr->access_lock, flags);
 
 out:
@@ -854,7 +852,7 @@ out:
 static ZFCP_DEV_ATTR(adapter_diag, b2b_credit, 0400,
 		     zfcp_sysfs_adapter_diag_b2b_credit_show, NULL);
 
-#define ZFCP_DEFINE_DIAG_SFP_ATTR(_name, _qtcb_member, _prtsize, _prtfmt)      \
+#define ZFCP_DEFINE_DIAG_SFP_ATTR(_name, _qtcb_member, _prtfmt)		       \
 	static ssize_t zfcp_sysfs_adapter_diag_sfp_##_name##_show(	       \
 		struct device *dev, struct device_attribute *attr, char *buf)  \
 	{								       \
@@ -887,8 +885,8 @@ static ZFCP_DEV_ATTR(adapter_diag, b2b_credit, 0400,
 			goto out;					       \
 									       \
 		spin_lock_irqsave(&diag_hdr->access_lock, flags);	       \
-		rc = scnprintf(						       \
-			buf, (_prtsize) + 2, _prtfmt "\n",		       \
+		rc = sysfs_emit(					       \
+			buf, _prtfmt "\n",				       \
 			adapter->diagnostics->port_data.data._qtcb_member);    \
 		spin_unlock_irqrestore(&diag_hdr->access_lock, flags);	       \
 									       \
@@ -899,16 +897,16 @@ static ZFCP_DEV_ATTR(adapter_diag, b2b_credit, 0400,
 	static ZFCP_DEV_ATTR(adapter_diag_sfp, _name, 0400,		       \
 			     zfcp_sysfs_adapter_diag_sfp_##_name##_show, NULL)
 
-ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, 6, "%hd");
-ZFCP_DEFINE_DIAG_SFP_ATTR(vcc, vcc, 5, "%hu");
-ZFCP_DEFINE_DIAG_SFP_ATTR(tx_bias, tx_bias, 5, "%hu");
-ZFCP_DEFINE_DIAG_SFP_ATTR(tx_power, tx_power, 5, "%hu");
-ZFCP_DEFINE_DIAG_SFP_ATTR(rx_power, rx_power, 5, "%hu");
-ZFCP_DEFINE_DIAG_SFP_ATTR(port_tx_type, sfp_flags.port_tx_type, 2, "%hu");
-ZFCP_DEFINE_DIAG_SFP_ATTR(optical_port, sfp_flags.optical_port, 1, "%hu");
-ZFCP_DEFINE_DIAG_SFP_ATTR(sfp_invalid, sfp_flags.sfp_invalid, 1, "%hu");
-ZFCP_DEFINE_DIAG_SFP_ATTR(connector_type, sfp_flags.connector_type, 1, "%hu");
-ZFCP_DEFINE_DIAG_SFP_ATTR(fec_active, sfp_flags.fec_active, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, "%hd");
+ZFCP_DEFINE_DIAG_SFP_ATTR(vcc, vcc, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(tx_bias, tx_bias, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(tx_power, tx_power, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(rx_power, rx_power, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(port_tx_type, sfp_flags.port_tx_type, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(optical_port, sfp_flags.optical_port, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(sfp_invalid, sfp_flags.sfp_invalid, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(connector_type, sfp_flags.connector_type, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(fec_active, sfp_flags.fec_active, "%hu");
 
 static struct attribute *zfcp_sysfs_diag_attrs[] = {
 	&dev_attr_adapter_diag_sfp_temperature.attr,
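
Two details in the zfcp conversions above go beyond the mechanical swap.
First, the hand-computed scnprintf() bounds ("5 + 2", "(_prtsize) + 2" and
the ceil(log(2^64 - 1) / log(10)) comment) can be dropped entirely, because
sysfs_emit() always bounds output at PAGE_SIZE. Second, multi-part output
moves to sysfs_emit_at(), which takes an explicit offset into the same
buffer. A minimal sketch of the append pattern, relying on the same includes
as the sketch further up (the string is illustrative; the real code formats
the algorithm list with a driver helper):

    static ssize_t algorithms_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
    {
            int len;

            len = sysfs_emit(buf, "%s", "DH DHCHAP");   /* first part */
            len += sysfs_emit_at(buf, len, "\n");       /* append at offset */
            return len;
    }
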
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 62eca9419ad7..21fa7ac849e5 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -58,6 +58,8 @@ struct virtio_ccw_device {
 	struct virtio_device vdev;
 	__u8 config[VIRTIO_CCW_CONFIG_SIZE];
 	struct ccw_device *cdev;
+	/* we make cdev->dev.dma_parms point to this */
+	struct device_dma_parameters dma_parms;
 	__u32 curr_io;
 	int err;
 	unsigned int revision; /* Transport revision */
@@ -1303,6 +1305,7 @@ static int virtio_ccw_offline(struct ccw_device *cdev)
 	unregister_virtio_device(&vcdev->vdev);
 	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 	dev_set_drvdata(&cdev->dev, NULL);
+	cdev->dev.dma_parms = NULL;
 	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 	return 0;
 }
@@ -1366,6 +1369,7 @@ static int virtio_ccw_online(struct ccw_device *cdev)
 	}
 	vcdev->vdev.dev.parent = &cdev->dev;
 	vcdev->cdev = cdev;
+	cdev->dev.dma_parms = &vcdev->dma_parms;
 	vcdev->dma_area = ccw_device_dma_zalloc(vcdev->cdev,
 						sizeof(*vcdev->dma_area),
 						&vcdev->dma_area_addr);
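
The dma_parms wiring above matters because the generic DMA segment-limit
helpers store into dev->dma_parms and fail when it is NULL; giving the ccw
device backing storage inside struct virtio_ccw_device lets segment limits
be applied to it. A minimal sketch of the dependency (function name and
limit are illustrative, not from the driver):

    #include <linux/dma-mapping.h>
    #include <linux/limits.h>

    /* dma_set_max_seg_size() writes dev->dma_parms->max_segment_size and
     * returns -EIO when dev->dma_parms is NULL, so backing storage must
     * be wired up first, as virtio_ccw_online() now does.
     */
    static int example_setup_dma(struct device *dev,
                                 struct device_dma_parameters *parms)
    {
            dev->dma_parms = parms;         /* e.g. &vcdev->dma_parms */
            return dma_set_max_seg_size(dev, UINT_MAX); /* illustrative */
    }
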
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 42a48ac763ee..2eb747311bfd 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -122,7 +122,7 @@ config VIRTIO_BALLOON
 
 config VIRTIO_MEM
 	tristate "Virtio mem driver"
-	depends on X86_64 || ARM64 || RISCV
+	depends on X86_64 || ARM64 || RISCV || S390
 	depends on VIRTIO
 	depends on MEMORY_HOTPLUG
 	depends on MEMORY_HOTREMOVE
@@ -132,11 +132,11 @@ config VIRTIO_MEM
 	 This driver provides access to virtio-mem paravirtualized memory
 	 devices, allowing to hotplug and hotunplug memory.
 
-	 This driver currently only supports x86-64 and arm64. Although it
-	 should compile on other architectures that implement memory
-	 hot(un)plug, architecture-specific and/or common
-	 code changes may be required for virtio-mem, kdump and kexec to work as
-	 expected.
+	 This driver currently supports x86-64, arm64, riscv and s390.
+	 Although it should compile on other architectures that implement
+	 memory hot(un)plug, architecture-specific and/or common
+	 code changes may be required for virtio-mem, kdump and kexec to
+	 work as expected.
 
 	 If unsure, say M.
 
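With s390 added to the architecture list, an s390 guest kernel can enable
the driver like any other supported architecture. A minimal config fragment
covering the dependencies visible in this hunk (further prerequisites may
apply on a given kernel version):

    CONFIG_VIRTIO=y
    CONFIG_MEMORY_HOTPLUG=y
    CONFIG_MEMORY_HOTREMOVE=y
    CONFIG_VIRTIO_MEM=m
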
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7312ae7c3cc5..fcad505e7c8b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1905,7 +1905,7 @@ config STRICT_DEVMEM
 	bool "Filter access to /dev/mem"
 	depends on MMU && DEVMEM
 	depends on ARCH_HAS_DEVMEM_IS_ALLOWED || GENERIC_LIB_DEVMEM_IS_ALLOWED
-	default y if PPC || X86 || ARM64
+	default y if PPC || X86 || ARM64 || S390
 	help
 	  If this option is disabled, you allow userspace (root) access to all
 	  of memory, including kernel and userspace memory. Accidental