ALSA: memalloc: Don't fall back for SG-buffer with IOMMU

When the non-contiguous page allocation for an SG buffer fails, the
memalloc helper tries to fall back to the old page allocation
methods.  This, however, results in bogus page addresses when an
IOMMU is enabled.  Usually the fallback allocation fails as well in
such a case, but occasionally it succeeds and then hits a bad access.
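
For reference, the pre-patch control flow in snd_dma_noncontig_alloc()
looked roughly like this (a condensed sketch of the code removed in the
diff below, not the literal source):

  sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
                                DEFAULT_GFP, 0);
  if (!sgt) {
  #ifdef CONFIG_SND_DMA_SGBUF
          /* switch to a *_SG_FALLBACK type and retry with the old page
           * allocator -- the part that yields bogus addresses with IOMMU */
          return snd_dma_sg_fallback_alloc(dmab, size);
  #else
          return NULL;
  #endif
  }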

The fallback was meant for the non-IOMMU case, and since an error
from dma_alloc_noncontiguous() with an IOMMU essentially implies a
fatal memory allocation failure, we should return the error straight
away without falling back.  This avoids corner cases like the above.
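
In other words, the new check uses get_dma_ops() from
<linux/dma-map-ops.h>: when it returns non-NULL, the device is mapped
through custom DMA ops (e.g. an IOMMU) and the legacy SG fallback must
not be used.  A minimal sketch of that condition, with a hypothetical
helper name used for illustration only:

  /* hypothetical helper, not part of the patch */
  static bool snd_dma_sg_fallback_allowed(struct device *dev)
  {
          return !get_dma_ops(dev);  /* direct DMA mapping only */
  }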

The patch also renames the local "dma_ops" table with a snd_ prefix
to avoid a name conflict.

Fixes: a8d302a0b7 ("ALSA: memalloc: Revive x86-specific WC page allocations again")
Reported-by: Kai Vehmanen <kai.vehmanen@linux.intel.com>
Reviewed-by: Kai Vehmanen <kai.vehmanen@linux.intel.com>
Link: https://lore.kernel.org/r/alpine.DEB.2.22.394.2211041541090.3532114@eliteleevi.tm.intel.com
Link: https://lore.kernel.org/r/20221110132216.30605-1-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Takashi Iwai 2022-11-10 14:22:16 +01:00
parent bf990c1023
commit 9736a32513

sound/core/memalloc.c

@@ -9,6 +9,7 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
 #include <linux/genalloc.h>
 #include <linux/highmem.h>
 #include <linux/vmalloc.h>
@@ -541,19 +542,20 @@ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
 	struct sg_table *sgt;
 	void *p;
 
-	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
-				      DEFAULT_GFP, 0);
-	if (!sgt) {
 #ifdef CONFIG_SND_DMA_SGBUF
+	if (!get_dma_ops(dmab->dev.dev)) {
 		if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
 			dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
 		else
 			dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
 		return snd_dma_sg_fallback_alloc(dmab, size);
-#else
-		return NULL;
-#endif
 	}
+#endif
+
+	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
+				      DEFAULT_GFP, 0);
+	if (!sgt)
+		return NULL;
 
 	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
 					    sg_dma_address(sgt->sgl));
@@ -857,7 +859,7 @@ static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
 /*
  * Entry points
  */
-static const struct snd_malloc_ops *dma_ops[] = {
+static const struct snd_malloc_ops *snd_dma_ops[] = {
 	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
 	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
 #ifdef CONFIG_HAS_DMA
@@ -883,7 +885,7 @@ static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
 	if (WARN_ON_ONCE(!dmab))
 		return NULL;
 	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
-			 dmab->dev.type >= ARRAY_SIZE(dma_ops)))
+			 dmab->dev.type >= ARRAY_SIZE(snd_dma_ops)))
 		return NULL;
-	return dma_ops[dmab->dev.type];
+	return snd_dma_ops[dmab->dev.type];
 }
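
From a driver's point of view the allocation API is unchanged; the
difference is that a dma_alloc_noncontiguous() failure on a device
behind an IOMMU is now propagated to the caller instead of being
retried via the fallback.  A minimal, hypothetical caller-side sketch
(the function name is invented for illustration):

  #include <linux/device.h>
  #include <sound/memalloc.h>

  /* Allocate a 64 KiB SG buffer; with this patch an allocation failure
   * on an IOMMU-backed device comes back as an error here directly. */
  static int example_alloc_sg(struct device *dev, struct snd_dma_buffer *dmab)
  {
          return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, dev,
                                     64 * 1024, dmab);
  }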