/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved.
 * Copyright (C) 2016 T-Platforms. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved.
 * Copyright (C) 2016 T-Platforms. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of AMD Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * AMD PCIe NTB Linux driver
 *
 * Contact Information:
 * Xiangliang Yu <Xiangliang.Yu@amd.com>
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>

#include "ntb_hw_amd.h"

#define NTB_NAME	"ntb_hw_amd"
#define NTB_DESC	"AMD(R) PCI-E Non-Transparent Bridge Driver"
#define NTB_VER		"1.0"

MODULE_DESCRIPTION(NTB_DESC);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("AMD Inc.");

static const struct file_operations amd_ntb_debugfs_info;
static struct dentry *debugfs_dir;

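/*
 * Map a memory-window index to its backing BAR. The window-to-BAR
 * mapping is device specific: with mw_idx = 1 (e.g. device 145b),
 * windows 0..2 land on BARs 1, 2 and 4, while with mw_idx = 2
 * (e.g. device 148b), windows 0..1 land on BARs 2 and 4 (see the
 * dev_data[] table at the bottom of this file).
 */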
static int ndev_mw_to_bar(struct amd_ntb_dev *ndev, int idx)
{
	if (idx < 0 || idx > ndev->mw_count)
		return -EINVAL;

	return ndev->dev_data->mw_idx << idx;
}

static int amd_ntb_mw_count(struct ntb_dev *ntb, int pidx)
{
	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	return ntb_ndev(ntb)->mw_count;
}

static int amd_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
				resource_size_t *addr_align,
				resource_size_t *size_align,
				resource_size_t *size_max)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	int bar;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	if (addr_align)
		*addr_align = SZ_4K;

	if (size_align)
		*size_align = 1;

	if (size_max)
		*size_max = pci_resource_len(ndev->ntb.pdev, bar);

	return 0;
}

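/*
 * Program the peer's incoming address translation for one memory
 * window: write the XLAT register, then the limit register, reading
 * each value back to verify the write stuck. BAR1 is special-cased
 * below because its limit register is only 32 bits wide
 * (writel/readl), while the 64-bit BAR23/BAR45 windows take full
 * 64-bit accesses (write64/read64).
 */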
static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
				dma_addr_t addr, resource_size_t size)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long xlat_reg, limit_reg = 0;
	resource_size_t mw_size;
	void __iomem *mmio, *peer_mmio;
	u64 base_addr, limit, reg_val;
	int bar;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	mw_size = pci_resource_len(ntb->pdev, bar);

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	peer_mmio = ndev->peer_mmio;

	base_addr = pci_resource_start(ntb->pdev, bar);

	if (bar != 1) {
		xlat_reg = AMD_BAR23XLAT_OFFSET + ((bar - 2) << 2);
		limit_reg = AMD_BAR23LMT_OFFSET + ((bar - 2) << 2);

		/* Set the limit if supported */
		limit = size;

		/* set and verify setting the translation address */
		write64(addr, peer_mmio + xlat_reg);
		reg_val = read64(peer_mmio + xlat_reg);
		if (reg_val != addr) {
			write64(0, peer_mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		write64(limit, peer_mmio + limit_reg);
		reg_val = read64(peer_mmio + limit_reg);
		if (reg_val != limit) {
			write64(base_addr, mmio + limit_reg);
			write64(0, peer_mmio + xlat_reg);
			return -EIO;
		}
	} else {
		xlat_reg = AMD_BAR1XLAT_OFFSET;
		limit_reg = AMD_BAR1LMT_OFFSET;

		/* Set the limit if supported */
		limit = size;

		/* set and verify setting the translation address */
		write64(addr, peer_mmio + xlat_reg);
		reg_val = read64(peer_mmio + xlat_reg);
		if (reg_val != addr) {
			write64(0, peer_mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		writel(limit, peer_mmio + limit_reg);
		reg_val = readl(peer_mmio + limit_reg);
		if (reg_val != limit) {
			writel(base_addr, mmio + limit_reg);
			writel(0, peer_mmio + xlat_reg);
			return -EIO;
		}
	}

	return 0;
}

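/*
 * Cache the PCIe link speed/width for this side of the bridge. A
 * dword read at PCI_EXP_LNKCTL returns Link Control in the low 16
 * bits and Link Status (which sits at PCI_EXP_LNKCTL + 2) in the high
 * 16 bits; NTB_LNK_STA_SPEED()/NTB_LNK_STA_WIDTH() decode the cached
 * value later. On the secondary side the registers are read from the
 * switch upstream port, two bridges above the NTB endpoint.
 */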
static int amd_ntb_get_link_status(struct amd_ntb_dev *ndev)
{
	struct pci_dev *pdev = NULL;
	struct pci_dev *pci_swds = NULL;
	struct pci_dev *pci_swus = NULL;
	u32 stat;
	int rc;

	if (ndev->ntb.topo == NTB_TOPO_SEC) {
		/* Locate the pointer to Downstream Switch for this device */
		pci_swds = pci_upstream_bridge(ndev->ntb.pdev);
		if (pci_swds) {
			/*
			 * Locate the pointer to Upstream Switch for
			 * the Downstream Switch.
			 */
			pci_swus = pci_upstream_bridge(pci_swds);
			if (pci_swus) {
				rc = pcie_capability_read_dword(pci_swus,
								PCI_EXP_LNKCTL,
								&stat);
				if (rc)
					return 0;
			} else {
				return 0;
			}
		} else {
			return 0;
		}
	} else if (ndev->ntb.topo == NTB_TOPO_PRI) {
		/*
		 * For NTB primary, we simply read the Link Status and control
		 * register of the NTB device itself.
		 */
		pdev = ndev->ntb.pdev;
		rc = pcie_capability_read_dword(pdev, PCI_EXP_LNKCTL, &stat);
		if (rc)
			return 0;
	} else {
		/* Catch all for everything else */
		return 0;
	}

	ndev->lnk_sta = stat;

	return 1;
}

static int amd_link_is_up(struct amd_ntb_dev *ndev)
{
	int ret;

	/*
	 * We consider the link to be up under two conditions:
	 *
	 * - When a link-up event is received. This is indicated by
	 *   AMD_LINK_UP_EVENT set in peer_sta.
	 * - When the drivers on both sides of the link have been loaded.
	 *   This is indicated by bit 1 being set in the peer
	 *   SIDEINFO register.
	 *
	 * This function should return 1 when the latter of the above
	 * two conditions is true.
	 *
	 * Now consider the sequence of events - Link-Up event occurs,
	 * then the peer side driver loads. In this case, we would have
	 * received LINK_UP event and bit 1 of peer SIDEINFO is also
	 * set. What happens now if the link goes down? Bit 1 of
	 * peer SIDEINFO remains set, but LINK_DOWN bit is set in
	 * peer_sta. So we should return 0 from this function. Not only
	 * that, we clear bit 1 of peer SIDEINFO to 0, since the peer
	 * side driver did not even get a chance to clear it before
	 * the link went down. This can happen in the case of surprise
	 * link removal.
	 *
	 * LINK_UP event will always occur before the peer side driver
	 * gets loaded the very first time. So there can be a case when
	 * the LINK_UP event has occurred, but the peer side driver hasn't
	 * yet loaded. We return 0 in that case.
	 *
	 * There is also a special case when the primary side driver is
	 * unloaded and then loaded again. Since there is no change in
	 * the status of NTB secondary in this case, there is no Link-Up
	 * or Link-Down notification received. We recognize this condition
	 * with peer_sta being set to 0.
	 *
	 * If bit 1 of peer SIDEINFO register is not set, then we
	 * simply return 0 irrespective of the link up or down status
	 * set in peer_sta.
	 */
	ret = amd_poll_link(ndev);
	if (ret) {
		/*
		 * We need to check the below only for NTB primary. For NTB
		 * secondary, simply checking the result of PSIDE_INFO
		 * register will suffice.
		 */
		if (ndev->ntb.topo == NTB_TOPO_PRI) {
			if ((ndev->peer_sta & AMD_LINK_UP_EVENT) ||
			    (ndev->peer_sta == 0))
				return ret;
			else if (ndev->peer_sta & AMD_LINK_DOWN_EVENT) {
				/* Clear peer sideinfo register */
				amd_clear_side_info_reg(ndev, true);

				return 0;
			}
		} else { /* NTB_TOPO_SEC */
			return ret;
		}
	}

	return 0;
}

static u64 amd_ntb_link_is_up(struct ntb_dev *ntb,
			      enum ntb_speed *speed,
			      enum ntb_width *width)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	int ret = 0;

	if (amd_link_is_up(ndev)) {
		if (speed)
			*speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
		if (width)
			*width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);

		dev_dbg(&ntb->pdev->dev, "link is up.\n");

		ret = 1;
	} else {
		if (speed)
			*speed = NTB_SPEED_NONE;
		if (width)
			*width = NTB_WIDTH_NONE;

		dev_dbg(&ntb->pdev->dev, "link is down.\n");
	}

	return ret;
}

static int amd_ntb_link_enable(struct ntb_dev *ntb,
			       enum ntb_speed max_speed,
			       enum ntb_width max_width)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;

	/* Enable event interrupt */
	ndev->int_mask &= ~AMD_EVENT_INTMASK;
	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;
	dev_dbg(&ntb->pdev->dev, "Enabling Link.\n");

	return 0;
}

static int amd_ntb_link_disable(struct ntb_dev *ntb)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;

	/* Disable event interrupt */
	ndev->int_mask |= AMD_EVENT_INTMASK;
	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;
	dev_dbg(&ntb->pdev->dev, "Disabling Link.\n");

	return 0;
}

static int amd_ntb_peer_mw_count(struct ntb_dev *ntb)
{
	/* The same as for inbound MWs */
	return ntb_ndev(ntb)->mw_count;
}

static int amd_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
				    phys_addr_t *base, resource_size_t *size)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	int bar;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	if (base)
		*base = pci_resource_start(ndev->ntb.pdev, bar);

	if (size)
		*size = pci_resource_len(ndev->ntb.pdev, bar);

	return 0;
}

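/*
 * Doorbell accessors. Only the low 16 bits of the doorbell status,
 * mask and request registers are used (AMD_DB_CNT doorbells), hence
 * the readw()/writew() accesses and u16 casts below, even though the
 * NTB core tracks doorbells in a 64-bit mask.
 */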
static u64 amd_ntb_db_valid_mask(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->db_valid_mask;
}

static int amd_ntb_db_vector_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->db_count;
}

static u64 amd_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);

	if (db_vector < 0 || db_vector > ndev->db_count)
		return 0;

	return ntb_ndev(ntb)->db_valid_mask & (1ULL << db_vector);
}

static u64 amd_ntb_db_read(struct ntb_dev *ntb)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;

	return (u64)readw(mmio + AMD_DBSTAT_OFFSET);
}

static int amd_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;

	writew((u16)db_bits, mmio + AMD_DBSTAT_OFFSET);

	return 0;
}

static int amd_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	unsigned long flags;

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, flags);
	ndev->db_mask |= db_bits;
	writew((u16)ndev->db_mask, mmio + AMD_DBMASK_OFFSET);
	spin_unlock_irqrestore(&ndev->db_mask_lock, flags);

	return 0;
}

static int amd_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	unsigned long flags;

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, flags);
	ndev->db_mask &= ~db_bits;
	writew((u16)ndev->db_mask, mmio + AMD_DBMASK_OFFSET);
	spin_unlock_irqrestore(&ndev->db_mask_lock, flags);

	return 0;
}

static int amd_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;

	writew((u16)db_bits, mmio + AMD_DBREQ_OFFSET);

	return 0;
}

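/*
 * Scratchpad accessors. The scratchpad file is split in half between
 * the two sides: self_spad and peer_spad (chosen in amd_init_ntb())
 * give the byte offset of each half, and scratchpad idx lives at
 * AMD_SPAD_OFFSET + offset + (idx << 2).
 */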
static int amd_ntb_spad_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->spad_count;
}

static u32 amd_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 offset;

	if (idx < 0 || idx >= ndev->spad_count)
		return 0;

	offset = ndev->self_spad + (idx << 2);
	return readl(mmio + AMD_SPAD_OFFSET + offset);
}

static int amd_ntb_spad_write(struct ntb_dev *ntb,
			      int idx, u32 val)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 offset;

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	offset = ndev->self_spad + (idx << 2);
	writel(val, mmio + AMD_SPAD_OFFSET + offset);

	return 0;
}

static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 offset;

	if (sidx < 0 || sidx >= ndev->spad_count)
		return -EINVAL;

	offset = ndev->peer_spad + (sidx << 2);
	return readl(mmio + AMD_SPAD_OFFSET + offset);
}

static int amd_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
				   int sidx, u32 val)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 offset;

	if (sidx < 0 || sidx >= ndev->spad_count)
		return -EINVAL;

	offset = ndev->peer_spad + (sidx << 2);
	writel(val, mmio + AMD_SPAD_OFFSET + offset);

	return 0;
}

static const struct ntb_dev_ops amd_ntb_ops = {
	.mw_count		= amd_ntb_mw_count,
	.mw_get_align		= amd_ntb_mw_get_align,
	.mw_set_trans		= amd_ntb_mw_set_trans,
	.peer_mw_count		= amd_ntb_peer_mw_count,
	.peer_mw_get_addr	= amd_ntb_peer_mw_get_addr,
	.link_is_up		= amd_ntb_link_is_up,
	.link_enable		= amd_ntb_link_enable,
	.link_disable		= amd_ntb_link_disable,
	.db_valid_mask		= amd_ntb_db_valid_mask,
	.db_vector_count	= amd_ntb_db_vector_count,
	.db_vector_mask		= amd_ntb_db_vector_mask,
	.db_read		= amd_ntb_db_read,
	.db_clear		= amd_ntb_db_clear,
	.db_set_mask		= amd_ntb_db_set_mask,
	.db_clear_mask		= amd_ntb_db_clear_mask,
	.peer_db_set		= amd_ntb_peer_db_set,
	.spad_count		= amd_ntb_spad_count,
	.spad_read		= amd_ntb_spad_read,
	.spad_write		= amd_ntb_spad_write,
	.peer_spad_read		= amd_ntb_peer_spad_read,
	.peer_spad_write	= amd_ntb_peer_spad_write,
};

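/* Acknowledge an SMU event by setting its bit in the SMU ack register. */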
static void amd_ack_smu(struct amd_ntb_dev *ndev, u32 bit)
{
	void __iomem *mmio = ndev->self_mmio;
	int reg;

	reg = readl(mmio + AMD_SMUACK_OFFSET);
	reg |= bit;
	writel(reg, mmio + AMD_SMUACK_OFFSET);
}

static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
{
	void __iomem *mmio = ndev->self_mmio;
	struct device *dev = &ndev->ntb.pdev->dev;
	u32 status;

	status = readl(mmio + AMD_INTSTAT_OFFSET);
	if (!(status & AMD_EVENT_INTMASK))
		return;

	dev_dbg(dev, "status = 0x%x and vec = %d\n", status, vec);

	status &= AMD_EVENT_INTMASK;
	switch (status) {
	case AMD_PEER_FLUSH_EVENT:
		ndev->peer_sta |= AMD_PEER_FLUSH_EVENT;
		dev_info(dev, "Flush is done.\n");
		break;
	case AMD_PEER_RESET_EVENT:
	case AMD_LINK_DOWN_EVENT:
		ndev->peer_sta |= status;
		if (status == AMD_LINK_DOWN_EVENT)
			ndev->peer_sta &= ~AMD_LINK_UP_EVENT;

		amd_ack_smu(ndev, status);

		/* link down first */
		ntb_link_event(&ndev->ntb);
		/* polling peer status */
		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);

		break;
	case AMD_PEER_D3_EVENT:
	case AMD_PEER_PMETO_EVENT:
	case AMD_LINK_UP_EVENT:
		ndev->peer_sta |= status;
		if (status == AMD_LINK_UP_EVENT)
			ndev->peer_sta &= ~AMD_LINK_DOWN_EVENT;
		else if (status == AMD_PEER_D3_EVENT)
			ndev->peer_sta &= ~AMD_PEER_D0_EVENT;

		amd_ack_smu(ndev, status);

		/* link down */
		ntb_link_event(&ndev->ntb);

		break;
	case AMD_PEER_D0_EVENT:
		mmio = ndev->peer_mmio;
		status = readl(mmio + AMD_PMESTAT_OFFSET);
		/* check if this is WAKEUP event */
		if (status & 0x1)
			dev_info(dev, "Wakeup is done.\n");

		ndev->peer_sta |= AMD_PEER_D0_EVENT;
		ndev->peer_sta &= ~AMD_PEER_D3_EVENT;
		amd_ack_smu(ndev, AMD_PEER_D0_EVENT);

		/* start a timer to poll link status */
		schedule_delayed_work(&ndev->hb_timer,
				      AMD_LINK_HB_TIMEOUT);
		break;
	default:
		dev_info(dev, "event status = 0x%x.\n", status);
		break;
	}

	/* Clear the interrupt status */
	writel(status, mmio + AMD_INTSTAT_OFFSET);
}

static void amd_handle_db_event(struct amd_ntb_dev *ndev, int vec)
{
	struct device *dev = &ndev->ntb.pdev->dev;
	u64 status;

	status = amd_ntb_db_read(&ndev->ntb);

	dev_dbg(dev, "status = 0x%llx and vec = %d\n", status, vec);

	/*
	 * Since we reserved the highest-order DB bit for signaling the
	 * peer about special events, it is the only status bit we need
	 * to be concerned with here.
	 */
	if (status & BIT(ndev->db_last_bit)) {
		ntb_db_clear(&ndev->ntb, BIT(ndev->db_last_bit));
		/* send link down event notification */
		ntb_link_event(&ndev->ntb);

		/*
		 * If we are here, that means the peer has signalled a special
		 * event which notifies that the peer driver has been
		 * un-loaded for some reason. Since there is a chance that the
		 * peer will load its driver again sometime, we schedule link
		 * polling routine.
		 */
		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);
	}
}

static irqreturn_t ndev_interrupt(struct amd_ntb_dev *ndev, int vec)
{
	dev_dbg(&ndev->ntb.pdev->dev, "vec %d\n", vec);

	if (vec > (AMD_DB_CNT - 1) || (ndev->msix_vec_count == 1))
		amd_handle_event(ndev, vec);

	if (vec < AMD_DB_CNT) {
		amd_handle_db_event(ndev, vec);
		ntb_db_event(&ndev->ntb, vec);
	}

	return IRQ_HANDLED;
}

static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
	struct amd_ntb_vec *nvec = dev;

	return ndev_interrupt(nvec->ndev, nvec->num);
}

static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
	struct amd_ntb_dev *ndev = dev;

	return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq);
}

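/*
 * Interrupt setup tries three schemes in order: MSI-X with one vector
 * per doorbell, a single MSI vector, and finally a shared legacy INTx
 * line. With a single vector, db_count and msix_vec_count both drop
 * to 1 and ndev_interrupt() checks that one vector for doorbell and
 * event work alike.
 */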
static int ndev_init_isr(struct amd_ntb_dev *ndev,
			 int msix_min, int msix_max)
{
	struct pci_dev *pdev;
	int rc, i, msix_count, node;

	pdev = ndev->ntb.pdev;

	node = dev_to_node(&pdev->dev);

	ndev->db_mask = ndev->db_valid_mask;

	/* Try to set up msix irq */
	ndev->vec = kcalloc_node(msix_max, sizeof(*ndev->vec),
				 GFP_KERNEL, node);
	if (!ndev->vec)
		goto err_msix_vec_alloc;

	ndev->msix = kcalloc_node(msix_max, sizeof(*ndev->msix),
				  GFP_KERNEL, node);
	if (!ndev->msix)
		goto err_msix_alloc;

	for (i = 0; i < msix_max; ++i)
		ndev->msix[i].entry = i;

	msix_count = pci_enable_msix_range(pdev, ndev->msix,
					   msix_min, msix_max);
	if (msix_count < 0)
		goto err_msix_enable;

	/* NOTE: Disable MSIX if msix count is less than 16 because of
	 * hardware limitation.
	 */
	if (msix_count < msix_min) {
		pci_disable_msix(pdev);
		goto err_msix_enable;
	}

	for (i = 0; i < msix_count; ++i) {
		ndev->vec[i].ndev = ndev;
		ndev->vec[i].num = i;
		rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
				 "ndev_vec_isr", &ndev->vec[i]);
		if (rc)
			goto err_msix_request;
	}

	dev_dbg(&pdev->dev, "Using msix interrupts\n");
	ndev->db_count = msix_min;
	ndev->msix_vec_count = msix_max;
	return 0;

err_msix_request:
	while (i-- > 0)
		free_irq(ndev->msix[i].vector, &ndev->vec[i]);
	pci_disable_msix(pdev);
err_msix_enable:
	kfree(ndev->msix);
err_msix_alloc:
	kfree(ndev->vec);
err_msix_vec_alloc:
	ndev->msix = NULL;
	ndev->vec = NULL;

	/* Try to set up msi irq */
	rc = pci_enable_msi(pdev);
	if (rc)
		goto err_msi_enable;

	rc = request_irq(pdev->irq, ndev_irq_isr, 0,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_msi_request;

	dev_dbg(&pdev->dev, "Using msi interrupts\n");
	ndev->db_count = 1;
	ndev->msix_vec_count = 1;
	return 0;

err_msi_request:
	pci_disable_msi(pdev);
err_msi_enable:

	/* Try to set up intx irq */
	pci_intx(pdev, 1);

	rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_intx_request;

	dev_dbg(&pdev->dev, "Using intx interrupts\n");
	ndev->db_count = 1;
	ndev->msix_vec_count = 1;
	return 0;

err_intx_request:
	return rc;
}

static void ndev_deinit_isr(struct amd_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	void __iomem *mmio = ndev->self_mmio;
	int i;

	pdev = ndev->ntb.pdev;

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	writel(ndev->db_mask, mmio + AMD_DBMASK_OFFSET);

	if (ndev->msix) {
		i = ndev->msix_vec_count;
		while (i--)
			free_irq(ndev->msix[i].vector, &ndev->vec[i]);
		pci_disable_msix(pdev);
		kfree(ndev->msix);
		kfree(ndev->vec);
	} else {
		free_irq(pdev->irq, ndev);
		if (pci_dev_msi_enabled(pdev))
			pci_disable_msi(pdev);
		else
			pci_intx(pdev, 0);
	}
}

static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
				 size_t count, loff_t *offp)
{
	struct amd_ntb_dev *ndev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; } u;

	ndev = filp->private_data;
	mmio = ndev->self_mmio;

	buf_size = min(count, 0x800ul);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

	if (!amd_link_is_up(ndev)) {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	} else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "MSIX Vector Count -\t%u\n", ndev->msix_vec_count);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);

	u.v32 = readl(ndev->self_mmio + AMD_DBMASK_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t\t%#06x\n", u.v32);

	u.v32 = readl(mmio + AMD_DBSTAT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Bell -\t\t\t%#06x\n", u.v32);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = read64(mmio + AMD_BAR1XLAT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT1 -\t\t%#018llx\n", u.v64);

	u.v64 = read64(ndev->self_mmio + AMD_BAR23XLAT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT23 -\t\t%#018llx\n", u.v64);

	u.v64 = read64(ndev->self_mmio + AMD_BAR45XLAT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT45 -\t\t%#018llx\n", u.v64);

	u.v32 = readl(mmio + AMD_BAR1LMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "LMT1 -\t\t\t%#06x\n", u.v32);

	u.v64 = read64(ndev->self_mmio + AMD_BAR23LMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "LMT23 -\t\t\t%#018llx\n", u.v64);

	u.v64 = read64(ndev->self_mmio + AMD_BAR45LMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "LMT45 -\t\t\t%#018llx\n", u.v64);

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}

static void ndev_init_debugfs(struct amd_ntb_dev *ndev)
{
	if (!debugfs_dir) {
		ndev->debugfs_dir = NULL;
		ndev->debugfs_info = NULL;
	} else {
		ndev->debugfs_dir =
			debugfs_create_dir(pci_name(ndev->ntb.pdev),
					   debugfs_dir);
		if (!ndev->debugfs_dir)
			ndev->debugfs_info = NULL;
		else
			ndev->debugfs_info =
				debugfs_create_file("info", S_IRUSR,
						    ndev->debugfs_dir, ndev,
						    &amd_ntb_debugfs_info);
	}
}

static void ndev_deinit_debugfs(struct amd_ntb_dev *ndev)
{
	debugfs_remove_recursive(ndev->debugfs_dir);
}

static inline void ndev_init_struct(struct amd_ntb_dev *ndev,
				    struct pci_dev *pdev)
{
	ndev->ntb.pdev = pdev;
	ndev->ntb.topo = NTB_TOPO_NONE;
	ndev->ntb.ops = &amd_ntb_ops;
	ndev->int_mask = AMD_EVENT_INTMASK;
	spin_lock_init(&ndev->db_mask_lock);
}

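/*
 * Poll the peer's SIDEINFO register for the SIDE_READY bit (set by
 * the peer driver when it loads) and refresh the cached PCIe link
 * status. Returns non-zero when the peer reports ready.
 */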
static int amd_poll_link(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->peer_mmio;
	u32 reg;

	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
	reg &= AMD_SIDE_READY;

	dev_dbg(&ndev->ntb.pdev->dev, "%s: reg_val = 0x%x.\n", __func__, reg);

	ndev->cntl_sta = reg;

	amd_ntb_get_link_status(ndev);

	return ndev->cntl_sta;
}

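/*
 * Link heartbeat: notify the NTB core whenever the peer reports
 * ready, and keep rescheduling ourselves until the link is fully up.
 */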
static void amd_link_hb(struct work_struct *work)
{
	struct amd_ntb_dev *ndev = hb_ndev(work);

	if (amd_poll_link(ndev))
		ntb_link_event(&ndev->ntb);

	if (!amd_link_is_up(ndev))
		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);
}

static int amd_init_isr(struct amd_ntb_dev *ndev)
{
	return ndev_init_isr(ndev, AMD_DB_CNT, AMD_MSIX_VECTOR_CNT);
}

static void amd_set_side_info_reg(struct amd_ntb_dev *ndev, bool peer)
{
	void __iomem *mmio = NULL;
	unsigned int reg;

	if (peer)
		mmio = ndev->peer_mmio;
	else
		mmio = ndev->self_mmio;

	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
	if (!(reg & AMD_SIDE_READY)) {
		reg |= AMD_SIDE_READY;
		writel(reg, mmio + AMD_SIDEINFO_OFFSET);
	}
}

static void amd_clear_side_info_reg(struct amd_ntb_dev *ndev, bool peer)
{
	void __iomem *mmio = NULL;
	unsigned int reg;

	if (peer)
		mmio = ndev->peer_mmio;
	else
		mmio = ndev->self_mmio;

	reg = readl(mmio + AMD_SIDEINFO_OFFSET);
	if (reg & AMD_SIDE_READY) {
		reg &= ~AMD_SIDE_READY;
		writel(reg, mmio + AMD_SIDEINFO_OFFSET);
		/* read back to flush the posted write */
		readl(mmio + AMD_SIDEINFO_OFFSET);
	}
}

static void amd_init_side_info(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->self_mmio;
	u32 ntb_ctl;

	amd_set_side_info_reg(ndev, false);

	ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
	ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
	writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
}

static void amd_deinit_side_info(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->self_mmio;
	u32 ntb_ctl;

	amd_clear_side_info_reg(ndev, false);

	ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
	ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
	writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);
}

static int amd_init_ntb(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->self_mmio;

	ndev->mw_count = ndev->dev_data->mw_count;
	ndev->spad_count = AMD_SPADS_CNT;
	ndev->db_count = AMD_DB_CNT;

	switch (ndev->ntb.topo) {
	case NTB_TOPO_PRI:
	case NTB_TOPO_SEC:
		ndev->spad_count >>= 1;
		if (ndev->ntb.topo == NTB_TOPO_PRI) {
			ndev->self_spad = 0;
			ndev->peer_spad = 0x20;
		} else {
			ndev->self_spad = 0x20;
			ndev->peer_spad = 0;
		}

		INIT_DELAYED_WORK(&ndev->hb_timer, amd_link_hb);
		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);

		break;
	default:
		dev_err(&ndev->ntb.pdev->dev,
			"AMD NTB does not support B2B mode.\n");
		return -EINVAL;
	}

	/* Mask event interrupts */
	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);

	return 0;
}

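/*
 * The side bit in the SIDEINFO register distinguishes the two ends of
 * the link: if AMD_SIDE_MASK is set, this device is the secondary.
 */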
static enum ntb_topo amd_get_topo(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->self_mmio;
	u32 info;

	info = readl(mmio + AMD_SIDEINFO_OFFSET);
	if (info & AMD_SIDE_MASK)
		return NTB_TOPO_SEC;
	else
		return NTB_TOPO_PRI;
}

static int amd_init_dev(struct amd_ntb_dev *ndev)
{
	void __iomem *mmio = ndev->self_mmio;
	struct pci_dev *pdev;
	int rc = 0;

	pdev = ndev->ntb.pdev;

	ndev->ntb.topo = amd_get_topo(ndev);
	dev_dbg(&pdev->dev, "AMD NTB topo is %s\n",
		ntb_topo_string(ndev->ntb.topo));

	rc = amd_init_ntb(ndev);
	if (rc)
		return rc;

	rc = amd_init_isr(ndev);
	if (rc) {
		dev_err(&pdev->dev, "failed to init isr.\n");
		return rc;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
	/*
	 * We reserve the highest order bit of the DB register which will
	 * be used to notify peer when the driver on this side is being
	 * un-loaded.
	 */
	ndev->db_last_bit =
			find_last_bit((unsigned long *)&ndev->db_valid_mask,
				      hweight64(ndev->db_valid_mask));
	writew((u16)~BIT(ndev->db_last_bit), mmio + AMD_DBMASK_OFFSET);
	/*
	 * Since now there is one less bit to account for, the DB count
	 * and DB mask should be adjusted accordingly.
	 */
	ndev->db_count -= 1;
	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	/* Enable Link-Up and Link-Down event interrupts */
	ndev->int_mask &= ~(AMD_LINK_UP_EVENT | AMD_LINK_DOWN_EVENT);
	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);

	return 0;
}

static void amd_deinit_dev(struct amd_ntb_dev *ndev)
{
	cancel_delayed_work_sync(&ndev->hb_timer);

	ndev_deinit_isr(ndev);
}

static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
			    struct pci_dev *pdev)
{
	int rc;

	pci_set_drvdata(pdev, ndev);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_pci_enable;

	rc = pci_request_regions(pdev, NTB_NAME);
	if (rc)
		goto err_pci_regions;

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(&pdev->dev, "Cannot DMA highmem\n");
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
	}

	ndev->self_mmio = pci_iomap(pdev, 0, 0);
	if (!ndev->self_mmio) {
		rc = -EIO;
		goto err_dma_mask;
	}
	ndev->peer_mmio = ndev->self_mmio + AMD_PEER_OFFSET;

	return 0;

err_dma_mask:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_pci_regions:
	pci_disable_device(pdev);
err_pci_enable:
	pci_set_drvdata(pdev, NULL);
	return rc;
}

static void amd_ntb_deinit_pci(struct amd_ntb_dev *ndev)
{
	struct pci_dev *pdev = ndev->ntb.pdev;

	pci_iounmap(pdev, ndev->self_mmio);

	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

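/*
 * Probe path. The matched PCI id carries a pointer to the per-device
 * ntb_dev_data (memory-window count and base window index) in its
 * driver_data, set up in amd_ntb_pci_tbl below.
 */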
static int amd_ntb_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct amd_ntb_dev *ndev;
	int rc, node;

	node = dev_to_node(&pdev->dev);

	ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
	if (!ndev) {
		rc = -ENOMEM;
		goto err_ndev;
	}

	ndev->dev_data = (struct ntb_dev_data *)id->driver_data;

	ndev_init_struct(ndev, pdev);

	rc = amd_ntb_init_pci(ndev, pdev);
	if (rc)
		goto err_init_pci;

	rc = amd_init_dev(ndev);
	if (rc)
		goto err_init_dev;

	/* write side info */
	amd_init_side_info(ndev);

	amd_poll_link(ndev);

	ndev_init_debugfs(ndev);

	rc = ntb_register_device(&ndev->ntb);
	if (rc)
		goto err_register;

	dev_info(&pdev->dev, "NTB device registered.\n");

	return 0;

err_register:
	ndev_deinit_debugfs(ndev);
	amd_deinit_dev(ndev);
err_init_dev:
	amd_ntb_deinit_pci(ndev);
err_init_pci:
	kfree(ndev);
err_ndev:
	return rc;
}

static void amd_ntb_pci_remove(struct pci_dev *pdev)
{
	struct amd_ntb_dev *ndev = pci_get_drvdata(pdev);

	/*
	 * Clear the READY bit in SIDEINFO register before sending DB event
	 * to the peer. This will make sure that when the peer handles the
	 * DB event, it correctly reads this bit as being 0.
	 */
	amd_deinit_side_info(ndev);
	ntb_peer_db_set(&ndev->ntb, BIT_ULL(ndev->db_last_bit));
	ntb_unregister_device(&ndev->ntb);
	ndev_deinit_debugfs(ndev);
	amd_deinit_dev(ndev);
	amd_ntb_deinit_pci(ndev);
	kfree(ndev);
}

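/*
 * Shutdown mirrors remove, but first posts an explicit link-down
 * event so the peer stops using the shared windows before this side
 * goes away.
 */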
static void amd_ntb_pci_shutdown(struct pci_dev *pdev)
{
	struct amd_ntb_dev *ndev = pci_get_drvdata(pdev);

	/* Send link down notification */
	ntb_link_event(&ndev->ntb);

	amd_deinit_side_info(ndev);
	ntb_peer_db_set(&ndev->ntb, BIT_ULL(ndev->db_last_bit));
	ntb_unregister_device(&ndev->ntb);
	ndev_deinit_debugfs(ndev);
	amd_deinit_dev(ndev);
	amd_ntb_deinit_pci(ndev);
	kfree(ndev);
}

static const struct file_operations amd_ntb_debugfs_info = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ndev_debugfs_read,
};

static const struct ntb_dev_data dev_data[] = {
	{ /* for device 145b */
		.mw_count = 3,
		.mw_idx = 1,
	},
	{ /* for device 148b */
		.mw_count = 2,
		.mw_idx = 2,
	},
};

static const struct pci_device_id amd_ntb_pci_tbl[] = {
	{ PCI_VDEVICE(AMD, 0x145b), (kernel_ulong_t)&dev_data[0] },
	{ PCI_VDEVICE(AMD, 0x148b), (kernel_ulong_t)&dev_data[1] },
	{ PCI_VDEVICE(HYGON, 0x145b), (kernel_ulong_t)&dev_data[0] },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, amd_ntb_pci_tbl);

static struct pci_driver amd_ntb_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= amd_ntb_pci_tbl,
	.probe		= amd_ntb_pci_probe,
	.remove		= amd_ntb_pci_remove,
	.shutdown	= amd_ntb_pci_shutdown,
};

static int __init amd_ntb_pci_driver_init(void)
{
	pr_info("%s %s\n", NTB_DESC, NTB_VER);

	if (debugfs_initialized())
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	return pci_register_driver(&amd_ntb_pci_driver);
}
module_init(amd_ntb_pci_driver_init);

static void __exit amd_ntb_pci_driver_exit(void)
{
	pci_unregister_driver(&amd_ntb_pci_driver);
	debugfs_remove_recursive(debugfs_dir);
}
module_exit(amd_ntb_pci_driver_exit);