Browse Source

[DM/Feature] Basic PCI/PCIe (Peripheral Component Interconnect Express) bus

PCI/PCIe offers better performance and wider device support, such as
NVMe, GPUs, and powerful NICs (e.g. RDMA). PCI/PCIe access can be
controlled by an IOMMU, which makes virtualization and userspace drivers
safer. PCI/PCIe devices support hot plugging with no SoC design
modifications required, and PCI/PCIe is now popular on embedded SoCs.
We provide a simple framework to support them.

Feature Lists:
1. PCI INTx: the INT[A-D] pin IRQs for legacy PCI, working with the platform PIC.
2. MSI/MSI-X: message-write IRQs for PCIe, working with the platform's PIC.
3. PME: we only support D0, D1, D2, D3HOT, and D3COLD, initialized by the framework.
4. Endpoint: a simple EP framework for PCI FPGA or NTB functions.
5. OFW: we currently only support OFW-based SoCs; ACPI support may be added in the future.

Host controller:
1. Common PCI host controller on ECAM.
2. Generic PCI host controller on ECAM.

Signed-off-by: GuEe-GUI <2991707448@qq.com>
GuEe-GUI 11 months ago
parent
commit
2168ed8e7d

+ 1 - 0
components/drivers/Kconfig

@@ -23,6 +23,7 @@ rsource "hwcrypto/Kconfig"
 rsource "wlan/Kconfig"
 rsource "virtio/Kconfig"
 rsource "ofw/Kconfig"
+rsource "pci/Kconfig"
 rsource "pic/Kconfig"
 rsource "pin/Kconfig"
 rsource "pinctrl/Kconfig"

+ 604 - 0
components/drivers/include/drivers/pci.h

@@ -0,0 +1,604 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-08-25     GuEe-GUI     first version
+ */
+
+#ifndef __PCI_H__
+#define __PCI_H__
+
+#include <rtdef.h>
+#include <bitmap.h>
+#include <ioremap.h>
+#include <drivers/ofw.h>
+#include <drivers/pic.h>
+#include <drivers/core/dm.h>
+#include <drivers/core/driver.h>
+
+#include "../../pci/pci_ids.h"
+#include "../../pci/pci_regs.h"
+
+#define RT_PCI_INTX_PIN_MAX         4
+#define RT_PCI_BAR_NR_MAX           6
+#define RT_PCI_DEVICE_MAX           32
+#define RT_PCI_FUNCTION_MAX         8
+
+#define RT_PCI_FIND_CAP_TTL         48
+
+/*
+ * The PCI interface treats multi-function devices as independent
+ * devices.  The slot/function address of each device is encoded
+ * in a single byte as follows:
+ *
+ *  7:3 = slot
+ *  2:0 = function
+ */
+#define RT_PCI_DEVID(bus, devfn)    ((((rt_uint16_t)(bus)) << 8) | (devfn))
+#define RT_PCI_DEVFN(slot, func)    ((((slot) & 0x1f) << 3) | ((func) & 0x07))
+#define RT_PCI_SLOT(devfn)          (((devfn) >> 3) & 0x1f)
+#define RT_PCI_FUNC(devfn)          ((devfn) & 0x07)
+
+#define PCIE_LINK_STATE_L0S         RT_BIT(0)
+#define PCIE_LINK_STATE_L1          RT_BIT(1)
+#define PCIE_LINK_STATE_CLKPM       RT_BIT(2)
+#define PCIE_LINK_STATE_L1_1        RT_BIT(3)
+#define PCIE_LINK_STATE_L1_2        RT_BIT(4)
+#define PCIE_LINK_STATE_L1_1_PCIPM  RT_BIT(5)
+#define PCIE_LINK_STATE_L1_2_PCIPM  RT_BIT(6)
+#define PCIE_LINK_STATE_ALL         \
+( \
+    PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | \
+    PCIE_LINK_STATE_CLKPM | \
+    PCIE_LINK_STATE_L1_1 | PCIE_LINK_STATE_L1_2 | \
+    PCIE_LINK_STATE_L1_1_PCIPM | PCIE_LINK_STATE_L1_2_PCIPM \
+)
+
+struct rt_pci_bus_region
+{
+    rt_uint64_t phy_addr;
+    rt_uint64_t cpu_addr;
+    rt_uint64_t size;
+
+    rt_uint64_t bus_start;
+
+#define PCI_BUS_REGION_F_NONE       0xffffffff    /* PCI no memory */
+#define PCI_BUS_REGION_F_MEM        0x00000000    /* PCI memory space */
+#define PCI_BUS_REGION_F_IO         0x00000001    /* PCI IO space */
+#define PCI_BUS_REGION_F_PREFETCH   0x00000008    /* Prefetchable PCI memory */
+    rt_ubase_t flags;
+};
+
+struct rt_pci_bus_resource
+{
+    rt_ubase_t base;
+    rt_size_t size;
+
+    rt_ubase_t flags;
+};
+
+/*
+ * PCI topology:
+ *
+ *   +-----+-----+         +-------------+   PCI Bus 0  +------------+ PCI Bus 1
+ *   | RAM | CPU |---------| Host Bridge |--------+-----| PCI Bridge |-----+
+ *   +-----+-----+         +-------------+        |     +------------+     |    +-------------+
+ *                                                |                        +----| End Point 2 |
+ *  +-------------+         +-------------+       |     +-------------+    |    +-------------+
+ *  | End Point 5 |----+    | End Point 0 |-------+     | End Point 3 |----+
+ *  +-------------+    |    +-------------+       |     +-------------+    |
+ *                     |                          |                        |
+ *  +-------------+    |    +-------------+       |     +-------------+    |    +-------------+
+ *  | End Point 6 |----+----|  ISA Bridge |-------+-----| End Point 1 |    +----| End Point 4 |
+ *  +-------------+         +-------------+       |     +-------------+         +-------------+
+ *                                                |
+ *         +------+         +----------------+    |
+ *         | Port |---------| CardBus Bridge |----+
+ *         +------+         +----------------+
+ */
+
+struct rt_pci_bus;
+
+struct rt_pci_device_id
+{
+#define PCI_ANY_ID   (~0)
+#define RT_PCI_DEVICE_ID(vend, dev) \
+    .vendor = (vend),               \
+    .device = (dev),                \
+    .subsystem_vendor = PCI_ANY_ID, \
+    .subsystem_device = PCI_ANY_ID
+
+#define RT_PCI_DEVICE_CLASS(dev_class, dev_class_mask)  \
+    .vendor = PCI_ANY_ID, .device = PCI_ANY_ID,         \
+    .subsystem_vendor = PCI_ANY_ID,                     \
+    .subsystem_device = PCI_ANY_ID,                     \
+    .class = (dev_class), .class_mask = (dev_class_mask),
+
+    rt_uint32_t vendor, device;     /* Vendor and device ID or PCI_ANY_ID */
+    rt_uint32_t subsystem_vendor;   /* Subsystem ID's or PCI_ANY_ID */
+    rt_uint32_t subsystem_device;   /* Subsystem ID's or PCI_ANY_ID */
+    rt_uint32_t class, class_mask;  /* (class, subclass, prog-if) triplet */
+
+    const void *data;
+};
+
+struct rt_pci_device
+{
+    struct rt_device parent;
+    const char *name;
+
+    rt_list_t list;
+    struct rt_pci_bus *bus;
+    struct rt_pci_bus *subbus;      /* In PCI-to-PCI bridge, 'End Point' or 'Port' is NULL */
+
+    const struct rt_pci_device_id *id;
+
+    rt_uint32_t devfn;              /* Encoded device & function index */
+    rt_uint16_t vendor;
+    rt_uint16_t device;
+    rt_uint16_t subsystem_vendor;
+    rt_uint16_t subsystem_device;
+    rt_uint32_t class;              /* 3 bytes: (base, sub, prog-if) */
+    rt_uint8_t revision;
+    rt_uint8_t hdr_type;
+    rt_uint8_t max_latency;
+    rt_uint8_t min_grantl;
+    rt_uint8_t int_pin;
+    rt_uint8_t int_line;
+    rt_uint16_t exp_flags;
+    rt_uint32_t cfg_size;
+
+    void *sysdata;
+
+    int irq;
+    rt_uint8_t pin;
+    struct rt_pic *intx_pic;
+
+    struct rt_pci_bus_resource resource[RT_PCI_BAR_NR_MAX];
+
+    rt_uint8_t pme_cap;
+    rt_uint8_t msi_cap;
+    rt_uint8_t msix_cap;
+    rt_uint8_t pcie_cap;
+
+    rt_uint8_t busmaster:1;             /* Is the bus master */
+    rt_uint8_t multi_function:1;        /* Multi-function device */
+    rt_uint8_t ari_enabled:1;           /* Alternative Routing-ID Interpretation */
+    rt_uint8_t no_msi:1;                /* May not use MSI */
+    rt_uint8_t no_64bit_msi:1;          /* May only use 32-bit MSIs */
+    rt_uint8_t msi_enabled:1;           /* MSI enable */
+    rt_uint8_t msix_enabled:1;          /* MSIx enable */
+    rt_uint8_t broken_intx_masking:1;   /* INTx masking can't be used */
+    rt_uint8_t pme_support:5;           /* Bitmask of states from which PME# can be generated */
+
+#ifdef RT_PCI_MSI
+    void *msix_base;
+    struct rt_pic *msi_pic;
+    rt_list_t msi_desc_nodes;
+    struct rt_spinlock msi_lock;
+#endif
+};
+
+struct rt_pci_host_bridge
+{
+    struct rt_device parent;
+
+    rt_uint32_t domain;
+
+    struct rt_pci_bus *root_bus;
+    const struct rt_pci_ops *ops;
+    const struct rt_pci_ops *child_ops;
+
+    rt_uint32_t bus_range[2];
+    rt_size_t bus_regions_nr;
+    struct rt_pci_bus_region *bus_regions;
+    rt_size_t dma_regions_nr;
+    struct rt_pci_bus_region *dma_regions;
+
+    rt_uint8_t (*irq_slot)(struct rt_pci_device *pdev, rt_uint8_t *pinp);
+    int (*irq_map)(struct rt_pci_device *pdev, rt_uint8_t slot, rt_uint8_t pin);
+
+    void *sysdata;
+    rt_uint8_t priv[0];
+};
+#define rt_device_to_pci_host_bridge(dev) rt_container_of(dev, struct rt_pci_host_bridge, parent)
+
+struct rt_pci_ops
+{
+    rt_err_t (*add)(struct rt_pci_bus *bus);
+    rt_err_t (*remove)(struct rt_pci_bus *bus);
+
+    void *(*map)(struct rt_pci_bus *bus, rt_uint32_t devfn, int reg);
+
+    rt_err_t (*read)(struct rt_pci_bus *bus,
+            rt_uint32_t devfn, int reg, int width, rt_uint32_t *value);
+    rt_err_t (*write)(struct rt_pci_bus *bus,
+            rt_uint32_t devfn, int reg, int width, rt_uint32_t value);
+};
+
+struct rt_pci_bus
+{
+    rt_list_t list;
+    rt_list_t children_nodes;
+    rt_list_t devices_nodes;
+    struct rt_pci_bus *parent;
+
+    union
+    {
+        /* In PCI-to-PCI bridge, parent is not NULL */
+        struct rt_pci_device *self;
+        /* In Host bridge, this is Root bus ('PCI Bus 0') */
+        struct rt_pci_host_bridge *host_bridge;
+    };
+
+    const struct rt_pci_ops *ops;
+
+    char name[48];
+    char number;
+    struct rt_spinlock lock;
+
+    void *sysdata;
+};
+
+struct rt_pci_driver
+{
+    struct rt_driver parent;
+
+    const char *name;
+    const struct rt_pci_device_id *ids;
+
+    rt_err_t (*probe)(struct rt_pci_device *pdev);
+    rt_err_t (*remove)(struct rt_pci_device *pdev);
+    rt_err_t (*shutdown)(struct rt_pci_device *pdev);
+};
+
+struct rt_pci_msix_entry
+{
+    int irq;
+    int index;
+};
+
+enum rt_pci_power
+{
+    RT_PCI_D0,
+    RT_PCI_D1,
+    RT_PCI_D2,
+    RT_PCI_D3HOT,
+    RT_PCI_D3COLD,
+
+    RT_PCI_PME_MAX,
+};
+
+void rt_pci_pme_init(struct rt_pci_device *pdev);
+void rt_pci_pme_active(struct rt_pci_device *pdev, rt_bool_t enable);
+rt_err_t rt_pci_enable_wake(struct rt_pci_device *pci_dev,
+        enum rt_pci_power state, rt_bool_t enable);
+rt_inline rt_bool_t rt_pci_pme_capable(struct rt_pci_device *pdev,
+        enum rt_pci_power state)
+{
+    if (!pdev->pme_cap)
+    {
+        return RT_FALSE;
+    }
+
+    return !!(pdev->pme_support & (1 << state));
+}
+
+void rt_pci_msi_init(struct rt_pci_device *pdev);
+void rt_pci_msix_init(struct rt_pci_device *pdev);
+
+void rt_pci_set_master(struct rt_pci_device *pdev);
+void rt_pci_clear_master(struct rt_pci_device *pdev);
+
+struct rt_pci_host_bridge *rt_pci_host_bridge_alloc(rt_size_t priv_size);
+rt_err_t rt_pci_host_bridge_free(struct rt_pci_host_bridge *);
+rt_err_t rt_pci_host_bridge_init(struct rt_pci_host_bridge *host_bridge);
+rt_err_t rt_pci_host_bridge_probe(struct rt_pci_host_bridge *host_bridge);
+
+struct rt_pci_device *rt_pci_alloc_device(struct rt_pci_bus *bus);
+struct rt_pci_device *rt_pci_scan_single_device(struct rt_pci_bus *bus, rt_uint32_t devfn);
+rt_err_t rt_pci_setup_device(struct rt_pci_device *pdev);
+rt_size_t rt_pci_scan_slot(struct rt_pci_bus *bus, rt_uint32_t devfn);
+rt_uint32_t rt_pci_scan_child_buses(struct rt_pci_bus *bus, rt_size_t buses);
+rt_uint32_t rt_pci_scan_child_bus(struct rt_pci_bus *bus);
+
+rt_err_t rt_pci_host_bridge_register(struct rt_pci_host_bridge *host_bridge);
+rt_err_t rt_pci_scan_root_bus_bridge(struct rt_pci_host_bridge *host_bridge);
+
+rt_err_t rt_pci_host_bridge_remove(struct rt_pci_host_bridge *host_bridge);
+rt_err_t rt_pci_bus_remove(struct rt_pci_bus *bus);
+rt_err_t rt_pci_device_remove(struct rt_pci_device *pdev);
+
+rt_uint32_t rt_pci_domain(struct rt_pci_device *pdev);
+
+rt_uint8_t rt_pci_bus_find_capability(struct rt_pci_bus *bus, rt_uint32_t devfn, int cap);
+rt_uint8_t rt_pci_find_capability(struct rt_pci_device *pdev, int cap);
+rt_uint8_t rt_pci_find_next_capability(struct rt_pci_device *pdev, rt_uint8_t pos, int cap);
+
+rt_uint16_t rt_pci_find_ext_capability(struct rt_pci_device *pdev, int cap);
+rt_uint16_t rt_pci_find_ext_next_capability(struct rt_pci_device *pdev, rt_uint16_t pos, int cap);
+
+struct rt_pci_bus *rt_pci_find_root_bus(struct rt_pci_bus *bus);
+struct rt_pci_host_bridge *rt_pci_find_host_bridge(struct rt_pci_bus *bus);
+
+rt_inline rt_uint16_t rt_pci_dev_id(struct rt_pci_device *pdev)
+{
+    return RT_PCI_DEVID(pdev->bus->number, pdev->devfn);
+}
+
+rt_inline rt_bool_t rt_pci_is_root_bus(struct rt_pci_bus *bus)
+{
+    return bus->parent ? RT_FALSE : RT_TRUE;
+}
+
+rt_inline rt_bool_t rt_pci_is_bridge(struct rt_pci_device *pdev)
+{
+    return pdev->hdr_type == PCIM_HDRTYPE_BRIDGE ||
+            pdev->hdr_type == PCIM_HDRTYPE_CARDBUS;
+}
+
+rt_inline rt_bool_t rt_pci_is_pcie(struct rt_pci_device *pdev)
+{
+    return !!pdev->pcie_cap;
+}
+
+#define rt_pci_foreach_bridge(pdev, bus) \
+    rt_list_for_each_entry(pdev, &bus->devices_nodes, list) \
+        if (rt_pci_is_bridge(pdev))
+
+rt_err_t rt_pci_bus_read_config_u8(struct rt_pci_bus *bus,
+        rt_uint32_t devfn, int pos, rt_uint8_t *value);
+rt_err_t rt_pci_bus_read_config_u16(struct rt_pci_bus *bus,
+        rt_uint32_t devfn, int pos, rt_uint16_t *value);
+rt_err_t rt_pci_bus_read_config_u32(struct rt_pci_bus *bus,
+        rt_uint32_t devfn, int pos, rt_uint32_t *value);
+
+rt_err_t rt_pci_bus_write_config_u8(struct rt_pci_bus *bus,
+        rt_uint32_t devfn, int reg, rt_uint8_t value);
+rt_err_t rt_pci_bus_write_config_u16(struct rt_pci_bus *bus,
+        rt_uint32_t devfn, int reg, rt_uint16_t value);
+rt_err_t rt_pci_bus_write_config_u32(struct rt_pci_bus *bus,
+        rt_uint32_t devfn, int reg, rt_uint32_t value);
+
+rt_err_t rt_pci_bus_read_config_uxx(struct rt_pci_bus *bus,
+        rt_uint32_t devfn, int reg, int width, rt_uint32_t *value);
+rt_err_t rt_pci_bus_write_config_uxx(struct rt_pci_bus *bus,
+        rt_uint32_t devfn, int reg, int width, rt_uint32_t value);
+
+rt_err_t rt_pci_bus_read_config_generic_u32(struct rt_pci_bus *bus,
+        rt_uint32_t devfn, int reg, int width, rt_uint32_t *value);
+rt_err_t rt_pci_bus_write_config_generic_u32(struct rt_pci_bus *bus,
+        rt_uint32_t devfn, int reg, int width, rt_uint32_t value);
+
+rt_inline rt_err_t rt_pci_read_config_u8(const struct rt_pci_device *pdev,
+        int reg, rt_uint8_t *value)
+{
+    return rt_pci_bus_read_config_u8(pdev->bus, pdev->devfn, reg, value);
+}
+
+rt_inline rt_err_t rt_pci_read_config_u16(const struct rt_pci_device *pdev,
+        int reg, rt_uint16_t *value)
+{
+    return rt_pci_bus_read_config_u16(pdev->bus, pdev->devfn, reg, value);
+}
+
+rt_inline rt_err_t rt_pci_read_config_u32(const struct rt_pci_device *pdev,
+        int reg, rt_uint32_t *value)
+{
+    return rt_pci_bus_read_config_u32(pdev->bus, pdev->devfn, reg, value);
+}
+
+rt_inline rt_err_t rt_pci_write_config_u8(const struct rt_pci_device *pdev,
+        int reg, rt_uint8_t value)
+{
+    return rt_pci_bus_write_config_u8(pdev->bus, pdev->devfn, reg, value);
+}
+
+rt_inline rt_err_t rt_pci_write_config_u16(const struct rt_pci_device *pdev,
+        int reg, rt_uint16_t value)
+{
+    return rt_pci_bus_write_config_u16(pdev->bus, pdev->devfn, reg, value);
+}
+
+rt_inline rt_err_t rt_pci_write_config_u32(const struct rt_pci_device *pdev,
+        int reg, rt_uint32_t value)
+{
+    return rt_pci_bus_write_config_u32(pdev->bus, pdev->devfn, reg, value);
+}
+
+#ifdef RT_USING_OFW
+int rt_pci_ofw_irq_parse_and_map(struct rt_pci_device *pdev,
+        rt_uint8_t slot, rt_uint8_t pin);
+
+rt_err_t rt_pci_ofw_parse_ranges(struct rt_ofw_node *dev_np,
+        struct rt_pci_host_bridge *host_bridge);
+
+rt_err_t rt_pci_ofw_host_bridge_init(struct rt_ofw_node *dev_np,
+        struct rt_pci_host_bridge *host_bridge);
+
+rt_err_t rt_pci_ofw_bus_init(struct rt_pci_bus *bus);
+rt_err_t rt_pci_ofw_bus_free(struct rt_pci_bus *bus);
+rt_err_t rt_pci_ofw_device_init(struct rt_pci_device *pdev);
+rt_err_t rt_pci_ofw_device_free(struct rt_pci_device *pdev);
+#else
+rt_inline rt_err_t rt_pci_ofw_host_bridge_init(struct rt_ofw_node *dev_np,
+        struct rt_pci_host_bridge *host_bridge)
+{
+    return RT_EOK;
+}
+rt_inline rt_err_t rt_pci_ofw_bus_init(struct rt_pci_bus *bus)
+{
+    return RT_EOK;
+}
+rt_inline rt_err_t rt_pci_ofw_bus_free(struct rt_pci_bus *bus)
+{
+    return RT_EOK;
+}
+rt_inline rt_err_t rt_pci_ofw_device_init(struct rt_pci_device *pdev)
+{
+    return RT_EOK;
+}
+rt_inline rt_err_t rt_pci_ofw_device_free(struct rt_pci_device *pdev)
+{
+    return RT_EOK;
+}
+rt_inline int rt_pci_ofw_irq_parse_and_map(struct rt_pci_device *pdev,
+        rt_uint8_t slot, rt_uint8_t pin)
+{
+    return -1;
+}
+rt_inline rt_err_t rt_pci_ofw_parse_ranges(struct rt_ofw_node *dev_np,
+        struct rt_pci_host_bridge *host_bridge)
+{
+    return -RT_ENOSYS;
+}
+#endif /* RT_USING_OFW */
+
+rt_inline void *rt_pci_iomap(struct rt_pci_device *pdev, int bar_idx)
+{
+    struct rt_pci_bus_resource *res = &pdev->resource[bar_idx];
+
+    RT_ASSERT(bar_idx < RT_ARRAY_SIZE(pdev->resource));
+
+    return rt_ioremap((void *)res->base, res->size);
+}
+
+rt_uint8_t rt_pci_irq_intx(struct rt_pci_device *pdev, rt_uint8_t pin);
+rt_uint8_t rt_pci_irq_slot(struct rt_pci_device *pdev, rt_uint8_t *pinp);
+
+void rt_pci_assign_irq(struct rt_pci_device *pdev);
+
+void rt_pci_intx(struct rt_pci_device *pdev, rt_bool_t enable);
+rt_bool_t rt_pci_check_and_mask_intx(struct rt_pci_device *pdev);
+rt_bool_t rt_pci_check_and_unmask_intx(struct rt_pci_device *pdev);
+
+void rt_pci_irq_mask(struct rt_pci_device *pdev);
+void rt_pci_irq_unmask(struct rt_pci_device *pdev);
+
+#define RT_PCI_IRQ_F_LEGACY     RT_BIT(0)   /* Allow legacy interrupts */
+#define RT_PCI_IRQ_F_MSI        RT_BIT(1)   /* Allow MSI interrupts */
+#define RT_PCI_IRQ_F_MSIX       RT_BIT(2)   /* Allow MSI-X interrupts */
+#define RT_PCI_IRQ_F_AFFINITY   RT_BIT(3)   /* Auto-assign affinity */
+#define RT_PCI_IRQ_F_ALL_TYPES  (RT_PCI_IRQ_F_LEGACY | RT_PCI_IRQ_F_MSI | RT_PCI_IRQ_F_MSIX)
+
+#ifdef RT_PCI_MSI
+rt_ssize_t rt_pci_alloc_vector(struct rt_pci_device *pdev, int min, int max,
+        rt_uint32_t flags, RT_IRQ_AFFINITY_DECLARE((*affinities)));
+void rt_pci_free_vector(struct rt_pci_device *pdev);
+
+rt_ssize_t rt_pci_msi_vector_count(struct rt_pci_device *pdev);
+rt_err_t rt_pci_msi_disable(struct rt_pci_device *pdev);
+rt_ssize_t rt_pci_msi_enable_range_affinity(struct rt_pci_device *pdev,
+        int min, int max, RT_IRQ_AFFINITY_DECLARE((*affinities)));
+
+rt_ssize_t rt_pci_msix_vector_count(struct rt_pci_device *pdev);
+rt_err_t rt_pci_msix_disable(struct rt_pci_device *pdev);
+rt_ssize_t rt_pci_msix_enable_range_affinity(struct rt_pci_device *pdev,
+        struct rt_pci_msix_entry *entries, int min, int max,
+        RT_IRQ_AFFINITY_DECLARE((*affinities)));
+#else
+rt_inline rt_ssize_t rt_pci_alloc_vector(struct rt_pci_device *pdev, int min, int max,
+        rt_uint32_t flags, RT_IRQ_AFFINITY_DECLARE((*affinities)))
+{
+    return -RT_ENOSYS;
+}
+
+rt_inline void rt_pci_free_vector(struct rt_pci_device *pdev)
+{
+    return;
+}
+
+rt_inline rt_ssize_t rt_pci_msi_vector_count(struct rt_pci_device *pdev)
+{
+    return 0;
+}
+
+rt_inline rt_err_t rt_pci_msi_disable(struct rt_pci_device *pdev)
+{
+    return RT_EOK;
+}
+
+rt_inline rt_ssize_t rt_pci_msi_enable_range_affinity(struct rt_pci_device *pdev,
+        int min, int max, RT_IRQ_AFFINITY_DECLARE((*affinities)))
+{
+    return -RT_ENOSYS;
+}
+
+rt_inline rt_ssize_t rt_pci_msix_vector_count(struct rt_pci_device *pdev)
+{
+    return 0;
+}
+
+rt_inline rt_err_t rt_pci_msix_disable(struct rt_pci_device *pdev)
+{
+    return RT_EOK;
+}
+
+rt_inline rt_ssize_t rt_pci_msix_enable_range_affinity(struct rt_pci_device *pdev,
+        struct rt_pci_msix_entry *entries, int min, int max,
+        RT_IRQ_AFFINITY_DECLARE((*affinities)))
+{
+    return -RT_ENOSYS;
+}
+#endif /* RT_PCI_MSI */
+
+rt_inline void rt_pci_msix_entry_index_linear(struct rt_pci_msix_entry *entries,
+        rt_size_t nvectors)
+{
+    for (int i = 0; i < nvectors; ++i)
+    {
+        entries[i].index = i;
+    }
+}
+
+rt_inline rt_ssize_t rt_pci_msi_enable_range(struct rt_pci_device *pdev,
+        int min, int max)
+{
+    return rt_pci_msi_enable_range_affinity(pdev, min, max, RT_NULL);
+}
+
+rt_inline rt_err_t rt_pci_msi_enable(struct rt_pci_device *pdev)
+{
+    rt_ssize_t res = rt_pci_msi_enable_range(pdev, 1, 1);
+    return res == 1 ? res : RT_EOK;
+}
+
+rt_inline rt_ssize_t rt_pci_msix_enable_range(struct rt_pci_device *pdev,
+        struct rt_pci_msix_entry *entries, int min, int max)
+{
+    return rt_pci_msix_enable_range_affinity(pdev, entries, min, max, RT_NULL);
+}
+
+rt_inline rt_ssize_t rt_pci_msix_enable(struct rt_pci_device *pdev,
+        struct rt_pci_msix_entry *entries, int count)
+{
+    return rt_pci_msix_enable_range(pdev, entries, count, count);
+}
+
+rt_err_t rt_pci_region_setup(struct rt_pci_host_bridge *host_bridge);
+struct rt_pci_bus_region *rt_pci_region_alloc(struct rt_pci_host_bridge *host_bridge,
+        void **out_addr, rt_size_t size, rt_ubase_t flags, rt_bool_t mem64);
+
+rt_err_t rt_pci_device_alloc_resource(struct rt_pci_host_bridge *host_bridge,
+        struct rt_pci_device *pdev);
+
+void rt_pci_enum_device(struct rt_pci_bus *bus,
+        rt_bool_t (callback(struct rt_pci_device *, void *)), void *data);
+
+const struct rt_pci_device_id *rt_pci_match_id(struct rt_pci_device *pdev,
+        const struct rt_pci_device_id *id);
+
+const struct rt_pci_device_id *rt_pci_match_ids(struct rt_pci_device *pdev,
+        const struct rt_pci_device_id *ids);
+
+rt_err_t rt_pci_driver_register(struct rt_pci_driver *pdrv);
+rt_err_t rt_pci_device_register(struct rt_pci_device *pdev);
+
+#define RT_PCI_DRIVER_EXPORT(driver)    RT_DRIVER_EXPORT(driver, pci, BUILIN)
+
+extern struct rt_spinlock rt_pci_lock;
+
+#endif /* __PCI_H__ */

+ 179 - 0
components/drivers/include/drivers/pci_endpoint.h

@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-08-25     GuEe-GUI     first version
+ */
+
+#ifndef __PCI_ENDPOINT_H__
+#define __PCI_ENDPOINT_H__
+
+#include <drivers/pci.h>
+
+enum rt_pci_ep_pin
+{
+    RT_PCI_EP_PIN_UNKNOWN,
+    RT_PCI_EP_PIN_INTA,
+    RT_PCI_EP_PIN_INTB,
+    RT_PCI_EP_PIN_INTC,
+    RT_PCI_EP_PIN_INTD,
+};
+
+enum rt_pci_ep_irq
+{
+    RT_PCI_EP_IRQ_UNKNOWN,
+    RT_PCI_EP_IRQ_LEGACY,
+    RT_PCI_EP_IRQ_MSI,
+    RT_PCI_EP_IRQ_MSIX,
+};
+
+struct rt_pci_ep_header
+{
+    rt_uint16_t vendor;
+    rt_uint16_t device;
+    rt_uint8_t revision;
+    rt_uint8_t progif;
+    rt_uint8_t subclass;
+    rt_uint8_t class_code;
+    rt_uint8_t cache_line_size;
+    rt_uint16_t subsystem_vendor;
+    rt_uint16_t subsystem_device;
+
+    enum rt_pci_ep_pin intx;
+};
+
+struct rt_pci_ep_bar
+{
+    /* To PCI Bus */
+    struct rt_pci_bus_resource bus;
+    /* To CPU */
+    rt_ubase_t cpu_addr;
+};
+
+/*
+ * Type of MSI-X table, For more format detail,
+ * please read `components/drivers/include/drivers/pci_msi.h`
+ */
+struct rt_pci_ep_msix_tbl
+{
+    union
+    {
+        rt_uint64_t msg_addr;
+        struct
+        {
+            rt_uint32_t msg_addr_upper;
+            rt_uint32_t msg_addr_lower;
+        };
+    };
+    rt_uint32_t msg_data;
+    rt_uint32_t vector_ctrl;
+};
+
+struct rt_pci_ep_ops;
+
+struct rt_pci_ep
+{
+    rt_list_t list;
+    const char *name;
+
+    struct rt_ref ref;
+
+    const struct rt_device *rc_dev;
+    const struct rt_pci_ep_ops *ops;
+
+    rt_uint8_t max_functions;
+    RT_BITMAP_DECLARE(functions_map, 8);
+    rt_list_t epf_nodes;
+    struct rt_mutex lock;
+
+    void *priv;
+};
+
+struct rt_pci_epf
+{
+    rt_list_t list;
+    const char *name;
+
+    struct rt_pci_ep_header *header;
+    struct rt_pci_ep_bar bar[PCI_STD_NUM_BARS];
+
+    rt_uint8_t  msi_interrupts;
+    rt_uint16_t msix_interrupts;
+    rt_uint8_t func_no;
+
+    struct rt_pci_ep *ep;
+};
+
+struct rt_pci_ep_ops
+{
+    rt_err_t (*write_header)(struct rt_pci_ep *ep, rt_uint8_t func_no,
+            struct rt_pci_ep_header *hdr);
+
+    rt_err_t (*set_bar)(struct rt_pci_ep *ep, rt_uint8_t func_no,
+            struct rt_pci_ep_bar *bar, int bar_idx);
+    rt_err_t (*clear_bar)(struct rt_pci_ep *ep, rt_uint8_t func_no,
+            struct rt_pci_ep_bar *bar, int bar_idx);
+
+    rt_err_t (*map_addr)(struct rt_pci_ep *ep, rt_uint8_t func_no,
+            rt_ubase_t addr, rt_uint64_t pci_addr, rt_size_t size);
+    rt_err_t (*unmap_addr)(struct rt_pci_ep *ep, rt_uint8_t func_no, rt_ubase_t addr);
+
+    rt_err_t (*set_msi)(struct rt_pci_ep *ep, rt_uint8_t func_no,
+            unsigned irq_nr);
+    rt_err_t (*get_msi)(struct rt_pci_ep *ep, rt_uint8_t func_no,
+            unsigned *out_irq_nr);
+
+    rt_err_t (*set_msix)(struct rt_pci_ep *ep, rt_uint8_t func_no,
+            unsigned irq_nr, int bar_idx, rt_off_t offset);
+    rt_err_t (*get_msix)(struct rt_pci_ep *ep, rt_uint8_t func_no,
+            unsigned *out_irq_nr);
+
+    rt_err_t (*raise_irq)(struct rt_pci_ep *ep, rt_uint8_t func_no,
+            enum rt_pci_ep_irq type, unsigned irq);
+
+    rt_err_t (*start)(struct rt_pci_ep *ep);
+    rt_err_t (*stop)(struct rt_pci_ep *ep);
+};
+
+rt_err_t rt_pci_ep_write_header(struct rt_pci_ep *ep, rt_uint8_t func_no,
+        struct rt_pci_ep_header *hdr);
+
+rt_err_t rt_pci_ep_set_bar(struct rt_pci_ep *ep, rt_uint8_t func_no,
+        struct rt_pci_ep_bar *bar, int bar_idx);
+rt_err_t rt_pci_ep_clear_bar(struct rt_pci_ep *ep, rt_uint8_t func_no,
+        struct rt_pci_ep_bar *bar, int bar_idx);
+
+rt_err_t rt_pci_ep_map_addr(struct rt_pci_ep *ep, rt_uint8_t func_no,
+        rt_ubase_t addr, rt_uint64_t pci_addr, rt_size_t size);
+rt_err_t rt_pci_ep_unmap_addr(struct rt_pci_ep *ep, rt_uint8_t func_no,
+        rt_ubase_t addr);
+
+rt_err_t rt_pci_ep_set_msi(struct rt_pci_ep *ep, rt_uint8_t func_no,
+        unsigned irq_nr);
+rt_err_t rt_pci_ep_get_msi(struct rt_pci_ep *ep, rt_uint8_t func_no,
+        unsigned *out_irq_nr);
+
+rt_err_t rt_pci_ep_set_msix(struct rt_pci_ep *ep, rt_uint8_t func_no,
+        unsigned irq_nr, int bar_idx, rt_off_t offset);
+rt_err_t rt_pci_ep_get_msix(struct rt_pci_ep *ep, rt_uint8_t func_no,
+        unsigned *out_irq_nr);
+
+rt_err_t rt_pci_ep_raise_irq(struct rt_pci_ep *ep, rt_uint8_t func_no,
+        enum rt_pci_ep_irq type, unsigned irq);
+
+rt_err_t rt_pci_ep_start(struct rt_pci_ep *ep);
+rt_err_t rt_pci_ep_stop(struct rt_pci_ep *ep);
+
+rt_err_t rt_pci_ep_register(struct rt_pci_ep *ep);
+rt_err_t rt_pci_ep_unregister(struct rt_pci_ep *ep);
+
+rt_err_t rt_pci_ep_add_epf(struct rt_pci_ep *ep, struct rt_pci_epf *epf);
+rt_err_t rt_pci_ep_remove_epf(struct rt_pci_ep *ep, struct rt_pci_epf *epf);
+
+struct rt_pci_ep *rt_pci_ep_get(const char *name);
+void rt_pci_ep_put(struct rt_pci_ep *ep);
+
+#endif /* __PCI_ENDPOINT_H__ */

+ 189 - 0
components/drivers/include/drivers/pci_msi.h

@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-08-25     GuEe-GUI     first version
+ */
+
+#ifndef __PCI_MSI_H__
+#define __PCI_MSI_H__
+
+#include <drivers/pci.h>
+
+/*
+ * MSI Format:
+ *  T0: 32-bit Address
+ *  T1: 64-bit Address
+ *  T2: 32-bit Address with Per-Vector Masking
+ *  T3: 64-bit Address with Per-Vector Masking
+ *
+ *   31                      16  15              8 7             0
+ *  +---------------------------+-----------------+---------------+
+ *  |      Message Control      | Next Capability | Capability ID | DW0
+ *  |                           |     Pointer     |     (05h)     |
+ *  +---------------------------+-----------------+---------------+
+ *  |                    Message Address [31:0]                   | DW1
+ *  +-------------------------------------------------------------+
+ *  |                    Message Address [63:32]                  | DW2 (T1: only 64-bit)
+ *  +---------------------------+---------------------------------+
+ *  |         Reserved          |           Message Data          | DW3
+ *  +---------------------------+---------------------------------+
+ *  |                       Mask Bits                             | DW4 (T2/T3: only with Per-Vector Masking)
+ *  +-------------------------------------------------------------+
+ *  |                      Pending Bits                           | DW5 (T2/T3: only with Per-Vector Masking)
+ *  +-------------------------------------------------------------+
+ *
+ * MSI Message Control:
+ *
+ *   15                   9  8   7  6             4 3        1  0
+ *  +----------------------+---+---+---------------+----------+---+
+ *  |       Reserved       |   |   |               |          |   |
+ *  +----------------------+---+---+---------------+----------+---+
+ *                           ^   ^         ^             ^      ^
+ *                           |   |         |             |      |
+ *                           |   |         |             |      +---- MSI Enable (RW)
+ *                           |   |         |             +----------- Multiple Message Capable (RO, log2n, [n <= 5])
+ *                           |   |         +------------------------- Multiple Message Enable (RW, log2n, [n <= 5])
+ *                           |   +----------------------------------- 64-bit Address Capable
+ *                           +--------------------------------------- Per-Vector Masking Capable
+ */
+
+struct rt_pci_msi_conf
+{
+    rt_uint32_t mask;
+    rt_uint8_t mask_pos;
+    int default_irq;
+
+    struct
+    {
+        rt_uint8_t is_masking:1;
+        rt_uint8_t is_64bit:1;
+        rt_uint8_t multi_msg_max:3; /* log2 num of messages allocated */
+        rt_uint8_t multi_msg_use:3; /* log2 num of messages supported */
+    } cap;
+};
+
+/*
+ * MSI-X Format:
+ *
+ *   31                      16  15              8 7             0
+ *  +---------------------------+-----------------+---------------+
+ *  |      Message Control      | Next Capability | Capability ID | DW0
+ *  |                           |     Pointer     |     (11h)     |
+ *  +---------------------------+-----------------+---+-----------+
+ *  |                    MSI-X Table Offset           | Table BIR | DW1 (BIR: BAR Index Register)
+ *  +-------------------------------------------------+-----------+               |
+ *  |             Pending Bit Array (PBA) Offset      |  PBA BIR  | DW2 --------+ |
+ *  +-------------------------------------------------+-----------+             | |
+ *                                                                              | |
+ * MSI-X Message Control:                                                       | |
+ *                                                                              | |
+ *   15 14 13      11 10                                         0              | |
+ *  +---+---+----------+------------------------------------------+             | |
+ *  |   |   | Reserved |         Table Size in N-1 (RO)           |             | |
+ *  +---+---+----------+------------------------------------------+             | |
+ *    ^   ^                                                                     | |
+ *    |   |                                                                     | |
+ *    |   +---- Function Mask (RW)                                              | |
+ *    +-------- MSI-X Enable (RW)                                               | |
+ *                                                                              | |
+ * MSI-X Table (BAR[Table BIR] + MSI-X Table Offset):                           | |
+ *                                                                              | |
+ *          DW3            DW2             DW1             DW0                  | |
+ *  +----------------+--------------+---------------+---------------+ <---------|-+
+ *  | Vector Control | Message Data | Upper Address | Lower Address | Entry 0   |
+ *  +----------------+--------------+---------------+---------------+           |
+ *  | Vector Control | Message Data | Upper Address | Lower Address | Entry 1   |
+ *  +----------------+--------------+---------------+---------------+           |
+ *  |     ......     |    ......    |    ......     |    ......     |           |
+ *  +----------------+--------------+---------------+---------------+           |
+ *  | Vector Control | Message Data | Upper Address | Lower Address | Entry N-1 |
+ *  +----------------+--------------+---------------+---------------+           |
+ *                  ^                                                           |
+ *                  |                                                           |
+ *                  +---- Bit 0 is vector Mask Bit (R/W)                        |
+ *                                                                              |
+ * MSI-X Pending Bit Array (BAR[PBA BIR] + Pending Bit Array Offset):           |
+ *                                                                              |
+ *          DW1            DW0                                                  |
+ *  +-------------------------------+ <-----------------------------------------+
+ *  |     Pending Bits 0 - 63       | QW 0
+ *  +-------------------------------+
+ *  |    Pending Bits 64 - 127      | QW 1
+ *  +-------------------------------+
+ *  |            ......             |
+ *  +-------------------------------+
+ *  |         Pending Bits          | QW (N-1)/64
+ *  +-------------------------------+
+ */
+
+struct rt_pci_msix_conf
+{
+    int index;
+
+    rt_uint32_t msg_ctrl;
+    void *table_base;
+};
+
+struct rt_pci_msi_msg
+{
+    rt_uint32_t address_lo;
+    rt_uint32_t address_hi;
+    rt_uint32_t data;
+};
+
+struct rt_pci_msi_desc
+{
+    rt_list_t list;
+
+    int irq;
+    rt_size_t vector_used;
+    rt_size_t vector_count;
+
+    union
+    {
+        /* For MSI-X */
+        rt_bitmap_t *affinity;
+        /* For MSI */
+        rt_bitmap_t **affinities;
+    };
+
+    struct rt_pci_device *pdev;
+    struct rt_pci_msi_msg msg;
+
+    void *write_msi_msg_data;
+    void (*write_msi_msg)(struct rt_pci_msi_desc *, void *);
+
+    rt_bool_t is_msix;
+    union
+    {
+        struct rt_pci_msi_conf msi;
+        struct rt_pci_msix_conf msix;
+    };
+
+    void *priv;
+};
+
+#define rt_pci_msi_first_desc(pdev) \
+    (rt_list_isempty(&(pdev)->msi_desc_nodes) ? RT_NULL : \
+        rt_list_first_entry(&(pdev)->msi_desc_nodes, struct rt_pci_msi_desc, list))
+
+#define rt_pci_msi_for_each_desc(pdev, desc) \
+    rt_list_for_each_entry(desc, &(pdev)->msi_desc_nodes, list)
+
+#define rt_pci_msix_table_size(flags) ((flags & PCIM_MSIXCTRL_TABLE_SIZE) + 1)
+
+rt_err_t rt_pci_msi_setup_irqs(struct rt_pci_device *pdev, int nvec, int type);
+
+void rt_pci_msi_shutdown(struct rt_pci_device *pdev);
+void rt_pci_msix_shutdown(struct rt_pci_device *pdev);
+void rt_pci_msi_free_irqs(struct rt_pci_device *pdev);
+void rt_pci_msi_write_msg(struct rt_pci_msi_desc *desc, struct rt_pci_msi_msg *msg);
+
+void rt_pci_msi_mask_irq(struct rt_pic_irq *pirq);
+void rt_pci_msi_unmask_irq(struct rt_pic_irq *pirq);
+
+#endif /* __PCI_MSI_H__ */

+ 49 - 0
components/drivers/pci/Kconfig

@@ -0,0 +1,49 @@
+menuconfig RT_USING_PCI
+    bool "Using Peripheral Component Interconnect Express (PCIe/PCI)"
+    depends on RT_USING_DM
+    depends on RT_USING_PIC
+    select RT_USING_ADT
+    select RT_USING_ADT_BITMAP
+    default n
+
+config RT_PCI_MSI
+    bool "PCI MSI/MSI-X"
+    depends on RT_USING_PCI
+    default y
+
+config RT_PCI_ENDPOINT
+    bool "PCI Endpoint"
+    depends on RT_USING_PCI
+    select RT_USING_ADT_REF
+    default n
+
+config RT_PCI_SYS_64BIT
+    bool "PCI System 64bit"
+    depends on RT_USING_PCI
+    depends on ARCH_CPU_64BIT
+    default y
+
+config RT_PCI_CACHE_LINE_SIZE
+    int "PCI Cache line size"
+    depends on RT_USING_PCI
+    default 8 if ARCH_CPU_64BIT
+    default 4
+
+config RT_PCI_LOCKLESS
+    bool "PCI lockless configuration space access"
+    depends on RT_USING_PCI
+    default n
+
+if RT_USING_PCI
+
+comment "PCI Device Drivers"
+
+config RT_PCI_ECAM
+    bool "PCIe ECAM"
+    depends on RT_USING_PCI
+    default y
+    help
+        PCI Express Enhanced Configuration Access Mechanism (ECAM)
+
+rsource "host/Kconfig"
+endif

+ 28 - 0
components/drivers/pci/SConscript

@@ -0,0 +1,28 @@
+from building import *
+
+objs = []
+
+if not GetDepend(['RT_USING_PCI']):
+    Return('objs')
+
+cwd     = GetCurrentDir()
+list    = os.listdir(cwd)
+CPPPATH = [cwd + '/../include']
+
+src = ['access.c', 'host-bridge.c', 'irq.c', 'pci.c', 'pme.c', 'probe.c']
+
+if GetDepend(['RT_USING_OFW']):
+    src += ['ofw.c']
+
+if GetDepend(['RT_PCI_ECAM']):
+    src += ['ecam.c']
+
+group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
+
+for d in list:
+    path = os.path.join(cwd, d)
+    if os.path.isfile(os.path.join(path, 'SConscript')):
+        objs = objs + SConscript(os.path.join(d, 'SConscript'))
+objs = objs + group
+
+Return('objs')

+ 159 - 0
components/drivers/pci/access.c

@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-10-24     GuEe-GUI     first version
+ */
+
+#include <rthw.h>
+#include <rtthread.h>
+
+#include <drivers/pci.h>
+
+/* Global lock serializing all PCI configuration-space accesses */
+struct rt_spinlock rt_pci_lock = { 0 };
+
+#ifdef RT_PCI_LOCKLESS
+/* Lockless build: the lock variable is evaluated but no lock is taken */
+#define pci_lock_config(l)      do { (void)(l); } while (0)
+#define pci_unlock_config(l)    do { (void)(l); } while (0)
+#else
+/* Default: config accesses are serialized with IRQs disabled */
+#define pci_lock_config(l)      l = rt_spin_lock_irqsave(&rt_pci_lock)
+#define pci_unlock_config(l)    rt_spin_unlock_irqrestore(&rt_pci_lock, l)
+#endif
+
+/*
+ * Generate rt_pci_bus_read_config_<name>(): locked read of sizeof(type)
+ * bytes at "reg". On error the output is forced to all-ones, mimicking
+ * the value a master reads from an absent device.
+ */
+#define PCI_OPS_READ(name, type) \
+rt_err_t rt_pci_bus_read_config_##name(struct rt_pci_bus *bus, rt_uint32_t devfn, int reg, type *value) \
+{                                                               \
+    rt_err_t err;                                               \
+    rt_ubase_t level;                                           \
+    rt_uint32_t data = 0;                                       \
+    pci_lock_config(level);                                     \
+    err = bus->ops->read(bus, devfn, reg, sizeof(type), &data); \
+    *value = err ? (type)(~0) : (type)data;                     \
+    pci_unlock_config(level);                                   \
+    return err;                                                 \
+}
+
+/* Generate rt_pci_bus_write_config_<name>(): locked write counterpart */
+#define PCI_OPS_WRITE(name, type) \
+rt_err_t rt_pci_bus_write_config_##name(struct rt_pci_bus *bus, rt_uint32_t devfn, int reg, type value) \
+{                                                                \
+    rt_err_t err;                                                \
+    rt_ubase_t level;                                            \
+    pci_lock_config(level);                                      \
+    err = bus->ops->write(bus, devfn, reg, sizeof(type), value); \
+    pci_unlock_config(level);                                    \
+    return err;                                                  \
+}
+
+/* Generate the read/write pair for one access width */
+#define PCI_OPS(name, type)  \
+    PCI_OPS_READ(name, type) \
+    PCI_OPS_WRITE(name, type)
+
+PCI_OPS(u8, rt_uint8_t)
+PCI_OPS(u16, rt_uint16_t)
+PCI_OPS(u32, rt_uint32_t)
+
+/*
+ * Undefine the generator macros so they cannot leak into other
+ * translation units. The previous names (PCI_OP_*) were misspelled
+ * and left PCI_OPS_READ/PCI_OPS_WRITE defined.
+ */
+#undef PCI_OPS_WRITE
+#undef PCI_OPS_READ
+#undef PCI_OPS
+
+/*
+ * Generic memory-mapped config read: map the register through the bus
+ * ops, then issue a 1/2/4-byte access. Returns -RT_ERROR if the
+ * register cannot be mapped.
+ */
+rt_err_t rt_pci_bus_read_config_uxx(struct rt_pci_bus *bus,
+        rt_uint32_t devfn, int reg, int width, rt_uint32_t *value)
+{
+    void *base = bus->ops->map(bus, devfn, reg);
+
+    if (!base)
+    {
+        return -RT_ERROR;
+    }
+
+    switch (width)
+    {
+    case 1:
+        *value = HWREG8(base);
+        break;
+    case 2:
+        *value = HWREG16(base);
+        break;
+    default:
+        *value = HWREG32(base);
+        break;
+    }
+
+    return RT_EOK;
+}
+
+/*
+ * Generic memory-mapped config write: map the register through the bus
+ * ops, then issue a 1/2/4-byte store. Returns -RT_ERROR if the
+ * register cannot be mapped.
+ */
+rt_err_t rt_pci_bus_write_config_uxx(struct rt_pci_bus *bus,
+        rt_uint32_t devfn, int reg, int width, rt_uint32_t value)
+{
+    void *base = bus->ops->map(bus, devfn, reg);
+
+    if (!base)
+    {
+        return -RT_ERROR;
+    }
+
+    switch (width)
+    {
+    case 1:
+        HWREG8(base) = value;
+        break;
+    case 2:
+        HWREG16(base) = value;
+        break;
+    default:
+        HWREG32(base) = value;
+        break;
+    }
+
+    return RT_EOK;
+}
+
+/*
+ * Config read for controllers that only support aligned 32-bit
+ * accesses: read the enclosing dword and extract the requested bytes.
+ */
+rt_err_t rt_pci_bus_read_config_generic_u32(struct rt_pci_bus *bus,
+        rt_uint32_t devfn, int reg, int width, rt_uint32_t *value)
+{
+    void *base;
+
+    /*
+     * Map the dword-aligned offset (as the write side below does).
+     * Mapping the raw "reg" would make HWREG32() a misaligned access
+     * and the byte-lane shift below would extract the wrong bytes.
+     */
+    if ((base = bus->ops->map(bus, devfn, reg & ~0x3)))
+    {
+        *value = HWREG32(base);
+
+        if (width <= 2)
+        {
+            /* Shift the wanted byte lane down and mask to "width" bytes */
+            *value = (*value >> (8 * (reg & 3))) & ((1 << (width * 8)) - 1);
+        }
+
+        return RT_EOK;
+    }
+
+    return -RT_ERROR;
+}
+
+/*
+ * Config write for controllers that only support aligned 32-bit
+ * accesses: sub-dword writes are done as a read-modify-write of the
+ * enclosing dword.
+ *
+ * NOTE(review): the RMW is not atomic versus other agents touching the
+ * neighbouring bytes of the same dword; callers are serialized by
+ * rt_pci_lock in the PCI_OPS wrappers.
+ */
+rt_err_t rt_pci_bus_write_config_generic_u32(struct rt_pci_bus *bus,
+        rt_uint32_t devfn, int reg, int width, rt_uint32_t value)
+{
+    void *base;
+
+    /* Always map the dword-aligned offset */
+    if ((base = bus->ops->map(bus, devfn, reg & ~0x3)))
+    {
+        if (width == 4)
+        {
+            HWREG32(base) = value;
+        }
+        else
+        {
+            rt_uint32_t mask, tmp;
+
+            /* Clear the target byte lanes, then merge the new value in */
+            mask = ~(((1 << (width * 8)) - 1) << ((reg & 0x3) * 8));
+            tmp = HWREG32(base) & mask;
+            tmp |= value << ((reg & 0x3) * 8);
+            HWREG32(base) = tmp;
+        }
+
+        return RT_EOK;
+    }
+
+    return -RT_ERROR;
+}

+ 72 - 0
components/drivers/pci/ecam.c

@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-10-24     GuEe-GUI     first version
+ */
+
+#include <rthw.h>
+#include <rtthread.h>
+
+#define DBG_TAG "pci.ecam"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
+#include "ecam.h"
+
+/*
+ * Allocate an ECAM config window for a host bridge and install the
+ * given ECAM ops as the bridge's PCI ops. The caller owns the returned
+ * window (rt_calloc'd) and must still fill in ->win with the mapped
+ * config space. Returns RT_NULL on allocation failure.
+ */
+struct pci_ecam_config_window *pci_ecam_create(struct rt_pci_host_bridge *host_bridge,
+        const struct pci_ecam_ops *ops)
+{
+    struct pci_ecam_config_window *conf_win = rt_calloc(1, sizeof(*conf_win));
+
+    if (!conf_win)
+    {
+        return RT_NULL;
+    }
+
+    conf_win->bus_range = host_bridge->bus_range;
+    conf_win->bus_shift = ops->bus_shift;
+    conf_win->ops = ops;
+
+    /* pci_ops is the first member of pci_ecam_ops-compatible layouts */
+    host_bridge->ops = (const struct rt_pci_ops *)&ops->pci_ops;
+
+    return conf_win;
+}
+
+/*
+ * Translate (bus, devfn, where) into a pointer inside the mapped
+ * config window. A non-zero ops->bus_shift selects legacy CAM-style
+ * layouts (e.g. shift 16); otherwise the standard ECAM layout
+ * (bus << 20 | devfn << 12 | reg) is used.
+ */
+void *pci_ecam_map(struct rt_pci_bus *bus, rt_uint32_t devfn, int where)
+{
+    struct pci_ecam_config_window *conf_win = bus->sysdata;
+    const struct pci_ecam_ops *eops = conf_win->ops;
+    void *win = conf_win->win, *map;
+    rt_uint32_t busn = bus->number, bus_shift = eops->bus_shift, devfn_shift = bus_shift - 8;
+
+    /* The window starts at the first bus of the bridge's bus range */
+    busn -= conf_win->bus_range[0];
+
+    if (bus_shift)
+    {
+        rt_uint32_t bus_offset = (busn & PCIE_ECAM_BUS_MASK) << bus_shift;
+        rt_uint32_t devfn_offset = (devfn & PCIE_ECAM_DEVFN_MASK) << devfn_shift;
+
+        where &= PCIE_ECAM_REG_MASK;
+        map = win + (bus_offset | devfn_offset | where);
+    }
+    else
+    {
+        /* Default ECAM: 20-bit bus shift, 12-bit devfn shift */
+        map = win + PCIE_ECAM_OFFSET(busn, devfn, where);
+    }
+
+    return map;
+}
+
+const struct pci_ecam_ops pci_generic_ecam_ops =
+{
+    .pci_ops =
+    {
+        .map = pci_ecam_map,
+        .read = rt_pci_bus_read_config_uxx,
+        .write = rt_pci_bus_write_config_uxx,
+    }
+};

+ 69 - 0
components/drivers/pci/ecam.h

@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-10-24     GuEe-GUI     first version
+ */
+
+#ifndef __RT_PCI_ECAM_H__
+#define __RT_PCI_ECAM_H__
+
+#include <drivers/pci.h>
+#include <drivers/ofw.h>
+#include <drivers/ofw_io.h>
+#include <drivers/platform.h>
+
+/*
+ * Memory address shift values for the byte-level address that
+ * can be used when accessing the PCI Express Configuration Space.
+ */
+
+/*
+ * Enhanced Configuration Access Mechanism (ECAM)
+ *
+ * See PCI Express Base Specification, Revision 5.0, Version 1.0,
+ * Section 7.2.2, Table 7-1, p. 677.
+ */
+#define PCIE_ECAM_BUS_SHIFT     20 /* Bus number */
+#define PCIE_ECAM_DEVFN_SHIFT   12 /* Device and Function number */
+
+#define PCIE_ECAM_BUS_MASK      0xff
+#define PCIE_ECAM_DEVFN_MASK    0xff
+#define PCIE_ECAM_REG_MASK      0xfff /* Limit offset to a maximum of 4K */
+
+#define PCIE_ECAM_BUS(x)        (((x) & PCIE_ECAM_BUS_MASK) << PCIE_ECAM_BUS_SHIFT)
+#define PCIE_ECAM_DEVFN(x)      (((x) & PCIE_ECAM_DEVFN_MASK) << PCIE_ECAM_DEVFN_SHIFT)
+#define PCIE_ECAM_REG(x)        ((x) & PCIE_ECAM_REG_MASK)
+
+#define PCIE_ECAM_OFFSET(bus, devfn, where) \
+        (PCIE_ECAM_BUS(bus) |  PCIE_ECAM_DEVFN(devfn) | PCIE_ECAM_REG(where))
+
+struct pci_ecam_ops
+{
+    rt_uint32_t bus_shift;
+    const struct rt_pci_ops pci_ops;
+};
+
+struct pci_ecam_config_window
+{
+    rt_uint32_t *bus_range;
+    rt_uint32_t bus_shift;
+
+    void *win;
+    void *priv;
+    const struct pci_ecam_ops *ops;
+};
+
+/* Default ECAM ops */
+extern const struct pci_ecam_ops pci_generic_ecam_ops;
+
+void *pci_ecam_map(struct rt_pci_bus *bus, rt_uint32_t devfn, int where);
+struct pci_ecam_config_window *pci_ecam_create(struct rt_pci_host_bridge *host_bridge,
+        const struct pci_ecam_ops *ops);
+rt_err_t pci_host_common_probe(struct rt_platform_device *pdev);
+rt_err_t pci_host_common_remove(struct rt_platform_device *pdev);
+
+#endif /* __RT_PCI_ECAM_H__ */

+ 15 - 0
components/drivers/pci/endpoint/SConscript

@@ -0,0 +1,15 @@
+from building import *
+
+group = []
+
+if not GetDepend(['RT_PCI_ENDPOINT']):
+    Return('group')
+
+cwd = GetCurrentDir()
+CPPPATH = [cwd + '/../../include']
+
+src = ['endpoint.c']
+
+group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
+
+Return('group')

+ 504 - 0
components/drivers/pci/endpoint/endpoint.c

@@ -0,0 +1,504 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-08-25     GuEe-GUI     first version
+ */
+
+#include <drivers/pci_endpoint.h>
+
+#define DBG_TAG "pci.ep"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
+static rt_list_t _ep_nodes = RT_LIST_OBJECT_INIT(_ep_nodes);
+static struct rt_spinlock _ep_lock = { 0 };
+
+/*
+ * Write the standard configuration header of one endpoint function.
+ * Returns -RT_EINVAL on bad arguments, -RT_ENOSYS when the controller
+ * does not implement write_header, otherwise the controller's result.
+ */
+rt_err_t rt_pci_ep_write_header(struct rt_pci_ep *ep, rt_uint8_t func_no,
+        struct rt_pci_ep_header *hdr)
+{
+    rt_err_t err;
+
+    if (!ep || !ep->ops || !hdr || func_no >= ep->max_functions)
+    {
+        return -RT_EINVAL;
+    }
+
+    if (!ep->ops->write_header)
+    {
+        return -RT_ENOSYS;
+    }
+
+    rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
+    err = ep->ops->write_header(ep, func_no, hdr);
+    rt_mutex_release(&ep->lock);
+
+    return err;
+}
+
+/*
+ * Program one BAR of an endpoint function after sanity-checking the
+ * requested flags: the last BAR cannot start a 64-bit pair, a >4GiB
+ * size requires the 64-bit memory type, and I/O BARs must not carry
+ * memory-type flags.
+ */
+rt_err_t rt_pci_ep_set_bar(struct rt_pci_ep *ep, rt_uint8_t func_no,
+        struct rt_pci_ep_bar *bar, int bar_idx)
+{
+    rt_err_t err = RT_EOK;
+
+    if (ep && ep->ops && func_no < ep->max_functions && bar &&
+        bar_idx < PCI_STD_NUM_BARS)
+    {
+        struct rt_pci_bus_resource *bus_bar = &bar->bus;
+
+        /* A 64-bit BAR consumes two slots; it cannot start at the last one */
+        if (bar_idx == (PCI_STD_NUM_BARS - 1) &&
+            (bus_bar->flags & PCIM_BAR_MEM_TYPE_64))
+        {
+            err = -RT_EINVAL;
+            LOG_E("%s: Set BAR[%d] can't be 64bit", ep->name, bar_idx);
+        }
+
+        if (rt_upper_32_bits(bus_bar->size) &&
+            !(bus_bar->flags & PCIM_BAR_MEM_TYPE_64))
+        {
+            err = -RT_EINVAL;
+            LOG_E("%s: Set BAR[%d] size is no support 64bit", ep->name, bar_idx);
+        }
+
+        if ((bus_bar->flags & PCIM_BAR_SPACE_IO) &&
+            (bus_bar->flags & PCIM_BAR_IO_MASK))
+        {
+            err = -RT_EINVAL;
+            LOG_E("%s: Set BAR[%d] io flags is invalid", ep->name, bar_idx);
+        }
+
+        if (!err)
+        {
+            if (ep->ops->set_bar)
+            {
+                rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
+                err = ep->ops->set_bar(ep, func_no, bar, bar_idx);
+                rt_mutex_release(&ep->lock);
+            }
+            else
+            {
+                err = -RT_ENOSYS;
+            }
+        }
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+rt_err_t rt_pci_ep_clear_bar(struct rt_pci_ep *ep, rt_uint8_t func_no,
+        struct rt_pci_ep_bar *bar, int bar_idx)
+{
+    rt_err_t err;
+
+    if (ep && ep->ops && func_no < ep->max_functions && bar &&
+        bar_idx < PCI_STD_NUM_BARS)
+    {
+        if (ep->ops->clear_bar)
+        {
+            rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
+            err = ep->ops->clear_bar(ep, func_no, bar, bar_idx);
+            rt_mutex_release(&ep->lock);
+        }
+        else
+        {
+            err = -RT_ENOSYS;
+        }
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+rt_err_t rt_pci_ep_map_addr(struct rt_pci_ep *ep, rt_uint8_t func_no,
+        rt_ubase_t addr, rt_uint64_t pci_addr, rt_size_t size)
+{
+    rt_err_t err;
+
+    if (ep && ep->ops && func_no < ep->max_functions && size)
+    {
+        if (ep->ops->map_addr)
+        {
+            rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
+            err = ep->ops->map_addr(ep, func_no, addr, pci_addr, size);
+            rt_mutex_release(&ep->lock);
+        }
+        else
+        {
+            err = -RT_ENOSYS;
+        }
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+rt_err_t rt_pci_ep_unmap_addr(struct rt_pci_ep *ep, rt_uint8_t func_no,
+        rt_ubase_t addr)
+{
+    rt_err_t err;
+
+    if (ep && ep->ops && func_no < ep->max_functions)
+    {
+        if (ep->ops->unmap_addr)
+        {
+            rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
+            err = ep->ops->unmap_addr(ep, func_no, addr);
+            rt_mutex_release(&ep->lock);
+        }
+        else
+        {
+            err = -RT_ENOSYS;
+        }
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+/*
+ * Configure the number of MSI vectors for one endpoint function. The
+ * MSI "Multiple Message" field encodes the count as a power of two, so
+ * round irq_nr up to the smallest sufficient 2^log2 and program that.
+ * Returns -RT_EINVAL when irq_nr exceeds the supported range.
+ */
+rt_err_t rt_pci_ep_set_msi(struct rt_pci_ep *ep, rt_uint8_t func_no,
+        unsigned irq_nr)
+{
+    rt_err_t err;
+
+    if (ep && ep->ops && func_no < ep->max_functions)
+    {
+        /* Check the op we are about to call (was wrongly testing set_msix) */
+        if (ep->ops->set_msi)
+        {
+            err = -RT_EINVAL;
+
+            for (int log2 = 0; log2 < 5; ++log2)
+            {
+                if (irq_nr <= (1 << log2))
+                {
+                    rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
+                    err = ep->ops->set_msi(ep, func_no, log2);
+                    rt_mutex_release(&ep->lock);
+
+                    /* Smallest sufficient power of two found; stop here */
+                    break;
+                }
+            }
+        }
+        else
+        {
+            err = -RT_ENOSYS;
+        }
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+rt_err_t rt_pci_ep_get_msi(struct rt_pci_ep *ep, rt_uint8_t func_no,
+        unsigned *out_irq_nr)
+{
+    rt_err_t err;
+
+    if (ep && ep->ops && func_no < ep->max_functions && out_irq_nr)
+    {
+        if (ep->ops->get_msi)
+        {
+            rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
+            err = ep->ops->get_msi(ep, func_no, out_irq_nr);
+            rt_mutex_release(&ep->lock);
+        }
+        else
+        {
+            err = -RT_ENOSYS;
+        }
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+rt_err_t rt_pci_ep_set_msix(struct rt_pci_ep *ep, rt_uint8_t func_no,
+        unsigned irq_nr, int bar_idx, rt_off_t offset)
+{
+    rt_err_t err;
+
+    if (ep && ep->ops && func_no < ep->max_functions && irq_nr < 2048 &&
+        bar_idx < PCI_STD_NUM_BARS)
+    {
+        if (ep->ops->set_msix)
+        {
+            rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
+            err = ep->ops->set_msix(ep, func_no, irq_nr, bar_idx, offset);
+            rt_mutex_release(&ep->lock);
+        }
+        else
+        {
+            err = -RT_ENOSYS;
+        }
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+rt_err_t rt_pci_ep_get_msix(struct rt_pci_ep *ep, rt_uint8_t func_no,
+        unsigned *out_irq_nr)
+{
+    rt_err_t err;
+
+    if (ep && ep->ops && func_no < ep->max_functions && out_irq_nr)
+    {
+        if (ep->ops->get_msix)
+        {
+            rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
+            err = ep->ops->get_msix(ep, func_no, out_irq_nr);
+            rt_mutex_release(&ep->lock);
+        }
+        else
+        {
+            err = -RT_ENOSYS;
+        }
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+rt_err_t rt_pci_ep_raise_irq(struct rt_pci_ep *ep, rt_uint8_t func_no,
+        enum rt_pci_ep_irq type, unsigned irq)
+{
+    rt_err_t err;
+
+    if (ep && ep->ops && func_no < ep->max_functions)
+    {
+        if (ep->ops->raise_irq)
+        {
+            rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
+            err = ep->ops->raise_irq(ep, func_no, type, irq);
+            rt_mutex_release(&ep->lock);
+        }
+        else
+        {
+            err = -RT_ENOSYS;
+        }
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+rt_err_t rt_pci_ep_start(struct rt_pci_ep *ep)
+{
+    rt_err_t err;
+
+    if (ep && ep->ops)
+    {
+        if (ep->ops->start)
+        {
+            rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
+            err = ep->ops->start(ep);
+            rt_mutex_release(&ep->lock);
+        }
+        else
+        {
+            err = -RT_ENOSYS;
+        }
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+rt_err_t rt_pci_ep_stop(struct rt_pci_ep *ep)
+{
+    rt_err_t err;
+
+    if (ep && ep->ops)
+    {
+        if (ep->ops->stop)
+        {
+            rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
+            err = ep->ops->stop(ep);
+            rt_mutex_release(&ep->lock);
+        }
+        else
+        {
+            err = -RT_ENOSYS;
+        }
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+rt_err_t rt_pci_ep_register(struct rt_pci_ep *ep)
+{
+    rt_ubase_t level;
+
+    if (!ep || !ep->ops)
+    {
+        return -RT_EINVAL;
+    }
+
+    rt_list_init(&ep->list);
+    rt_ref_init(&ep->ref);
+
+    rt_list_init(&ep->epf_nodes);
+    rt_mutex_init(&ep->lock, ep->name, RT_IPC_FLAG_PRIO);
+
+    level = rt_spin_lock_irqsave(&_ep_lock);
+    rt_list_insert_before(&_ep_nodes, &ep->list);
+    rt_spin_unlock_irqrestore(&_ep_lock, level);
+
+    return RT_EOK;
+}
+
+rt_err_t rt_pci_ep_unregister(struct rt_pci_ep *ep)
+{
+    rt_ubase_t level;
+    rt_err_t err = RT_EOK;
+
+    if (!ep)
+    {
+        return -RT_EINVAL;
+    }
+
+    level = rt_spin_lock_irqsave(&_ep_lock);
+
+    if (rt_ref_read(&ep->ref) > 1)
+    {
+        err = -RT_EBUSY;
+    }
+    else
+    {
+        rt_list_remove(&ep->list);
+        rt_mutex_detach(&ep->lock);
+    }
+
+    rt_spin_unlock_irqrestore(&_ep_lock, level);
+
+    return err;
+}
+
+/*
+ * Attach an endpoint function to a controller. The function number must
+ * be inside the controller's range and not already claimed (tracked in
+ * ep->functions_map).
+ */
+rt_err_t rt_pci_ep_add_epf(struct rt_pci_ep *ep, struct rt_pci_epf *epf)
+{
+    rt_err_t err = RT_EOK;
+
+    if (!ep || !epf || !epf->name)
+    {
+        return -RT_EINVAL;
+    }
+
+    /*
+     * Use ">=" rather than "> max_functions - 1": max_functions is
+     * unsigned, so "max_functions - 1" underflows when it is zero.
+     */
+    if (epf->func_no >= ep->max_functions)
+    {
+        LOG_E("%s function No(%d) > %s max function No(%d - 1)",
+                epf->name, epf->func_no, ep->name, ep->max_functions);
+
+        return -RT_EINVAL;
+    }
+
+    epf->ep = ep;
+    rt_list_init(&epf->list);
+
+    rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
+
+    if (!rt_bitmap_test_bit(ep->functions_map, epf->func_no))
+    {
+        rt_bitmap_set_bit(ep->functions_map, epf->func_no);
+        rt_list_insert_before(&ep->epf_nodes, &epf->list);
+    }
+    else
+    {
+        err = -RT_EINVAL;
+        LOG_E("%s function No(%d) is repeating", epf->name, epf->func_no);
+    }
+
+    rt_mutex_release(&ep->lock);
+
+    return err;
+}
+
+rt_err_t rt_pci_ep_remove_epf(struct rt_pci_ep *ep, struct rt_pci_epf *epf)
+{
+    if (!ep || !epf)
+    {
+        return -RT_EINVAL;
+    }
+
+    rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
+    rt_bitmap_clear_bit(ep->functions_map, epf->func_no);
+    rt_list_remove(&epf->list);
+    rt_mutex_release(&ep->lock);
+
+    return RT_EOK;
+}
+
+/*
+ * Look up a registered endpoint controller by name and take a
+ * reference on it. A RT_NULL name matches the first registered
+ * controller. Returns RT_NULL when nothing matches; release with
+ * rt_pci_ep_put().
+ */
+struct rt_pci_ep *rt_pci_ep_get(const char *name)
+{
+    rt_ubase_t level;
+    struct rt_pci_ep *ep = RT_NULL, *ep_tmp;
+
+    level = rt_spin_lock_irqsave(&_ep_lock);
+
+    rt_list_for_each_entry(ep_tmp, &_ep_nodes, list)
+    {
+        if (!name || !rt_strcmp(ep_tmp->name, name))
+        {
+            ep = ep_tmp;
+            /* Reference taken under the list lock so it can't vanish */
+            rt_ref_get(&ep->ref);
+            break;
+        }
+    }
+
+    rt_spin_unlock_irqrestore(&_ep_lock, level);
+
+    return ep;
+}
+
+static void pci_ep_release(struct rt_ref *ref)
+{
+    struct rt_pci_ep *ep = rt_container_of(ref, struct rt_pci_ep, ref);
+
+    rt_pci_ep_unregister(ep);
+}
+
+void rt_pci_ep_put(struct rt_pci_ep *ep)
+{
+    if (ep)
+    {
+        rt_ref_put(&ep->ref, &pci_ep_release);
+    }
+}

+ 129 - 0
components/drivers/pci/host-bridge.c

@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-10-24     GuEe-GUI     first version
+ */
+
+#include <rtthread.h>
+
+#include <drivers/pm.h>
+#include <drivers/pci.h>
+
+#ifdef RT_USING_PM
+struct host_bridge_pm_status
+{
+    rt_uint8_t mode;
+    rt_bool_t enable;
+};
+
+static const enum rt_pci_power system_pci_pm_mode[] =
+{
+    [PM_SLEEP_MODE_NONE]        = RT_PCI_D0,
+    [PM_SLEEP_MODE_IDLE]        = RT_PCI_D3HOT,
+    [PM_SLEEP_MODE_LIGHT]       = RT_PCI_D1,
+    [PM_SLEEP_MODE_DEEP]        = RT_PCI_D1,
+    [PM_SLEEP_MODE_STANDBY]     = RT_PCI_D2,
+    [PM_SLEEP_MODE_SHUTDOWN]    = RT_PCI_D3COLD,
+};
+
+static rt_bool_t pci_device_pm_ops(struct rt_pci_device *pdev, void *data)
+{
+    struct host_bridge_pm_status *status = data;
+
+    rt_pci_enable_wake(pdev, system_pci_pm_mode[status->mode], status->enable);
+
+    /* To find all devices, always return false */
+    return RT_FALSE;
+}
+
+static rt_err_t host_bridge_pm_suspend(const struct rt_device *device, rt_uint8_t mode)
+{
+    struct host_bridge_pm_status status;
+    struct rt_pci_device *pdev = rt_container_of(device, struct rt_pci_device, parent);
+
+    status.mode = mode;
+    status.enable = RT_FALSE;
+    rt_pci_enum_device(pdev->bus, pci_device_pm_ops, &status);
+
+    return RT_EOK;
+}
+
+static void host_bridge_pm_resume(const struct rt_device *device, rt_uint8_t mode)
+{
+    struct host_bridge_pm_status status;
+    struct rt_pci_device *pdev = rt_container_of(device, struct rt_pci_device, parent);
+
+    status.mode = mode;
+    status.enable = RT_TRUE;
+    rt_pci_enum_device(pdev->bus, pci_device_pm_ops, &status);
+}
+
+static const struct rt_device_pm_ops host_bridge_pm_ops =
+{
+    .suspend = host_bridge_pm_suspend,
+    .resume = host_bridge_pm_resume,
+};
+#endif /* RT_USING_PM */
+
+static void host_bridge_free(struct rt_pci_device *pdev)
+{
+#ifdef RT_USING_PM
+    rt_pm_device_unregister(&pdev->parent);
+#endif
+}
+
+static rt_err_t host_bridge_probe(struct rt_pci_device *pdev)
+{
+    rt_err_t err = RT_EOK;
+
+    rt_pci_set_master(pdev);
+
+#ifdef RT_USING_PM
+    rt_pm_device_register(&pdev->parent, &host_bridge_pm_ops);
+#endif
+
+    return err;
+}
+
+static rt_err_t host_bridge_remove(struct rt_pci_device *pdev)
+{
+    host_bridge_free(pdev);
+    rt_pci_clear_master(pdev);
+
+    return RT_EOK;
+}
+
+static rt_err_t host_bridge_shutdown(struct rt_pci_device *pdev)
+{
+    host_bridge_free(pdev);
+
+    return RT_EOK;
+}
+
+static const struct rt_pci_device_id host_bridge_pci_ids[] =
+{
+    /* PCI host bridges */
+    { RT_PCI_DEVICE_ID(PCI_VENDOR_ID_REDHAT, 0x0008) },
+    /* Any PCI-Express port */
+    { RT_PCI_DEVICE_CLASS(PCIS_BRIDGE_PCI_NORMAL, ~0) },
+    /* PCI-to-PCI bridge */
+    { RT_PCI_DEVICE_CLASS(PCIS_BRIDGE_PCI_SUBTRACTIVE, ~0) },
+    /* Any Root Complex Event Collector */
+    { RT_PCI_DEVICE_CLASS(((PCIS_SYSTEM_RCEC << 8) | 0x00), ~0) },
+    { /* sentinel */ }
+};
+
+static struct rt_pci_driver host_bridge_driver =
+{
+    .name = "host-bridge",
+
+    .ids = host_bridge_pci_ids,
+    .probe = host_bridge_probe,
+    .remove = host_bridge_remove,
+    .shutdown = host_bridge_shutdown,
+};
+RT_PCI_DRIVER_EXPORT(host_bridge_driver);

+ 10 - 0
components/drivers/pci/host/Kconfig

@@ -0,0 +1,10 @@
+config RT_PCI_HOST_COMMON
+    bool "Common PCI host controller"
+    depends on RT_PCI_ECAM
+    default y
+
+config RT_PCI_HOST_GENERIC
+    bool "Generic PCI host controller"
+    depends on RT_PCI_ECAM
+    select RT_PCI_HOST_COMMON
+    default y

+ 25 - 0
components/drivers/pci/host/SConscript

@@ -0,0 +1,25 @@
+from building import *
+
+objs = []
+
+cwd = GetCurrentDir()
+list = os.listdir(cwd)
+CPPPATH = [cwd + '/../../include']
+
+src = []
+
+if GetDepend(['RT_PCI_HOST_COMMON']):
+    src += ['pci-host-common.c']
+
+if GetDepend(['RT_PCI_HOST_GENERIC']):
+    src += ['pci-host-generic.c']
+
+group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
+
+for d in list:
+    path = os.path.join(cwd, d)
+    if os.path.isfile(os.path.join(path, 'SConscript')):
+        objs = objs + SConscript(os.path.join(d, 'SConscript'))
+objs = objs + group
+
+Return('objs')

+ 85 - 0
components/drivers/pci/host/pci-host-common.c

@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-10-24     GuEe-GUI     first version
+ */
+
+#include <rtthread.h>
+
+#include "../ecam.h"
+
+/*
+ * Common probe for ECAM-based host controllers: allocate a host
+ * bridge, map the config space (reg 0 of the OFW node), create the
+ * ECAM window with the ops carried by the matched id, then scan the
+ * bus. All intermediate allocations are released on failure.
+ */
+rt_err_t pci_host_common_probe(struct rt_platform_device *pdev)
+{
+    void *base = RT_NULL;
+    rt_err_t err;
+    struct rt_device *dev = &pdev->parent;
+    struct pci_ecam_config_window *conf_win = RT_NULL;
+    struct rt_pci_host_bridge *host_bridge = rt_pci_host_bridge_alloc(0);
+
+    if (!host_bridge)
+    {
+        return -RT_ENOMEM;
+    }
+
+    if (!(base = rt_dm_dev_iomap(dev, 0)))
+    {
+        err = -RT_EIO;
+        goto _fail;
+    }
+
+    host_bridge->parent.ofw_node = dev->ofw_node;
+
+    if ((err = rt_pci_host_bridge_init(host_bridge)))
+    {
+        goto _fail;
+    }
+
+    /* The matched id's data carries the pci_ecam_ops for this variant */
+    host_bridge->sysdata = conf_win = pci_ecam_create(host_bridge,
+            (const struct pci_ecam_ops *)pdev->id->data);
+
+    if (!conf_win)
+    {
+        err = -RT_ENOMEM;
+        goto _fail;
+    }
+
+    conf_win->win = base;
+    conf_win->priv = host_bridge;
+
+    if ((err = rt_pci_host_bridge_probe(host_bridge)))
+    {
+        goto _fail;
+    }
+
+    dev->user_data = host_bridge;
+
+    return RT_EOK;
+
+_fail:
+    if (base)
+    {
+        rt_iounmap(base);
+    }
+    if (conf_win)
+    {
+        /* Allocated by pci_ecam_create(); was leaked on this path */
+        rt_free(conf_win);
+    }
+    rt_pci_host_bridge_free(host_bridge);
+
+    return err;
+}
+
+rt_err_t pci_host_common_remove(struct rt_platform_device *pdev)
+{
+    struct pci_ecam_config_window *conf_win;
+    struct rt_pci_host_bridge *host_bridge = pdev->parent.user_data;
+
+    rt_pci_host_bridge_remove(host_bridge);
+
+    conf_win = host_bridge->sysdata;
+
+    rt_iounmap(conf_win->win);
+    rt_pci_host_bridge_free(host_bridge);
+
+    return RT_EOK;
+}

+ 66 - 0
components/drivers/pci/host/pci-host-generic.c

@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-10-24     GuEe-GUI     first version
+ */
+
+#include <rtthread.h>
+
+#include "../ecam.h"
+
+static const struct pci_ecam_ops gen_pci_cfg_cam_bus_ops =
+{
+    .bus_shift = 16,
+    .pci_ops =
+    {
+        .map = pci_ecam_map,
+        .read = rt_pci_bus_read_config_uxx,
+        .write = rt_pci_bus_write_config_uxx,
+    }
+};
+
+/*
+ * DesignWare-style ECAM map: on the root bus only slot 0 exists (the
+ * root port), so refuse to map other slots there; otherwise fall back
+ * to the standard ECAM translation.
+ */
+static void *pci_dw_ecam_map_bus(struct rt_pci_bus *bus, rt_uint32_t devfn, int where)
+{
+    struct pci_ecam_config_window *conf_win = bus->sysdata;
+
+    if (bus->number == conf_win->bus_range[0] && RT_PCI_SLOT(devfn) > 0)
+    {
+        return RT_NULL;
+    }
+
+    return pci_ecam_map(bus, devfn, where);
+}
+
+static const struct pci_ecam_ops pci_dw_ecam_bus_ops =
+{
+    .pci_ops =
+    {
+        .map = pci_dw_ecam_map_bus,
+        .read = rt_pci_bus_read_config_uxx,
+        .write = rt_pci_bus_write_config_uxx,
+    }
+};
+
+static const struct rt_ofw_node_id gen_pci_ofw_ids[] =
+{
+    { .compatible = "pci-host-cam-generic", .data = &gen_pci_cfg_cam_bus_ops },
+    { .compatible = "pci-host-ecam-generic", .data = &pci_generic_ecam_ops },
+    { .compatible = "marvell,armada8k-pcie-ecam", .data = &pci_dw_ecam_bus_ops },
+    { .compatible = "socionext,synquacer-pcie-ecam", .data = &pci_dw_ecam_bus_ops },
+    { .compatible = "snps,dw-pcie-ecam", .data = &pci_dw_ecam_bus_ops },
+    { /* sentinel */ }
+};
+
+static struct rt_platform_driver gen_pci_driver =
+{
+    .name = "pci-host-generic",
+    .ids = gen_pci_ofw_ids,
+
+    .probe = pci_host_common_probe,
+    .remove = pci_host_common_remove,
+};
+RT_PLATFORM_DRIVER_EXPORT(gen_pci_driver);

+ 60 - 0
components/drivers/pci/irq.c

@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-11-07     GuEe-GUI     first version
+ */
+
+#include <rtthread.h>
+
+#define DBG_TAG "pci.irq"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
+#include <drivers/pci.h>
+
+/*
+ * Resolve the legacy INTx IRQ of a device through the host bridge's
+ * irq_slot/irq_map callbacks and store it in pdev->irq and the
+ * INTLINE config register. Devices without an INTx pin, or platforms
+ * without an irq_map, keep irq == 0.
+ */
+void rt_pci_assign_irq(struct rt_pci_device *pdev)
+{
+    int irq = 0;
+    /* NOTE(review): slot is rt_uint8_t, so "-1" stores the 0xFF sentinel */
+    rt_uint8_t pin, slot = -1;
+    struct rt_pci_host_bridge *host_bridge = rt_pci_find_host_bridge(pdev->bus);
+
+    if (!host_bridge->irq_map)
+    {
+        LOG_D("PCI-Device<%s> runtime IRQ mapping not provided by platform",
+                rt_dm_dev_get_name(&pdev->parent));
+
+        return;
+    }
+
+    /* Must try the swizzle when interrupt line passes through a P2P bridge */
+    rt_pci_read_config_u8(pdev, PCIR_INTPIN, &pin);
+
+    /* Clamp out-of-range pins to INTA */
+    if (pin > RT_PCI_INTX_PIN_MAX)
+    {
+        pin = 1;
+    }
+
+    if (pin)
+    {
+        if (host_bridge->irq_slot)
+        {
+            slot = host_bridge->irq_slot(pdev, &pin);
+        }
+
+        /* Map IRQ; -1 from the platform means "no IRQ" */
+        if ((irq = host_bridge->irq_map(pdev, slot, pin)) == -1)
+        {
+            irq = 0;
+        }
+    }
+    pdev->irq = irq;
+
+    LOG_D("PCI-Device<%s> assign IRQ: got %d", rt_dm_dev_get_name(&pdev->parent), pdev->irq);
+
+    /* Save IRQ */
+    rt_pci_write_config_u8(pdev, PCIR_INTLINE, irq);
+}

+ 15 - 0
components/drivers/pci/msi/SConscript

@@ -0,0 +1,15 @@
+from building import *
+
+group = []
+
+if not GetDepend(['RT_PCI_MSI']):
+    Return('group')
+
+cwd = GetCurrentDir()
+CPPPATH = [cwd + '/../../include']
+
+src = ['device.c', 'irq.c', 'msi.c']
+
+group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
+
+Return('group')

+ 46 - 0
components/drivers/pci/msi/device.c

@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-10-24     GuEe-GUI     first version
+ */
+
+#include <drivers/pci.h>
+
+/**
+ * Probe the MSI capability of a device and make sure it starts disabled.
+ *
+ * @param pdev the PCI device to probe (ignored when RT_NULL)
+ */
+void rt_pci_msi_init(struct rt_pci_device *pdev)
+{
+    rt_uint16_t ctrl;
+
+    if (!pdev || !(pdev->msi_cap = rt_pci_find_capability(pdev, PCIY_MSI)))
+    {
+        return;
+    }
+
+    rt_pci_read_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, &ctrl);
+
+    /* Firmware may have left MSI on: turn it off until a driver asks for it */
+    if (ctrl & PCIM_MSICTRL_MSI_ENABLE)
+    {
+        rt_pci_write_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL,
+                ctrl & ~PCIM_MSICTRL_MSI_ENABLE);
+    }
+
+    /* Without the 64-bit capability flag, only 32-bit message addresses work */
+    if (!(ctrl & PCIM_MSICTRL_64BIT))
+    {
+        pdev->no_64bit_msi = RT_TRUE;
+    }
+}
+
+/**
+ * Probe the MSI-X capability of a device and make sure it starts disabled.
+ *
+ * @param pdev the PCI device to probe (ignored when RT_NULL)
+ */
+void rt_pci_msix_init(struct rt_pci_device *pdev)
+{
+    rt_uint16_t ctrl;
+
+    if (!pdev || !(pdev->msix_cap = rt_pci_find_capability(pdev, PCIY_MSIX)))
+    {
+        return;
+    }
+
+    rt_pci_read_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, &ctrl);
+
+    /* Firmware may have left MSI-X on: disable until a driver asks for it */
+    if (ctrl & PCIM_MSIXCTRL_MSIX_ENABLE)
+    {
+        rt_pci_write_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL,
+                ctrl & ~PCIM_MSIXCTRL_MSIX_ENABLE);
+    }
+}

+ 146 - 0
components/drivers/pci/msi/irq.c

@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-10-24     GuEe-GUI     first version
+ */
+
+#include <drivers/pci_msi.h>
+
+#define DBG_TAG "pci.msi.irq"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
+static struct rt_spinlock msi_irq_map_lock = {};
+static RT_BITMAP_DECLARE(msi_irq_map, MAX_HANDLERS) = {};
+
+/**
+ * Allocate platform IRQs from the MSI PIC and bind them to the device's
+ * MSI/MSI-X descriptors, composing and writing the message for each vector.
+ *
+ * @param pdev the PCI device whose descriptors are programmed
+ * @param nvec number of vectors requested
+ * @param type PCIY_MSI (one descriptor, contiguous block) or PCIY_MSIX
+ *             (one descriptor per vector, no contiguity requirement)
+ *
+ * @return RT_EOK on success, negative error code otherwise
+ */
+rt_err_t rt_pci_msi_setup_irqs(struct rt_pci_device *pdev, int nvec, int type)
+{
+    int irq, index = 0, irq_nr = 0;
+    rt_err_t err = RT_EOK;
+    struct rt_pic_irq *pirq;
+    struct rt_pic *msi_pic;
+    struct rt_pci_msi_desc *desc;
+
+    if (!pdev)
+    {
+        return -RT_EINVAL;
+    }
+
+    msi_pic = pdev->msi_pic;
+
+    if (type == PCIY_MSI)
+    {
+        int last_irq = -1;
+        /* NOTE(review): shadows the outer "irq_nr" counter — confirm intended */
+        rt_size_t irq_nr;
+
+        /* Plain MSI uses a single descriptor covering 2^multi_msg_use vectors,
+         * which the PCI spec requires to be contiguous */
+        desc = rt_pci_msi_first_desc(pdev);
+        irq_nr = 1 << desc->msi.cap.multi_msg_use;
+
+        rt_hw_spin_lock(&msi_irq_map_lock.lock);
+
+    _retry:
+        /* Allocate until we get a contiguous run; non-contiguous IRQs are
+         * parked in msi_irq_map and released after the loop */
+        for (int i = 0; i < irq_nr; ++i)
+        {
+            if ((irq = msi_pic->ops->irq_alloc_msi(msi_pic, desc)) < 0)
+            {
+                err = irq;
+
+                LOG_E("Setup %s[%d] IRQ error = %s", "MSI", i, rt_strerror(err));
+
+                break;
+            }
+
+            if (last_irq >= 0 && last_irq + 1 != irq)
+            {
+                /* NOTE(review): loop advances "i", not "idx" — with idx stuck
+                 * at 0 and i growing, "idx < i" never becomes false; looks
+                 * like "++idx" was intended. Verify before relying on the
+                 * non-contiguous retry path. */
+                for (int idx = 0; idx < i; ++i, --last_irq)
+                {
+                    rt_bitmap_set_bit(msi_irq_map, last_irq);
+                }
+
+                last_irq = irq;
+                goto _retry;
+            }
+
+            last_irq = irq;
+        }
+
+        if (!err)
+        {
+            /* Get the first irq */
+            /* NOTE(review): for a block [first .. first+irq_nr-1] with "irq"
+             * holding the last member, first = irq - irq_nr + 1; this looks
+             * off by one — confirm against the PIC allocator's contract. */
+            desc->irq = irq - irq_nr;
+        }
+
+        /* Roll back any parked non-contiguous allocations.
+         * NOTE(review): this iteration reuses and clobbers "irq" (it ends at
+         * MAX_HANDLERS), yet the success path below reads "irq + idx" —
+         * verify it should not use desc->irq instead. */
+        rt_bitmap_for_each_set_bit(msi_irq_map, irq, MAX_HANDLERS)
+        {
+            msi_pic->ops->irq_free_msi(msi_pic, irq);
+
+            /* Free bit so the next user doesn't need to bzero */
+            rt_bitmap_clear_bit(msi_irq_map, irq);
+        }
+
+        rt_hw_spin_unlock(&msi_irq_map_lock.lock);
+
+        if (!err)
+        {
+            /* Bind every vector to the shared descriptor and write the message */
+            for (int idx = 0; idx < nvec; ++idx)
+            {
+                pirq = rt_pic_find_pirq(msi_pic, irq + idx);
+                pirq->msi_desc = desc;
+
+                msi_pic->ops->irq_compose_msi_msg(pirq, &desc->msg);
+
+                rt_pci_msi_write_msg(desc, &desc->msg);
+            }
+        }
+    }
+    else if (type == PCIY_MSIX)
+    {
+        /* MSI-X: one descriptor per vector, each independently allocated */
+        rt_pci_msi_for_each_desc(pdev, desc)
+        {
+            if ((irq = msi_pic->ops->irq_alloc_msi(msi_pic, desc)) < 0)
+            {
+                err = irq;
+
+                LOG_E("Setup %s[%d] IRQ error = %s", "MSI-X",
+                        desc->msix.index, rt_strerror(err));
+
+                break;
+            }
+
+            desc->irq = irq;
+            pirq = rt_pic_find_pirq(msi_pic, irq);
+            pirq->msi_desc = desc;
+
+            msi_pic->ops->irq_compose_msi_msg(pirq, &desc->msg);
+
+            rt_pci_msi_write_msg(desc, &desc->msg);
+
+            ++irq_nr;
+        }
+
+        if (err)
+        {
+            /* Undo the first irq_nr successful allocations */
+            rt_pci_msi_for_each_desc(pdev, desc)
+            {
+                if (index >= irq_nr)
+                {
+                    break;
+                }
+
+                msi_pic->ops->irq_free_msi(msi_pic, desc->irq);
+
+                ++index;
+            }
+        }
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}

+ 949 - 0
components/drivers/pci/msi/msi.c

@@ -0,0 +1,949 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-11-07     GuEe-GUI     first version
+ */
+
+#include <drivers/pci_msi.h>
+#include <drivers/core/numa.h>
+
+#define DBG_TAG "pci.msi"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
+/* PCI has 2048 max IRQs in MSI-X */
+static RT_IRQ_AFFINITY_DECLARE(msi_affinity_default[2048]) rt_section(".bss.noclean.pci.msi");
+
+/* Raw spinlock acquire (no IRQ state change, no scheduler interaction) */
+rt_inline void spin_lock(struct rt_spinlock *lock)
+{
+    rt_hw_spin_lock(&lock->lock);
+}
+
+/* Raw spinlock release, paired with spin_lock() above */
+rt_inline void spin_unlock(struct rt_spinlock *lock)
+{
+    rt_hw_spin_unlock(&lock->lock);
+}
+
+/* Address of this vector's entry inside the mapped MSI-X table */
+rt_inline void *msix_table_base(struct rt_pci_msix_conf *msix)
+{
+    return msix->table_base + msix->index * PCIM_MSIX_ENTRY_SIZE;
+}
+
+/* Address of this vector's Vector Control dword within its table entry */
+rt_inline void *msix_vector_ctrl_base(struct rt_pci_msix_conf *msix)
+{
+    return msix_table_base(msix) + PCIM_MSIX_ENTRY_VECTOR_CTRL;
+}
+
+/* Write the Vector Control dword of this vector's MSI-X table entry */
+rt_inline void msix_write_vector_ctrl(struct rt_pci_msix_conf *msix,
+        rt_uint32_t ctrl)
+{
+    void *vc_addr = msix_vector_ctrl_base(msix);
+
+    HWREG32(vc_addr) = ctrl;
+}
+
+/* Set the per-vector mask bit and flush the posted write with a readback */
+rt_inline void msix_mask(struct rt_pci_msix_conf *msix)
+{
+    msix->msg_ctrl |= PCIM_MSIX_ENTRYVECTOR_CTRL_MASK;
+    msix_write_vector_ctrl(msix, msix->msg_ctrl);
+
+    /* Flush write to device */
+    HWREG32(msix->table_base);
+}
+
+/* Read-modify-write the MSI-X Message Control register: clear the bits in
+ * "clear", then set the bits in "set" */
+static void msix_update_ctrl(struct rt_pci_device *pdev,
+        rt_uint16_t clear, rt_uint16_t set)
+{
+    rt_uint16_t msgctl;
+
+    rt_pci_read_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, &msgctl);
+    msgctl &= ~clear;
+    msgctl |= set;
+    rt_pci_write_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, msgctl);
+}
+
+/* Clear the per-vector mask bit (no readback needed on unmask) */
+rt_inline void msix_unmask(struct rt_pci_msix_conf *msix)
+{
+    msix->msg_ctrl &= ~PCIM_MSIX_ENTRYVECTOR_CTRL_MASK;
+    msix_write_vector_ctrl(msix, msix->msg_ctrl);
+}
+
+/* Bitmask covering every vector this function can use: 2^multi_msg_max bits,
+ * saturating to all-ones since MSI supports at most 32 vectors */
+rt_inline rt_uint32_t msi_multi_mask(struct rt_pci_msi_conf *msi)
+{
+    if (msi->cap.multi_msg_max >= 5)
+    {
+        return 0xffffffff;
+    }
+
+    return (1 << (1 << msi->cap.multi_msg_max)) - 1;
+}
+
+/* Update the cached MSI mask bits (clear then set) and write them to the
+ * Mask Bits register, if the function supports per-vector masking at all.
+ * The device's msi_lock serializes the read-modify-write of msi->mask. */
+static void msi_write_mask(struct rt_pci_msi_conf *msi,
+        rt_uint32_t clear, rt_uint32_t set, struct rt_pci_device *pdev)
+{
+    if (msi->cap.is_masking)
+    {
+        rt_ubase_t level = rt_spin_lock_irqsave(&pdev->msi_lock);
+
+        msi->mask &= ~clear;
+        msi->mask |= set;
+        rt_pci_write_config_u32(pdev, msi->mask_pos, msi->mask);
+
+        rt_spin_unlock_irqrestore(&pdev->msi_lock, level);
+    }
+}
+
+/* Mask the vectors selected by "mask" (sets bits in the Mask Bits register) */
+rt_inline void msi_mask(struct rt_pci_msi_conf *msi,
+        rt_uint32_t mask, struct rt_pci_device *pdev)
+{
+    msi_write_mask(msi, 0, mask, pdev);
+}
+
+/* Unmask the vectors selected by "mask" (clears bits in the Mask Bits register) */
+rt_inline void msi_unmask(struct rt_pci_msi_conf *msi,
+        rt_uint32_t mask, struct rt_pci_device *pdev)
+{
+    msi_write_mask(msi, mask, 0, pdev);
+}
+
+/* Set or clear the MSI Enable bit in the MSI Message Control register */
+static void msi_write_enable(struct rt_pci_device *pdev, rt_bool_t enable)
+{
+    rt_uint16_t ctrl;
+
+    rt_pci_read_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, &ctrl);
+
+    if (enable)
+    {
+        ctrl |= PCIM_MSICTRL_MSI_ENABLE;
+    }
+    else
+    {
+        ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
+    }
+
+    rt_pci_write_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, ctrl);
+}
+
+/**
+ * Record and apply the CPU affinity of one MSI/MSI-X vector.
+ *
+ * When the caller passed the zero-initialized fallback array, the affinity
+ * is derived from the NUMA node owning the message write address instead.
+ *
+ * @param desc      the descriptor owning the vector
+ * @param msi_index vector index inside the descriptor (0 for MSI-X)
+ * @param cpumasks  requested affinity mask for this vector
+ */
+static void msi_affinity_init(struct rt_pci_msi_desc *desc, int msi_index,
+        rt_bitmap_t *cpumasks)
+{
+    int irq;
+    struct rt_pic_irq *pirq;
+    struct rt_pci_device *pdev = desc->pdev;
+    struct rt_pic *msi_pic = pdev->msi_pic;
+
+    /*
+     * MSI-X descriptors hold exactly one vector; plain MSI vectors are
+     * contiguous from desc->irq. The conditional must be parenthesized:
+     * "+" binds tighter than "?:", so the unparenthesized form computed
+     * "(desc->irq + desc->is_msix) ? 0 : msi_index".
+     */
+    irq = desc->irq + (desc->is_msix ? 0 : msi_index);
+    pirq = rt_pic_find_pirq(msi_pic, irq);
+
+    /* Save affinity */
+    if (desc->is_msix)
+    {
+        desc->affinity = pirq->affinity;
+    }
+    else
+    {
+        desc->affinities[msi_index] = pirq->affinity;
+    }
+
+    /* ">=" so the first element of the fallback array is recognized too */
+    if ((void *)cpumasks >= (void *)msi_affinity_default &&
+        (void *)cpumasks < (void *)msi_affinity_default + sizeof(msi_affinity_default))
+    {
+        rt_uint64_t data_address;
+
+        /* Get MSI/MSI-X write data address */
+        data_address = desc->msg.address_hi;
+        data_address <<= 32;
+        data_address |= desc->msg.address_lo;
+
+        /* Prepare affinity from the NUMA node owning the doorbell address */
+        cpumasks = pirq->affinity;
+
+        rt_numa_memory_affinity(data_address, cpumasks);
+    }
+    else if (rt_bitmap_next_set_bit(cpumasks, 0, RT_CPUS_NR) >= RT_CPUS_NR)
+    {
+        /* No affinity info found, give up */
+        return;
+    }
+
+    if (!rt_pic_irq_set_affinity(irq, cpumasks))
+    {
+        if (msi_pic->ops->irq_write_msi_msg)
+        {
+            msi_pic->ops->irq_write_msi_msg(pirq, &desc->msg);
+        }
+    }
+}
+
+/**
+ * Disable MSI on a device and fall back to legacy INTx signalling.
+ *
+ * @param pdev the PCI device (ignored when RT_NULL)
+ */
+void rt_pci_msi_shutdown(struct rt_pci_device *pdev)
+{
+    struct rt_pci_msi_desc *desc;
+
+    if (!pdev)
+    {
+        return;
+    }
+
+    msi_write_enable(pdev, RT_FALSE);
+    rt_pci_intx(pdev, RT_TRUE);
+
+    /* desc may be RT_NULL if no descriptor was ever set up; only touch it
+     * inside the guard (the original dereferenced it unconditionally) */
+    if ((desc = rt_pci_msi_first_desc(pdev)))
+    {
+        msi_unmask(&desc->msi, msi_multi_mask(&desc->msi), pdev);
+
+        /* Restore pdev->irq to its default pin-assertion IRQ */
+        pdev->irq = desc->msi.default_irq;
+    }
+
+    pdev->msi_enabled = RT_FALSE;
+}
+
+/**
+ * Disable MSI-X on a device and fall back to legacy INTx signalling.
+ *
+ * @param pdev the PCI device (ignored when RT_NULL)
+ */
+void rt_pci_msix_shutdown(struct rt_pci_device *pdev)
+{
+    struct rt_pci_msi_desc *entry;
+
+    if (!pdev)
+    {
+        return;
+    }
+
+    /* Mask every vector before turning the function off */
+    rt_pci_msi_for_each_desc(pdev, entry)
+    {
+        msix_mask(&entry->msix);
+    }
+
+    /* Clear the MSI-X Enable bit in Message Control */
+    msix_update_ctrl(pdev, PCIM_MSIXCTRL_MSIX_ENABLE, 0);
+
+    rt_pci_intx(pdev, RT_TRUE);
+    pdev->msix_enabled = RT_FALSE;
+}
+
+/**
+ * Release all MSI/MSI-X descriptors of a device and unmap its MSI-X table.
+ *
+ * Frees each node one step behind the iterator because the iteration macro
+ * is not deletion-safe (removing the current node would break traversal).
+ *
+ * @param pdev the PCI device (ignored when RT_NULL)
+ */
+void rt_pci_msi_free_irqs(struct rt_pci_device *pdev)
+{
+    struct rt_pci_msi_desc *desc, *last_desc = RT_NULL;
+
+    if (!pdev)
+    {
+        return;
+    }
+
+    if (pdev->msix_base)
+    {
+        rt_iounmap(pdev->msix_base);
+        pdev->msix_base = RT_NULL;
+    }
+
+    rt_pci_msi_for_each_desc(pdev, desc)
+    {
+        /* To safety */
+        if (last_desc)
+        {
+            rt_list_remove(&last_desc->list);
+            rt_free(last_desc);
+        }
+        last_desc = desc;
+    }
+
+    /* The last one */
+    if (last_desc)
+    {
+        rt_list_remove(&last_desc->list);
+        rt_free(last_desc);
+    }
+}
+
+/**
+ * Program an MSI/MSI-X message (address + data) into the device.
+ *
+ * MSI-X writes go to the memory-mapped table entry (masked around the
+ * update if the vector was unmasked); plain MSI writes go to config space.
+ * The message is cached in the descriptor and an optional per-descriptor
+ * callback is notified afterwards.
+ *
+ * @param desc the vector descriptor to program
+ * @param msg  the message to write
+ */
+void rt_pci_msi_write_msg(struct rt_pci_msi_desc *desc, struct rt_pci_msi_msg *msg)
+{
+    struct rt_pci_device *pdev = desc->pdev;
+
+    if (desc->is_msix)
+    {
+        void *msix_entry;
+        rt_bool_t unmasked;
+        rt_uint32_t msgctl;
+        struct rt_pci_msix_conf *msix = &desc->msix;
+
+        msgctl = msix->msg_ctrl;
+        unmasked = !(msgctl & PCIM_MSIX_ENTRYVECTOR_CTRL_MASK);
+        msix_entry = msix_table_base(msix);
+
+        /* Mask the vector while the 96-bit message is updated non-atomically */
+        if (unmasked)
+        {
+            msix_write_vector_ctrl(msix, msgctl | PCIM_MSIX_ENTRYVECTOR_CTRL_MASK);
+        }
+
+        HWREG32(msix_entry + PCIM_MSIX_ENTRY_LOWER_ADDR) = msg->address_lo;
+        HWREG32(msix_entry + PCIM_MSIX_ENTRY_UPPER_ADDR) = msg->address_hi;
+        HWREG32(msix_entry + PCIM_MSIX_ENTRY_DATA) = msg->data;
+
+        if (unmasked)
+        {
+            msix_write_vector_ctrl(msix, msgctl);
+        }
+
+        /* Ensure that the writes are visible in the device */
+        HWREG32(msix_entry + PCIM_MSIX_ENTRY_DATA);
+    }
+    else
+    {
+        rt_uint16_t msgctl;
+        int pos = pdev->msi_cap;
+        struct rt_pci_msi_conf *msi = &desc->msi;
+
+        /* Update the Multiple Message Enable field to the vectors in use */
+        rt_pci_read_config_u16(pdev, pos + PCIR_MSI_CTRL, &msgctl);
+        msgctl &= ~PCIM_MSICTRL_MME_MASK;
+        msgctl |= msi->cap.multi_msg_use << PCIM_MSICTRL_MME_SHIFT;
+        rt_pci_write_config_u16(pdev, pos + PCIR_MSI_CTRL, msgctl);
+
+        rt_pci_write_config_u32(pdev, pos + PCIR_MSI_ADDR, msg->address_lo);
+
+        /*
+         * The value stored in this field is related to the processor system,
+         * the processor will initialize this field
+         * when the PCIe device is initialized, and the rules for filling
+         * in this field are not the same for different processors.
+         * If the Multiple Message Enable field is not 0b000 (multiple IRQs),
+         * the PCIe device can send different interrupt requests
+         * by changing the low data in the Message Data field
+         */
+        if (msi->cap.is_64bit)
+        {
+            rt_pci_write_config_u32(pdev, pos + PCIR_MSI_ADDR_HIGH, msg->address_hi);
+            rt_pci_write_config_u16(pdev, pos + PCIR_MSI_DATA_64BIT, msg->data);
+        }
+        else
+        {
+            rt_pci_write_config_u16(pdev, pos + PCIR_MSI_DATA, msg->data);
+        }
+
+        /* Ensure that the writes are visible in the device */
+        rt_pci_read_config_u16(pdev, pos + PCIR_MSI_CTRL, &msgctl);
+    }
+
+    /* Cache the message so later mask/affinity updates can replay it */
+    desc->msg = *msg;
+
+    if (desc->write_msi_msg)
+    {
+        desc->write_msi_msg(desc, desc->write_msi_msg_data);
+    }
+}
+
+/**
+ * Mask the MSI/MSI-X vector behind a PIC IRQ, if one is attached.
+ *
+ * @param pirq the PIC IRQ descriptor (ignored when RT_NULL or not MSI-backed)
+ */
+void rt_pci_msi_mask_irq(struct rt_pic_irq *pirq)
+{
+    struct rt_pci_msi_desc *desc = pirq ? pirq->msi_desc : RT_NULL;
+
+    if (!desc)
+    {
+        return;
+    }
+
+    if (desc->is_msix)
+    {
+        msix_mask(&desc->msix);
+    }
+    else
+    {
+        /* For multi-vector MSI, select the bit of this particular vector */
+        msi_mask(&desc->msi, RT_BIT(pirq->irq - desc->irq), desc->pdev);
+    }
+}
+
+/**
+ * Unmask the MSI/MSI-X vector behind a PIC IRQ, if one is attached.
+ *
+ * @param pirq the PIC IRQ descriptor (ignored when RT_NULL or not MSI-backed)
+ */
+void rt_pci_msi_unmask_irq(struct rt_pic_irq *pirq)
+{
+    struct rt_pci_msi_desc *desc = pirq ? pirq->msi_desc : RT_NULL;
+
+    if (!desc)
+    {
+        return;
+    }
+
+    if (desc->is_msix)
+    {
+        msix_unmask(&desc->msix);
+    }
+    else
+    {
+        /* For multi-vector MSI, select the bit of this particular vector */
+        msi_unmask(&desc->msi, RT_BIT(pirq->irq - desc->irq), desc->pdev);
+    }
+}
+
+/**
+ * Allocate interrupt vectors for a device, trying MSI-X, then MSI, then
+ * legacy INTx, according to the flags.
+ *
+ * @param pdev       the PCI device
+ * @param min        minimum acceptable number of vectors
+ * @param max        maximum desired number of vectors
+ * @param flags      RT_PCI_IRQ_F_* selection (and optional AFFINITY) bits
+ * @param affinities optional per-vector affinity array; the zeroed default
+ *                   array is substituted when AFFINITY is requested without one
+ *
+ * @return number of vectors allocated on success, negative error code otherwise
+ */
+rt_ssize_t rt_pci_alloc_vector(struct rt_pci_device *pdev, int min, int max,
+        rt_uint32_t flags, RT_IRQ_AFFINITY_DECLARE((*affinities)))
+{
+    rt_ssize_t res = -RT_ENOSYS;
+
+    if (!pdev || min > max)
+    {
+        return -RT_EINVAL;
+    }
+
+    if (flags & RT_PCI_IRQ_F_AFFINITY)
+    {
+        if (!affinities)
+        {
+            /* Marker array: msi_affinity_init derives affinity from NUMA */
+            affinities = msi_affinity_default;
+        }
+    }
+    else
+    {
+        affinities = RT_NULL;
+    }
+
+    /* Preference order: MSI-X, then MSI, then the shared INTx pin */
+    if (flags & RT_PCI_IRQ_F_MSIX)
+    {
+        res = rt_pci_msix_enable_range_affinity(pdev, RT_NULL, min, max, affinities);
+
+        if (res > 0)
+        {
+            return res;
+        }
+    }
+
+    if (flags & RT_PCI_IRQ_F_MSI)
+    {
+        res = rt_pci_msi_enable_range_affinity(pdev, min, max, affinities);
+
+        if (res > 0)
+        {
+            return res;
+        }
+    }
+
+    if (flags & RT_PCI_IRQ_F_LEGACY)
+    {
+        /* Legacy INTx provides exactly one vector */
+        if (min == 1 && pdev->irq >= 0)
+        {
+            if (affinities)
+            {
+                int cpuid;
+                RT_IRQ_AFFINITY_DECLARE(old_affinity);
+
+                /* INTx is shared, we should update it */
+                rt_pic_irq_get_affinity(pdev->irq, old_affinity);
+
+                rt_bitmap_for_each_set_bit(affinities[0], cpuid, RT_CPUS_NR)
+                {
+                    RT_IRQ_AFFINITY_SET(old_affinity, cpuid);
+                }
+
+                rt_pic_irq_set_affinity(pdev->irq, old_affinity);
+            }
+
+            rt_pci_intx(pdev, RT_TRUE);
+
+            return min;
+        }
+    }
+
+    return res;
+}
+
+/**
+ * Release every interrupt vector of a device (MSI, MSI-X and INTx).
+ *
+ * @param pdev the PCI device (ignored when RT_NULL)
+ */
+void rt_pci_free_vector(struct rt_pci_device *pdev)
+{
+    if (pdev)
+    {
+        rt_pci_msi_disable(pdev);
+        rt_pci_msix_disable(pdev);
+        rt_pci_irq_mask(pdev);
+    }
+}
+
+/**
+ * Verify that no 64-bit MSI address was assigned to a device that only
+ * supports 32-bit message addresses.
+ *
+ * @param pdev the PCI device whose descriptors are checked
+ *
+ * @return RT_EOK when all messages are acceptable, -RT_EIO otherwise
+ */
+static rt_err_t msi_verify_entries(struct rt_pci_device *pdev)
+{
+    if (pdev->no_64bit_msi)
+    {
+        struct rt_pci_msi_desc *desc;
+
+        rt_pci_msi_for_each_desc(pdev, desc)
+        {
+            if (desc->msg.address_hi)
+            {
+                /* Trailing space keeps the two string pieces from fusing
+                 * into "...%08x%08xbut device..." */
+                LOG_D("%s: Arch assigned 64-bit MSI address %08x%08x "
+                        "but device only supports 32 bits",
+                        rt_dm_dev_get_name(&pdev->parent),
+                        desc->msg.address_hi, desc->msg.address_lo);
+
+                return -RT_EIO;
+            }
+        }
+    }
+
+    return RT_EOK;
+}
+
+/**
+ * Clone an on-stack descriptor template into the heap and append it to the
+ * device's descriptor list.
+ *
+ * For plain MSI the per-vector affinity pointers are stored in extra space
+ * allocated right behind the descriptor (32 = maximum MSI vectors allowed
+ * by the PCI specification).
+ *
+ * @param pdev      the owning PCI device
+ * @param init_desc template to copy
+ *
+ * @return RT_EOK on success, -RT_ENOMEM when allocation fails
+ */
+static rt_err_t msi_insert_desc(struct rt_pci_device *pdev,
+        struct rt_pci_msi_desc *init_desc)
+{
+    rt_size_t msi_affinity_ptr_size = 0;
+    struct rt_pci_msi_desc *msi_desc;
+
+    if (!init_desc->is_msix)
+    {
+        msi_affinity_ptr_size += sizeof(msi_desc->affinities[0]) * 32;
+    }
+
+    msi_desc = rt_calloc(1, sizeof(*msi_desc) + msi_affinity_ptr_size);
+
+    if (!msi_desc)
+    {
+        return -RT_ENOMEM;
+    }
+
+    rt_memcpy(msi_desc, init_desc, sizeof(*msi_desc));
+
+    if (!init_desc->is_msix)
+    {
+        /* Point at the extra space allocated behind the descriptor */
+        msi_desc->affinities = (void *)msi_desc + sizeof(*msi_desc);
+    }
+
+    msi_desc->pdev = pdev;
+    rt_list_init(&msi_desc->list);
+    rt_list_insert_before(&pdev->msi_desc_nodes, &msi_desc->list);
+
+    return RT_EOK;
+}
+
+/**
+ * Number of MSI vectors a device advertises (Multiple Message Capable).
+ *
+ * @param pdev the PCI device
+ *
+ * @return vector count (a power of two), or -RT_EINVAL without an MSI cap
+ */
+rt_ssize_t rt_pci_msi_vector_count(struct rt_pci_device *pdev)
+{
+    rt_uint16_t ctrl;
+
+    if (!pdev || !pdev->msi_cap)
+    {
+        return -RT_EINVAL;
+    }
+
+    rt_pci_read_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, &ctrl);
+
+    /* The MMC field encodes log2 of the vector count */
+    return 1 << ((ctrl & PCIM_MSICTRL_MMC_MASK) >> 1);
+}
+
+/**
+ * Tear down MSI on a device: shut it off and free every descriptor/IRQ.
+ *
+ * @param pdev the PCI device
+ *
+ * @return RT_EOK on success, -RT_EINVAL if MSI was not enabled
+ */
+rt_err_t rt_pci_msi_disable(struct rt_pci_device *pdev)
+{
+    if (!pdev || !pdev->msi_enabled)
+    {
+        return -RT_EINVAL;
+    }
+
+    spin_lock(&pdev->msi_lock);
+
+    rt_pci_msi_shutdown(pdev);
+    rt_pci_msi_free_irqs(pdev);
+
+    spin_unlock(&pdev->msi_lock);
+
+    return RT_EOK;
+}
+
+/**
+ * Build the single MSI descriptor of a device from its capability registers
+ * and insert it into the descriptor list.
+ *
+ * @param pdev the PCI device
+ * @param nvec number of vectors to use (rounded up to a power of two)
+ *
+ * @return RT_EOK on success, -RT_ENOMEM when allocation fails
+ */
+static rt_err_t msi_setup_msi_desc(struct rt_pci_device *pdev, int nvec)
+{
+    rt_uint16_t msgctl;
+    struct rt_pci_msi_desc desc;
+
+    rt_memset(&desc, 0, sizeof(desc));
+
+    desc.vector_used = nvec;
+    desc.vector_count = rt_pci_msi_vector_count(pdev);
+    desc.is_msix = RT_FALSE;
+
+    rt_pci_read_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, &msgctl);
+
+    desc.msi.cap.is_64bit = !!(msgctl & PCIM_MSICTRL_64BIT);
+    desc.msi.cap.is_masking = !!(msgctl & PCIM_MSICTRL_VECTOR);
+    desc.msi.cap.multi_msg_max = (msgctl & PCIM_MSICTRL_MMC_MASK) >> 1;
+
+    /* Smallest power of two (as log2) that covers nvec; MSI max is 32 = 2^5 */
+    for (int log2 = 0; log2 < 5; ++log2)
+    {
+        if (nvec <= (1 << log2))
+        {
+            desc.msi.cap.multi_msg_use = log2;
+            break;
+        }
+    }
+
+    /* The Mask Bits register position depends on the address width */
+    if (desc.msi.cap.is_64bit)
+    {
+        desc.msi.mask_pos = pdev->msi_cap + PCIR_MSI_MASK_64BIT;
+    }
+    else
+    {
+        desc.msi.mask_pos = pdev->msi_cap + PCIR_MSI_MASK;
+    }
+
+    /* Save pdev->irq for its default pin-assertion IRQ */
+    desc.msi.default_irq = pdev->irq;
+
+    if (desc.msi.cap.is_masking)
+    {
+        /* Get the old mask status */
+        rt_pci_read_config_u32(pdev, desc.msi.mask_pos, &desc.msi.mask);
+    }
+
+    return msi_insert_desc(pdev, &desc);
+}
+
+/**
+ * Full MSI bring-up: build the descriptor, allocate vectors, apply
+ * affinities, then switch the device from INTx to MSI.
+ *
+ * @param pdev       the PCI device
+ * @param nvec       number of vectors to configure
+ * @param affinities optional per-vector affinity array
+ *
+ * @return nvec on success, negative error code otherwise
+ */
+static rt_ssize_t msi_capability_init(struct rt_pci_device *pdev,
+        int nvec, RT_IRQ_AFFINITY_DECLARE((*affinities)))
+{
+    rt_err_t err;
+    struct rt_pci_msi_desc *desc;
+
+    /* Keep MSI off while the message registers are being set up */
+    msi_write_enable(pdev, RT_FALSE);
+
+    spin_lock(&pdev->msi_lock);
+
+    if (!(err = msi_setup_msi_desc(pdev, nvec)))
+    {
+        /* All MSIs are unmasked by default; mask them all */
+        desc = rt_pci_msi_first_desc(pdev);
+        msi_mask(&desc->msi, msi_multi_mask(&desc->msi), pdev);
+
+        if (!(err = rt_pci_msi_setup_irqs(pdev, nvec, PCIY_MSI)))
+        {
+            err = msi_verify_entries(pdev);
+        }
+
+        if (err)
+        {
+            /* Undo the mask applied above before bailing out */
+            msi_unmask(&desc->msi, msi_multi_mask(&desc->msi), pdev);
+        }
+    }
+
+    spin_unlock(&pdev->msi_lock);
+
+    if (err)
+    {
+        rt_pci_msi_free_irqs(pdev);
+
+        LOG_E("%s: Setup %s interrupts(%d) error = %s",
+                rt_dm_dev_get_name(&pdev->parent), "MSI", nvec, rt_strerror(err));
+
+        return err;
+    }
+
+    /* "desc" is valid here: err == 0 implies msi_setup_msi_desc() succeeded */
+    if (affinities)
+    {
+        for (int idx = 0; idx < nvec; ++idx)
+        {
+            msi_affinity_init(desc, idx, affinities[idx]);
+        }
+    }
+
+    /* Disable INTX */
+    rt_pci_intx(pdev, RT_FALSE);
+
+    /* Set MSI enabled bits */
+    msi_write_enable(pdev, RT_TRUE);
+
+    pdev->irq = desc->irq;
+
+    pdev->msi_enabled = RT_TRUE;
+
+    return nvec;
+}
+
+/**
+ * Enable between min and max MSI vectors on a device, with optional
+ * per-vector CPU affinities.
+ *
+ * @param pdev       the PCI device
+ * @param min        minimum acceptable number of vectors
+ * @param max        maximum desired number of vectors
+ * @param affinities optional per-vector affinity array
+ *
+ * @return number of vectors enabled, or a negative error code
+ */
+rt_ssize_t rt_pci_msi_enable_range_affinity(struct rt_pci_device *pdev,
+        int min, int max, RT_IRQ_AFFINITY_DECLARE((*affinities)))
+{
+    int nvec = max;
+    /* Must be signed: rt_pci_msi_vector_count() returns negative errors,
+     * and "entries_nr < 0" is always false on an unsigned type */
+    rt_ssize_t entries_nr;
+
+    if (!pdev || min > max)
+    {
+        return -RT_EINVAL;
+    }
+
+    if (pdev->no_msi)
+    {
+        return -RT_ENOSYS;
+    }
+
+    if (!pdev->msi_pic)
+    {
+        return -RT_ENOSYS;
+    }
+
+    if (pdev->msi_enabled)
+    {
+        LOG_W("%s: MSI is enabled", rt_dm_dev_get_name(&pdev->parent));
+
+        return -RT_EINVAL;
+    }
+
+    entries_nr = rt_pci_msi_vector_count(pdev);
+
+    if (entries_nr < 0)
+    {
+        return entries_nr;
+    }
+
+    if (nvec > entries_nr)
+    {
+        return -RT_EEMPTY;
+    }
+
+    return msi_capability_init(pdev, nvec, affinities);
+}
+
+/**
+ * Number of MSI-X table entries a device advertises.
+ *
+ * @param pdev the PCI device
+ *
+ * @return table size, or -RT_EINVAL without an MSI-X capability
+ */
+rt_ssize_t rt_pci_msix_vector_count(struct rt_pci_device *pdev)
+{
+    rt_uint16_t ctrl;
+
+    if (!pdev || !pdev->msix_cap)
+    {
+        return -RT_EINVAL;
+    }
+
+    rt_pci_read_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, &ctrl);
+
+    return rt_pci_msix_table_size(ctrl);
+}
+
+/**
+ * Tear down MSI-X on a device: shut it off and free every descriptor/IRQ.
+ *
+ * @param pdev the PCI device
+ *
+ * @return RT_EOK on success, -RT_EINVAL if MSI-X was not enabled
+ */
+rt_err_t rt_pci_msix_disable(struct rt_pci_device *pdev)
+{
+    if (!pdev || !pdev->msix_enabled)
+    {
+        return -RT_EINVAL;
+    }
+
+    spin_lock(&pdev->msi_lock);
+
+    rt_pci_msix_shutdown(pdev);
+    rt_pci_msi_free_irqs(pdev);
+
+    spin_unlock(&pdev->msi_lock);
+
+    return RT_EOK;
+}
+
+/**
+ * Map the device's MSI-X table into kernel virtual address space.
+ *
+ * The Table Offset/BIR register encodes which BAR holds the table (low
+ * bits) and the offset within that BAR (remaining bits).
+ *
+ * @param pdev       the PCI device
+ * @param entries_nr number of table entries to map
+ *
+ * @return virtual address of the table, or RT_NULL when the BAR is invalid
+ */
+static void *msix_table_remap(struct rt_pci_device *pdev, rt_size_t entries_nr)
+{
+    rt_uint8_t bir;
+    rt_uint32_t table_offset;
+    rt_ubase_t table_base_phys;
+
+    rt_pci_read_config_u32(pdev, pdev->msix_cap + PCIR_MSIX_TABLE, &table_offset);
+    bir = (rt_uint8_t)(table_offset & PCIM_MSIX_BIR_MASK);
+
+    if (pdev->resource[bir].flags & PCI_BUS_REGION_F_NONE)
+    {
+        LOG_E("%s: BAR[bir = %d] is invalid", rt_dm_dev_get_name(&pdev->parent), bir);
+
+        return RT_NULL;
+    }
+
+    table_base_phys = pdev->resource[bir].base + (table_offset & ~PCIM_MSIX_BIR_MASK);
+
+    return rt_ioremap((void *)table_base_phys, entries_nr * PCIM_MSIX_ENTRY_SIZE);
+}
+
+/**
+ * Create one MSI-X descriptor per requested vector and link each to its
+ * entry in the mapped table.
+ *
+ * @param pdev       the PCI device
+ * @param table_base mapped MSI-X table
+ * @param entries    optional caller-chosen table indices (RT_NULL = 0..nvec-1)
+ * @param nvec       number of vectors
+ *
+ * @return RT_EOK on success, negative error code otherwise
+ */
+static rt_err_t msix_setup_msi_descs(struct rt_pci_device *pdev,
+        void *table_base, struct rt_pci_msix_entry *entries, int nvec)
+{
+    /* Initialize: with nvec <= 0 the loop never runs and "err" would
+     * otherwise be returned uninitialized */
+    rt_err_t err = RT_EOK;
+    struct rt_pci_msi_desc desc;
+
+    rt_memset(&desc, 0, sizeof(desc));
+
+    desc.vector_used = 1;
+    desc.vector_count = rt_pci_msix_vector_count(pdev);
+
+    desc.is_msix = RT_TRUE;
+    desc.msix.table_base = table_base;
+
+    for (int i = 0; i < nvec; ++i)
+    {
+        void *table_entry;
+        int index = entries ? entries[i].index : i;
+
+        desc.msix.index = index;
+        table_entry = msix_table_base(&desc.msix);
+
+        /* Capture the current Vector Control value (mask state) */
+        desc.msix.msg_ctrl = HWREG32(table_entry + PCIM_MSIX_ENTRY_VECTOR_CTRL);
+
+        if ((err = msi_insert_desc(pdev, &desc)))
+        {
+            break;
+        }
+    }
+
+    return err;
+}
+
+/**
+ * Full MSI-X bring-up: map the table, build descriptors, allocate vectors,
+ * apply affinities, then switch the device from INTx to MSI-X.
+ *
+ * @param pdev       the PCI device
+ * @param entries    caller's vector list; each entry's irq field is filled in.
+ *                   NOTE(review): assumed non-NULL here — the public caller
+ *                   returns early when entries is RT_NULL; keep it that way.
+ * @param nvec       number of vectors to configure
+ * @param affinities optional per-vector affinity array
+ *
+ * @return nvec on success, negative error code otherwise
+ */
+static rt_ssize_t msix_capability_init(struct rt_pci_device *pdev,
+        struct rt_pci_msix_entry *entries, int nvec,
+        RT_IRQ_AFFINITY_DECLARE((*affinities)))
+{
+    rt_err_t err;
+    rt_uint16_t msgctl;
+    rt_size_t table_size;
+    void *table_base, *table_entry;
+    struct rt_pci_msi_desc *desc;
+    struct rt_pci_msix_entry *entry;
+
+    /*
+     * Some devices require MSI-X to be enabled before the MSI-X
+     * registers can be accessed.
+     * Mask all the vectors to prevent interrupts coming in before
+     * they're fully set up.
+     */
+    msix_update_ctrl(pdev, 0, PCIM_MSIXCTRL_FUNCTION_MASK | PCIM_MSIXCTRL_MSIX_ENABLE);
+
+    rt_pci_read_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, &msgctl);
+    /* Request & Map MSI-X table region */
+    table_size = rt_pci_msix_table_size(msgctl);
+    table_base = msix_table_remap(pdev, table_size);
+
+    if (!table_base)
+    {
+        LOG_E("%s: Remap MSI-X table fail", rt_dm_dev_get_name(&pdev->parent));
+
+        err = -RT_ENOMEM;
+        goto _out_disbale_msix;
+    }
+
+    pdev->msix_base = table_base;
+
+    spin_lock(&pdev->msi_lock);
+
+    if (!(err = msix_setup_msi_descs(pdev, table_base, entries, nvec)))
+    {
+        if (!(err = rt_pci_msi_setup_irqs(pdev, nvec, PCIY_MSIX)))
+        {
+            /* Check if all MSI entries honor device restrictions */
+            err = msi_verify_entries(pdev);
+        }
+    }
+
+    spin_unlock(&pdev->msi_lock);
+
+    if (err)
+    {
+        rt_pci_msi_free_irqs(pdev);
+
+        LOG_E("%s: Setup %s interrupts(%d) error = %s",
+                rt_dm_dev_get_name(&pdev->parent), "MSI-X", nvec, rt_strerror(err));
+
+        goto _out_disbale_msix;
+    }
+
+    /* Report each vector's IRQ back to the caller; descriptors were inserted
+     * in the same order as "entries" */
+    entry = entries;
+    rt_pci_msi_for_each_desc(pdev, desc)
+    {
+        if (affinities)
+        {
+            msi_affinity_init(desc, desc->msix.index, affinities[entry->index]);
+        }
+
+        entry->irq = desc->irq;
+        ++entry;
+    }
+
+    /* Disable INTX */
+    rt_pci_intx(pdev, RT_FALSE);
+
+    /* Mask all table entries */
+    table_entry = table_base;
+    for (int i = 0; i < table_size; ++i, table_entry += PCIM_MSIX_ENTRY_SIZE)
+    {
+        HWREG32(table_entry + PCIM_MSIX_ENTRY_VECTOR_CTRL) = PCIM_MSIX_ENTRYVECTOR_CTRL_MASK;
+    }
+    msix_update_ctrl(pdev, PCIM_MSIXCTRL_FUNCTION_MASK, 0);
+
+    pdev->msix_enabled = RT_TRUE;
+
+    return nvec;
+
+_out_disbale_msix:
+    msix_update_ctrl(pdev, PCIM_MSIXCTRL_FUNCTION_MASK | PCIM_MSIXCTRL_MSIX_ENABLE, 0);
+
+    return err;
+}
+
+/**
+ * Enable between min and max MSI-X vectors on a device, with optional
+ * per-vector CPU affinities.
+ *
+ * @param pdev       the PCI device
+ * @param entries    caller's vector list (validated for range/duplicates);
+ *                   RT_NULL returns 0 so callers can fall back to MSI
+ * @param min        minimum acceptable number of vectors
+ * @param max        maximum desired number of vectors
+ * @param affinities optional per-vector affinity array
+ *
+ * @return number of vectors enabled, 0 without entries, or a negative error
+ */
+rt_ssize_t rt_pci_msix_enable_range_affinity(struct rt_pci_device *pdev,
+        struct rt_pci_msix_entry *entries, int min, int max,
+        RT_IRQ_AFFINITY_DECLARE((*affinities)))
+{
+    int nvec = max;
+    /* Must be signed: rt_pci_msix_vector_count() returns negative errors,
+     * and "entries_nr < 0" is always false on an unsigned type */
+    rt_ssize_t entries_nr;
+
+    if (!pdev || min > max)
+    {
+        return -RT_EINVAL;
+    }
+
+    if (pdev->no_msi)
+    {
+        return -RT_ENOSYS;
+    }
+
+    if (!pdev->msi_pic)
+    {
+        return -RT_ENOSYS;
+    }
+
+    if (pdev->msix_enabled)
+    {
+        LOG_W("%s: MSI-X is enabled", rt_dm_dev_get_name(&pdev->parent));
+
+        return -RT_EINVAL;
+    }
+
+    entries_nr = rt_pci_msix_vector_count(pdev);
+
+    if (entries_nr < 0)
+    {
+        return entries_nr;
+    }
+
+    if (nvec > entries_nr)
+    {
+        return -RT_EEMPTY;
+    }
+
+    if (!entries)
+    {
+        return 0;
+    }
+
+    /* Check if entries is valid */
+    for (int i = 0; i < nvec; ++i)
+    {
+        struct rt_pci_msix_entry *target = &entries[i];
+
+        if (target->index >= entries_nr)
+        {
+            return -RT_EINVAL;
+        }
+
+        for (int j = i + 1; j < nvec; ++j)
+        {
+            /* Check duplicate */
+            if (target->index == entries[j].index)
+            {
+                LOG_E("%s: msix entry[%d].index = entry[%d].index",
+                        rt_dm_dev_get_name(&pdev->parent), i, j);
+
+                return -RT_EINVAL;
+            }
+        }
+    }
+
+    return msix_capability_init(pdev, entries, nvec, affinities);
+}

+ 621 - 0
components/drivers/pci/ofw.c

@@ -0,0 +1,621 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-10-24     GuEe-GUI     first version
+ */
+
+#include <rthw.h>
+#include <rtthread.h>
+
+#define DBG_TAG "pci.ofw"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
+#include <drivers/pci.h>
+#include <drivers/ofw.h>
+#include <drivers/ofw_io.h>
+#include <drivers/ofw_irq.h>
+#include <drivers/ofw_fdt.h>
+
+/*
+ * Resolve a PCI device's legacy INTx interrupt through the devicetree.
+ *
+ * Resolution order (mirrors the standard OF PCI interrupt binding):
+ *  1. an "interrupts" specifier on the device's own DT node, if any;
+ *  2. a local "interrupt-map" on the device node;
+ *  3. walk up through P2P bridges -- swizzling the pin at each hop --
+ *     until a node able to map the interrupt (ultimately the host
+ *     bridge) is found, then apply its "interrupt-map".
+ *
+ * @return RT_EOK on success (out_irq filled), -RT_ENOSYS when the device
+ *         has no INTx pin, other negative values on parse errors.
+ */
+static rt_err_t pci_ofw_irq_parse(struct rt_pci_device *pdev, struct rt_ofw_cell_args *out_irq)
+{
+    rt_err_t err = RT_EOK;
+    rt_uint8_t pin;
+    fdt32_t map_addr[4];
+    struct rt_pci_device *p2pdev;
+    struct rt_ofw_node *dev_np, *p2pnode = RT_NULL;
+
+    /* Parse device tree if dev have a device node */
+    dev_np = pdev->parent.ofw_node;
+
+    if (dev_np)
+    {
+        err = rt_ofw_parse_irq_cells(dev_np, 0, out_irq);
+
+        /*
+         * BUGFIX: the check was inverted (`if (err) return err;`), which
+         * aborted the lookup for every device node lacking an "interrupts"
+         * property and fell through (discarding the result) when one was
+         * found. Return success when the node supplies its own specifier.
+         */
+        if (!err)
+        {
+            return RT_EOK;
+        }
+    }
+
+    /* Assume #interrupt-cells is 1 */
+    if ((err = rt_pci_read_config_u8(pdev, PCIR_INTPIN, &pin)))
+    {
+        goto _err;
+    }
+
+    /* No pin, exit with no error message. */
+    if (pin == 0)
+    {
+        return -RT_ENOSYS;
+    }
+
+    /* Try local interrupt-map in the device node */
+    if (rt_ofw_prop_read_raw(dev_np, "interrupt-map", RT_NULL))
+    {
+        pin = rt_pci_irq_intx(pdev, pin);
+        p2pnode = dev_np;
+    }
+
+    /* Walk up the PCI tree */
+    while (!p2pnode)
+    {
+        p2pdev = pdev->bus->self;
+
+        /* Is the root bus -> host bridge */
+        if (rt_pci_is_root_bus(pdev->bus))
+        {
+            struct rt_pci_host_bridge *host_bridge = pdev->bus->host_bridge;
+
+            p2pnode = host_bridge->parent.ofw_node;
+
+            if (!p2pnode)
+            {
+                err = -RT_EINVAL;
+
+                goto _err;
+            }
+        }
+        else
+        {
+            /* Is P2P bridge */
+            p2pnode = p2pdev->parent.ofw_node;
+        }
+
+        if (p2pnode)
+        {
+            break;
+        }
+
+        /* The bridge has no DT node: swizzle the pin and move one level up */
+        pin = rt_pci_irq_intx(pdev, pin);
+        pdev = p2pdev;
+    }
+
+    /* For more format detail, please read `components/drivers/ofw/irq.c:ofw_parse_irq_map` */
+    out_irq->data = map_addr;
+    out_irq->args_count = 2;
+    out_irq->args[0] = 3;
+    out_irq->args[1] = 1;
+
+    /* In addr cells */
+    map_addr[0] = cpu_to_fdt32((pdev->bus->number << 16) | (pdev->devfn << 8));
+    map_addr[1] = cpu_to_fdt32(0);
+    map_addr[2] = cpu_to_fdt32(0);
+    /* In pin cells */
+    map_addr[3] = cpu_to_fdt32(pin);
+
+    /*
+     * NOTE(review): map_addr is stack-local; rt_ofw_parse_irq_map() is
+     * expected to rewrite out_irq->data to DT-owned memory before this
+     * function returns -- confirm against the OFW implementation.
+     */
+    err = rt_ofw_parse_irq_map(p2pnode, out_irq);
+
+_err:
+    if (err == -RT_EEMPTY)
+    {
+        LOG_W("PCI-Device<%s> no interrupt-map found, INTx interrupts not available",
+                rt_dm_dev_get_name(&pdev->parent));
+        LOG_W("PCI-Device<%s> possibly some PCI slots don't have level triggered interrupts capability",
+                rt_dm_dev_get_name(&pdev->parent));
+    }
+    else if (err && err != -RT_ENOSYS)
+    {
+        LOG_E("PCI-Device<%s> irq parse failed with err = %s",
+                rt_dm_dev_get_name(&pdev->parent), rt_strerror(err));
+    }
+
+    return err;
+}
+
+/**
+ * Parse and map a PCI device's INTx interrupt to a platform IRQ number.
+ *
+ * @param pdev the PCI device.
+ * @param slot unused here; resolution is fully devicetree driven.
+ * @param pin  unused here; the pin is re-read from config space.
+ *
+ * @return the mapped platform IRQ (>= 0), or -1 on failure.
+ *
+ * On success pdev->intx_pic is set to the PIC that owns the IRQ.
+ */
+int rt_pci_ofw_irq_parse_and_map(struct rt_pci_device *pdev,
+        rt_uint8_t slot, rt_uint8_t pin)
+{
+    int irq = -1;
+    rt_err_t status;
+    struct rt_ofw_cell_args irq_args;
+
+    if (!pdev)
+    {
+        goto _end;
+    }
+
+    status = pci_ofw_irq_parse(pdev, &irq_args);
+
+    if (status)
+    {
+        goto _end;
+    }
+
+    irq = rt_ofw_map_irq(&irq_args);
+
+    if (irq >= 0)
+    {
+        /* Remember the interrupt controller that services this INTx line */
+        pdev->intx_pic = rt_pic_dynamic_cast(rt_ofw_data(irq_args.data));
+    }
+
+_end:
+    return irq;
+}
+
+/*
+ * Parse a "ranges"/"dma-ranges" style property into a freshly allocated
+ * array of bus regions.
+ *
+ * @param dev_np         host bridge devicetree node.
+ * @param propname       "ranges" or "dma-ranges".
+ * @param phy_addr_cells cells per PCI (child) address; caller ensures 3.
+ * @param phy_size_cells cells per region size.
+ * @param cpu_addr_cells cells per CPU (parent) address.
+ * @param out_regions    receives the rt_malloc()'d array; caller owns and
+ *                       frees it.
+ * @param out_regions_nr receives the parsed region count.
+ *
+ * @return RT_EOK; -RT_EEMPTY when the property is absent; -RT_ENOMEM when
+ *         the array cannot be allocated.
+ */
+static rt_err_t pci_ofw_parse_ranges(struct rt_ofw_node *dev_np, const char *propname,
+        int phy_addr_cells, int phy_size_cells, int cpu_addr_cells,
+        struct rt_pci_bus_region **out_regions, rt_size_t *out_regions_nr)
+{
+    const fdt32_t *cell;
+    rt_ssize_t total_cells;
+    int groups, space_code;
+    rt_uint32_t phy_addr[3];
+    rt_uint64_t cpu_addr, phy_addr_size;
+
+    *out_regions = RT_NULL;
+    *out_regions_nr = 0;
+    cell = rt_ofw_prop_read_raw(dev_np, propname, &total_cells);
+
+    if (!cell)
+    {
+        return -RT_EEMPTY;
+    }
+
+    /* Each group is one (pci-addr, cpu-addr, size) tuple */
+    groups = total_cells / sizeof(*cell) / (phy_addr_cells + phy_size_cells + cpu_addr_cells);
+    *out_regions = rt_malloc(groups * sizeof(struct rt_pci_bus_region));
+
+    if (!*out_regions)
+    {
+        return -RT_ENOMEM;
+    }
+
+    for (int i = 0; i < groups; ++i)
+    {
+        /*
+         * ranges:
+         *  phys.hi  cell: npt000ss bbbbbbbb dddddfff rrrrrrrr
+         *  phys.low cell: llllllll llllllll llllllll llllllll
+         *  phys.mid cell: hhhhhhhh hhhhhhhh hhhhhhhh hhhhhhhh
+         *
+         *  n: relocatable region flag (doesn't play a role here)
+         *  p: prefetchable (cacheable) region flag
+         *  t: aliased address flag (doesn't play a role here)
+         *  ss: space code
+         *      00: configuration space
+         *      01: I/O space
+         *      10: 32 bit memory space
+         *      11: 64 bit memory space
+         *  bbbbbbbb: The PCI bus number
+         *  ddddd: The device number
+         *  fff: The function number. Used for multifunction PCI devices.
+         *  rrrrrrrr: Register number; used for configuration cycles.
+         */
+
+        for (int j = 0; j < phy_addr_cells; ++j)
+        {
+            phy_addr[j] = rt_fdt_read_number(cell++, 1);
+        }
+
+        /* "ss" bits of phys.hi (see the layout above) */
+        space_code = (phy_addr[0] >> 24) & 0x3;
+
+        cpu_addr = rt_fdt_read_number(cell, cpu_addr_cells);
+        cell += cpu_addr_cells;
+        phy_addr_size = rt_fdt_read_number(cell, phy_size_cells);
+        cell += phy_size_cells;
+
+        /* PCI bus address is phys.mid:phys.low */
+        (*out_regions)[i].phy_addr = ((rt_uint64_t)phy_addr[1] << 32) | phy_addr[2];
+        (*out_regions)[i].cpu_addr = cpu_addr;
+        (*out_regions)[i].size = phy_addr_size;
+
+        (*out_regions)[i].bus_start = (*out_regions)[i].phy_addr;
+
+        if (space_code & 2)
+        {
+            /* Memory space; bit 30 of phys.hi is the prefetchable flag */
+            (*out_regions)[i].flags = phy_addr[0] & (1U << 30) ?
+                    PCI_BUS_REGION_F_PREFETCH : PCI_BUS_REGION_F_MEM;
+        }
+        else if (space_code & 1)
+        {
+            (*out_regions)[i].flags = PCI_BUS_REGION_F_IO;
+        }
+        else
+        {
+            (*out_regions)[i].flags = PCI_BUS_REGION_F_NONE;
+        }
+
+        ++*out_regions_nr;
+    }
+
+    return RT_EOK;
+}
+
+/**
+ * Parse the host bridge "ranges" and optional "dma-ranges" properties and
+ * set up the bridge's bus regions.
+ *
+ * @return RT_EOK on success (a missing "dma-ranges" is not an error);
+ *         -RT_EINVAL on bad arguments or malformed cell counts; otherwise
+ *         the error from region setup or dma-ranges parsing.
+ */
+rt_err_t rt_pci_ofw_parse_ranges(struct rt_ofw_node *dev_np,
+        struct rt_pci_host_bridge *host_bridge)
+{
+    rt_err_t err;
+    int phy_addr_cells = -1, phy_size_cells = -1, cpu_addr_cells;
+
+    if (!dev_np || !host_bridge)
+    {
+        return -RT_EINVAL;
+    }
+
+    cpu_addr_cells = rt_ofw_io_addr_cells(dev_np);
+    rt_ofw_prop_read_s32(dev_np, "#address-cells", &phy_addr_cells);
+    rt_ofw_prop_read_s32(dev_np, "#size-cells", &phy_size_cells);
+
+    /* PCI child addresses are always 3 cells (phys.hi/mid/low) */
+    if (phy_addr_cells != 3 || phy_size_cells < 1 || cpu_addr_cells < 1)
+    {
+        return -RT_EINVAL;
+    }
+
+    if (pci_ofw_parse_ranges(dev_np, "ranges",
+        phy_addr_cells, phy_size_cells, cpu_addr_cells,
+        &host_bridge->bus_regions, &host_bridge->bus_regions_nr))
+    {
+        return -RT_EINVAL;
+    }
+
+    if ((err = rt_pci_region_setup(host_bridge)))
+    {
+        rt_free(host_bridge->bus_regions);
+        host_bridge->bus_regions_nr = 0;
+
+        return err;
+    }
+
+    err = pci_ofw_parse_ranges(dev_np, "dma-ranges",
+            phy_addr_cells, phy_size_cells, cpu_addr_cells,
+            &host_bridge->dma_regions, &host_bridge->dma_regions_nr);
+
+    /*
+     * BUGFIX: the old check `if (err != -RT_EEMPTY)` also matched RT_EOK,
+     * so a *successful* "dma-ranges" parse freed the bus regions and
+     * logged an error. Only a real failure is fatal; the absence of
+     * "dma-ranges" is legal.
+     */
+    if (err && err != -RT_EEMPTY)
+    {
+        rt_free(host_bridge->bus_regions);
+        host_bridge->bus_regions_nr = 0;
+
+        LOG_E("%s: Read dma-ranges error = %s", rt_ofw_node_full_name(dev_np),
+                rt_strerror(err));
+
+        return err;
+    }
+
+    return RT_EOK;
+}
+
+/**
+ * Initialize a host bridge from its devicetree node: install the IRQ
+ * mapping hooks, read "bus-range", the optional "<vendor>,pci-domain"
+ * property, and parse the address ranges.
+ *
+ * @return RT_EOK on success; -RT_EINVAL on bad args; -RT_EIO when
+ *         "bus-range" is missing or malformed; otherwise the error from
+ *         rt_pci_ofw_parse_ranges().
+ */
+rt_err_t rt_pci_ofw_host_bridge_init(struct rt_ofw_node *dev_np,
+        struct rt_pci_host_bridge *host_bridge)
+{
+    rt_err_t err;
+    const char *propname;
+
+    if (!dev_np || !host_bridge)
+    {
+        return -RT_EINVAL;
+    }
+
+    /* INTx routing callbacks used by the generic PCI core */
+    host_bridge->irq_slot = rt_pci_irq_slot;
+    host_bridge->irq_map = rt_pci_ofw_irq_parse_and_map;
+
+    if (rt_ofw_prop_read_u32_array_index(dev_np, "bus-range", 0, 2, host_bridge->bus_range) < 0)
+    {
+        return -RT_EIO;
+    }
+
+    /* Fuzzy-match a vendor-prefixed ",pci-domain" property for the segment */
+    propname = rt_ofw_get_prop_fuzzy_name(dev_np, ",pci-domain$");
+    rt_ofw_prop_read_u32(dev_np, propname, &host_bridge->domain);
+
+    err = rt_pci_ofw_parse_ranges(dev_np, host_bridge);
+
+    return err;
+}
+
+/* Per-bus OFW setup hook: nothing to do yet, reserved for the future. */
+rt_err_t rt_pci_ofw_bus_init(struct rt_pci_bus *bus)
+{
+    return RT_EOK;
+}
+
+/* Per-bus OFW teardown hook: nothing to do yet, reserved for the future. */
+rt_err_t rt_pci_ofw_bus_free(struct rt_pci_bus *bus)
+{
+    return RT_EOK;
+}
+
+/*
+ * RID (Requester ID) is formatted such that:
+ *  Bits [15:8] are the Bus number.
+ *  Bits [7:3] are the Device number.
+ *  Bits [2:0] are the Function number.
+ *
+ * msi-map: Maps a Requester ID to an MSI controller and associated
+ *  msi-specifier data. The property is an arbitrary number of tuples of
+ *  (rid-base,msi-controller,msi-base,length), where:
+ *
+ *    - rid-base is a single cell describing the first RID matched by the entry.
+ *
+ *    - msi-controller is a single phandle to an MSI controller
+ *
+ *    - msi-base is an msi-specifier describing the msi-specifier produced for
+ *      the first RID matched by the entry.
+ *
+ *    - length is a single cell describing how many consecutive RIDs are matched
+ *      following the rid-base.
+ *
+ *  Any RID r in the interval [rid-base, rid-base + length) is associated with
+ *  the listed msi-controller, with the msi-specifier (r - rid-base + msi-base).
+ *
+ * msi-map-mask: A mask to be applied to each Requester ID prior to being mapped
+ *  to an msi-specifier per the msi-map property.
+ *
+ * msi-parent: Describes the MSI parent of the root complex itself. Where
+ *  the root complex and MSI controller do not pass sideband data with MSI
+ *  writes, this property may be used to describe the MSI controller(s)
+ *  used by PCI devices under the root complex, if defined as such in the
+ *  binding for the root complex.
+ *
+ *  / {
+ *      #address-cells = <1>;
+ *      #size-cells = <1>;
+ *
+ *      msi_a: msi-controller@a {
+ *          reg = <0xa 0x1>;
+ *          msi-controller;
+ *          #msi-cells = <1>;
+ *      };
+ *
+ *      msi_b: msi-controller@b {
+ *          reg = <0xb 0x1>;
+ *          msi-controller;
+ *          #msi-cells = <1>;
+ *      };
+ *
+ *      msi_c: msi-controller@c {
+ *          reg = <0xc 0x1>;
+ *          msi-controller;
+ *          #msi-cells = <1>;
+ *      };
+ *
+ *  Example (1)
+ *  ===========
+ *      pci: pci@f {
+ *          reg = <0xf 0x1>;
+ *          device_type = "pci";
+ *
+ *          // The sideband data provided to the MSI controller is
+ *          //  the RID, identity-mapped.
+ *          msi-map = <0x0 &msi_a 0x0 0x10000>;
+ *      };
+ *
+ *  Example (2)
+ *  ===========
+ *      pci: pci@ff {
+ *          reg = <0xff 0x1>;
+ *          device_type = "pci";
+ *
+ *          // The sideband data provided to the MSI controller is
+ *          //  the RID, masked to only the device and function bits.
+ *          msi-map = <0x0 &msi_a 0x0 0x100>;
+ *          msi-map-mask = <0xff>
+ *      };
+ *
+ *  Example (3)
+ *  ===========
+ *      pci: pci@fff {
+ *          reg = <0xfff 0x1>;
+ *          device_type = "pci";
+ *
+ *          // The sideband data provided to the MSI controller is
+ *          //  the RID, but the high bit of the bus number is ignored.
+ *          msi-map = <0x0000 &msi_a 0x0000 0x8000>,
+ *                    <0x8000 &msi_a 0x0000 0x8000>;
+ *      };
+ *
+ *  Example (4)
+ *  ===========
+ *      pci: pci@f {
+ *          reg = <0xf 0x1>;
+ *          device_type = "pci";
+ *
+ *          // The sideband data provided to the MSI controller is
+ *          //  the RID, but the high bit of the bus number is negated.
+ *          msi-map = <0x0000 &msi 0x8000 0x8000>,
+ *                    <0x8000 &msi 0x0000 0x8000>;
+ *      };
+ *
+ *  Example (5)
+ *  ===========
+ *      pci: pci@f {
+ *          reg = <0xf 0x1>;
+ *          device_type = "pci";
+ *
+ *          // The sideband data provided to MSI controller a is the
+ *          //  RID, but the high bit of the bus number is negated.
+ *          // The sideband data provided to MSI controller b is the
+ *          //  RID, identity-mapped.
+ *          // MSI controller c is not addressable.
+ *          msi-map = <0x0000 &msi_a 0x8000 0x08000>,
+ *                    <0x8000 &msi_a 0x0000 0x08000>,
+ *                    <0x0000 &msi_b 0x0000 0x10000>;
+ *      };
+ *  };
+ */
+/*
+ * Bind pdev->msi_pic to the MSI controller described by the devicetree:
+ * try "msi-parent" first, then the "msi-map"/"msi-map-mask" RID
+ * translation (see the binding description above), walking up the bus
+ * hierarchy until a match is found. Asserts that the chosen PIC
+ * implements the mandatory MSI operations.
+ */
+static void ofw_msi_pic_init(struct rt_pci_device *pdev)
+{
+/* NOTE(review): confirm `RT_PCI_MSI' matches the Kconfig-generated symbol */
+#ifdef RT_PCI_MSI
+    rt_uint32_t rid;
+    struct rt_pci_bus *bus;
+    struct rt_ofw_node *np, *msi_ic_np = RT_NULL;
+
+    /*
+     * NOTE: Typically, a device's RID is equal to the PCI device's ID.
+     * However, in complex bus management scenarios such as servers and PCs,
+     * the RID needs to be associated with DMA. In these cases,
+     * the RID should be equal to the DMA alias assigned to the
+     * PCI device by the system bus.
+     */
+    rid = rt_pci_dev_id(pdev);
+
+    for (bus = pdev->bus; bus; bus = bus->parent)
+    {
+        /* The OFW node comes from the host bridge on the root bus,
+         * from the upstream P2P bridge otherwise */
+        if (rt_pci_is_root_bus(bus))
+        {
+            np = bus->host_bridge->parent.ofw_node;
+        }
+        else
+        {
+            np = bus->self->parent.ofw_node;
+        }
+
+        if ((msi_ic_np = rt_ofw_parse_phandle(np, "msi-parent", 0)))
+        {
+            break;
+        }
+
+        if (!rt_ofw_map_id(np, rid, "msi-map", "msi-map-mask", &msi_ic_np, RT_NULL))
+        {
+            break;
+        }
+    }
+
+    if (!msi_ic_np)
+    {
+        LOG_W("%s: MSI PIC not found", rt_dm_dev_get_name(&pdev->parent));
+
+        return;
+    }
+
+    pdev->msi_pic = rt_pic_dynamic_cast(rt_ofw_data(msi_ic_np));
+
+    if (!pdev->msi_pic)
+    {
+        LOG_W("%s: '%s' not supported", rt_dm_dev_get_name(&pdev->parent), "msi-parent");
+
+        goto _out_put_msi_parent_node;
+    }
+
+    /* An MSI-capable PIC must be able to compose, allocate and free
+     * message-based IRQs; a missing op is a fatal driver bug. */
+    if (!pdev->msi_pic->ops->irq_compose_msi_msg)
+    {
+        LOG_E("%s: MSI pic MUST implemented %s",
+                rt_ofw_node_full_name(msi_ic_np), "irq_compose_msi_msg");
+        RT_ASSERT(0);
+    }
+
+    if (!pdev->msi_pic->ops->irq_alloc_msi)
+    {
+        LOG_E("%s: MSI pic MUST implemented %s",
+                rt_ofw_node_full_name(msi_ic_np), "irq_alloc_msi");
+        RT_ASSERT(0);
+    }
+
+    if (!pdev->msi_pic->ops->irq_free_msi)
+    {
+        LOG_E("%s: MSI pic MUST implemented %s",
+                rt_ofw_node_full_name(msi_ic_np), "irq_free_msi");
+        RT_ASSERT(0);
+    }
+
+_out_put_msi_parent_node:
+    rt_ofw_node_put(msi_ic_np);
+#endif
+}
+
+/* Decode the devfn from a child node's "reg" phys.hi cell, or propagate
+ * the (non-positive) property-read result on failure. */
+static rt_int32_t ofw_pci_devfn(struct rt_ofw_node *np)
+{
+    rt_uint32_t reg[5];
+    rt_int32_t err = rt_ofw_prop_read_u32_array_index(np, "reg", 0, RT_ARRAY_SIZE(reg), reg);
+
+    if (err <= 0)
+    {
+        return err;
+    }
+
+    /* phys.hi = npt000ss bbbbbbbb dddddfff rrrrrrrr -> devfn is bits 15:8 */
+    return (reg[0] >> 8) & 0xff;
+}
+
+/*
+ * Find the child node of @np whose "reg" devfn matches @devfn, descending
+ * one extra level into "multifunc-device" container nodes.
+ *
+ * @return a referenced node (caller must rt_ofw_node_put() it), or
+ *         RT_NULL when no child matches.
+ */
+static struct rt_ofw_node *ofw_find_device(struct rt_ofw_node *np, rt_uint32_t devfn)
+{
+    struct rt_ofw_node *dev_np, *mfd_np;
+
+    rt_ofw_foreach_child_node(np, dev_np)
+    {
+        if (ofw_pci_devfn(dev_np) == devfn)
+        {
+            return dev_np;
+        }
+
+        if (rt_ofw_node_tag_equ(dev_np, "multifunc-device"))
+        {
+            rt_ofw_foreach_child_node(dev_np, mfd_np)
+            {
+                if (ofw_pci_devfn(mfd_np) == devfn)
+                {
+                    /* Drop the iteration reference on the container node;
+                     * the matched child keeps its own */
+                    rt_ofw_node_put(dev_np);
+
+                    return mfd_np;
+                }
+            }
+        }
+    }
+
+    return RT_NULL;
+}
+
+/**
+ * Attach the matching devicetree node (if any) to a newly probed PCI
+ * device and resolve its MSI controller.
+ *
+ * @return RT_EOK (a device without a DT node is not an error), or
+ *         -RT_EINVAL when pdev is RT_NULL.
+ */
+rt_err_t rt_pci_ofw_device_init(struct rt_pci_device *pdev)
+{
+    struct rt_ofw_node *np = RT_NULL;
+
+    if (!pdev)
+    {
+        return -RT_EINVAL;
+    }
+
+    ofw_msi_pic_init(pdev);
+
+    /* Search children of the host bridge node for root-bus devices,
+     * of the upstream bridge's node otherwise */
+    if (rt_pci_is_root_bus(pdev->bus) || !pdev->bus->self)
+    {
+        struct rt_pci_host_bridge *host_bridge;
+
+        host_bridge = rt_pci_find_host_bridge(pdev->bus);
+        RT_ASSERT(host_bridge != RT_NULL);
+
+        np = host_bridge->parent.ofw_node;
+    }
+    else
+    {
+        np = pdev->bus->self->parent.ofw_node;
+    }
+
+    if (np)
+    {
+        pdev->parent.ofw_node = ofw_find_device(np, pdev->devfn);
+    }
+
+    return RT_EOK;
+}
+
+/* Release the reference on the device's OFW node taken at init time
+ * (putting a RT_NULL node is harmless). */
+rt_err_t rt_pci_ofw_device_free(struct rt_pci_device *pdev)
+{
+    if (pdev)
+    {
+        rt_ofw_node_put(pdev->parent.ofw_node);
+
+        return RT_EOK;
+    }
+
+    return -RT_EINVAL;
+}

+ 1004 - 0
components/drivers/pci/pci.c

@@ -0,0 +1,1004 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-10-24     GuEe-GUI     first version
+ */
+
+#include <rtthread.h>
+#include <rtservice.h>
+
+#define DBG_TAG "rtdm.pci"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
+#include <drivers/pci.h>
+#include <drivers/misc.h>
+#include <drivers/core/bus.h>
+
+/* Thin wrapper: take the raw SMP lock inside rt_spinlock (no IRQ save). */
+rt_inline void spin_lock(struct rt_spinlock *spinlock)
+{
+    rt_hw_spin_lock(&spinlock->lock);
+}
+
+/* Thin wrapper: release the raw SMP lock taken by spin_lock(). */
+rt_inline void spin_unlock(struct rt_spinlock *spinlock)
+{
+    rt_hw_spin_unlock(&spinlock->lock);
+}
+
+/* Return the PCI domain (segment) number of the device's host bridge,
+ * or RT_UINT32_MAX when it cannot be resolved. */
+rt_uint32_t rt_pci_domain(struct rt_pci_device *pdev)
+{
+    struct rt_pci_host_bridge *host_bridge;
+
+    if (pdev && (host_bridge = rt_pci_find_host_bridge(pdev->bus)))
+    {
+        return host_bridge->domain;
+    }
+
+    return RT_UINT32_MAX;
+}
+
+/*
+ * Walk the classic capability list looking for capability ID @cap.
+ *
+ * @param pos offset of the "next capability" pointer to start from.
+ * @param ttl remaining hops, decremented in place to bound walks over
+ *            malformed (looping) lists.
+ *
+ * @return the config-space offset of the capability, or 0 when not found.
+ */
+static rt_uint8_t pci_find_next_cap_ttl(struct rt_pci_bus *bus,
+        rt_uint32_t devfn, rt_uint8_t pos, int cap, int *ttl)
+{
+    rt_uint8_t ret = 0, id;
+    rt_uint16_t ent;
+
+    rt_pci_bus_read_config_u8(bus, devfn, pos, &pos);
+
+    while ((*ttl)--)
+    {
+        /* Capabilities live above the standard header (offset 0x40) */
+        if (pos < 0x40)
+        {
+            break;
+        }
+
+        pos &= ~3;
+        /* ent = next pointer (high byte) | capability ID (low byte) */
+        rt_pci_bus_read_config_u16(bus, devfn, pos, &ent);
+
+        id = ent & 0xff;
+        if (id == 0xff)
+        {
+            break;
+        }
+        if (id == cap)
+        {
+            ret = pos;
+            break;
+        }
+        pos = (ent >> 8);
+    }
+
+    return ret;
+}
+
+/* Capability-list walk from @pos with the default TTL bound. */
+static rt_uint8_t pci_find_next_cap(struct rt_pci_bus *bus,
+        rt_uint32_t devfn, rt_uint8_t pos, int cap)
+{
+    int ttl = RT_PCI_FIND_CAP_TTL;
+
+    return pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
+}
+
+/* Return the offset of the first-capability pointer for the given header
+ * type, or 0 when the device advertises no capability list. */
+static rt_uint8_t pci_bus_find_cap_start(struct rt_pci_bus *bus,
+        rt_uint32_t devfn, rt_uint8_t hdr_type)
+{
+    rt_uint16_t status;
+
+    rt_pci_bus_read_config_u16(bus, devfn, PCIR_STATUS, &status);
+
+    if (!(status & PCIM_STATUS_CAPPRESENT))
+    {
+        return 0;
+    }
+
+    if (hdr_type == PCIM_HDRTYPE_NORMAL || hdr_type == PCIM_HDRTYPE_BRIDGE)
+    {
+        return PCIR_CAP_PTR;
+    }
+
+    if (hdr_type == PCIM_HDRTYPE_CARDBUS)
+    {
+        return PCIR_CAP_PTR_2;
+    }
+
+    return 0;
+}
+
+/*
+ * Find capability @cap for (bus, devfn).
+ *
+ * @return the capability offset; 0 when the device lacks it;
+ *         RT_UINT8_MAX when @bus is RT_NULL (note the two distinct
+ *         not-found sentinels).
+ */
+rt_uint8_t rt_pci_bus_find_capability(struct rt_pci_bus *bus, rt_uint32_t devfn, int cap)
+{
+    rt_uint8_t hdr_type, ret = RT_UINT8_MAX;
+
+    if (bus)
+    {
+        rt_pci_bus_read_config_u8(bus, devfn, PCIR_HDRTYPE, &hdr_type);
+
+        ret = pci_bus_find_cap_start(bus, devfn, hdr_type & PCIM_HDRTYPE);
+
+        if (ret)
+        {
+            ret = pci_find_next_cap(bus, devfn, ret, cap);
+        }
+    }
+
+    return ret;
+}
+
+/*
+ * Find capability @cap on a PCI device using its cached header type.
+ *
+ * @return the capability offset; 0 when absent; RT_UINT8_MAX when @pdev
+ *         is RT_NULL.
+ */
+rt_uint8_t rt_pci_find_capability(struct rt_pci_device *pdev, int cap)
+{
+    rt_uint8_t res = RT_UINT8_MAX;
+
+    if (pdev)
+    {
+        res = pci_bus_find_cap_start(pdev->bus, pdev->devfn, pdev->hdr_type);
+
+        if (res)
+        {
+            res = pci_find_next_cap(pdev->bus, pdev->devfn, res, cap);
+        }
+    }
+
+    return res;
+}
+
+/*
+ * Continue a capability search after @pos by following the next pointer
+ * stored at pos + PCICAP_NEXTPTR.
+ *
+ * @return the capability offset; 0 when not found; RT_UINT8_MAX when
+ *         @pdev is RT_NULL.
+ */
+rt_uint8_t rt_pci_find_next_capability(struct rt_pci_device *pdev, rt_uint8_t pos, int cap)
+{
+    rt_uint8_t res = RT_UINT8_MAX;
+
+    if (pdev)
+    {
+        res = pci_find_next_cap(pdev->bus, pdev->devfn, pos + PCICAP_NEXTPTR, cap);
+    }
+
+    return res;
+}
+
+/* Find the first PCIe extended capability @cap (search whole 4K space). */
+rt_uint16_t rt_pci_find_ext_capability(struct rt_pci_device *pdev, int cap)
+{
+    return rt_pci_find_ext_next_capability(pdev, 0, cap);
+}
+
+/**
+ * Find a PCIe extended capability after @pos (0 means search from the
+ * start of extended config space at 0x100).
+ *
+ * @return the capability offset, or 0 when not found, on a config read
+ *         failure, or when the device has no extended config space.
+ */
+rt_uint16_t rt_pci_find_ext_next_capability(struct rt_pci_device *pdev, rt_uint16_t pos, int cap)
+{
+    int ttl;
+    rt_uint32_t header;
+    rt_uint16_t start = pos;
+
+    /*
+     * BUGFIX/consistency: the sibling lookup helpers guard against a null
+     * pdev; this one dereferenced it unconditionally (and is reachable
+     * with a raw caller pointer via rt_pci_find_ext_capability()).
+     */
+    if (!pdev)
+    {
+        return 0;
+    }
+
+    /* minimum 8 bytes per capability */
+    ttl = ((PCIE_REGMAX + 1) - (PCI_REGMAX + 1)) / 8;
+
+    if (pdev->cfg_size <= PCI_REGMAX + 1)
+    {
+        return 0;
+    }
+
+    if (!pos)
+    {
+        pos = PCI_REGMAX + 1;
+    }
+
+    if (rt_pci_read_config_u32(pdev, pos, &header))
+    {
+        return 0;
+    }
+
+    /*
+     * If we have no capabilities, this is indicated by cap ID,
+     * cap version and next pointer all being 0.
+     */
+    if (header == 0)
+    {
+        return 0;
+    }
+
+    while (ttl-- > 0)
+    {
+        /* Skip the entry we started from so "next after pos" works */
+        if (PCI_EXTCAP_ID(header) == cap && pos != start)
+        {
+            return pos;
+        }
+
+        pos = PCI_EXTCAP_NEXTPTR(header);
+
+        /* A next pointer below 0x100 terminates the list */
+        if (pos < PCI_REGMAX + 1)
+        {
+            break;
+        }
+
+        if (rt_pci_read_config_u32(pdev, pos, &header))
+        {
+            break;
+        }
+    }
+
+    return 0;
+}
+
+/* Set or clear PCIM_CMD_BUSMASTEREN in the COMMAND register (writing only
+ * on change) and cache the resulting state in pdev->busmaster. */
+static void pci_set_master(struct rt_pci_device *pdev, rt_bool_t enable)
+{
+    rt_uint16_t cmd, new_cmd;
+
+    rt_pci_read_config_u16(pdev, PCIR_COMMAND, &cmd);
+
+    new_cmd = enable ? (cmd | PCIM_CMD_BUSMASTEREN) : (cmd & ~PCIM_CMD_BUSMASTEREN);
+
+    if (new_cmd != cmd)
+    {
+        rt_pci_write_config_u16(pdev, PCIR_COMMAND, new_cmd);
+    }
+
+    pdev->busmaster = !!enable;
+}
+
+/* Enable bus mastering (DMA) for @pdev; no-op on RT_NULL. */
+void rt_pci_set_master(struct rt_pci_device *pdev)
+{
+    if (!pdev)
+    {
+        return;
+    }
+
+    pci_set_master(pdev, RT_TRUE);
+}
+
+/* Disable bus mastering (DMA) for @pdev; no-op on RT_NULL. */
+void rt_pci_clear_master(struct rt_pci_device *pdev)
+{
+    if (!pdev)
+    {
+        return;
+    }
+
+    pci_set_master(pdev, RT_FALSE);
+}
+
+/* Enable (RT_TRUE) or disable legacy INTx assertion for the device by
+ * toggling the INTx-disable bit in COMMAND, writing only on change. */
+void rt_pci_intx(struct rt_pci_device *pdev, rt_bool_t enable)
+{
+    rt_uint16_t cmd, cmd_new;
+
+    if (!pdev)
+    {
+        return;
+    }
+
+    rt_pci_read_config_u16(pdev, PCIR_COMMAND, &cmd);
+
+    cmd_new = enable ? (cmd & ~PCIM_CMD_INTxDIS) : (cmd | PCIM_CMD_INTxDIS);
+
+    if (cmd_new != cmd)
+    {
+        rt_pci_write_config_u16(pdev, PCIR_COMMAND, cmd_new);
+    }
+}
+
+/*
+ * Under the global rt_pci_lock, check that this device's INTx pending
+ * state matches the requested transition (pending when masking, idle when
+ * unmasking) and only then flip the INTx-disable bit.
+ *
+ * @return RT_TRUE when the transition was applied (or already in effect),
+ *         RT_FALSE when the pending state did not match.
+ */
+static rt_bool_t pci_check_and_set_intx_mask(struct rt_pci_device *pdev, rt_bool_t mask)
+{
+    rt_ubase_t level;
+    rt_bool_t irq_pending;
+    rt_bool_t res = RT_TRUE;
+    rt_uint16_t origcmd, newcmd;
+    rt_uint32_t cmd_status_dword;
+    struct rt_pci_bus *bus = pdev->bus;
+
+    level = rt_spin_lock_irqsave(&rt_pci_lock);
+
+    /* One dword read covers COMMAND (low 16 bits) and STATUS (high 16) */
+    bus->ops->read(bus, pdev->devfn, PCIR_COMMAND, 4, &cmd_status_dword);
+
+    irq_pending = (cmd_status_dword >> 16) & PCIM_STATUS_INTxSTATE;
+
+    /*
+     * Check interrupt status register to see whether our device
+     * triggered the interrupt (when masking) or the next IRQ is
+     * already pending (when unmasking).
+     */
+    if (mask != irq_pending)
+    {
+        res = RT_FALSE;
+    }
+    else
+    {
+        origcmd = cmd_status_dword;
+        newcmd = origcmd & ~PCIM_CMD_INTxDIS;
+
+        if (mask)
+        {
+            newcmd |= PCIM_CMD_INTxDIS;
+        }
+        if (newcmd != origcmd)
+        {
+            /* Write back COMMAND only (2 bytes), leaving STATUS untouched */
+            bus->ops->write(bus, pdev->devfn, PCIR_COMMAND, 2, newcmd);
+        }
+    }
+
+    rt_spin_unlock_irqrestore(&rt_pci_lock, level);
+
+    return res;
+}
+
+/* Mask INTx only if this device raised the pending interrupt; returns
+ * RT_TRUE when the IRQ was ours and is now masked. */
+rt_bool_t rt_pci_check_and_mask_intx(struct rt_pci_device *pdev)
+{
+    return pdev ? pci_check_and_set_intx_mask(pdev, RT_TRUE) : RT_FALSE;
+}
+
+/* Unmask INTx only if no interrupt is currently pending; returns RT_TRUE
+ * when the line was unmasked. */
+rt_bool_t rt_pci_check_and_unmask_intx(struct rt_pci_device *pdev)
+{
+    return pdev ? pci_check_and_set_intx_mask(pdev, RT_FALSE) : RT_FALSE;
+}
+
+/*
+ * Mask the device's INTx: always set the device-level INTx-disable bit,
+ * and additionally mask the line at the PIC when no ISR remains attached
+ * to the (possibly shared) platform IRQ.
+ */
+void rt_pci_irq_mask(struct rt_pci_device *pdev)
+{
+    if (pdev)
+    {
+        rt_bool_t unused;
+        struct rt_pic_irq *pirq;
+
+        rt_pci_intx(pdev, RT_FALSE);
+
+        pirq = rt_pic_find_pirq(pdev->intx_pic, pdev->irq);
+        RT_ASSERT(pirq != RT_NULL);
+
+        /* The platform IRQ may be shared: check the ISR list under its lock */
+        rt_hw_spin_lock(&pirq->rw_lock.lock);
+        unused = rt_list_isempty(&pirq->isr.list);
+        rt_hw_spin_unlock(&pirq->rw_lock.lock);
+
+        if (unused)
+        {
+            rt_hw_interrupt_mask(pdev->irq);
+        }
+    }
+}
+
+/* Unmask the device's IRQ at the PIC (rt_hw_interrupt_umask is the
+ * RT-Thread API spelling) and clear the device-level INTx-disable bit. */
+void rt_pci_irq_unmask(struct rt_pci_device *pdev)
+{
+    if (pdev)
+    {
+        rt_hw_interrupt_umask(pdev->irq);
+        rt_pci_intx(pdev, RT_TRUE);
+    }
+}
+
+/* Climb the bus hierarchy to the root bus; RT_NULL in gives RT_NULL out. */
+struct rt_pci_bus *rt_pci_find_root_bus(struct rt_pci_bus *bus)
+{
+    if (bus)
+    {
+        while (bus->parent)
+        {
+            bus = bus->parent;
+        }
+    }
+
+    return bus;
+}
+
+/*
+ * Return the host bridge that owns @bus's root bus, or RT_NULL.
+ *
+ * NOTE(review): this recovers the bridge with rt_container_of() over the
+ * `parent' member, i.e. it treats bus->host_bridge as pointing at the
+ * embedded parent object -- while ofw.c dereferences bus->host_bridge
+ * directly as a struct rt_pci_host_bridge *. Confirm the field's declared
+ * type; the two usages only agree if `parent' is at offset 0.
+ */
+struct rt_pci_host_bridge *rt_pci_find_host_bridge(struct rt_pci_bus *bus)
+{
+    if (!bus)
+    {
+        return RT_NULL;
+    }
+
+    if ((bus = rt_pci_find_root_bus(bus)))
+    {
+        return rt_container_of(bus->host_bridge, struct rt_pci_host_bridge, parent);
+    }
+
+    return RT_NULL;
+}
+
+/*
+ * Standard PCI INTx swizzle: rotate pin (1..4 = INTA..INTD) by the
+ * device's slot number when crossing a bridge. ARI devices contribute no
+ * slot component.
+ */
+rt_uint8_t rt_pci_irq_intx(struct rt_pci_device *pdev, rt_uint8_t pin)
+{
+    int slot = 0;
+
+    if (!pdev->ari_enabled)
+    {
+        slot = RT_PCI_SLOT(pdev->devfn);
+    }
+
+    return (((pin - 1) + slot) % 4) + 1;
+}
+
+/*
+ * Walk a device up to the root bus, swizzling the INTx pin across each
+ * P2P bridge, and return the root-level slot number the host bridge
+ * sees. *pinp is updated in place to the fully swizzled pin.
+ */
+rt_uint8_t rt_pci_irq_slot(struct rt_pci_device *pdev, rt_uint8_t *pinp)
+{
+    rt_uint8_t pin = *pinp;
+
+    while (!rt_pci_is_root_bus(pdev->bus))
+    {
+        pin = rt_pci_irq_intx(pdev, pin);
+        pdev = pdev->bus->self;
+    }
+
+    *pinp = pin;
+
+    return RT_PCI_SLOT(pdev->devfn);
+}
+
+/*
+ * Validate and log the host bridge's bus regions, initializing each
+ * region's allocation cursor (bus_start) to at least 0x1000.
+ *
+ * @return RT_EOK, or -RT_EEMPTY when the bridge has no regions.
+ */
+rt_err_t rt_pci_region_setup(struct rt_pci_host_bridge *host_bridge)
+{
+    rt_err_t err = host_bridge->bus_regions_nr == 0 ? -RT_EEMPTY : RT_EOK;
+
+    for (int i = 0; i < host_bridge->bus_regions_nr; ++i)
+    {
+        struct rt_pci_bus_region *region = &host_bridge->bus_regions[i];
+        /*
+         * Avoid allocating PCI resources from address 0 -- this is illegal
+         * according to PCI 2.1 and moreover. Use a reasonable starting value of
+         * 0x1000 instead if the bus start address is below 0x1000.
+         */
+        region->bus_start = rt_max_t(rt_size_t, 0x1000, region->phy_addr);
+
+        /* NOTE(review): %p is used to print rt_uint64_t addresses below;
+         * verify full values are printed on 32-bit targets */
+        LOG_I("Bus %s region(%d):",
+            region->flags == PCI_BUS_REGION_F_MEM ? "Memory" :
+                    (region->flags == PCI_BUS_REGION_F_PREFETCH ? "Prefetchable Mem" :
+                            (region->flags == PCI_BUS_REGION_F_IO ? "I/O" : "Unknown")), i);
+        LOG_I("  cpu:      [%p, %p]", region->cpu_addr, (region->cpu_addr + region->size - 1));
+        LOG_I("  physical: [%p, %p]", region->phy_addr, (region->phy_addr + region->size - 1));
+    }
+
+    return err;
+}
+
+/**
+ * Allocate @size bytes of bus address space from a host-bridge window
+ * matching @flags.
+ *
+ * @param out_addr receives the allocated bus address (naturally aligned
+ *                 to @size); written only on success.
+ * @param mem64    request an address above 4G; falls back to a 32-bit
+ *                 allocation when no 64-bit window can satisfy it.
+ *
+ * @return the window the allocation came from, or RT_NULL on failure.
+ */
+struct rt_pci_bus_region *rt_pci_region_alloc(struct rt_pci_host_bridge *host_bridge,
+        void **out_addr, rt_size_t size, rt_ubase_t flags, rt_bool_t mem64)
+{
+    struct rt_pci_bus_region *bus_region, *region = RT_NULL;
+
+    bus_region = &host_bridge->bus_regions[0];
+
+    for (int i = 0; i < host_bridge->bus_regions_nr; ++i, ++bus_region)
+    {
+        if (bus_region->flags == flags && bus_region->size > 0)
+        {
+            void *addr;
+
+            region = bus_region;
+            /* Round the allocation cursor up to the request's natural alignment */
+            addr = (void *)(((region->bus_start - 1) | (size - 1)) + 1);
+
+            if ((rt_uint64_t)addr - region->phy_addr + size <= region->size)
+            {
+                rt_bool_t addr64 = !!rt_upper_32_bits((rt_ubase_t)addr);
+
+                if (mem64)
+                {
+                    if (!addr64)
+                    {
+                        region = RT_NULL;
+
+                        /* Try again */
+                        continue;
+                    }
+                }
+                else if (addr64)
+                {
+                    region = RT_NULL;
+
+                    /* Try again */
+                    continue;
+                }
+
+                region->bus_start = ((rt_uint64_t)addr + size);
+                *out_addr = addr;
+            }
+            else
+            {
+                /*
+                 * BUGFIX: the request does not fit in this window. The old
+                 * code kept `region' set and broke out here, returning a
+                 * window without ever writing *out_addr, so the caller
+                 * would program a bogus BAR address. Reject this window
+                 * and keep searching.
+                 */
+                region = RT_NULL;
+
+                continue;
+            }
+
+            break;
+        }
+    }
+
+    if (!region && mem64)
+    {
+        /* No 64-bit capable window satisfied the request: retry as 32-bit */
+        region = rt_pci_region_alloc(host_bridge, out_addr, size, flags, RT_FALSE);
+    }
+
+    return region;
+}
+
+rt_err_t rt_pci_device_alloc_resource(struct rt_pci_host_bridge *host_bridge,
+        struct rt_pci_device *pdev)
+{
+    rt_err_t err = RT_EOK;
+    rt_size_t size;
+    rt_ubase_t addr = 0;
+    rt_uint32_t cfg;
+    rt_size_t bars_nr;
+    rt_uint8_t hdr_type;
+    rt_bool_t prefetch = RT_FALSE;
+    rt_uint16_t class, command = 0;
+
+    for (int i = 0; i < host_bridge->bus_regions_nr; ++i)
+    {
+        if (host_bridge->bus_regions[i].flags == PCI_BUS_REGION_F_PREFETCH)
+        {
+            prefetch = RT_TRUE;
+            break;
+        }
+    }
+
+    rt_pci_read_config_u16(pdev, PCIR_COMMAND, &command);
+    command = (command & ~(PCIM_CMD_PORTEN | PCIM_CMD_MEMEN)) | PCIM_CMD_BUSMASTEREN;
+    rt_pci_read_config_u8(pdev, PCIR_HDRTYPE, &hdr_type);
+
+    if (pdev->hdr_type != hdr_type)
+    {
+        LOG_W("%s may not initialized", rt_dm_dev_get_name(&pdev->parent));
+    }
+
+    switch (hdr_type)
+    {
+    case PCIM_HDRTYPE_NORMAL:
+        bars_nr = PCI_STD_NUM_BARS;
+        break;
+
+    case PCIM_HDRTYPE_BRIDGE:
+        bars_nr = 2;
+        break;
+
+    case PCIM_HDRTYPE_CARDBUS:
+        bars_nr = 0;
+        break;
+
+    default:
+        bars_nr = 0;
+        break;
+    }
+
+    for (int i = 0; i < bars_nr; ++i)
+    {
+        rt_ubase_t flags;
+        rt_ubase_t bar_base;
+        rt_bool_t mem64 = RT_FALSE;
+        struct rt_pci_bus_region *region;
+
+        cfg = 0;
+        bar_base = PCIR_BAR(i);
+
+        rt_pci_write_config_u32(pdev, bar_base, RT_UINT32_MAX);
+        rt_pci_read_config_u32(pdev, bar_base, &cfg);
+
+        if (!cfg)
+        {
+            continue;
+        }
+        else if (cfg == RT_UINT32_MAX)
+        {
+            rt_pci_write_config_u32(pdev, bar_base, 0UL);
+            continue;
+        }
+
+        if (cfg & PCIM_BAR_SPACE)
+        {
+            mem64 = RT_FALSE;
+            flags = PCI_BUS_REGION_F_IO;
+
+            size = cfg & PCIM_BAR_IO_MASK;
+            size &= ~(size - 1);
+        }
+        else
+        {
+            /* memory */
+            if ((cfg & PCIM_BAR_MEM_TYPE_MASK) == PCIM_BAR_MEM_TYPE_64)
+            {
+                /* 64bits */
+                rt_uint32_t cfg64;
+                rt_uint64_t bar64;
+
+                mem64 = RT_TRUE;
+
+                rt_pci_write_config_u32(pdev, bar_base + sizeof(rt_uint32_t), RT_UINT32_MAX);
+                rt_pci_read_config_u32(pdev, bar_base + sizeof(rt_uint32_t), &cfg64);
+
+                bar64 = ((rt_uint64_t)cfg64 << 32) | cfg;
+
+                size = ~(bar64 & PCIM_BAR_MEM_MASK) + 1;
+            }
+            else
+            {
+                /* 32bits */
+                mem64 = RT_FALSE;
+                size = (rt_uint32_t)(~(cfg & PCIM_BAR_MEM_MASK) + 1);
+            }
+
+            if (prefetch && (cfg & PCIM_BAR_MEM_PREFETCH))
+            {
+                flags = PCI_BUS_REGION_F_PREFETCH;
+            }
+            else
+            {
+                flags = PCI_BUS_REGION_F_MEM;
+            }
+        }
+
+        region = rt_pci_region_alloc(host_bridge, (void **)&addr, size, flags, mem64);
+
+        if (region)
+        {
+            rt_pci_write_config_u32(pdev, bar_base, addr);
+
+            if (mem64)
+            {
+                bar_base += sizeof(rt_uint32_t);
+            #ifdef RT_PCI_SYS_64BIT
+                rt_pci_write_config_u32(pdev, bar_base, (rt_uint32_t)(addr >> 32));
+            #else
+                /*
+                 * If we are a 64-bit decoder then increment to the upper 32 bits
+                 * of the bar and force it to locate in the lower 4GB of memory.
+                 */
+                rt_pci_write_config_u32(pdev, bar_base, 0UL);
+            #endif
+            }
+
+            pdev->resource[i].size = size;
+            pdev->resource[i].base = region->cpu_addr + (addr - region->phy_addr);
+            pdev->resource[i].flags = flags;
+
+            if (mem64)
+            {
+                ++i;
+                pdev->resource[i].flags = PCI_BUS_REGION_F_NONE;
+            }
+        }
+        else
+        {
+            err = -RT_ERROR;
+            LOG_W("%s alloc bar(%d) address fail", rt_dm_dev_get_name(&pdev->parent), i);
+        }
+
+        command |= (cfg & PCIM_BAR_SPACE) ? PCIM_CMD_PORTEN : PCIM_CMD_MEMEN;
+    }
+
+    if (hdr_type == PCIM_HDRTYPE_NORMAL || hdr_type == PCIM_HDRTYPE_BRIDGE)
+    {
+        int rom_addr = (hdr_type == PCIM_HDRTYPE_NORMAL) ? PCIR_BIOS : PCIR_BIOS_1;
+
+        rt_pci_write_config_u32(pdev, rom_addr, 0xfffffffe);
+        rt_pci_read_config_u32(pdev, rom_addr, &cfg);
+
+        if (cfg)
+        {
+            size = -(cfg & ~1);
+
+            if (rt_pci_region_alloc(host_bridge, (void **)&addr, size, PCI_BUS_REGION_F_MEM, RT_FALSE))
+            {
+                rt_pci_write_config_u32(pdev, rom_addr, addr);
+            }
+            command |= PCIM_CMD_MEMEN;
+        }
+    }
+
+    rt_pci_read_config_u16(pdev, PCIR_SUBCLASS, &class);
+
+    if (class == PCIS_DISPLAY_VGA)
+    {
+        command |= PCIM_CMD_PORTEN;
+    }
+
+    rt_pci_write_config_u16(pdev, PCIR_COMMAND, command);
+    rt_pci_write_config_u8(pdev, PCIR_CACHELNSZ, RT_PCI_CACHE_LINE_SIZE);
+    rt_pci_write_config_u8(pdev, PCIR_LATTIMER, 0x80);
+
+    return err;
+}
+
+/**
+ * Enumerate every PCI device in the bus tree rooted at @bus.
+ *
+ * The tree is walked depth-first: descend to the deepest child bus first,
+ * visit its devices, then climb back up through siblings and finally the
+ * parent's own devices.  @callback is invoked once per device with the bus
+ * lock *dropped*; returning RT_TRUE from the callback aborts the whole
+ * enumeration.  While the lock is dropped, the device about to be visited
+ * is kept alive by raising parent.ref_count, released after the callback.
+ *
+ * @param bus      root of the subtree to enumerate (may be RT_NULL)
+ * @param callback visitor; return RT_TRUE to stop enumeration
+ * @param data     opaque pointer forwarded to @callback
+ */
+void rt_pci_enum_device(struct rt_pci_bus *bus,
+        rt_bool_t (callback(struct rt_pci_device *, void *)), void *data)
+{
+    rt_bool_t is_end = RT_FALSE;
+    struct rt_spinlock *lock;
+    struct rt_pci_bus *parent;
+    struct rt_pci_device *pdev, *last_pdev = RT_NULL;
+
+    /* Walk tree */
+    while (bus && !is_end)
+    {
+        /* Goto bottom: descend until a bus with no children is found.
+         * On break, that bus's lock is still held. */
+        for (;;)
+        {
+            lock = &bus->lock;
+
+            spin_lock(lock);
+            if (rt_list_isempty(&bus->children_nodes))
+            {
+                parent = bus->parent;
+                break;
+            }
+            /* NOTE(review): rt_list_entry() is applied to the list *head*
+             * itself, not to a child node - the first child would be
+             * bus->children_nodes.next.  Confirm this is intentional. */
+            bus = rt_list_entry(&bus->children_nodes, struct rt_pci_bus, list);
+            spin_unlock(lock);
+        }
+
+        /* Visit this bus's devices.  The callback runs one device behind
+         * the iterator (last_pdev), so the lock can be dropped around the
+         * callback while the list cursor stays pinned by ref_count. */
+        rt_list_for_each_entry(pdev, &bus->devices_nodes, list)
+        {
+            if (last_pdev)
+            {
+                spin_unlock(lock);
+
+                if (callback(last_pdev, data))
+                {
+                    spin_lock(lock);
+                    --last_pdev->parent.ref_count;
+
+                    is_end = RT_TRUE;
+                    break;
+                }
+
+                spin_lock(lock);
+                --last_pdev->parent.ref_count;
+            }
+            ++pdev->parent.ref_count;
+            last_pdev = pdev;
+        }
+
+        /* Flush the last pending device of this bus */
+        if (!is_end && last_pdev)
+        {
+            spin_unlock(lock);
+
+            if (callback(last_pdev, data))
+            {
+                is_end = RT_TRUE;
+            }
+
+            spin_lock(lock);
+            --last_pdev->parent.ref_count;
+        }
+        last_pdev = RT_NULL;
+        spin_unlock(lock);
+
+        /* Up a level or goto next */
+        while (!is_end)
+        {
+            lock = &bus->lock;
+
+            if (!parent)
+            {
+                /* Root bus, is end */
+                bus = RT_NULL;
+                break;
+            }
+
+            spin_lock(lock);
+            if (bus->list.next != &parent->children_nodes)
+            {
+                /* Has next sibling */
+                bus = rt_list_entry(bus->list.next, struct rt_pci_bus, list);
+                spin_unlock(lock);
+                break;
+            }
+
+            /* Visit all devices on this bus's parent.
+             * NOTE(review): the parent's devices_nodes list is iterated
+             * while holding the *child* bus's lock (lock == &bus->lock);
+             * confirm whether &parent->lock was intended here. */
+            rt_list_for_each_entry(pdev, &parent->devices_nodes, list)
+            {
+                if (last_pdev)
+                {
+                    spin_unlock(lock);
+
+                    if (callback(last_pdev, data))
+                    {
+                        spin_lock(lock);
+                        --last_pdev->parent.ref_count;
+
+                        is_end = RT_TRUE;
+                        break;
+                    }
+
+                    spin_lock(lock);
+                    --last_pdev->parent.ref_count;
+                }
+                ++pdev->parent.ref_count;
+                last_pdev = pdev;
+            }
+
+            /* Flush the last pending device of the parent bus */
+            if (!is_end && last_pdev)
+            {
+                spin_unlock(lock);
+
+                if (callback(last_pdev, data))
+                {
+                    is_end = RT_TRUE;
+                }
+
+                spin_lock(lock);
+                --last_pdev->parent.ref_count;
+            }
+            last_pdev = RT_NULL;
+
+            bus = parent;
+            parent = parent->parent;
+            spin_unlock(lock);
+        }
+    }
+}
+
+/**
+ * Test one ID-table entry against a PCI device.
+ *
+ * A numeric field matches when the entry holds PCI_ANY_ID or the device's
+ * exact value; the class matches when (id->class ^ pdev->class) has no bit
+ * set inside id->class_mask.
+ *
+ * @return the entry itself on a full match, RT_NULL otherwise
+ */
+const struct rt_pci_device_id *rt_pci_match_id(struct rt_pci_device *pdev,
+        const struct rt_pci_device_id *id)
+{
+    rt_bool_t matched = RT_TRUE;
+
+    matched = matched && (id->vendor == PCI_ANY_ID || id->vendor == pdev->vendor);
+    matched = matched && (id->device == PCI_ANY_ID || id->device == pdev->device);
+    matched = matched && (id->subsystem_vendor == PCI_ANY_ID || id->subsystem_vendor == pdev->subsystem_vendor);
+    matched = matched && (id->subsystem_device == PCI_ANY_ID || id->subsystem_device == pdev->subsystem_device);
+    matched = matched && !((id->class ^ pdev->class) & id->class_mask);
+
+    return matched ? id : RT_NULL;
+}
+
+/**
+ * Scan a driver's ID table for the first entry matching @pdev.
+ *
+ * The table ends at an all-zero sentinel: an entry with no vendor, no
+ * subsystem vendor and no class mask terminates the scan.
+ *
+ * @return the matching entry, or RT_NULL if none matched
+ */
+const struct rt_pci_device_id *rt_pci_match_ids(struct rt_pci_device *pdev,
+        const struct rt_pci_device_id *ids)
+{
+    for (; ids->vendor || ids->subsystem_vendor || ids->class_mask; ++ids)
+    {
+        const struct rt_pci_device_id *found = rt_pci_match_id(pdev, ids);
+
+        if (found)
+        {
+            return found;
+        }
+    }
+
+    return RT_NULL;
+}
+
+static struct rt_bus pci_bus;
+
+/**
+ * Register a PCI driver to the PCI bus type.
+ *
+ * @param pdrv the PCI driver; pdrv->name must be valid
+ *
+ * @return RT_EOK on success, otherwise the error from rt_driver_register()
+ */
+rt_err_t rt_pci_driver_register(struct rt_pci_driver *pdrv)
+{
+    RT_ASSERT(pdrv != RT_NULL);
+
+    pdrv->parent.bus = &pci_bus;
+#if RT_NAME_MAX > 0
+    /* "name" is a fixed char[RT_NAME_MAX] array here: use a bounded copy
+     * (same convention as rt_object_init) so a long driver name cannot
+     * overflow it the way rt_strcpy could. */
+    rt_strncpy(pdrv->parent.parent.name, pdrv->name, RT_NAME_MAX);
+#else
+    /* Name is a plain pointer in this configuration; just alias it */
+    pdrv->parent.parent.name = pdrv->name;
+#endif
+
+    return rt_driver_register(&pdrv->parent);
+}
+
+/**
+ * Register a PCI device into the PCI bus type so that it can be matched
+ * against registered drivers.
+ *
+ * @param pdev the PCI device to register
+ *
+ * @return RT_EOK on success, otherwise the error from rt_bus_add_device()
+ */
+rt_err_t rt_pci_device_register(struct rt_pci_device *pdev)
+{
+    rt_err_t err;
+
+    RT_ASSERT(pdev != RT_NULL);
+
+    err = rt_bus_add_device(&pci_bus, &pdev->parent);
+
+    if (err)
+    {
+        return err;
+    }
+
+    return RT_EOK;
+}
+
+/*
+ * Bus match hook: pair a driver with a device either by exact name or,
+ * failing that, by the driver's ID table (the matched entry is cached in
+ * pdev->id for the probe hook).
+ */
+static rt_bool_t pci_match(rt_driver_t drv, rt_device_t dev)
+{
+    struct rt_pci_driver *pdrv = rt_container_of(drv, struct rt_pci_driver, parent);
+    struct rt_pci_device *pdev = rt_container_of(dev, struct rt_pci_device, parent);
+
+    /* First chance: exact name match */
+    if (pdrv->name && pdev->name && !rt_strcmp(pdrv->name, pdev->name))
+    {
+        return RT_TRUE;
+    }
+
+    /* Second chance: match by vendor/device/class ID table */
+    pdev->id = rt_pci_match_ids(pdev, pdrv->ids);
+
+    return pdev->id ? RT_TRUE : RT_FALSE;
+}
+
+/*
+ * Bus probe hook: wire up the device's IRQ and wake capability, then hand
+ * it to the matched driver.  On driver failure the wake enable is undone
+ * so the device is left idle.
+ */
+static rt_err_t pci_probe(rt_device_t dev)
+{
+    rt_err_t err = RT_EOK;
+    struct rt_pci_driver *pdrv = rt_container_of(dev->drv, struct rt_pci_driver, parent);
+    struct rt_pci_device *pdev = rt_container_of(dev, struct rt_pci_device, parent);
+
+    rt_pci_assign_irq(pdev);
+    rt_pci_enable_wake(pdev, RT_PCI_D0, RT_TRUE);
+
+    /* Guard against drivers without a probe hook, mirroring the NULL
+     * checks in pci_remove()/pci_shutdown(). */
+    if (pdrv->probe)
+    {
+        err = pdrv->probe(pdev);
+    }
+
+    if (err)
+    {
+        rt_pci_enable_wake(pdev, RT_PCI_D0, RT_FALSE);
+    }
+
+    return err;
+}
+
+/*
+ * Bus remove hook: let the driver detach first; if it succeeds, disable
+ * wake, drop the device from its bus and opportunistically tear the bus
+ * down as well.
+ */
+static rt_err_t pci_remove(rt_device_t dev)
+{
+    rt_err_t err = RT_EOK;
+    struct rt_pci_bus *bus;
+    struct rt_pci_driver *pdrv = rt_container_of(dev->drv, struct rt_pci_driver, parent);
+    struct rt_pci_device *pdev = rt_container_of(dev, struct rt_pci_device, parent);
+
+    if (pdrv && pdrv->remove)
+    {
+        err = pdrv->remove(pdev);
+
+        if (err)
+        {
+            return err;
+        }
+    }
+
+    rt_pci_enable_wake(pdev, RT_PCI_D0, RT_FALSE);
+
+    bus = pdev->bus;
+    rt_pci_device_remove(pdev);
+    /* Just try to remove the now possibly-empty bus */
+    rt_pci_bus_remove(bus);
+
+    return err;
+}
+
+/*
+ * Bus shutdown hook: give the driver a chance to quiesce its hardware,
+ * then disable wake and detach the device and (if possible) its bus.
+ */
+static rt_err_t pci_shutdown(rt_device_t dev)
+{
+    struct rt_pci_bus *bus;
+    struct rt_pci_driver *pdrv = rt_container_of(dev->drv, struct rt_pci_driver, parent);
+    struct rt_pci_device *pdev = rt_container_of(dev, struct rt_pci_device, parent);
+
+    if (pdrv && pdrv->shutdown)
+    {
+        pdrv->shutdown(pdev);
+    }
+
+    rt_pci_enable_wake(pdev, RT_PCI_D0, RT_FALSE);
+
+    bus = pdev->bus;
+    rt_pci_device_remove(pdev);
+    /* Just try to remove the now possibly-empty bus */
+    rt_pci_bus_remove(bus);
+
+    return RT_EOK;
+}
+
+/* The "pci" bus type: binds rt_pci_driver instances to rt_pci_device
+ * instances via pci_match() and routes probe/remove/shutdown into the
+ * matched driver's hooks. */
+static struct rt_bus pci_bus =
+{
+    .name = "pci",
+    .match = pci_match,
+    .probe = pci_probe,
+    .remove = pci_remove,
+    .shutdown = pci_shutdown,
+};
+
+/* Register the "pci" bus type with the device/driver model core; exported
+ * at the core init stage so the bus exists before devices/drivers attach. */
+static int pci_bus_init(void)
+{
+    rt_bus_register(&pci_bus);
+
+    return 0;
+}
+INIT_CORE_EXPORT(pci_bus_init);

+ 272 - 0
components/drivers/pci/pci_ids.h

@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef __PCI_IDS_H__
+#define __PCI_IDS_H__
+
+/*
+ * PCI vendor IDs (PCI-SIG assigned 16-bit values), in the style of Linux's
+ * include/linux/pci_ids.h.  The list is mostly sorted by numeric value;
+ * aliases (e.g. PCI_VENDOR_ID_NXP) and a few related entries are kept next
+ * to the vendor they belong to instead of in strict numeric order.
+ */
+
+#define PCI_VENDOR_ID_LOONGSON              0x0014
+#define PCI_VENDOR_ID_TTTECH                0x0357
+#define PCI_VENDOR_ID_DYNALINK              0x0675
+#define PCI_VENDOR_ID_UBIQUITI              0x0777
+#define PCI_VENDOR_ID_BERKOM                0x0871
+#define PCI_VENDOR_ID_COMPAQ                0x0e11
+#define PCI_VENDOR_ID_NCR                   0x1000
+#define PCI_VENDOR_ID_ATI                   0x1002
+#define PCI_VENDOR_ID_VLSI                  0x1004
+#define PCI_VENDOR_ID_ADL                   0x1005
+#define PCI_VENDOR_ID_NS                    0x100b
+#define PCI_VENDOR_ID_TSENG                 0x100c
+#define PCI_VENDOR_ID_WEITEK                0x100e
+#define PCI_VENDOR_ID_DEC                   0x1011
+#define PCI_VENDOR_ID_CIRRUS                0x1013
+#define PCI_VENDOR_ID_IBM                   0x1014
+#define PCI_VENDOR_ID_UNISYS                0x1018
+#define PCI_VENDOR_ID_COMPEX2               0x101a
+#define PCI_VENDOR_ID_WD                    0x101c
+#define PCI_VENDOR_ID_AMI                   0x101e
+#define PCI_VENDOR_ID_AMD                   0x1022
+#define PCI_VENDOR_ID_TRIDENT               0x1023
+#define PCI_VENDOR_ID_AI                    0x1025
+#define PCI_VENDOR_ID_DELL                  0x1028
+#define PCI_VENDOR_ID_MATROX                0x102b
+#define PCI_VENDOR_ID_MOBILITY_ELECTRONICS  0x14f2
+#define PCI_VENDOR_ID_CT                    0x102c
+#define PCI_VENDOR_ID_MIRO                  0x1031
+#define PCI_VENDOR_ID_NEC                   0x1033
+#define PCI_VENDOR_ID_FD                    0x1036
+#define PCI_VENDOR_ID_SI                    0x1039
+#define PCI_VENDOR_ID_HP                    0x103c
+#define PCI_VENDOR_ID_PCTECH                0x1042
+#define PCI_VENDOR_ID_ASUSTEK               0x1043
+#define PCI_VENDOR_ID_DPT                   0x1044
+#define PCI_VENDOR_ID_OPTI                  0x1045
+#define PCI_VENDOR_ID_ELSA                  0x1048
+#define PCI_VENDOR_ID_STMICRO               0x104a
+#define PCI_VENDOR_ID_BUSLOGIC              0x104b
+#define PCI_VENDOR_ID_TI                    0x104c
+#define PCI_VENDOR_ID_SONY                  0x104d
+#define PCI_VENDOR_ID_ANIGMA                0x1051
+#define PCI_VENDOR_ID_EFAR                  0x1055
+#define PCI_VENDOR_ID_MOTOROLA              0x1057
+#define PCI_VENDOR_ID_PROMISE               0x105a
+#define PCI_VENDOR_ID_FOXCONN               0x105b
+#define PCI_VENDOR_ID_UMC                   0x1060
+#define PCI_VENDOR_ID_PICOPOWER             0x1066
+#define PCI_VENDOR_ID_MYLEX                 0x1069
+#define PCI_VENDOR_ID_APPLE                 0x106b
+#define PCI_VENDOR_ID_YAMAHA                0x1073
+#define PCI_VENDOR_ID_QLOGIC                0x1077
+#define PCI_VENDOR_ID_CYRIX                 0x1078
+#define PCI_VENDOR_ID_CONTAQ                0x1080
+#define PCI_VENDOR_ID_OLICOM                0x108d
+#define PCI_VENDOR_ID_SUN                   0x108e
+#define PCI_VENDOR_ID_NI                    0x1093
+#define PCI_VENDOR_ID_CMD                   0x1095
+#define PCI_VENDOR_ID_BROOKTREE             0x109e
+#define PCI_VENDOR_ID_SGI                   0x10a9
+#define PCI_VENDOR_ID_WINBOND               0x10ad
+#define PCI_VENDOR_ID_PLX                   0x10b5
+#define PCI_VENDOR_ID_MADGE                 0x10b6
+#define PCI_VENDOR_ID_3COM                  0x10b7
+#define PCI_VENDOR_ID_AL                    0x10b9
+#define PCI_VENDOR_ID_NEOMAGIC              0x10c8
+#define PCI_VENDOR_ID_TCONRAD               0x10da
+#define PCI_VENDOR_ID_ROHM                  0x10db
+#define PCI_VENDOR_ID_NVIDIA                0x10de
+#define PCI_VENDOR_ID_IMS                   0x10e0
+#define PCI_VENDOR_ID_AMCC                  0x10e8
+#define PCI_VENDOR_ID_INTERG                0x10ea
+#define PCI_VENDOR_ID_REALTEK               0x10ec
+#define PCI_VENDOR_ID_XILINX                0x10ee
+#define PCI_VENDOR_ID_INIT                  0x1101
+#define PCI_VENDOR_ID_CREATIVE              0x1102
+#define PCI_VENDOR_ID_ECTIVA                PCI_VENDOR_ID_CREATIVE
+#define PCI_VENDOR_ID_TTI                   0x1103
+#define PCI_VENDOR_ID_SIGMA                 0x1105
+#define PCI_VENDOR_ID_VIA                   0x1106
+#define PCI_VENDOR_ID_SIEMENS               0x110a
+#define PCI_VENDOR_ID_VORTEX                0x1119
+#define PCI_VENDOR_ID_EF                    0x111a
+#define PCI_VENDOR_ID_IDT                   0x111d
+#define PCI_VENDOR_ID_FORE                  0x1127
+#define PCI_VENDOR_ID_PHILIPS               0x1131
+#define PCI_VENDOR_ID_EICON                 0x1133
+#define PCI_VENDOR_ID_CISCO                 0x1137
+#define PCI_VENDOR_ID_ZIATECH               0x1138
+#define PCI_VENDOR_ID_SYSKONNECT            0x1148
+#define PCI_VENDOR_ID_DIGI                  0x114f
+#define PCI_VENDOR_ID_XIRCOM                0x115d
+#define PCI_VENDOR_ID_SERVERWORKS           0x1166
+#define PCI_VENDOR_ID_ALTERA                0x1172
+#define PCI_VENDOR_ID_SBE                   0x1176
+#define PCI_VENDOR_ID_TOSHIBA               0x1179
+#define PCI_VENDOR_ID_TOSHIBA_2             0x102f
+#define PCI_VENDOR_ID_ATTO                  0x117c
+#define PCI_VENDOR_ID_RICOH                 0x1180
+#define PCI_VENDOR_ID_DLINK                 0x1186
+#define PCI_VENDOR_ID_ARTOP                 0x1191
+#define PCI_VENDOR_ID_ZEITNET               0x1193
+#define PCI_VENDOR_ID_FUJITSU_ME            0x119e
+#define PCI_VENDOR_ID_MARVELL               0x11ab
+#define PCI_VENDOR_ID_V3                    0x11b0
+#define PCI_VENDOR_ID_ATT                   0x11c1
+#define PCI_VENDOR_ID_SPECIALIX             0x11cb
+#define PCI_VENDOR_ID_ANALOG_DEVICES        0x11d4
+#define PCI_VENDOR_ID_ZORAN                 0x11de
+#define PCI_VENDOR_ID_COMPEX                0x11f6
+#define PCI_VENDOR_ID_PMC_Sierra            0x11f8
+#define PCI_VENDOR_ID_RP                    0x11fe
+#define PCI_VENDOR_ID_CYCLADES              0x120e
+#define PCI_VENDOR_ID_ESSENTIAL             0x120f
+#define PCI_VENDOR_ID_O2                    0x1217
+#define PCI_VENDOR_ID_3DFX                  0x121a
+#define PCI_VENDOR_ID_QEMU                  0x1234
+#define PCI_VENDOR_ID_AVM                   0x1244
+#define PCI_VENDOR_ID_STALLION              0x124d
+#define PCI_VENDOR_ID_ESS                   0x125d
+#define PCI_VENDOR_ID_SATSAGEM              0x1267
+#define PCI_VENDOR_ID_ENSONIQ               0x1274
+#define PCI_VENDOR_ID_TRANSMETA             0x1279
+#define PCI_VENDOR_ID_ROCKWELL              0x127a
+#define PCI_VENDOR_ID_ITE                   0x1283
+#define PCI_VENDOR_ID_ALTEON                0x12ae
+#define PCI_VENDOR_ID_NVIDIA_SGS            0x12d2
+#define PCI_VENDOR_ID_PERICOM               0x12d8
+#define PCI_VENDOR_ID_AUREAL                0x12eb
+#define PCI_VENDOR_ID_ELECTRONICDESIGNGMBH  0x12f8
+#define PCI_VENDOR_ID_ESDGMBH               0x12fe
+#define PCI_VENDOR_ID_CB                    0x1307
+#define PCI_VENDOR_ID_SIIG                  0x131f
+#define PCI_VENDOR_ID_RADISYS               0x1331
+#define PCI_VENDOR_ID_MICRO_MEMORY          0x1332
+#define PCI_VENDOR_ID_DOMEX                 0x134a
+#define PCI_VENDOR_ID_INTASHIELD            0x135a
+#define PCI_VENDOR_ID_QUATECH               0x135c
+#define PCI_VENDOR_ID_SEALEVEL              0x135e
+#define PCI_VENDOR_ID_HYPERCOPE             0x1365
+#define PCI_VENDOR_ID_DIGIGRAM              0x1369
+#define PCI_VENDOR_ID_KAWASAKI              0x136b
+#define PCI_VENDOR_ID_CNET                  0x1371
+#define PCI_VENDOR_ID_LMC                   0x1376
+#define PCI_VENDOR_ID_NETGEAR               0x1385
+#define PCI_VENDOR_ID_APPLICOM              0x1389
+#define PCI_VENDOR_ID_MOXA                  0x1393
+#define PCI_VENDOR_ID_CCD                   0x1397
+#define PCI_VENDOR_ID_EXAR                  0x13a8
+#define PCI_VENDOR_ID_MICROGATE             0x13c0
+#define PCI_VENDOR_ID_3WARE                 0x13c1
+#define PCI_VENDOR_ID_IOMEGA                0x13ca
+#define PCI_VENDOR_ID_ABOCOM                0x13d1
+#define PCI_VENDOR_ID_SUNDANCE              0x13f0
+#define PCI_VENDOR_ID_CMEDIA                0x13f6
+#define PCI_VENDOR_ID_ADVANTECH             0x13fe
+#define PCI_VENDOR_ID_MEILHAUS              0x1402
+#define PCI_VENDOR_ID_LAVA                  0x1407
+#define PCI_VENDOR_ID_TIMEDIA               0x1409
+#define PCI_VENDOR_ID_ICE                   0x1412
+#define PCI_VENDOR_ID_MICROSOFT             0x1414
+#define PCI_VENDOR_ID_OXSEMI                0x1415
+#define PCI_VENDOR_ID_CHELSIO               0x1425
+#define PCI_VENDOR_ID_ADLINK                0x144a
+#define PCI_VENDOR_ID_SAMSUNG               0x144d
+#define PCI_VENDOR_ID_GIGABYTE              0x1458
+#define PCI_VENDOR_ID_AMBIT                 0x1468
+#define PCI_VENDOR_ID_MYRICOM               0x14c1
+#define PCI_VENDOR_ID_MEDIATEK              0x14c3
+#define PCI_VENDOR_ID_TITAN                 0x14d2
+#define PCI_VENDOR_ID_PANACOM               0x14d4
+#define PCI_VENDOR_ID_SIPACKETS             0x14d9
+#define PCI_VENDOR_ID_AFAVLAB               0x14db
+#define PCI_VENDOR_ID_AMPLICON              0x14dc
+#define PCI_VENDOR_ID_BCM_GVC               0x14a4
+#define PCI_VENDOR_ID_TOPIC                 0x151f
+#define PCI_VENDOR_ID_MAINPINE              0x1522
+#define PCI_VENDOR_ID_SYBA                  0x1592
+#define PCI_VENDOR_ID_MORETON               0x15aa
+#define PCI_VENDOR_ID_VMWARE                0x15ad
+#define PCI_VENDOR_ID_ZOLTRIX               0x15b0
+#define PCI_VENDOR_ID_MELLANOX              0x15b3
+#define PCI_VENDOR_ID_DFI                   0x15bd
+#define PCI_VENDOR_ID_QUICKNET              0x15e2
+#define PCI_VENDOR_ID_PDC                   0x15e9
+#define PCI_VENDOR_ID_FARSITE               0x1619
+#define PCI_VENDOR_ID_ARIMA                 0x161f
+#define PCI_VENDOR_ID_BROCADE               0x1657
+#define PCI_VENDOR_ID_SIBYTE                0x166d
+#define PCI_VENDOR_ID_ATHEROS               0x168c
+#define PCI_VENDOR_ID_NETCELL               0x169c
+#define PCI_VENDOR_ID_CENATEK               0x16ca
+#define PCI_VENDOR_ID_SYNOPSYS              0x16c3
+#define PCI_VENDOR_ID_USR                   0x16ec
+#define PCI_VENDOR_ID_VITESSE               0x1725
+#define PCI_VENDOR_ID_LINKSYS               0x1737
+#define PCI_VENDOR_ID_ALTIMA                0x173b
+#define PCI_VENDOR_ID_CAVIUM                0x177d
+#define PCI_VENDOR_ID_TECHWELL              0x1797
+#define PCI_VENDOR_ID_BELKIN                0x1799
+#define PCI_VENDOR_ID_RDC                   0x17f3
+#define PCI_VENDOR_ID_GLI                   0x17a0
+#define PCI_VENDOR_ID_LENOVO                0x17aa
+#define PCI_VENDOR_ID_QCOM                  0x17cb
+#define PCI_VENDOR_ID_CDNS                  0x17cd
+#define PCI_VENDOR_ID_ARECA                 0x17d3
+#define PCI_VENDOR_ID_S2IO                  0x17d5
+#define PCI_VENDOR_ID_SITECOM               0x182d
+#define PCI_VENDOR_ID_TOPSPIN               0x1867
+#define PCI_VENDOR_ID_COMMTECH              0x18f7
+#define PCI_VENDOR_ID_SILAN                 0x1904
+#define PCI_VENDOR_ID_RENESAS               0x1912
+#define PCI_VENDOR_ID_SOLARFLARE            0x1924
+#define PCI_VENDOR_ID_TDI                   0x192e
+#define PCI_VENDOR_ID_FREESCALE             0x1957
+#define PCI_VENDOR_ID_NXP                   PCI_VENDOR_ID_FREESCALE
+#define PCI_VENDOR_ID_PASEMI                0x1959
+#define PCI_VENDOR_ID_ATTANSIC              0x1969
+#define PCI_VENDOR_ID_JMICRON               0x197b
+#define PCI_VENDOR_ID_KORENIX               0x1982
+#define PCI_VENDOR_ID_HUAWEI                0x19e5
+#define PCI_VENDOR_ID_NETRONOME             0x19ee
+#define PCI_VENDOR_ID_QMI                   0x1a32
+#define PCI_VENDOR_ID_AZWAVE                0x1a3b
+#define PCI_VENDOR_ID_REDHAT_QUMRANET       0x1af4
+#define PCI_VENDOR_ID_ASMEDIA               0x1b21
+#define PCI_VENDOR_ID_REDHAT                0x1b36
+#define PCI_VENDOR_ID_SILICOM_DENMARK       0x1c2c
+#define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36
+#define PCI_VENDOR_ID_CIRCUITCO             0x1cc8
+#define PCI_VENDOR_ID_AMAZON                0x1d0f
+#define PCI_VENDOR_ID_ZHAOXIN               0x1d17
+#define PCI_VENDOR_ID_HYGON                 0x1d94
+#define PCI_VENDOR_ID_FUNGIBLE              0x1dad
+#define PCI_VENDOR_ID_HXT                   0x1dbf
+#define PCI_VENDOR_ID_TEKRAM                0x1de1
+#define PCI_VENDOR_ID_TEHUTI                0x1fc9
+#define PCI_VENDOR_ID_SUNIX                 0x1fd4
+#define PCI_VENDOR_ID_HINT                  0x3388
+#define PCI_VENDOR_ID_3DLABS                0x3d3d
+#define PCI_VENDOR_ID_NETXEN                0x4040
+#define PCI_VENDOR_ID_AKS                   0x416c
+#define PCI_VENDOR_ID_ACCESSIO              0x494f
+#define PCI_VENDOR_ID_S3                    0x5333
+#define PCI_VENDOR_ID_DUNORD                0x5544
+#define PCI_VENDOR_ID_DCI                   0x6666
+#define PCI_VENDOR_ID_INTEL                 0x8086
+#define PCI_VENDOR_ID_SCALEMP               0x8686
+#define PCI_VENDOR_ID_COMPUTONE             0x8e0e
+#define PCI_VENDOR_ID_KTI                   0x8e2e
+#define PCI_VENDOR_ID_ADAPTEC               0x9004
+#define PCI_VENDOR_ID_ADAPTEC2              0x9005
+#define PCI_VENDOR_ID_HOLTEK                0x9412
+#define PCI_VENDOR_ID_NETMOS                0x9710
+#define PCI_VENDOR_ID_3COM_2                0xa727
+#define PCI_VENDOR_ID_DIGIUM                0xd161
+#define PCI_VENDOR_ID_TIGERJET              0xe159
+#define PCI_VENDOR_ID_XILINX_RME            0xea60
+#define PCI_VENDOR_ID_XEN                   0x5853
+#define PCI_VENDOR_ID_OCZ                   0x1b85
+#define PCI_VENDOR_ID_NCUBE                 0x10ff
+
+#endif /* __PCI_IDS_H__ */

+ 1090 - 0
components/drivers/pci/pci_regs.h

@@ -0,0 +1,1090 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef __PCI_REGS_H__
+#define __PCI_REGS_H__
+
+#include <rtdef.h>
+
+/*
+ *  PCI standard defines
+ *  Copyright 1994, Drew Eckhardt
+ *  Copyright 1997--1999 Martin Mares <mj@ucw.cz>
+ *
+ *  For more information, please consult the following manuals (look at
+ *  http://www.pcisig.com/ for how to get them):
+ *
+ *  PCI BIOS Specification
+ *  PCI Local Bus Specification
+ *  PCI to PCI Bridge Specification
+ *  PCI System Design Guide
+ *
+ *  For HyperTransport information, please consult the following manuals
+ *  from http://www.hypertransport.org :
+ *
+ *  The HyperTransport I/O Link Specification
+ *
+ *  Meaning of the prefixes:
+ *
+ *  PCIM_xxx: mask to locate subfield in register
+ *  PCIR_xxx: config register offset
+ *  PCIC_xxx: device class
+ *  PCIS_xxx: device subclass
+ *  PCIP_xxx: device programming interface
+ *  PCIV_xxx: PCI vendor ID (only required to fixup ancient devices)
+ *  PCID_xxx: device ID
+ *  PCIY_xxx: capability identification number
+ *  PCIZ_xxx: extended capability identification number
+ */
+
+/* some PCI bus constants */
+#define PCI_DOMAINMAX               65535   /* highest supported domain number */
+#define PCI_BUSMAX                  255     /* highest supported bus number */
+#define PCI_SLOTMAX                 31      /* highest supported slot number */
+#define PCI_FUNCMAX                 7       /* highest supported function number */
+#define PCI_REGMAX                  255     /* highest supported config register addr */
+#define PCIE_REGMAX                 4095    /* highest supported config register addr */
+#define PCI_MAXHDRTYPE              2
+#define PCI_STD_HEADER_SIZEOF       64
+#define PCI_STD_NUM_BARS            6       /* number of standard BARs */
+
+/* PCI config header registers for all devices */
+
+#define PCIR_DEVVENDOR              0x00
+#define PCIR_VENDOR                 0x00
+#define PCIR_DEVICE                 0x02
+#define PCIR_COMMAND                0x04
+#define PCIM_CMD_PORTEN             0x0001
+#define PCIM_CMD_MEMEN              0x0002
+#define PCIM_CMD_BUSMASTEREN        0x0004
+#define PCIM_CMD_SPECIALEN          0x0008
+#define PCIM_CMD_MWRICEN            0x0010
+#define PCIM_CMD_PERRESPEN          0x0040
+#define PCIM_CMD_SERRESPEN          0x0100
+#define PCIM_CMD_BACKTOBACK         0x0200
+#define PCIM_CMD_INTxDIS            0x0400
+#define PCIR_STATUS                 0x06
+#define PCIM_STATUS_INTxSTATE       0x0008
+#define PCIM_STATUS_CAPPRESENT      0x0010
+#define PCIM_STATUS_66CAPABLE       0x0020
+#define PCIM_STATUS_BACKTOBACK      0x0080
+#define PCIM_STATUS_MDPERR          0x0100
+#define PCIM_STATUS_SEL_FAST        0x0000
+#define PCIM_STATUS_SEL_MEDIMUM     0x0200
+#define PCIM_STATUS_SEL_SLOW        0x0400
+#define PCIM_STATUS_SEL_MASK        0x0600
+#define PCIM_STATUS_STABORT         0x0800
+#define PCIM_STATUS_RTABORT         0x1000
+#define PCIM_STATUS_RMABORT         0x2000
+#define PCIM_STATUS_SERR            0x4000
+#define PCIM_STATUS_PERR            0x8000
+#define PCIR_REVID                  0x08
+#define PCIR_PROGIF                 0x09
+#define PCIR_SUBCLASS               0x0a
+#define PCIR_CLASS                  0x0b
+#define PCIR_CACHELNSZ              0x0c
+#define PCIR_LATTIMER               0x0d
+#define PCIR_HDRTYPE                0x0e
+#define PCIM_HDRTYPE                0x7f
+#define PCIM_HDRTYPE_NORMAL         0x00
+#define PCIM_HDRTYPE_BRIDGE         0x01
+#define PCIM_HDRTYPE_CARDBUS        0x02
+#define PCIM_MFDEV                  0x80
+#define PCIR_BIST                   0x0f
+
+/* PCI Spec rev 2.2: 0FFFFh is an invalid value for Vendor ID. */
+#define PCIV_INVALID                0xffff
+
+/* Capability Register Offsets */
+
+#define PCICAP_ID                   0x0
+#define PCICAP_NEXTPTR              0x1
+
+/* Capability Identification Numbers */
+
+#define PCIY_PMG                    0x01    /* PCI Power Management */
+#define PCIY_AGP                    0x02    /* AGP */
+#define PCIY_VPD                    0x03    /* Vital Product Data */
+#define PCIY_SLOTID                 0x04    /* Slot Identification */
+#define PCIY_MSI                    0x05    /* Message Signaled Interrupts */
+#define PCIY_CHSWP                  0x06    /* CompactPCI Hot Swap */
+#define PCIY_PCIX                   0x07    /* PCI-X */
+#define PCIY_HT                     0x08    /* HyperTransport */
+#define PCIY_VENDOR                 0x09    /* Vendor Unique */
+#define PCIY_DEBUG                  0x0a    /* Debug port */
+#define PCIY_CRES                   0x0b    /* CompactPCI central resource control */
+#define PCIY_HOTPLUG                0x0c    /* PCI Hot-Plug */
+#define PCIY_SUBVENDOR              0x0d    /* PCI-PCI bridge subvendor ID */
+#define PCIY_AGP8X                  0x0e    /* AGP 8x */
+#define PCIY_SECDEV                 0x0f    /* Secure Device */
+#define PCIY_EXPRESS                0x10    /* PCI Express */
+#define PCIY_MSIX                   0x11    /* MSI-X */
+#define PCIY_SATA                   0x12    /* SATA */
+#define PCIY_PCIAF                  0x13    /* PCI Advanced Features */
+#define PCIY_EA                     0x14    /* PCI Extended Allocation */
+#define PCIY_FPB                    0x15    /* Flattening Portal Bridge */
+#define PCIY_MAX                    PCIY_FPB
+
+/* Extended Capability Register Fields */
+
+#define PCIR_EXTCAP                 0x100
+#define PCIM_EXTCAP_ID              0x0000ffff
+#define PCIM_EXTCAP_VER             0x000f0000
+#define PCIM_EXTCAP_NEXTPTR         0xfff00000
+#define PCI_EXTCAP_ID(ecap)         ((ecap) & PCIM_EXTCAP_ID)
+#define PCI_EXTCAP_VER(ecap)        (((ecap) & PCIM_EXTCAP_VER) >> 16)
+#define PCI_EXTCAP_NEXTPTR(ecap)    (((ecap) & PCIM_EXTCAP_NEXTPTR) >> 20)
+
+/* Extended Capability Identification Numbers */
+
+#define PCIZ_AER                    0x0001  /* Advanced Error Reporting */
+#define PCIZ_VC                     0x0002  /* Virtual Channel if MFVC Ext Cap not set */
+#define PCIZ_SERNUM                 0x0003  /* Device Serial Number */
+#define PCIZ_PWRBDGT                0x0004  /* Power Budgeting */
+#define PCIZ_RCLINK_DCL             0x0005  /* Root Complex Link Declaration */
+#define PCIZ_RCLINK_CTL             0x0006  /* Root Complex Internal Link Control */
+#define PCIZ_RCEC_ASSOC             0x0007  /* Root Complex Event Collector Association */
+#define PCIZ_MFVC                   0x0008  /* Multi-Function Virtual Channel */
+#define PCIZ_VC2                    0x0009  /* Virtual Channel if MFVC Ext Cap set */
+#define PCIZ_RCRB                   0x000a  /* RCRB Header */
+#define PCIZ_VENDOR                 0x000b  /* Vendor Unique */
+#define PCIZ_CAC                    0x000c  /* Configuration Access Correction -- obsolete */
+#define PCIZ_ACS                    0x000d  /* Access Control Services */
+#define PCIZ_ARI                    0x000e  /* Alternative Routing-ID Interpretation */
+#define PCIZ_ATS                    0x000f  /* Address Translation Services */
+#define PCIZ_SRIOV                  0x0010  /* Single Root IO Virtualization */
+#define PCIZ_MRIOV                  0x0011  /* Multiple Root IO Virtualization */
+#define PCIZ_MULTICAST              0x0012  /* Multicast */
+#define PCIZ_PAGE_REQ               0x0013  /* Page Request */
+#define PCIZ_AMD                    0x0014  /* Reserved for AMD */
+#define PCIZ_RESIZE_BAR             0x0015  /* Resizable BAR */
+#define PCIZ_DPA                    0x0016  /* Dynamic Power Allocation */
+#define PCIZ_TPH_REQ                0x0017  /* TPH Requester */
+#define PCIZ_LTR                    0x0018  /* Latency Tolerance Reporting */
+#define PCIZ_SEC_PCIE               0x0019  /* Secondary PCI Express */
+#define PCIZ_PMUX                   0x001a  /* Protocol Multiplexing */
+#define PCIZ_PASID                  0x001b  /* Process Address Space ID */
+#define PCIZ_LN_REQ                 0x001c  /* LN Requester */
+#define PCIZ_DPC                    0x001d  /* Downstream Port Containment */
+#define PCIZ_L1PM                   0x001e  /* L1 PM Substates */
+#define PCIZ_PTM                    0x001f  /* Precision Time Measurement */
+#define PCIZ_M_PCIE                 0x0020  /* PCIe over M-PHY */
+#define PCIZ_FRS                    0x0021  /* FRS Queuing */
+#define PCIZ_RTR                    0x0022  /* Readiness Time Reporting */
+#define PCIZ_DVSEC                  0x0023  /* Designated Vendor-Specific */
+#define PCIZ_VF_REBAR               0x0024  /* VF Resizable BAR */
+#define PCIZ_DLNK                   0x0025  /* Data Link Feature */
+#define PCIZ_16GT                   0x0026  /* Physical Layer 16.0 GT/s */
+#define PCIZ_LMR                    0x0027  /* Lane Margining at Receiver */
+#define PCIZ_HIER_ID                0x0028  /* Hierarchy ID */
+#define PCIZ_NPEM                   0x0029  /* Native PCIe Enclosure Management */
+#define PCIZ_PL32                   0x002a  /* Physical Layer 32.0 GT/s */
+#define PCIZ_AP                     0x002b  /* Alternate Protocol */
+#define PCIZ_SFI                    0x002c  /* System Firmware Intermediary */
+
+/* Resizable BARs */
+#define PCIM_REBAR_CAP              4           /* Capability register */
+#define PCIM_REBAR_CAP_SIZES        0x00fffff0  /* Supported BAR sizes */
+#define PCIM_REBAR_CTRL             8           /* Control register */
+#define PCIM_REBAR_CTRL_BAR_IDX     0x00000007  /* BAR index */
+#define PCIM_REBAR_CTRL_NBAR_MASK   0x000000e0
+#define PCIM_REBAR_CTRL_NBAR_SHIFT  5           /* Shift for # of BARs */
+#define PCIM_REBAR_CTRL_BAR_SIZE    0x00001f00  /* BAR size */
+#define PCIM_REBAR_CTRL_BAR_SHIFT   8           /* Shift for BAR size */
+
+/* config registers for header type 0 devices */
+
+#define PCIR_BARS                   0x10
+#define PCIR_BAR(x)                 (PCIR_BARS + (x) * 4)
+#define PCI_RID2BAR(rid)            (((rid) - PCIR_BARS) / 4)
+#define PCI_BAR_IO(x)               (((x) & PCIM_BAR_SPACE) == PCIM_BAR_IO_SPACE)
+#define PCI_BAR_MEM(x)              (((x) & PCIM_BAR_SPACE) == PCIM_BAR_MEM_SPACE)
+#define PCIM_BAR_SPACE              0x01    /* 0 = memory, 1 = I/O */
+#define PCIM_BAR_SPACE_IO           0x01
+#define PCIM_BAR_SPACE_MEMORY       0x00
+#define PCIM_BAR_MEM_TYPE_MASK      0x06
+#define PCIM_BAR_MEM_TYPE_32        0x00    /* 32 bit address */
+#define PCIM_BAR_MEM_TYPE_1M        0x02    /* Below 1M [obsolete] */
+#define PCIM_BAR_MEM_TYPE_64        0x04    /* 64 bit address */
+#define PCIM_BAR_MEM_PREFETCH       0x08    /* prefetchable? */
+#define PCIM_BAR_MEM_MASK           (~0x0fUL)
+#define PCIM_BAR_IO_MASK            (~0x03UL)
+#define PCIR_CIS                    0x28
+#define PCIM_CIS_ASI_MASK           0x00000007
+#define PCIM_CIS_ASI_CONFIG         0
+#define PCIM_CIS_ASI_BAR0           1
+#define PCIM_CIS_ASI_BAR1           2
+#define PCIM_CIS_ASI_BAR2           3
+#define PCIM_CIS_ASI_BAR3           4
+#define PCIM_CIS_ASI_BAR4           5
+#define PCIM_CIS_ASI_BAR5           6
+#define PCIM_CIS_ASI_ROM            7
+#define PCIM_CIS_ADDR_MASK          0x0ffffff8
+#define PCIM_CIS_ROM_MASK           0xf0000000
+#define PCIM_CIS_CONFIG_MASK        0xff
+#define PCIR_SUBVEND_0              0x2c
+#define PCIR_SUBDEV_0               0x2e
+#define PCIR_BIOS                   0x30
+#define PCIM_BIOS_ENABLE            0x01
+#define PCIM_BIOS_ADDR_MASK         0xfffff800
+#define PCIR_CAP_PTR                0x34
+#define PCIR_INTLINE                0x3c
+#define PCIR_INTPIN                 0x3d
+#define PCIR_MINGNT                 0x3e
+#define PCIR_MAXLAT                 0x3f
+
+/* config registers for header type 1 (PCI-to-PCI bridge) devices */
+
+#define PCIR_MAX_BAR_1              1
+#define PCIR_SECSTAT_1              0x1e
+
+#define PCIR_PRIBUS_1               0x18
+#define PCIR_SECBUS_1               0x19
+#define PCIR_SUBBUS_1               0x1a
+#define PCIR_SECLAT_1               0x1b
+
+#define PCIR_IOBASEL_1              0x1c
+#define PCIR_IOLIMITL_1             0x1d
+#define PCIR_IOBASEH_1              0x30
+#define PCIR_IOLIMITH_1             0x32
+#define PCIM_BRIO_16                0x0
+#define PCIM_BRIO_32                0x1
+#define PCIM_BRIO_MASK              0xf
+
+#define PCIR_MEMBASE_1              0x20
+#define PCIR_MEMLIMIT_1             0x22
+
+#define PCIR_PMBASEL_1              0x24
+#define PCIR_PMLIMITL_1             0x26
+#define PCIR_PMBASEH_1              0x28
+#define PCIR_PMLIMITH_1             0x2c
+#define PCIM_BRPM_32                0x0
+#define PCIM_BRPM_64                0x1
+#define PCIM_BRPM_MASK              0xf
+
+#define PCIR_BIOS_1                 0x38
+#define PCIR_BRIDGECTL_1            0x3e
+
+#define PCI_PPBMEMBASE(h, l)        ((((rt_uint64_t)(h) << 32) + ((l) << 16)) & ~0xfffff)
+#define PCI_PPBMEMLIMIT(h, l)       ((((rt_uint64_t)(h) << 32) + ((l) << 16)) | 0xfffff)
+#define PCI_PPBIOBASE(h, l)         ((((h) << 16) + ((l) << 8)) & ~0xfff)
+#define PCI_PPBIOLIMIT(h, l)        ((((h) << 16) + ((l) << 8)) | 0xfff)
+
+/* config registers for header type 2 (CardBus) devices */
+
+#define PCIR_MAX_BAR_2              0
+#define PCIR_CAP_PTR_2              0x14
+#define PCIR_SECSTAT_2              0x16
+
+#define PCIR_PRIBUS_2               0x18
+#define PCIR_SECBUS_2               0x19
+#define PCIR_SUBBUS_2               0x1a
+#define PCIR_SECLAT_2               0x1b
+
+#define PCIR_MEMBASE0_2             0x1c
+#define PCIR_MEMLIMIT0_2            0x20
+#define PCIR_MEMBASE1_2             0x24
+#define PCIR_MEMLIMIT1_2            0x28
+#define PCIR_IOBASE0_2              0x2c
+#define PCIR_IOLIMIT0_2             0x30
+#define PCIR_IOBASE1_2              0x34
+#define PCIR_IOLIMIT1_2             0x38
+#define PCIM_CBBIO_16               0x0
+#define PCIM_CBBIO_32               0x1
+#define PCIM_CBBIO_MASK             0x3
+
+#define PCIR_BRIDGECTL_2            0x3e
+
+#define PCIR_SUBVEND_2              0x40
+#define PCIR_SUBDEV_2               0x42
+
+#define PCIR_PCCARDIF_2             0x44
+
+#define PCI_CBBMEMBASE(l)           ((l) & ~0xfffff)
+#define PCI_CBBMEMLIMIT(l)          ((l) | 0xfffff)
+#define PCI_CBBIOBASE(l)            ((l) & ~0x3)
+#define PCI_CBBIOLIMIT(l)           ((l) | 0x3)
+
+/* PCI device class, subclass and programming interface definitions */
+#define PCIC_NOT_DEFINED                0x0000
+#define PCIS_NOT_DEFINED_VGA            0x0001
+
+#define PCIC_STORAGE                    0x01
+#define PCIS_STORAGE_SCSI               0x0100
+#define PCIS_STORAGE_IDE                0x0101
+#define PCIS_STORAGE_FLOPPY             0x0102
+#define PCIS_STORAGE_IPI                0x0103
+#define PCIS_STORAGE_RAID               0x0104
+#define PCIS_STORAGE_SATA               0x0106
+#define PCIS_STORAGE_SATA_AHCI          0x010601
+#define PCIS_STORAGE_SAS                0x0107
+#define PCIS_STORAGE_EXPRESS            0x010802
+#define PCIS_STORAGE_OTHER              0x0180
+
+#define PCIC_NETWORK                    0x02
+#define PCIS_NETWORK_ETHERNET           0x0200
+#define PCIS_NETWORK_TOKEN_RING         0x0201
+#define PCIS_NETWORK_FDDI               0x0202
+#define PCIS_NETWORK_ATM                0x0203
+#define PCIS_NETWORK_OTHER              0x0280
+
+#define PCIC_DISPLAY                    0x03
+#define PCIS_DISPLAY_VGA                0x0300
+#define PCIS_DISPLAY_XGA                0x0301
+#define PCIS_DISPLAY_3D                 0x0302
+#define PCIS_DISPLAY_OTHER              0x0380
+
+#define PCIC_MULTIMEDIA                 0x04
+#define PCIS_MULTIMEDIA_VIDEO           0x0400
+#define PCIS_MULTIMEDIA_AUDIO           0x0401
+#define PCIS_MULTIMEDIA_PHONE           0x0402
+#define PCIS_MULTIMEDIA_HD_AUDIO        0x0403
+#define PCIS_MULTIMEDIA_OTHER           0x0480
+
+#define PCIC_MEMORY                     0x05
+#define PCIS_MEMORY_RAM                 0x0500
+#define PCIS_MEMORY_FLASH               0x0501
+#define PCIS_MEMORY_CXL                 0x0502
+#define PCIS_MEMORY_OTHER               0x0580
+
+#define PCIC_BRIDGE                     0x06
+#define PCIS_BRIDGE_HOST                0x0600
+#define PCIS_BRIDGE_ISA                 0x0601
+#define PCIS_BRIDGE_EISA                0x0602
+#define PCIS_BRIDGE_MC                  0x0603
+#define PCIS_BRIDGE_PCI                 0x0604
+#define PCIS_BRIDGE_PCI_NORMAL          0x060400
+#define PCIS_BRIDGE_PCI_SUBTRACTIVE     0x060401
+#define PCIS_BRIDGE_PCMCIA              0x0605
+#define PCIS_BRIDGE_NUBUS               0x0606
+#define PCIS_BRIDGE_CARDBUS             0x0607
+#define PCIS_BRIDGE_RACEWAY             0x0608
+#define PCIS_BRIDGE_OTHER               0x0680
+
+#define PCIC_COMMUNICATION              0x07
+#define PCIS_COMMUNICATION_SERIAL       0x0700
+#define PCIS_COMMUNICATION_PARALLEL     0x0701
+#define PCIS_COMMUNICATION_MULTISERIAL  0x0702
+#define PCIS_COMMUNICATION_MODEM        0x0703
+#define PCIS_COMMUNICATION_OTHER        0x0780
+
+#define PCIC_SYSTEM                     0x08
+#define PCIS_SYSTEM_PIC                 0x0800
+#define PCIS_SYSTEM_PIC_IOAPIC          0x080010
+#define PCIS_SYSTEM_PIC_IOXAPIC         0x080020
+#define PCIS_SYSTEM_DMA                 0x0801
+#define PCIS_SYSTEM_TIMER               0x0802
+#define PCIS_SYSTEM_RTC                 0x0803
+#define PCIS_SYSTEM_PCI_HOTPLUG         0x0804
+#define PCIS_SYSTEM_SDHCI               0x0805
+#define PCIS_SYSTEM_RCEC                0x0807
+#define PCIS_SYSTEM_OTHER               0x0880
+
+#define PCIC_INPUT                      0x09
+#define PCIS_INPUT_KEYBOARD             0x0900
+#define PCIS_INPUT_PEN                  0x0901
+#define PCIS_INPUT_MOUSE                0x0902
+#define PCIS_INPUT_SCANNER              0x0903
+#define PCIS_INPUT_GAMEPORT             0x0904
+#define PCIS_INPUT_OTHER                0x0980
+
+#define PCIC_DOCKING                    0x0a
+#define PCIS_DOCKING_GENERIC            0x0a00
+#define PCIS_DOCKING_OTHER              0x0a80
+
+#define PCIC_PROCESSOR                  0x0b
+#define PCIS_PROCESSOR_386              0x0b00
+#define PCIS_PROCESSOR_486              0x0b01
+#define PCIS_PROCESSOR_PENTIUM          0x0b02
+#define PCIS_PROCESSOR_ALPHA            0x0b10
+#define PCIS_PROCESSOR_POWERPC          0x0b20
+#define PCIS_PROCESSOR_MIPS             0x0b30
+#define PCIS_PROCESSOR_CO               0x0b40
+
+#define PCIC_SERIAL                     0x0c
+#define PCIS_SERIAL_FIREWIRE            0x0c00
+#define PCIS_SERIAL_FIREWIRE_OHCI       0x0c0010
+#define PCIS_SERIAL_ACCESS              0x0c01
+#define PCIS_SERIAL_SSA                 0x0c02
+#define PCIS_SERIAL_USB                 0x0c03
+#define PCIS_SERIAL_USB_UHCI            0x0c0300
+#define PCIS_SERIAL_USB_OHCI            0x0c0310
+#define PCIS_SERIAL_USB_EHCI            0x0c0320
+#define PCIS_SERIAL_USB_XHCI            0x0c0330
+#define PCIS_SERIAL_USB_DEVICE          0x0c03fe
+#define PCIS_SERIAL_FIBER               0x0c04
+#define PCIS_SERIAL_SMBUS               0x0c05
+#define PCIS_SERIAL_IPMI                0x0c07
+#define PCIS_SERIAL_IPMI_SMIC           0x0c0700
+#define PCIS_SERIAL_IPMI_KCS            0x0c0701
+#define PCIS_SERIAL_IPMI_BT             0x0c0702
+
+#define PCIC_WIRELESS                   0x0d
+#define PCIS_WIRELESS_RF_CONTROLLER     0x0d10
+#define PCIS_WIRELESS_WHCI              0x0d1010
+
+#define PCIC_INTELLIGENT                0x0e
+#define PCIS_INTELLIGENT_I2O            0x0e00
+
+#define PCIC_SATELLITE                  0x0f
+#define PCIS_SATELLITE_TV               0x0f00
+#define PCIS_SATELLITE_AUDIO            0x0f01
+#define PCIS_SATELLITE_VOICE            0x0f03
+#define PCIS_SATELLITE_DATA             0x0f04
+
+#define PCIC_CRYPT                      0x10
+#define PCIS_CRYPT_NETWORK              0x1000
+#define PCIS_CRYPT_ENTERTAINMENT        0x1001
+#define PCIS_CRYPT_OTHER                0x1080
+
+#define PCIC_SIGNAL_PROCESSING          0x11
+#define PCIS_SP_DPIO                    0x1100
+#define PCIS_SP_OTHER                   0x1180
+
+#define PCIS_OTHERS                     0xff
+
+/* Bridge Control Values. */
+#define PCIB_BCR_PERR_ENABLE            0x0001
+#define PCIB_BCR_SERR_ENABLE            0x0002
+#define PCIB_BCR_ISA_ENABLE             0x0004
+#define PCIB_BCR_VGA_ENABLE             0x0008
+#define PCIB_BCR_MASTER_ABORT_MODE      0x0020
+#define PCIB_BCR_SECBUS_RESET           0x0040
+#define PCIB_BCR_SECBUS_BACKTOBACK      0x0080
+#define PCIB_BCR_PRI_DISCARD_TIMEOUT    0x0100
+#define PCIB_BCR_SEC_DISCARD_TIMEOUT    0x0200
+#define PCIB_BCR_DISCARD_TIMER_STATUS   0x0400
+#define PCIB_BCR_DISCARD_TIMER_SERREN   0x0800
+
+#define CBB_BCR_PERR_ENABLE             0x0001
+#define CBB_BCR_SERR_ENABLE             0x0002
+#define CBB_BCR_ISA_ENABLE              0x0004
+#define CBB_BCR_VGA_ENABLE              0x0008
+#define CBB_BCR_MASTER_ABORT_MODE       0x0020
+#define CBB_BCR_CARDBUS_RESET           0x0040
+#define CBB_BCR_IREQ_INT_ENABLE         0x0080
+#define CBB_BCR_PREFETCH_0_ENABLE       0x0100
+#define CBB_BCR_PREFETCH_1_ENABLE       0x0200
+#define CBB_BCR_WRITE_POSTING_ENABLE    0x0400
+
+/* PCI power management */
+#define PCIR_POWER_CAP                  0x2
+#define PCIM_PCAP_SPEC                  0x0007
+#define PCIM_PCAP_PMEREQCLK             0x0008
+#define PCIM_PCAP_DEVSPECINIT           0x0020
+#define PCIM_PCAP_AUXPWR_0              0x0000
+#define PCIM_PCAP_AUXPWR_55             0x0040
+#define PCIM_PCAP_AUXPWR_100            0x0080
+#define PCIM_PCAP_AUXPWR_160            0x00c0
+#define PCIM_PCAP_AUXPWR_220            0x0100
+#define PCIM_PCAP_AUXPWR_270            0x0140
+#define PCIM_PCAP_AUXPWR_320            0x0180
+#define PCIM_PCAP_AUXPWR_375            0x01c0
+#define PCIM_PCAP_AUXPWRMASK            0x01c0
+#define PCIM_PCAP_D1SUPP                0x0200
+#define PCIM_PCAP_D2SUPP                0x0400
+#define PCIM_PCAP_PMEMASK               0xf800
+#define PCIM_PCAP_D0PME                 0x0800
+#define PCIM_PCAP_D1PME                 0x1000
+#define PCIM_PCAP_D2PME                 0x2000
+#define PCIM_PCAP_D3PME_HOT             0x4000
+#define PCIM_PCAP_D3PME_COLD            0x8000
+
+#define PCIR_POWER_STATUS               0x4
+#define PCIM_PSTAT_D0                   0x0000
+#define PCIM_PSTAT_D1                   0x0001
+#define PCIM_PSTAT_D2                   0x0002
+#define PCIM_PSTAT_D3                   0x0003
+#define PCIM_PSTAT_DMASK                0x0003
+#define PCIM_PSTAT_NOSOFTRESET          0x0008
+#define PCIM_PSTAT_PMEENABLE            0x0100
+#define PCIM_PSTAT_D0POWER              0x0000
+#define PCIM_PSTAT_D1POWER              0x0200
+#define PCIM_PSTAT_D2POWER              0x0400
+#define PCIM_PSTAT_D3POWER              0x0600
+#define PCIM_PSTAT_D0HEAT               0x0800
+#define PCIM_PSTAT_D1HEAT               0x0a00
+#define PCIM_PSTAT_D2HEAT               0x0c00
+#define PCIM_PSTAT_D3HEAT               0x0e00
+#define PCIM_PSTAT_DATASELMASK          0x1e00
+#define PCIM_PSTAT_DATAUNKN             0x0000
+#define PCIM_PSTAT_DATADIV10            0x2000
+#define PCIM_PSTAT_DATADIV100           0x4000
+#define PCIM_PSTAT_DATADIV1000          0x6000
+#define PCIM_PSTAT_DATADIVMASK          0x6000
+#define PCIM_PSTAT_PME                  0x8000
+
+#define PCIR_POWER_BSE                  0x6
+#define PCIM_PMCSR_BSE_D3B3             0x00
+#define PCIM_PMCSR_BSE_D3B2             0x40
+#define PCIM_PMCSR_BSE_BPCCE            0x80
+
+#define PCIR_POWER_DATA                 0x7
+
+/* VPD capability registers */
+#define PCIR_VPD_ADDR                   0x2
+#define PCIR_VPD_DATA                   0x4
+
+/* PCI Message Signalled Interrupts (MSI) */
+#define PCIR_MSI_CTRL                   0x2
+#define PCIM_MSICTRL_VECTOR             0x0100
+#define PCIM_MSICTRL_64BIT              0x0080
+#define PCIM_MSICTRL_MME_MASK           0x0070
+#define PCIM_MSICTRL_MME_SHIFT          0x4
+#define PCIM_MSICTRL_MME_1              0x0000
+#define PCIM_MSICTRL_MME_2              0x0010
+#define PCIM_MSICTRL_MME_4              0x0020
+#define PCIM_MSICTRL_MME_8              0x0030
+#define PCIM_MSICTRL_MME_16             0x0040
+#define PCIM_MSICTRL_MME_32             0x0050
+#define PCIM_MSICTRL_MMC_MASK           0x000e
+#define PCIM_MSICTRL_MMC_SHIFT          0x1
+#define PCIM_MSICTRL_MMC_1              0x0000
+#define PCIM_MSICTRL_MMC_2              0x0002
+#define PCIM_MSICTRL_MMC_4              0x0004
+#define PCIM_MSICTRL_MMC_8              0x0006
+#define PCIM_MSICTRL_MMC_16             0x0008
+#define PCIM_MSICTRL_MMC_32             0x000a
+#define PCIM_MSICTRL_MSI_ENABLE         0x0001
+#define PCIR_MSI_ADDR                   0x4
+#define PCIR_MSI_ADDR_HIGH              0x8
+#define PCIR_MSI_DATA                   0x8
+#define PCIR_MSI_DATA_64BIT             0xc
+#define PCIR_MSI_MASK                   0xc
+#define PCIR_MSI_MASK_64BIT             0x10
+#define PCIR_MSI_PENDING                0x14
+
+/* PCI Enhanced Allocation registers */
+#define PCIR_EA_NUM_ENT                 2           /* Number of Capability Entries */
+#define PCIM_EA_NUM_ENT_MASK            0x3f        /* Num Entries Mask */
+#define PCIR_EA_FIRST_ENT               4           /* First EA Entry in List */
+#define PCIR_EA_FIRST_ENT_BRIDGE        8           /* First EA Entry for Bridges */
+#define PCIM_EA_ES                      0x00000007  /* Entry Size */
+#define PCIM_EA_BEI                     0x000000f0  /* BAR Equivalent Indicator */
+#define PCIM_EA_BEI_OFFSET              4
+/* 0-5 map to BARs 0-5 respectively */
+#define PCIM_EA_BEI_BAR_0               0
+#define PCIM_EA_BEI_BAR_5               5
+#define PCIM_EA_BEI_BAR(x)              (((x) >> PCIM_EA_BEI_OFFSET) & 0xf)
+#define PCIM_EA_BEI_BRIDGE              0x6 /* Resource behind bridge */
+#define PCIM_EA_BEI_ENI                 0x7 /* Equivalent Not Indicated */
+#define PCIM_EA_BEI_ROM                 0x8 /* Expansion ROM */
+/* 9-14 map to VF BARs 0-5 respectively */
+#define PCIM_EA_BEI_VF_BAR_0            9
+#define PCIM_EA_BEI_VF_BAR_5            14
+#define PCIM_EA_BEI_RESERVED            0xf /* Reserved - Treat like ENI */
+#define PCIM_EA_PP                      0x0000ff00  /* Primary Properties */
+#define PCIM_EA_PP_OFFSET               8
+#define PCIM_EA_SP_OFFSET               16
+#define PCIM_EA_SP                      0x00ff0000  /* Secondary Properties */
+#define PCIM_EA_P_MEM                   0x00    /* Non-Prefetch Memory */
+#define PCIM_EA_P_MEM_PREFETCH          0x01    /* Prefetchable Memory */
+#define PCIM_EA_P_IO                    0x02    /* I/O Space */
+#define PCIM_EA_P_VF_MEM_PREFETCH       0x03    /* VF Prefetchable Memory */
+#define PCIM_EA_P_VF_MEM                0x04    /* VF Non-Prefetch Memory */
+#define PCIM_EA_P_BRIDGE_MEM            0x05    /* Bridge Non-Prefetch Memory */
+#define PCIM_EA_P_BRIDGE_MEM_PREFETCH   0x06    /* Bridge Prefetchable Memory */
+#define PCIM_EA_P_BRIDGE_IO             0x07    /* Bridge I/O Space */
+/* 0x08-0xfc reserved */
+#define PCIM_EA_P_MEM_RESERVED          0xfd    /* Reserved Memory */
+#define PCIM_EA_P_IO_RESERVED           0xfe    /* Reserved I/O Space */
+#define PCIM_EA_P_UNAVAILABLE           0xff    /* Entry Unavailable */
+#define PCIM_EA_WRITABLE                0x40000000  /* Writable: 1 = RW, 0 = HwInit */
+#define PCIM_EA_ENABLE                  0x80000000  /* Enable for this entry */
+#define PCIM_EA_BASE                    4   /* Base Address Offset */
+#define PCIM_EA_MAX_OFFSET              8   /* MaxOffset (resource length) */
+/* bit 0 is reserved */
+#define PCIM_EA_IS_64                   0x00000002  /* 64-bit field flag */
+#define PCIM_EA_FIELD_MASK              0xfffffffc  /* For Base & Max Offset */
+/* Bridge config register */
+#define PCIM_EA_SEC_NR(reg)             ((reg) & 0xff)
+#define PCIM_EA_SUB_NR(reg)             (((reg) >> 8) & 0xff)
+
+/* PCI-X definitions */
+
+/* For header type 0 devices */
+#define PCIXR_COMMAND                       0x2
+#define PCIXM_COMMAND_DPERR_E               0x0001  /* Data Parity Error Recovery */
+#define PCIXM_COMMAND_ERO                   0x0002  /* Enable Relaxed Ordering */
+#define PCIXM_COMMAND_MAX_READ              0x000c  /* Maximum Burst Read Count */
+#define PCIXM_COMMAND_MAX_READ_512          0x0000
+#define PCIXM_COMMAND_MAX_READ_1024         0x0004
+#define PCIXM_COMMAND_MAX_READ_2048         0x0008
+#define PCIXM_COMMAND_MAX_READ_4096         0x000c
+#define PCIXM_COMMAND_MAX_SPLITS            0x0070  /* Maximum Split Transactions */
+#define PCIXM_COMMAND_MAX_SPLITS_1          0x0000
+#define PCIXM_COMMAND_MAX_SPLITS_2          0x0010
+#define PCIXM_COMMAND_MAX_SPLITS_3          0x0020
+#define PCIXM_COMMAND_MAX_SPLITS_4          0x0030
+#define PCIXM_COMMAND_MAX_SPLITS_8          0x0040
+#define PCIXM_COMMAND_MAX_SPLITS_12         0x0050
+#define PCIXM_COMMAND_MAX_SPLITS_16         0x0060
+#define PCIXM_COMMAND_MAX_SPLITS_32         0x0070
+#define PCIXM_COMMAND_VERSION               0x3000
+#define PCIXR_STATUS                        0x4
+#define PCIXM_STATUS_DEVFN                  0x000000ff
+#define PCIXM_STATUS_BUS                    0x0000ff00
+#define PCIXM_STATUS_64BIT                  0x00010000
+#define PCIXM_STATUS_133CAP                 0x00020000
+#define PCIXM_STATUS_SC_DISCARDED           0x00040000
+#define PCIXM_STATUS_UNEXP_SC               0x00080000
+#define PCIXM_STATUS_COMPLEX_DEV            0x00100000
+#define PCIXM_STATUS_MAX_READ               0x00600000
+#define PCIXM_STATUS_MAX_READ_512           0x00000000
+#define PCIXM_STATUS_MAX_READ_1024          0x00200000
+#define PCIXM_STATUS_MAX_READ_2048          0x00400000
+#define PCIXM_STATUS_MAX_READ_4096          0x00600000
+#define PCIXM_STATUS_MAX_SPLITS             0x03800000
+#define PCIXM_STATUS_MAX_SPLITS_1           0x00000000
+#define PCIXM_STATUS_MAX_SPLITS_2           0x00800000
+#define PCIXM_STATUS_MAX_SPLITS_3           0x01000000
+#define PCIXM_STATUS_MAX_SPLITS_4           0x01800000
+#define PCIXM_STATUS_MAX_SPLITS_8           0x02000000
+#define PCIXM_STATUS_MAX_SPLITS_12          0x02800000
+#define PCIXM_STATUS_MAX_SPLITS_16          0x03000000
+#define PCIXM_STATUS_MAX_SPLITS_32          0x03800000
+#define PCIXM_STATUS_MAX_CUM_READ           0x1c000000
+#define PCIXM_STATUS_RCVD_SC_ERR            0x20000000
+#define PCIXM_STATUS_266CAP                 0x40000000
+#define PCIXM_STATUS_533CAP                 0x80000000
+
+/* For header type 1 devices (PCI-X bridges) */
+#define PCIXR_SEC_STATUS                    0x2
+#define PCIXM_SEC_STATUS_64BIT              0x0001
+#define PCIXM_SEC_STATUS_133CAP             0x0002
+#define PCIXM_SEC_STATUS_SC_DISC            0x0004
+#define PCIXM_SEC_STATUS_UNEXP_SC           0x0008
+#define PCIXM_SEC_STATUS_SC_OVERRUN         0x0010
+#define PCIXM_SEC_STATUS_SR_DELAYED         0x0020
+#define PCIXM_SEC_STATUS_BUS_MODE           0x03c0
+#define PCIXM_SEC_STATUS_VERSION            0x3000
+#define PCIXM_SEC_STATUS_266CAP             0x4000
+#define PCIXM_SEC_STATUS_533CAP             0x8000
+#define PCIXR_BRIDGE_STATUS                 0x4
+#define PCIXM_BRIDGE_STATUS_DEVFN           0x000000ff
+#define PCIXM_BRIDGE_STATUS_BUS             0x0000ff00
+#define PCIXM_BRIDGE_STATUS_64BIT           0x00010000
+#define PCIXM_BRIDGE_STATUS_133CAP          0x00020000
+#define PCIXM_BRIDGE_STATUS_SC_DISCARDED    0x00040000
+#define PCIXM_BRIDGE_STATUS_UNEXP_SC        0x00080000
+#define PCIXM_BRIDGE_STATUS_SC_OVERRUN      0x00100000
+#define PCIXM_BRIDGE_STATUS_SR_DELAYED      0x00200000
+#define PCIXM_BRIDGE_STATUS_DEVID_MSGCAP    0x20000000
+#define PCIXM_BRIDGE_STATUS_266CAP          0x40000000
+#define PCIXM_BRIDGE_STATUS_533CAP          0x80000000
+
+/* HT (HyperTransport) Capability definitions */
+#define PCIR_HT_COMMAND                     0x2
+#define PCIM_HTCMD_CAP_MASK                 0xf800  /* Capability type. */
+#define PCIM_HTCAP_SLAVE                    0x0000  /* 000xx */
+#define PCIM_HTCAP_HOST                     0x2000  /* 001xx */
+#define PCIM_HTCAP_SWITCH                   0x4000  /* 01000 */
+#define PCIM_HTCAP_INTERRUPT                0x8000  /* 10000 */
+#define PCIM_HTCAP_REVISION_ID              0x8800  /* 10001 */
+#define PCIM_HTCAP_UNITID_CLUMPING          0x9000  /* 10010 */
+#define PCIM_HTCAP_EXT_CONFIG_SPACE         0x9800  /* 10011 */
+#define PCIM_HTCAP_ADDRESS_MAPPING          0xa000  /* 10100 */
+#define PCIM_HTCAP_MSI_MAPPING              0xa800  /* 10101 */
+#define PCIM_HTCAP_DIRECT_ROUTE             0xb000  /* 10110 */
+#define PCIM_HTCAP_VCSET                    0xb800  /* 10111 */
+#define PCIM_HTCAP_RETRY_MODE               0xc000  /* 11000 */
+#define PCIM_HTCAP_X86_ENCODING             0xc800  /* 11001 */
+#define PCIM_HTCAP_GEN3                     0xd000  /* 11010 */
+#define PCIM_HTCAP_FLE                      0xd800  /* 11011 */
+#define PCIM_HTCAP_PM                       0xe000  /* 11100 */
+#define PCIM_HTCAP_HIGH_NODE_COUNT          0xe800  /* 11101 */
+
+/* HT MSI Mapping Capability definitions. */
+#define PCIM_HTCMD_MSI_ENABLE               0x0001
+#define PCIM_HTCMD_MSI_FIXED                0x0002
+#define PCIR_HTMSI_ADDRESS_LO               0x4
+#define PCIR_HTMSI_ADDRESS_HI               0x8
+
+/* PCI Vendor capability definitions */
+#define PCIR_VENDOR_LENGTH                  0x2
+#define PCIR_VENDOR_DATA                    0x3
+
+/* PCI Device capability definitions */
+#define PCIR_DEVICE_LENGTH                  0x2
+
+/* PCI EHCI Debug Port definitions */
+#define PCIR_DEBUG_PORT                     0x2
+#define PCIM_DEBUG_PORT_OFFSET              0x1fff
+#define PCIM_DEBUG_PORT_BAR                 0xe000
+
+/* PCI-PCI Bridge Subvendor definitions */
+#define PCIR_SUBVENDCAP_ID                  0x4
+#define PCIR_SUBVENDCAP                     0x4
+#define PCIR_SUBDEVCAP                      0x6
+
+/* PCI Express definitions */
+#define PCIER_FLAGS                         0x2
+#define PCIEM_FLAGS_VERSION                 0x000f
+#define PCIEM_FLAGS_TYPE                    0x00f0
+#define PCIEM_TYPE_ENDPOINT                 0x0000
+#define PCIEM_TYPE_LEGACY_ENDPOINT          0x0010
+#define PCIEM_TYPE_ROOT_PORT                0x0040
+#define PCIEM_TYPE_UPSTREAM_PORT            0x0050
+#define PCIEM_TYPE_DOWNSTREAM_PORT          0x0060
+#define PCIEM_TYPE_PCI_BRIDGE               0x0070
+#define PCIEM_TYPE_PCIE_BRIDGE              0x0080
+#define PCIEM_TYPE_ROOT_INT_EP              0x0090
+#define PCIEM_TYPE_ROOT_EC                  0x00a0
+#define PCIEM_FLAGS_SLOT                    0x0100
+#define PCIEM_FLAGS_IRQ                     0x3e00
+#define PCIER_DEVICE_CAP                    0x4
+#define PCIEM_CAP_MAX_PAYLOAD               0x00000007
+#define PCIEM_CAP_PHANTHOM_FUNCS            0x00000018
+#define PCIEM_CAP_EXT_TAG_FIELD             0x00000020
+#define PCIEM_CAP_L0S_LATENCY               0x000001c0
+#define PCIEM_CAP_L1_LATENCY                0x00000e00
+#define PCIEM_CAP_ROLE_ERR_RPT              0x00008000
+#define PCIEM_CAP_SLOT_PWR_LIM_VAL          0x03fc0000
+#define PCIEM_CAP_SLOT_PWR_LIM_SCALE        0x0c000000
+#define PCIEM_CAP_FLR                       0x10000000
+#define PCIER_DEVICE_CTL                    0x8
+#define PCIEM_CTL_COR_ENABLE                0x0001
+#define PCIEM_CTL_NFER_ENABLE               0x0002
+#define PCIEM_CTL_FER_ENABLE                0x0004
+#define PCIEM_CTL_URR_ENABLE                0x0008
+#define PCIEM_CTL_RELAXED_ORD_ENABLE        0x0010
+#define PCIEM_CTL_MAX_PAYLOAD               0x00e0
+#define PCIEM_CTL_EXT_TAG_FIELD             0x0100
+#define PCIEM_CTL_PHANTHOM_FUNCS            0x0200
+#define PCIEM_CTL_AUX_POWER_PM              0x0400
+#define PCIEM_CTL_NOSNOOP_ENABLE            0x0800
+#define PCIEM_CTL_MAX_READ_REQUEST          0x7000
+#define PCIEM_CTL_BRDG_CFG_RETRY            0x8000  /* PCI-E - PCI/PCI-X bridges */
+#define PCIEM_CTL_INITIATE_FLR              0x8000  /* FLR capable endpoints */
+#define PCIER_DEVICE_STA                    0xa
+#define PCIEM_STA_CORRECTABLE_ERROR         0x0001
+#define PCIEM_STA_NON_FATAL_ERROR           0x0002
+#define PCIEM_STA_FATAL_ERROR               0x0004
+#define PCIEM_STA_UNSUPPORTED_REQ           0x0008
+#define PCIEM_STA_AUX_POWER                 0x0010
+#define PCIEM_STA_TRANSACTION_PND           0x0020
+#define PCIER_LINK_CAP                      0xc
+#define PCIEM_LINK_CAP_MAX_SPEED            0x0000000f
+#define PCIEM_LINK_CAP_MAX_WIDTH            0x000003f0
+#define PCIEM_LINK_CAP_ASPM                 0x00000c00
+#define PCIEM_LINK_CAP_L0S_EXIT             0x00007000
+#define PCIEM_LINK_CAP_L1_EXIT              0x00038000
+#define PCIEM_LINK_CAP_CLOCK_PM             0x00040000
+#define PCIEM_LINK_CAP_SURPRISE_DOWN        0x00080000
+#define PCIEM_LINK_CAP_DL_ACTIVE            0x00100000
+#define PCIEM_LINK_CAP_LINK_BW_NOTIFY       0x00200000
+#define PCIEM_LINK_CAP_ASPM_COMPLIANCE      0x00400000
+#define PCIEM_LINK_CAP_PORT                 0xff000000
+#define PCIER_LINK_CTL                      0x10
+#define PCIEM_LINK_CTL_ASPMC_DIS            0x0000
+#define PCIEM_LINK_CTL_ASPMC_L0S            0x0001
+#define PCIEM_LINK_CTL_ASPMC_L1             0x0002
+#define PCIEM_LINK_CTL_ASPMC                0x0003
+#define PCIEM_LINK_CTL_RCB                  0x0008
+#define PCIEM_LINK_CTL_LINK_DIS             0x0010
+#define PCIEM_LINK_CTL_RETRAIN_LINK         0x0020
+#define PCIEM_LINK_CTL_COMMON_CLOCK         0x0040
+#define PCIEM_LINK_CTL_EXTENDED_SYNC        0x0080
+#define PCIEM_LINK_CTL_ECPM                 0x0100
+#define PCIEM_LINK_CTL_HAWD                 0x0200
+#define PCIEM_LINK_CTL_LBMIE                0x0400
+#define PCIEM_LINK_CTL_LABIE                0x0800
+#define PCIER_LINK_STA                      0x12
+#define PCIEM_LINK_STA_SPEED                0x000f
+#define PCIEM_LINK_STA_WIDTH                0x03f0
+#define PCIEM_LINK_STA_TRAINING_ERROR       0x0400
+#define PCIEM_LINK_STA_TRAINING             0x0800
+#define PCIEM_LINK_STA_SLOT_CLOCK           0x1000
+#define PCIEM_LINK_STA_DL_ACTIVE            0x2000
+#define PCIEM_LINK_STA_LINK_BW_MGMT         0x4000
+#define PCIEM_LINK_STA_LINK_AUTO_BW         0x8000
+#define PCIER_SLOT_CAP                      0x14
+#define PCIEM_SLOT_CAP_APB                  0x00000001
+#define PCIEM_SLOT_CAP_PCP                  0x00000002
+#define PCIEM_SLOT_CAP_MRLSP                0x00000004
+#define PCIEM_SLOT_CAP_AIP                  0x00000008
+#define PCIEM_SLOT_CAP_PIP                  0x00000010
+#define PCIEM_SLOT_CAP_HPS                  0x00000020
+#define PCIEM_SLOT_CAP_HPC                  0x00000040
+#define PCIEM_SLOT_CAP_SPLV                 0x00007f80
+#define PCIEM_SLOT_CAP_SPLS                 0x00018000
+#define PCIEM_SLOT_CAP_EIP                  0x00020000
+#define PCIEM_SLOT_CAP_NCCS                 0x00040000
+#define PCIEM_SLOT_CAP_PSN                  0xfff80000
+#define PCIER_SLOT_CTL                      0x18
+#define PCIEM_SLOT_CTL_ABPE                 0x0001
+#define PCIEM_SLOT_CTL_PFDE                 0x0002
+#define PCIEM_SLOT_CTL_MRLSCE               0x0004
+#define PCIEM_SLOT_CTL_PDCE                 0x0008
+#define PCIEM_SLOT_CTL_CCIE                 0x0010
+#define PCIEM_SLOT_CTL_HPIE                 0x0020
+#define PCIEM_SLOT_CTL_AIC                  0x00c0
+#define PCIEM_SLOT_CTL_AI_ON                0x0040
+#define PCIEM_SLOT_CTL_AI_BLINK             0x0080
+#define PCIEM_SLOT_CTL_AI_OFF               0x00c0
+#define PCIEM_SLOT_CTL_PIC                  0x0300
+#define PCIEM_SLOT_CTL_PI_ON                0x0100
+#define PCIEM_SLOT_CTL_PI_BLINK             0x0200
+#define PCIEM_SLOT_CTL_PI_OFF               0x0300
+#define PCIEM_SLOT_CTL_PCC                  0x0400
+#define PCIEM_SLOT_CTL_PC_ON                0x0000
+#define PCIEM_SLOT_CTL_PC_OFF               0x0400
+#define PCIEM_SLOT_CTL_EIC                  0x0800
+#define PCIEM_SLOT_CTL_DLLSCE               0x1000
+#define PCIER_SLOT_STA                      0x1a
+#define PCIEM_SLOT_STA_ABP                  0x0001
+#define PCIEM_SLOT_STA_PFD                  0x0002
+#define PCIEM_SLOT_STA_MRLSC                0x0004
+#define PCIEM_SLOT_STA_PDC                  0x0008
+#define PCIEM_SLOT_STA_CC                   0x0010
+#define PCIEM_SLOT_STA_MRLSS                0x0020
+#define PCIEM_SLOT_STA_PDS                  0x0040
+#define PCIEM_SLOT_STA_EIS                  0x0080
+#define PCIEM_SLOT_STA_DLLSC                0x0100
+#define PCIER_ROOT_CTL                      0x1c
+#define PCIEM_ROOT_CTL_SERR_CORR            0x0001
+#define PCIEM_ROOT_CTL_SERR_NONFATAL        0x0002
+#define PCIEM_ROOT_CTL_SERR_FATAL           0x0004
+#define PCIEM_ROOT_CTL_PME                  0x0008
+#define PCIEM_ROOT_CTL_CRS_VIS              0x0010
+#define PCIER_ROOT_CAP                      0x1e
+#define PCIEM_ROOT_CAP_CRS_VIS              0x0001
+#define PCIER_ROOT_STA                      0x20
+#define PCIEM_ROOT_STA_PME_REQID_MASK       0x0000ffff
+#define PCIEM_ROOT_STA_PME_STATUS           0x00010000
+#define PCIEM_ROOT_STA_PME_PEND             0x00020000
+#define PCIER_DEVICE_CAP2                   0x24
+#define PCIEM_CAP2_COMP_TIMO_RANGES         0x0000000f
+#define PCIEM_CAP2_COMP_TIMO_RANGE_A        0x00000001
+#define PCIEM_CAP2_COMP_TIMO_RANGE_B        0x00000002
+#define PCIEM_CAP2_COMP_TIMO_RANGE_C        0x00000004
+#define PCIEM_CAP2_COMP_TIMO_RANGE_D        0x00000008
+#define PCIEM_CAP2_COMP_TIMO_DISABLE        0x00000010
+#define PCIEM_CAP2_ARI                      0x00000020
+#define PCIER_DEVICE_CTL2                   0x28
+#define PCIEM_CTL2_COMP_TIMO_VAL            0x000f
+#define PCIEM_CTL2_COMP_TIMO_50MS           0x0000
+#define PCIEM_CTL2_COMP_TIMO_100US          0x0001
+#define PCIEM_CTL2_COMP_TIMO_10MS           0x0002
+#define PCIEM_CTL2_COMP_TIMO_55MS           0x0005
+#define PCIEM_CTL2_COMP_TIMO_210MS          0x0006
+#define PCIEM_CTL2_COMP_TIMO_900MS          0x0009
+#define PCIEM_CTL2_COMP_TIMO_3500MS         0x000a
+#define PCIEM_CTL2_COMP_TIMO_13S            0x000d
+#define PCIEM_CTL2_COMP_TIMO_64S            0x000e
+#define PCIEM_CTL2_COMP_TIMO_DISABLE        0x0010
+#define PCIEM_CTL2_ARI                      0x0020
+#define PCIEM_CTL2_ATOMIC_REQ_ENABLE        0x0040
+#define PCIEM_CTL2_ATOMIC_EGR_BLOCK         0x0080
+#define PCIEM_CTL2_ID_ORDERED_REQ_EN        0x0100
+#define PCIEM_CTL2_ID_ORDERED_CMP_EN        0x0200
+#define PCIEM_CTL2_LTR_ENABLE               0x0400
+#define PCIEM_CTL2_OBFF                     0x6000
+#define PCIEM_OBFF_DISABLE                  0x0000
+#define PCIEM_OBFF_MSGA_ENABLE              0x2000
+#define PCIEM_OBFF_MSGB_ENABLE              0x4000
+#define PCIEM_OBFF_WAKE_ENABLE              0x6000
+#define PCIEM_CTL2_END2END_TLP              0x8000
+#define PCIER_DEVICE_STA2                   0x2a
+#define PCIER_LINK_CAP2                     0x2c
+#define PCIER_LINK_CTL2                     0x30
+#define PCIEM_LNKCTL2_TLS                   0x000f
+#define PCIEM_LNKCTL2_TLS_2_5GT             0x0001
+#define PCIEM_LNKCTL2_TLS_5_0GT             0x0002
+#define PCIEM_LNKCTL2_TLS_8_0GT             0x0003
+#define PCIEM_LNKCTL2_TLS_16_0GT            0x0004
+#define PCIEM_LNKCTL2_TLS_32_0GT            0x0005
+#define PCIEM_LNKCTL2_TLS_64_0GT            0x0006
+#define PCIEM_LNKCTL2_ENTER_COMP            0x0010
+#define PCIEM_LNKCTL2_TX_MARGIN             0x0380
+#define PCIEM_LNKCTL2_HASD                  0x0020
+#define PCIER_LINK_STA2                     0x32
+#define PCIER_SLOT_CAP2                     0x34
+#define PCIER_SLOT_CTL2                     0x38
+#define PCIER_SLOT_STA2                     0x3a
+
+/* MSI-X definitions */
+#define PCIR_MSIX_CTRL                      0x2
+#define PCIM_MSIXCTRL_MSIX_ENABLE           0x8000
+#define PCIM_MSIXCTRL_FUNCTION_MASK         0x4000
+#define PCIM_MSIXCTRL_TABLE_SIZE            0x07ff
+#define PCIR_MSIX_TABLE                     0x4
+#define PCIR_MSIX_PBA                       0x8
+#define PCIM_MSIX_BIR_MASK                  0x7
+#define PCIM_MSIX_TABLE_OFFSET              0xfffffff8
+#define PCIM_MSIX_BIR_BAR_10                0
+#define PCIM_MSIX_BIR_BAR_14                1
+#define PCIM_MSIX_BIR_BAR_18                2
+#define PCIM_MSIX_BIR_BAR_1C                3
+#define PCIM_MSIX_BIR_BAR_20                4
+#define PCIM_MSIX_BIR_BAR_24                5
+#define PCIM_MSIX_ENTRY_SIZE                16
+#define PCIM_MSIX_ENTRY_LOWER_ADDR          0x0  /* Message Address */
+#define PCIM_MSIX_ENTRY_UPPER_ADDR          0x4  /* Message Upper Address */
+#define PCIM_MSIX_ENTRY_DATA                0x8  /* Message Data */
+#define PCIM_MSIX_ENTRY_VECTOR_CTRL         0xc  /* Vector Control */
+#define PCIM_MSIX_ENTRYVECTOR_CTRL_MASK     0x1
+
+/* PCI Advanced Features definitions */
+#define PCIR_PCIAF_CAP                      0x3
+#define PCIM_PCIAFCAP_TP                    0x01
+#define PCIM_PCIAFCAP_FLR                   0x02
+#define PCIR_PCIAF_CTRL                     0x4
+#define PCIR_PCIAFCTRL_FLR                  0x01
+#define PCIR_PCIAF_STATUS                   0x5
+#define PCIR_PCIAFSTATUS_TP                 0x01
+
+/* Advanced Error Reporting */
+#define PCIR_AER_UC_STATUS                  0x04
+#define PCIM_AER_UC_TRAINING_ERROR          0x00000001
+#define PCIM_AER_UC_DL_PROTOCOL_ERROR       0x00000010
+#define PCIM_AER_UC_SURPRISE_LINK_DOWN      0x00000020
+#define PCIM_AER_UC_POISONED_TLP            0x00001000
+#define PCIM_AER_UC_FC_PROTOCOL_ERROR       0x00002000
+#define PCIM_AER_UC_COMPLETION_TIMEOUT      0x00004000
+#define PCIM_AER_UC_COMPLETER_ABORT         0x00008000
+#define PCIM_AER_UC_UNEXPECTED_COMPLETION   0x00010000
+#define PCIM_AER_UC_RECEIVER_OVERFLOW       0x00020000
+#define PCIM_AER_UC_MALFORMED_TLP           0x00040000
+#define PCIM_AER_UC_ECRC_ERROR              0x00080000
+#define PCIM_AER_UC_UNSUPPORTED_REQUEST     0x00100000
+#define PCIM_AER_UC_ACS_VIOLATION           0x00200000
+#define PCIM_AER_UC_INTERNAL_ERROR          0x00400000
+#define PCIM_AER_UC_MC_BLOCKED_TLP          0x00800000
+#define PCIM_AER_UC_ATOMIC_EGRESS_BLK       0x01000000
+#define PCIM_AER_UC_TLP_PREFIX_BLOCKED      0x02000000
+#define PCIR_AER_UC_MASK                    0x08    /* Shares bits with UC_STATUS */
+#define PCIR_AER_UC_SEVERITY                0x0c    /* Shares bits with UC_STATUS */
+#define PCIR_AER_COR_STATUS                 0x10
+#define PCIM_AER_COR_RECEIVER_ERROR         0x00000001
+#define PCIM_AER_COR_BAD_TLP                0x00000040
+#define PCIM_AER_COR_BAD_DLLP               0x00000080
+#define PCIM_AER_COR_REPLAY_ROLLOVER        0x00000100
+#define PCIM_AER_COR_REPLAY_TIMEOUT         0x00001000
+#define PCIM_AER_COR_ADVISORY_NF_ERROR      0x00002000
+#define PCIM_AER_COR_INTERNAL_ERROR         0x00004000
+#define PCIM_AER_COR_HEADER_LOG_OVFLOW      0x00008000
+#define PCIR_AER_COR_MASK                   0x14    /* Shares bits with COR_STATUS */
+#define PCIR_AER_CAP_CONTROL                0x18
+#define PCIM_AER_FIRST_ERROR_PTR            0x0000001f
+#define PCIM_AER_ECRC_GEN_CAPABLE           0x00000020
+#define PCIM_AER_ECRC_GEN_ENABLE            0x00000040
+#define PCIM_AER_ECRC_CHECK_CAPABLE         0x00000080
+#define PCIM_AER_ECRC_CHECK_ENABLE          0x00000100
+#define PCIM_AER_MULT_HDR_CAPABLE           0x00000200
+#define PCIM_AER_MULT_HDR_ENABLE            0x00000400
+#define PCIM_AER_TLP_PREFIX_LOG_PRESENT     0x00000800
+#define PCIR_AER_HEADER_LOG                 0x1c
+#define PCIR_AER_ROOTERR_CMD                0x2c    /* Only for root complex ports */
+#define PCIM_AER_ROOTERR_COR_ENABLE         0x00000001
+#define PCIM_AER_ROOTERR_NF_ENABLE          0x00000002
+#define PCIM_AER_ROOTERR_F_ENABLE           0x00000004
+#define PCIR_AER_ROOTERR_STATUS             0x30    /* Only for root complex ports */
+#define PCIM_AER_ROOTERR_COR_ERR            0x00000001
+#define PCIM_AER_ROOTERR_MULTI_COR_ERR      0x00000002
+#define PCIM_AER_ROOTERR_UC_ERR             0x00000004
+#define PCIM_AER_ROOTERR_MULTI_UC_ERR       0x00000008
+#define PCIM_AER_ROOTERR_FIRST_UC_FATAL     0x00000010
+#define PCIM_AER_ROOTERR_NF_ERR             0x00000020
+#define PCIM_AER_ROOTERR_F_ERR              0x00000040
+#define PCIM_AER_ROOTERR_INT_MESSAGE        0xf8000000
+#define PCIR_AER_COR_SOURCE_ID              0x34    /* Only for root complex ports */
+#define PCIR_AER_ERR_SOURCE_ID              0x36    /* Only for root complex ports */
+#define PCIR_AER_TLP_PREFIX_LOG             0x38    /* Only for TLP prefix functions */
+
+/* Virtual Channel definitions */
+#define PCIR_VC_CAP1                        0x04
+#define PCIM_VC_CAP1_EXT_COUNT              0x00000007
+#define PCIM_VC_CAP1_LOWPRI_EXT_COUNT       0x00000070
+#define PCIR_VC_CAP2                        0x08
+#define PCIR_VC_CONTROL                     0x0c
+#define PCIR_VC_STATUS                      0x0e
+#define PCIR_VC_RESOURCE_CAP(n)             (0x10 + (n) * 0x0c)
+#define PCIR_VC_RESOURCE_CTL(n)             (0x14 + (n) * 0x0c)
+#define PCIR_VC_RESOURCE_STA(n)             (0x18 + (n) * 0x0c)
+
+/* Serial Number definitions */
+#define PCIR_SERIAL_LOW                     0x04
+#define PCIR_SERIAL_HIGH                    0x08
+
+/* SR-IOV definitions */
+#define PCIR_SRIOV_CTL                      0x08
+#define PCIM_SRIOV_VF_EN                    0x01
+#define PCIM_SRIOV_VF_MSE                   0x08    /* Memory space enable. */
+#define PCIM_SRIOV_ARI_EN                   0x10
+#define PCIR_SRIOV_TOTAL_VFS                0x0e
+#define PCIR_SRIOV_NUM_VFS                  0x10
+#define PCIR_SRIOV_VF_OFF                   0x14
+#define PCIR_SRIOV_VF_STRIDE                0x16
+#define PCIR_SRIOV_VF_DID                   0x1a
+#define PCIR_SRIOV_PAGE_CAP                 0x1c
+#define PCIR_SRIOV_PAGE_SIZE                0x20
+
+#define PCI_SRIOV_BASE_PAGE_SHIFT           12
+
+#define PCIR_SRIOV_BARS                     0x24
+#define PCIR_SRIOV_BAR(x)                   (PCIR_SRIOV_BARS + (x) * 4)
+
+/* Extended Capability Vendor-Specific definitions */
+#define PCIR_VSEC_HEADER                    0x04
+#define PCIR_VSEC_ID(hdr)                   ((hdr) & 0xffff)
+#define PCIR_VSEC_REV(hdr)                  (((hdr) & 0xf0000) >> 16)
+#define PCIR_VSEC_LENGTH(hdr)               (((hdr) & 0xfff00000) >> 20)
+#define PCIR_VSEC_DATA                      0x08
+
+/* ASPM L1 PM Substates */
+#define PCIR_L1SS_CAP                       0x04        /* Capabilities Register */
+#define PCIM_L1SS_CAP_PCIPM_L1_2            0x00000001  /* PCI-PM L1.2 Supported */
+#define PCIM_L1SS_CAP_PCIPM_L1_1            0x00000002  /* PCI-PM L1.1 Supported */
+#define PCIM_L1SS_CAP_ASPM_L1_2             0x00000004  /* ASPM L1.2 Supported */
+#define PCIM_L1SS_CAP_ASPM_L1_1             0x00000008  /* ASPM L1.1 Supported */
+#define PCIM_L1SS_CAP_L1_PM_SS              0x00000010  /* L1 PM Substates Supported */
+#define PCIM_L1SS_CAP_CM_RESTORE_TIME       0x0000ff00  /* Port Common_Mode_Restore_Time */
+#define PCIM_L1SS_CAP_P_PWR_ON_SCALE        0x00030000  /* Port T_POWER_ON scale */
+#define PCIM_L1SS_CAP_P_PWR_ON_VALUE        0x00f80000  /* Port T_POWER_ON value */
+#define PCIR_L1SS_CTL1                      0x08        /* Control 1 Register */
+#define PCIM_L1SS_CTL1_PCIPM_L1_2           0x00000001  /* PCI-PM L1.2 Enable */
+#define PCIM_L1SS_CTL1_PCIPM_L1_1           0x00000002  /* PCI-PM L1.1 Enable */
+#define PCIM_L1SS_CTL1_ASPM_L1_2            0x00000004  /* ASPM L1.2 Enable */
+#define PCIM_L1SS_CTL1_ASPM_L1_1            0x00000008  /* ASPM L1.1 Enable */
+#define PCIM_L1SS_CTL1_L1_2_MASK            0x00000005
+#define PCIM_L1SS_CTL1_L1SS_MASK            0x0000000f
+#define PCIM_L1SS_CTL1_CM_RESTORE_TIME      0x0000ff00  /* Common_Mode_Restore_Time */
+#define PCIM_L1SS_CTL1_LTR_L12_TH_VALUE     0x03ff0000  /* LTR_L1.2_THRESHOLD_Value */
+#define PCIM_L1SS_CTL1_LTR_L12_TH_SCALE     0xe0000000  /* LTR_L1.2_THRESHOLD_Scale */
+#define PCIR_L1SS_CTL2                      0x0c        /* Control 2 Register */
+#define PCIM_L1SS_CTL2_T_PWR_ON_SCALE       0x00000003  /* T_POWER_ON Scale */
+#define PCIM_L1SS_CTL2_T_PWR_ON_VALUE       0x000000f8  /* T_POWER_ON Value */
+
+/* Alternative Routing-ID Interpretation */
+#define PCIR_ARI_CAP                        0x04                /* Capabilities Register */
+#define PCIM_ARI_CAP_MFVC                   0x0001              /* MFVC Function Groups Capability */
+#define PCIM_ARI_CAP_ACS                    0x0002              /* ACS Function Groups Capability */
+#define PCIM_ARI_CAP_NFN(x)                 (((x) >> 8) & 0xff) /* Next Function Number */
+#define PCIR_ARI_CTRL                       0x06                /* ARI Control Register */
+#define PCIM_ARI_CTRL_MFVC                  0x0001              /* MFVC Function Groups Enable */
+#define PCIM_ARI_CTRL_ACS                   0x0002              /* ACS Function Groups Enable */
+#define PCIM_ARI_CTRL_FG(x)                 (((x) >> 4) & 7)    /* Function Group */
+#define PCIR_EXT_CAP_ARI_SIZEOF             8
+
+/*
+ * PCI Express Firmware Interface definitions
+ */
+#define PCI_OSC_STATUS                      0
+#define PCI_OSC_SUPPORT                     1
+#define PCIM_OSC_SUPPORT_EXT_PCI_CONF       0x01    /* Extended PCI Config Space */
+#define PCIM_OSC_SUPPORT_ASPM               0x02    /* Active State Power Management */
+#define PCIM_OSC_SUPPORT_CPMC               0x04    /* Clock Power Management Cap */
+#define PCIM_OSC_SUPPORT_SEG_GROUP          0x08    /* PCI Segment Groups supported */
+#define PCIM_OSC_SUPPORT_MSI                0x10    /* MSI signalling supported */
+#define PCI_OSC_CTL                         2
+#define PCIM_OSC_CTL_PCIE_HP                0x01    /* PCIe Native Hot Plug */
+#define PCIM_OSC_CTL_SHPC_HP                0x02    /* SHPC Native Hot Plug */
+#define PCIM_OSC_CTL_PCIE_PME               0x04    /* PCIe Native Power Mgt Events */
+#define PCIM_OSC_CTL_PCIE_AER               0x08    /* PCIe Advanced Error Reporting */
+#define PCIM_OSC_CTL_PCIE_CAP_STRUCT        0x10    /* Various Capability Structures */
+
+#endif /* __PCI_REGS_H__ */

+ 121 - 0
components/drivers/pci/pme.c

@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-10-24     GuEe-GUI     first version
+ */
+
+#include <drivers/pci.h>
+#include <drivers/core/power_domain.h>
+
+#define DBG_TAG "pci.pme"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
+/*
+ * Power Management Capability Register:
+ *
+ *   31      27 26  25 24    22  21  20  19 18  16 15        8 7              0
+ *  +---------+---+---+--------+---+---+---+------+-----------+----------------+
+ *  |         |   |   |        |   |   |   |      |           | Capability ID  |
+ *  +---------+---+---+--------+---+---+---+------+-----------+----------------+
+ *       ^      ^   ^      ^     ^   ^   ^     ^        ^
+ *       |      |   |      |     |   |   |     |        |
+ *       |      |   |      |     |   |   |     |        +---- Next Capability Pointer
+ *       |      |   |      |     |   |   |     +------------- Version
+ *       |      |   |      |     |   |   +------------------- PME Clock
+ *       |      |   |      |     |   +----------------------- Immediate Readiness on Return to D0
+ *       |      |   |      |     +--------------------------- Device Specific Initialization
+ *       |      |   |      +--------------------------------- Aux Current
+ *       |      |   +---------------------------------------- D1 Support
+ *       |      +-------------------------------------------- D2 Support
+ *       +--------------------------------------------------- PME Support
+ */
+
+/*
+ * Probe the Power Management capability of @pdev and record which power
+ * states can signal PME#; PME# generation is left disabled afterwards.
+ */
+void rt_pci_pme_init(struct rt_pci_device *pdev)
+{
+    rt_uint16_t pm_caps;
+
+    if (!pdev)
+    {
+        return;
+    }
+
+    pdev->pme_cap = rt_pci_find_capability(pdev, PCIY_PMG);
+
+    if (!pdev->pme_cap)
+    {
+        return;
+    }
+
+    rt_pci_read_config_u16(pdev, pdev->pme_cap + PCIR_POWER_CAP, &pm_caps);
+
+    /* Only PM specification revisions 0..3 are handled here */
+    if ((pm_caps & PCIM_PCAP_SPEC) > 3)
+    {
+        LOG_E("%s: Unsupported PME CAP regs spec %u",
+                rt_dm_dev_get_name(&pdev->parent), pm_caps & PCIM_PCAP_SPEC);
+
+        return;
+    }
+
+    pm_caps &= PCIM_PCAP_PMEMASK;
+
+    if (pm_caps)
+    {
+        pdev->pme_support = RT_FIELD_GET(PCIM_PCAP_PMEMASK, pm_caps);
+
+        /* Start with PME# signalling disarmed */
+        rt_pci_pme_active(pdev, RT_FALSE);
+    }
+}
+
+/**
+ * Arm or disarm the device's ability to wake the system via PME#.
+ *
+ * @param pdev the PCI device
+ * @param state the power state the device should be able to wake from
+ * @param enable RT_TRUE to arm PME#, RT_FALSE to disarm it
+ *
+ * @return RT_EOK on success, -RT_EINVAL on invalid arguments
+ */
+rt_err_t rt_pci_enable_wake(struct rt_pci_device *pdev,
+        enum rt_pci_power state, rt_bool_t enable)
+{
+    if (!pdev || state >= RT_PCI_PME_MAX)
+    {
+        return -RT_EINVAL;
+    }
+
+    if (enable)
+    {
+        if (rt_pci_pme_capable(pdev, state) ||
+            rt_pci_pme_capable(pdev, RT_PCI_D3COLD))
+        {
+            /*
+             * BUGFIX: was rt_pci_pme_active(pdev, RT_EOK); RT_EOK is 0,
+             * which disabled PME# on the enable path. Pass RT_TRUE.
+             */
+            rt_pci_pme_active(pdev, RT_TRUE);
+        }
+    }
+    else
+    {
+        rt_pci_pme_active(pdev, RT_FALSE);
+    }
+
+    return RT_EOK;
+}
+
+/*
+ * Program the PMCSR: acknowledge any pending PME_Status (write-1-to-clear)
+ * and set or clear PME_En according to @enable.
+ */
+static void pci_pme_active(struct rt_pci_device *pdev, rt_bool_t enable)
+{
+    rt_uint16_t pmcsr;
+
+    if (!pdev->pme_support)
+    {
+        return;
+    }
+
+    rt_pci_read_config_u16(pdev, pdev->pme_cap + PCIR_POWER_STATUS, &pmcsr);
+
+    /* Writing 1 to PME_Status clears it */
+    pmcsr |= PCIM_PSTAT_PME;
+
+    if (enable)
+    {
+        pmcsr |= PCIM_PSTAT_PMEENABLE;
+    }
+    else
+    {
+        pmcsr &= ~PCIM_PSTAT_PMEENABLE;
+    }
+
+    rt_pci_write_config_u16(pdev, pdev->pme_cap + PCIR_POWER_STATUS, pmcsr);
+}
+
+/*
+ * Public entry point: toggle PME# generation on @pdev, then notify the
+ * device-model power-domain layer about the change.
+ */
+void rt_pci_pme_active(struct rt_pci_device *pdev, rt_bool_t enable)
+{
+    if (!pdev)
+    {
+        return;
+    }
+
+    pci_pme_active(pdev, enable);
+    /* NOTE(review): the second argument of rt_dm_power_domain_attach() is
+     * passed the enable flag here - confirm this matches that API's contract. */
+    rt_dm_power_domain_attach(&pdev->parent, enable);
+}

+ 922 - 0
components/drivers/pci/probe.c

@@ -0,0 +1,922 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-10-24     GuEe-GUI     first version
+ */
+
+#include <rtthread.h>
+
+#define DBG_TAG "pci.probe"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
+#include <drivers/pci.h>
+#include <drivers/core/bus.h>
+
+/* Thin wrapper: acquire the raw hardware spinlock embedded in @spinlock. */
+rt_inline void spin_lock(struct rt_spinlock *spinlock)
+{
+    rt_hw_spin_lock(&spinlock->lock);
+}
+
+/* Thin wrapper: release the raw hardware spinlock embedded in @spinlock. */
+rt_inline void spin_unlock(struct rt_spinlock *spinlock)
+{
+    rt_hw_spin_unlock(&spinlock->lock);
+}
+
+/*
+ * Allocate a zero-initialized host bridge followed by @priv_size bytes of
+ * controller-private storage. Returns RT_NULL when out of memory.
+ */
+struct rt_pci_host_bridge *rt_pci_host_bridge_alloc(rt_size_t priv_size)
+{
+    return rt_calloc(1, sizeof(struct rt_pci_host_bridge) + priv_size);
+}
+
+/*
+ * Release a host bridge together with its bus/DMA region tables.
+ *
+ * @return RT_EOK on success, -RT_EINVAL when @bridge is RT_NULL
+ */
+rt_err_t rt_pci_host_bridge_free(struct rt_pci_host_bridge *bridge)
+{
+    if (!bridge)
+    {
+        return -RT_EINVAL;
+    }
+
+    if (bridge->bus_regions)
+    {
+        rt_free(bridge->bus_regions);
+    }
+    if (bridge->dma_regions)
+    {
+        rt_free(bridge->dma_regions);
+    }
+
+    rt_free(bridge);
+
+    return RT_EOK;
+}
+
+/*
+ * Perform firmware-specific setup for a host bridge. Currently only
+ * Open Firmware (devicetree) backed bridges need any work here.
+ */
+rt_err_t rt_pci_host_bridge_init(struct rt_pci_host_bridge *host_bridge)
+{
+    if (!host_bridge->parent.ofw_node)
+    {
+        /* Nothing to parse without a devicetree node */
+        return RT_EOK;
+    }
+
+    return rt_pci_ofw_host_bridge_init(host_bridge->parent.ofw_node, host_bridge);
+}
+
+/*
+ * Allocate a fresh PCI device descriptor and, when @bus is given, link it
+ * onto that bus's device list under the bus lock.
+ *
+ * @return the new device, or RT_NULL when out of memory
+ */
+struct rt_pci_device *rt_pci_alloc_device(struct rt_pci_bus *bus)
+{
+    int bar;
+    struct rt_pci_device *pdev = rt_calloc(1, sizeof(*pdev));
+
+    if (!pdev)
+    {
+        return RT_NULL;
+    }
+
+    rt_list_init(&pdev->list);
+    pdev->bus = bus;
+
+    if (bus)
+    {
+        spin_lock(&bus->lock);
+        rt_list_insert_before(&bus->devices_nodes, &pdev->list);
+        spin_unlock(&bus->lock);
+    }
+
+    /* Not known yet; filled in by config-space setup later */
+    pdev->subsystem_vendor = PCI_ANY_ID;
+    pdev->subsystem_device = PCI_ANY_ID;
+    pdev->irq = -1;
+
+    for (bar = 0; bar < RT_ARRAY_SIZE(pdev->resource); ++bar)
+    {
+        pdev->resource[bar].flags = PCI_BUS_REGION_F_NONE;
+    }
+
+#ifdef RT_PCI_MSI
+    rt_list_init(&pdev->msi_desc_nodes);
+    rt_spin_lock_init(&pdev->msi_lock);
+#endif
+
+    return pdev;
+}
+
+/*
+ * Probe one (bus, devfn) slot: read the vendor/device IDs and, when a
+ * function responds, allocate, set up and register a PCI device for it.
+ *
+ * @return the new device, or RT_NULL if the slot is empty or setup failed
+ */
+struct rt_pci_device *rt_pci_scan_single_device(struct rt_pci_bus *bus, rt_uint32_t devfn)
+{
+    rt_err_t err;
+    struct rt_pci_device *pdev = RT_NULL;
+    rt_uint16_t vendor = PCI_ANY_ID, device = PCI_ANY_ID;
+
+    if (!bus)
+    {
+        goto _end;
+    }
+
+    err = rt_pci_bus_read_config_u16(bus, devfn, PCIR_VENDOR, &vendor);
+    rt_pci_bus_read_config_u16(bus, devfn, PCIR_DEVICE, &device);
+
+    /* All-ones or all-zeros vendor IDs mean no function at this devfn */
+    if (vendor == (typeof(vendor))PCI_ANY_ID ||
+        vendor == (typeof(vendor))0x0000 || err)
+    {
+        goto _end;
+    }
+
+    if (!(pdev = rt_pci_alloc_device(bus)))
+    {
+        goto _end;
+    }
+
+    pdev->devfn = devfn;
+    pdev->vendor = vendor;
+    pdev->device = device;
+
+    rt_dm_dev_set_name(&pdev->parent, "%04x:%02x:%02x.%u",
+            rt_pci_domain(pdev), pdev->bus->number,
+            RT_PCI_SLOT(pdev->devfn), RT_PCI_FUNC(pdev->devfn));
+
+    if (rt_pci_setup_device(pdev))
+    {
+        /*
+         * BUGFIX: rt_pci_alloc_device() linked pdev onto bus->devices_nodes;
+         * unlink it under the bus lock before freeing, otherwise the list
+         * keeps a dangling pointer to freed memory.
+         */
+        spin_lock(&bus->lock);
+        rt_list_remove(&pdev->list);
+        spin_unlock(&bus->lock);
+
+        rt_free(pdev);
+        pdev = RT_NULL;
+
+        goto _end;
+    }
+
+    rt_pci_device_register(pdev);
+
+_end:
+    return pdev;
+}
+
+/*
+ * Detect devices whose COMMAND.INTxDisable bit cannot be toggled: flip
+ * the bit, read the register back, then restore the original command word.
+ */
+static rt_bool_t pci_intx_mask_broken(struct rt_pci_device *pdev)
+{
+    rt_uint16_t origcmd, toggled, readback;
+
+    rt_pci_read_config_u16(pdev, PCIR_COMMAND, &origcmd);
+    toggled = origcmd ^ PCIM_CMD_INTxDIS;
+    rt_pci_write_config_u16(pdev, PCIR_COMMAND, toggled);
+    rt_pci_read_config_u16(pdev, PCIR_COMMAND, &readback);
+
+    rt_pci_write_config_u16(pdev, PCIR_COMMAND, origcmd);
+
+    /* If the write did not stick, INTx masking is broken on this device */
+    return (readback != toggled) ? RT_TRUE : RT_FALSE;
+}
+
+/*
+ * Latch the legacy interrupt pin (INTA..INTD) and, when a pin is wired,
+ * the interrupt line register from config space into @pdev.
+ */
+static void pci_read_irq(struct rt_pci_device *pdev)
+{
+    rt_uint8_t val = 0;
+
+    rt_pci_read_config_u8(pdev, PCIR_INTPIN, &val);
+    pdev->pin = val;
+
+    if (val)
+    {
+        rt_pci_read_config_u8(pdev, PCIR_INTLINE, &val);
+    }
+
+    /* val stays 0 when no INTx pin is used */
+    pdev->irq = val;
+}
+
+/* Cache the PCI Express capability offset; left untouched (zeroed at
+ * allocation) for plain PCI devices. */
+static void pcie_set_port_type(struct rt_pci_device *pdev)
+{
+    int pos = rt_pci_find_capability(pdev, PCIY_EXPRESS);
+
+    if (pos)
+    {
+        pdev->pcie_cap = pos;
+    }
+}
+
+/*
+ * Enable (or disable) ARI forwarding on the upstream bridge, mirroring
+ * whether function 0 of this slot exposes the ARI extended capability.
+ */
+static void pci_configure_ari(struct rt_pci_device *pdev)
+{
+    rt_uint32_t cap2, ctl2;
+    struct rt_pci_device *bridge;
+
+    /* ARI is a PCIe feature and is negotiated through function 0 only */
+    if (!rt_pci_is_pcie(pdev) || pdev->devfn)
+    {
+        return;
+    }
+
+    bridge = pdev->bus->self;
+
+    /* The root bus has no upstream bridge to configure */
+    if (rt_pci_is_root_bus(pdev->bus) || !bridge)
+    {
+        return;
+    }
+
+    rt_pci_read_config_u32(bridge, bridge->pcie_cap + PCIER_DEVICE_CAP2, &cap2);
+    if (!(cap2 & PCIEM_CAP2_ARI))
+    {
+        return;
+    }
+
+    rt_pci_read_config_u32(bridge, bridge->pcie_cap + PCIER_DEVICE_CTL2, &ctl2);
+
+    bridge->ari_enabled = rt_pci_find_ext_capability(pdev, PCIZ_ARI) ?
+                          RT_TRUE : RT_FALSE;
+
+    if (bridge->ari_enabled)
+    {
+        ctl2 |= PCIEM_CTL2_ARI;
+    }
+    else
+    {
+        ctl2 &= ~PCIEM_CTL2_ARI;
+    }
+
+    rt_pci_write_config_u32(bridge, bridge->pcie_cap + PCIER_DEVICE_CTL2, ctl2);
+}
+
+/*
+ * Probe whether the extended (4KiB) config space is reachable by reading
+ * the first dword past the legacy 256-byte space.
+ */
+static rt_uint16_t pci_cfg_space_size_ext(struct rt_pci_device *pdev)
+{
+    rt_uint32_t status;
+    rt_err_t err = rt_pci_read_config_u32(pdev, PCI_REGMAX + 1, &status);
+
+    return err ? (PCI_REGMAX + 1) : (PCIE_REGMAX + 1);
+}
+
+/*
+ * Decide how many bytes of config space this device exposes: the 4KiB
+ * extended space for host bridges, PCIe devices and 266/533MHz-capable
+ * PCI-X devices, otherwise the legacy 256 bytes.
+ */
+static rt_uint16_t pci_cfg_space_size(struct rt_pci_device *pdev)
+{
+    int pos;
+    rt_uint32_t status;
+    rt_uint16_t class = pdev->class >> 8;
+
+    if (class == PCIS_BRIDGE_HOST || rt_pci_is_pcie(pdev))
+    {
+        return pci_cfg_space_size_ext(pdev);
+    }
+
+    pos = rt_pci_find_capability(pdev, PCIY_PCIX);
+    if (!pos)
+    {
+        return PCI_REGMAX + 1;
+    }
+
+    /* PCI-X mode 2 (266/533 capable) devices also carry extended space */
+    rt_pci_read_config_u32(pdev, pos + PCIXR_STATUS, &status);
+    if (status & (PCIXM_STATUS_266CAP | PCIXM_STATUS_533CAP))
+    {
+        return pci_cfg_space_size_ext(pdev);
+    }
+
+    return PCI_REGMAX + 1;
+}
+
+/*
+ * One-stop capability setup for a freshly scanned device: power
+ * management, MSI/MSI-X (left disabled), PCIe capability offset,
+ * config-space size and ARI forwarding on the upstream bridge.
+ */
+static void pci_init_capabilities(struct rt_pci_device *pdev)
+{
+    rt_pci_pme_init(pdev);
+
+#ifdef RT_PCI_MSI
+    rt_pci_msi_init(pdev);  /* Disable MSI */
+    rt_pci_msix_init(pdev); /* Disable MSI-X */
+#endif
+
+    /* Cache pcie_cap first; the two calls below presumably rely on it
+     * via rt_pci_is_pcie() - keep this ordering. */
+    pcie_set_port_type(pdev);
+    pdev->cfg_size = pci_cfg_space_size(pdev);
+    pci_configure_ari(pdev);
+
+    pdev->no_msi = RT_FALSE;
+    pdev->msi_enabled = RT_FALSE;
+    pdev->msix_enabled = RT_FALSE;
+}
+
+rt_err_t rt_pci_setup_device(struct rt_pci_device *pdev)
+{
+    rt_uint8_t pos;
+    rt_uint32_t class = 0;
+    struct rt_pci_host_bridge *host_bridge;
+
+    if (!pdev)
+    {
+        return -RT_EINVAL;
+    }
+
+    if (!(host_bridge = rt_pci_find_host_bridge(pdev->bus)))
+    {
+        return -RT_EINVAL;
+    }
+
+    rt_pci_ofw_device_init(pdev);
+
+    rt_pci_read_config_u32(pdev, PCIR_REVID, &class);
+
+    pdev->revision = class & 0xff;
+    pdev->class = class >> 8;   /* Upper 3 bytes */
+    rt_pci_read_config_u8(pdev, PCIR_HDRTYPE, &pdev->hdr_type);
+
+    /* Clear errors left from system firmware */
+    rt_pci_write_config_u16(pdev, PCIR_STATUS, 0xffff);
+
+    if (pdev->hdr_type & 0x80)
+    {
+        pdev->multi_function = RT_TRUE;
+    }
+    pdev->hdr_type &= PCIM_HDRTYPE;
+
+    if (pci_intx_mask_broken(pdev))
+    {
+        pdev->broken_intx_masking = RT_TRUE;
+    }
+
+    rt_dm_dev_set_name(&pdev->parent, "%04x:%02x:%02x.%u", rt_pci_domain(pdev),
+            pdev->bus->number, RT_PCI_SLOT(pdev->devfn), RT_PCI_FUNC(pdev->devfn));
+
+    switch (pdev->hdr_type)
+    {
+    case PCIM_HDRTYPE_NORMAL:
+        if (class == PCIS_BRIDGE_PCI)
+        {
+            goto error;
+        }
+        pci_read_irq(pdev);
+        rt_pci_device_alloc_resource(host_bridge, pdev);
+        rt_pci_read_config_u16(pdev, PCIR_SUBVEND_0, &pdev->subsystem_vendor);
+        rt_pci_read_config_u16(pdev, PCIR_SUBDEV_0, &pdev->subsystem_device);
+        break;
+
+    case PCIM_HDRTYPE_BRIDGE:
+        pci_read_irq(pdev);
+        rt_pci_device_alloc_resource(host_bridge, pdev);
+        pos = rt_pci_find_capability(pdev, PCIY_SUBVENDOR);
+        if (pos)
+        {
+            rt_pci_read_config_u16(pdev, PCIR_SUBVENDCAP, &pdev->subsystem_vendor);
+            rt_pci_read_config_u16(pdev, PCIR_SUBDEVCAP, &pdev->subsystem_device);
+        }
+        break;
+
+    case PCIM_HDRTYPE_CARDBUS:
+        if (class != PCIS_BRIDGE_CARDBUS)
+        {
+            goto error;
+        }
+        pci_read_irq(pdev);
+        rt_pci_device_alloc_resource(host_bridge, pdev);
+        rt_pci_read_config_u16(pdev, PCIR_SUBVEND_2, &pdev->subsystem_vendor);
+        rt_pci_read_config_u16(pdev, PCIR_SUBDEV_2, &pdev->subsystem_device);
+        break;
+
+    default:
+        LOG_E("Ignoring device unknown header type %02x", pdev->hdr_type);
+        return -RT_EIO;
+
+    error:
+        LOG_E("Ignoring class %08x (doesn't match header type %02x)", pdev->class, pdev->hdr_type);
+        pdev->class = PCIC_NOT_DEFINED << 8;
+    }
+
+    pci_init_capabilities(pdev);
+
+    if (rt_pci_is_pcie(pdev))
+    {
+        rt_pci_read_config_u16(pdev, pdev->pcie_cap + PCIER_FLAGS, &pdev->exp_flags);
+    }
+
+    return RT_EOK;
+}
+
+static struct rt_pci_bus *pci_alloc_bus(struct rt_pci_bus *parent);
+
+static rt_err_t pci_child_bus_init(struct rt_pci_bus *bus, rt_uint32_t bus_no,
+        struct rt_pci_host_bridge *host_bridge, struct rt_pci_device *pdev)
+{
+    rt_err_t err;
+    struct rt_pci_bus *parent_bus = bus->parent;
+
+    bus->sysdata = parent_bus->sysdata;
+    bus->self = pdev;
+    bus->ops = host_bridge->child_ops ? : parent_bus->ops;
+
+    bus->number = bus_no;
+    rt_sprintf(bus->name, "%04x:%02x", host_bridge->domain, bus_no);
+
+    rt_pci_ofw_bus_init(bus);
+
+    if (bus->ops->add)
+    {
+        if ((err = bus->ops->add(bus)))
+        {
+            rt_pci_ofw_bus_free(bus);
+
+            LOG_E("PCI-Bus<%s> add bus failed with err = %s",
+                    bus->name, rt_strerror(err));
+
+            return err;
+        }
+    }
+
+    return RT_EOK;
+}
+
+static rt_bool_t pci_ea_fixed_busnrs(struct rt_pci_device *pdev,
+        rt_uint8_t *sec, rt_uint8_t *sub)
+{
+    int pos, offset;
+    rt_uint32_t dw;
+    rt_uint8_t ea_sec, ea_sub;
+
+    pos = rt_pci_find_capability(pdev, PCIY_EA);
+    if (!pos)
+    {
+        return RT_FALSE;
+    }
+
+    offset = pos + PCIR_EA_FIRST_ENT;
+    rt_pci_read_config_u32(pdev, offset, &dw);
+    ea_sec = PCIM_EA_SEC_NR(dw);
+    ea_sub = PCIM_EA_SUB_NR(dw);
+    if (ea_sec  == 0 || ea_sub < ea_sec)
+    {
+        return RT_FALSE;
+    }
+
+    *sec = ea_sec;
+    *sub = ea_sub;
+
+    return RT_TRUE;
+}
+
/*
 * Work around links that fail to train: force the target link speed down
 * to 2.5GT/s, retrain, and poll for the data link layer to come up. If it
 * never does, restore the original target speed and retrain once more.
 * Only applies to downstream-facing ports with PCIe capability v2+ (which
 * is when the Link Control 2 register exists).
 */
static void pcie_fixup_link(struct rt_pci_device *pdev)
{
    int pos = pdev->pcie_cap;
    rt_uint16_t exp_lnkctl, exp_lnkctl2, exp_lnksta;
    rt_uint16_t exp_type = pdev->exp_flags & PCIEM_FLAGS_TYPE;

    /* Link Control 2 only exists from PCIe capability version 2 on */
    if ((pdev->exp_flags & PCIEM_FLAGS_VERSION) < 2)
    {
        return;
    }

    /* Only ports with a link on their downstream side can retrain it */
    if (exp_type != PCIEM_TYPE_ROOT_PORT &&
        exp_type != PCIEM_TYPE_DOWNSTREAM_PORT &&
        exp_type != PCIEM_TYPE_PCIE_BRIDGE)
    {
        return;
    }

    /* Save the current link-control values so they can be restored */
    rt_pci_read_config_u16(pdev, pos + PCIER_LINK_CTL, &exp_lnkctl);
    rt_pci_read_config_u16(pdev, pos + PCIER_LINK_CTL2, &exp_lnkctl2);

    /* Drop the target link speed to 2.5GT/s and retrain */
    rt_pci_write_config_u16(pdev, pos + PCIER_LINK_CTL2,
            (exp_lnkctl2 & ~PCIEM_LNKCTL2_TLS) | PCIEM_LNKCTL2_TLS_2_5GT);
    rt_pci_write_config_u16(pdev, pos + PCIER_LINK_CTL,
            exp_lnkctl | PCIEM_LINK_CTL_RETRAIN_LINK);

    /* Poll up to ~200ms for the data link layer to become active */
    for (int i = 0; i < 20; ++i)
    {
        rt_pci_read_config_u16(pdev, pos + PCIER_LINK_STA, &exp_lnksta);

        if (!!(exp_lnksta & PCIEM_LINK_STA_DL_ACTIVE))
        {
            return;
        }

        rt_thread_mdelay(10);
    }

    /* Fail, restore */
    rt_pci_write_config_u16(pdev, pos + PCIER_LINK_CTL2, exp_lnkctl2);
    rt_pci_write_config_u16(pdev, pos + PCIER_LINK_CTL,
            exp_lnkctl | PCIEM_LINK_CTL_RETRAIN_LINK);
}
+
+static rt_uint32_t pci_scan_bridge_extend(struct rt_pci_bus *bus, struct rt_pci_device *pdev,
+        rt_uint32_t bus_no_start, rt_uint32_t buses, rt_bool_t reconfigured)
+{
+    rt_bool_t fixed_buses;
+    rt_uint8_t fixed_sub, fixed_sec;
+    rt_uint8_t primary, secondary, subordinate;
+    rt_uint32_t value, bus_no = bus_no_start;
+    struct rt_pci_bus *next_bus;
+    struct rt_pci_host_bridge *host_bridge;
+
+    /* We not supported init CardBus, it always used in the PC servers. */
+    if (pdev->hdr_type == PCIM_HDRTYPE_CARDBUS)
+    {
+        LOG_E("CardBus is not supported in system");
+
+        goto _end;
+    }
+
+    rt_pci_read_config_u32(pdev, PCIR_PRIBUS_1, &value);
+    primary = value & 0xff;
+    secondary = (value >> 8) & 0xff;
+    subordinate = (value >> 16) & 0xff;
+
+    if (primary == bus->number && bus->number > secondary && secondary > subordinate)
+    {
+        if (!reconfigured)
+        {
+            goto _end;
+        }
+
+        LOG_I("Bridge configuration: primary(%02x) secondary(%02x) subordinate(%02x)",
+                primary, secondary, subordinate);
+    }
+
+    if (pdev->pcie_cap)
+    {
+        pcie_fixup_link(pdev);
+    }
+
+    ++bus_no;
+    /* Count of subordinate */
+    buses -= !!buses;
+
+    host_bridge = rt_pci_find_host_bridge(bus);
+    RT_ASSERT(host_bridge != RT_NULL);
+
+    /* Clear errors */
+    rt_pci_write_config_u16(pdev, PCIR_STATUS, RT_UINT16_MAX);
+
+    fixed_buses = pci_ea_fixed_busnrs(pdev, &fixed_sec, &fixed_sub);
+
+    if (!(next_bus = pci_alloc_bus(bus)))
+    {
+        goto _end;
+    }
+
+    /* Clear bus info */
+    rt_pci_write_config_u32(pdev, PCIR_PRIBUS_1, value & ~0xffffff);
+
+    if (!(next_bus = pci_alloc_bus(bus)))
+    {
+        LOG_E("Alloc bus(%02x) fail", bus_no);
+        goto _end;
+    }
+
+    if (pci_child_bus_init(next_bus, bus_no, host_bridge, pdev))
+    {
+        goto _end;
+    }
+
+    /* Fill primary, secondary */
+    value = (buses & 0xff000000) | (bus->number << 0) | (next_bus->number << 8);
+    rt_pci_write_config_u32(pdev, PCIR_PRIBUS_1, value);
+
+    bus_no = rt_pci_scan_child_buses(next_bus, buses);
+
+    /* Fill subordinate */
+    value |= next_bus->number + rt_list_len(&next_bus->children_nodes);
+    rt_pci_write_config_u32(pdev, PCIR_PRIBUS_1, value);
+
+    if (fixed_buses)
+    {
+        bus_no = fixed_sub;
+    }
+    rt_pci_write_config_u8(pdev, PCIR_SUBBUS_1, bus_no);
+
+_end:
+    return bus_no;
+}
+
+rt_uint32_t rt_pci_scan_bridge(struct rt_pci_bus *bus, struct rt_pci_device *pdev,
+        rt_uint32_t bus_no_start, rt_bool_t reconfigured)
+{
+    if (!bus || !pdev)
+    {
+        return RT_UINT32_MAX;
+    }
+
+    return pci_scan_bridge_extend(bus, pdev, bus_no_start, 0, reconfigured);
+}
+
+rt_inline rt_bool_t only_one_child(struct rt_pci_bus *bus)
+{
+    struct rt_pci_device *pdev;
+
+    if (rt_pci_is_root_bus(bus))
+    {
+        return RT_FALSE;
+    }
+
+    pdev = bus->self;
+
+    if (rt_pci_is_pcie(pdev))
+    {
+        rt_uint16_t exp_type = pdev->exp_flags & PCIEM_FLAGS_TYPE;
+
+        if (exp_type == PCIEM_TYPE_ROOT_PORT ||
+            exp_type == PCIEM_TYPE_DOWNSTREAM_PORT ||
+            exp_type == PCIEM_TYPE_PCIE_BRIDGE)
+        {
+            return RT_TRUE;
+        }
+    }
+
+    return RT_FALSE;
+}
+
+static int next_fn(struct rt_pci_bus *bus, struct rt_pci_device *pdev, int fn)
+{
+    if (!rt_pci_is_root_bus(bus) && bus->self->ari_enabled)
+    {
+        int pos, next_fn;
+        rt_uint16_t cap = 0;
+
+        if (!pdev)
+        {
+            return -RT_EINVAL;
+        }
+
+        pos = rt_pci_find_ext_capability(pdev, PCIZ_ARI);
+
+        if (!pos)
+        {
+            return -RT_EINVAL;
+        }
+
+        rt_pci_read_config_u16(pdev, pos + PCIR_ARI_CAP, &cap);
+        next_fn = PCIM_ARI_CAP_NFN(cap);
+
+        if (next_fn <= fn)
+        {
+            return -RT_EINVAL;
+        }
+
+        return next_fn;
+    }
+
+    if (fn >= RT_PCI_FUNCTION_MAX - 1)
+    {
+        return -RT_EINVAL;
+    }
+
+    if (pdev && !pdev->multi_function)
+    {
+        return -RT_EINVAL;
+    }
+
+    return fn + 1;
+}
+
+rt_size_t rt_pci_scan_slot(struct rt_pci_bus *bus, rt_uint32_t devfn)
+{
+    rt_size_t nr = 0;
+    struct rt_pci_device *pdev = RT_NULL;
+
+    if (!bus)
+    {
+        return nr;
+    }
+
+    if (devfn > 0 && only_one_child(bus))
+    {
+        return nr;
+    }
+
+    for (int func = 0; func >= 0; func = next_fn(bus, pdev, func))
+    {
+        pdev = rt_pci_scan_single_device(bus, devfn + func);
+
+        if (pdev)
+        {
+            ++nr;
+
+            if (func > 0)
+            {
+                pdev->multi_function = RT_TRUE;
+            }
+        }
+        else if (func == 0)
+        {
+            break;
+        }
+    }
+
+    return nr;
+}
+
+rt_uint32_t rt_pci_scan_child_buses(struct rt_pci_bus *bus, rt_size_t buses)
+{
+    rt_uint32_t bus_no;
+    struct rt_pci_device *pdev = RT_NULL;
+
+    if (!bus)
+    {
+        bus_no = RT_UINT32_MAX;
+
+        goto _end;
+    }
+
+    bus_no = bus->number;
+
+    for (rt_uint32_t devfn = 0;
+        devfn < RT_PCI_DEVFN(RT_PCI_DEVICE_MAX - 1, RT_PCI_FUNCTION_MAX - 1);
+        devfn += RT_PCI_FUNCTION_MAX)
+    {
+        rt_pci_scan_slot(bus, devfn);
+    }
+
+    rt_pci_foreach_bridge(pdev, bus)
+    {
+        int offset;
+
+        bus_no = pci_scan_bridge_extend(bus, pdev, bus_no, buses, RT_TRUE);
+        offset = bus_no - bus->number;
+
+        if (buses > offset)
+        {
+            buses -= offset;
+        }
+        else
+        {
+            break;
+        }
+    }
+
+_end:
+    return bus_no;
+}
+
/*
 * Enumerate everything below @bus with automatic bus-number sizing
 * (buses = 0). Returns the highest bus number used below @bus.
 */
rt_uint32_t rt_pci_scan_child_bus(struct rt_pci_bus *bus)
{
    return rt_pci_scan_child_buses(bus, 0);
}
+
+static struct rt_pci_bus *pci_alloc_bus(struct rt_pci_bus *parent)
+{
+    struct rt_pci_bus *bus = rt_calloc(1, sizeof(*bus));
+
+    if (!bus)
+    {
+        return RT_NULL;
+    }
+
+    bus->parent = parent;
+
+    rt_list_init(&bus->list);
+    rt_list_init(&bus->children_nodes);
+    rt_list_init(&bus->devices_nodes);
+
+    rt_spin_lock_init(&bus->lock);
+
+    return bus;
+}
+
+rt_err_t rt_pci_host_bridge_register(struct rt_pci_host_bridge *host_bridge)
+{
+    struct rt_pci_bus *bus = pci_alloc_bus(RT_NULL);
+
+    if (!bus)
+    {
+        return -RT_ENOMEM;
+    }
+
+    host_bridge->root_bus = bus;
+
+    bus->sysdata = host_bridge->sysdata;
+    bus->host_bridge = host_bridge;
+    bus->ops = host_bridge->ops;
+
+    bus->number = host_bridge->bus_range[0];
+    rt_sprintf(bus->name, "%04x:%02x", host_bridge->domain, bus->number);
+
+    if (bus->ops->add)
+    {
+        rt_err_t err = bus->ops->add(bus);
+
+        if (err)
+        {
+            LOG_E("PCI-Bus<%s> add bus failed with err = %s", bus->name, rt_strerror(err));
+        }
+    }
+
+    return RT_EOK;
+}
+
+rt_err_t rt_pci_scan_root_bus_bridge(struct rt_pci_host_bridge *host_bridge)
+{
+    rt_err_t err;
+
+    if ((err = rt_pci_host_bridge_register(host_bridge)))
+    {
+        return err;
+    }
+
+    rt_pci_scan_child_bus(host_bridge->root_bus);
+
+    return err;
+}
+
+rt_err_t rt_pci_host_bridge_probe(struct rt_pci_host_bridge *host_bridge)
+{
+    rt_err_t err;
+
+    err = rt_pci_scan_root_bus_bridge(host_bridge);
+
+    return err;
+}
+
/*
 * rt_pci_enum_device() callback: detach one device from the driver model.
 *
 * @pdev device currently visited by the enumerator
 * @data unused callback context
 */
static rt_bool_t pci_remove_bus_device(struct rt_pci_device *pdev, void *data)
{
    /* Bus will free if this is the last device */
    rt_bus_remove_device(&pdev->parent);

    /* To find all devices, always return false */
    return RT_FALSE;
}
+
+rt_err_t rt_pci_host_bridge_remove(struct rt_pci_host_bridge *host_bridge)
+{
+    rt_err_t err = RT_EOK;
+
+    if (host_bridge && host_bridge->root_bus)
+    {
+        rt_pci_enum_device(host_bridge->root_bus, pci_remove_bus_device, RT_NULL);
+        host_bridge->root_bus = RT_NULL;
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
/*
 * Remove a PCI bus once it has no child buses and no devices left.
 *
 * Returns RT_EOK after the bus is unlinked and freed, -RT_EBUSY while it
 * is still populated, -RT_EINVAL for a NULL bus.
 *
 * NOTE(review): the lock is taken via spin_lock()/spin_unlock() although
 * it was initialized with rt_spin_lock_init() — presumably file-local
 * wrappers defined earlier in this file; confirm.
 */
rt_err_t rt_pci_bus_remove(struct rt_pci_bus *bus)
{
    rt_err_t err = RT_EOK;

    if (bus)
    {
        spin_lock(&bus->lock);

        /* Only an empty bus (no children, no devices) may be removed */
        if (rt_list_isempty(&bus->children_nodes) &&
            rt_list_isempty(&bus->devices_nodes))
        {
            rt_list_remove(&bus->list);
            spin_unlock(&bus->lock);

            /* Give the controller a chance to undo its add() hook */
            if (bus->ops->remove)
            {
                bus->ops->remove(bus);
            }

            rt_pci_ofw_bus_free(bus);
            rt_free(bus);
        }
        else
        {
            spin_unlock(&bus->lock);

            err = -RT_EBUSY;
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}
+
/*
 * Remove a single PCI device from its bus and free it.
 *
 * Spins (yielding the CPU between checks) until this caller holds the
 * last reference to the device, then unlinks it under the bus lock and
 * frees it. Returns -RT_EINVAL for a NULL device.
 */
rt_err_t rt_pci_device_remove(struct rt_pci_device *pdev)
{
    rt_err_t err = RT_EOK;

    if (pdev)
    {
        struct rt_pci_bus *bus = pdev->bus;

        spin_lock(&bus->lock);

        /*
         * Wait for other reference holders to drop out; the lock is
         * released while yielding so they can make progress.
         */
        while (pdev->parent.ref_count > 1)
        {
            spin_unlock(&bus->lock);

            rt_thread_yield();

            spin_lock(&bus->lock);
        }
        rt_list_remove(&pdev->list);

        spin_unlock(&bus->lock);

        rt_free(pdev);
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}