pcie-dw_host.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-09-23     GuEe-GUI     first version
 */

#define DBG_TAG "pcie.dw-host"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include "pcie-dw.h"
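
/*
 * The DWC MSI controller groups vectors into per-controller register
 * blocks: vector hwirq lives in controller hwirq / MAX_MSI_IRQS_PER_CTRL,
 * at bit hwirq % MAX_MSI_IRQS_PER_CTRL, and each controller's
 * ENABLE/MASK/STATUS registers sit MSI_REG_CTRL_BLOCK_SIZE bytes apart.
 * For example, assuming the usual 32 vectors per controller, hwirq 37
 * maps to controller 1, bit 5.
 */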

static void dw_pcie_irq_ack(struct rt_pic_irq *pirq)
{
    int hwirq = pirq->hwirq;
    rt_uint32_t res, bit, ctrl;
    struct dw_pcie_port *port = pirq->pic->priv_data;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;
    res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
    bit = hwirq % MAX_MSI_IRQS_PER_CTRL;

    dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, RT_BIT(bit));
}
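
/*
 * Mask/unmask update both the generic MSI descriptor and the controller's
 * own MASK register; a set bit masks the vector. The cached value in
 * port->irq_mask[] is modified under port->lock so concurrent mask and
 * unmask calls cannot lose each other's bits.
 */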

static void dw_pcie_irq_mask(struct rt_pic_irq *pirq)
{
    rt_ubase_t level;
    int hwirq = pirq->hwirq;
    rt_uint32_t res, bit, ctrl;
    struct dw_pcie_port *port = pirq->pic->priv_data;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    rt_pci_msi_mask_irq(pirq);

    level = rt_spin_lock_irqsave(&port->lock);

    ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;
    res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
    bit = hwirq % MAX_MSI_IRQS_PER_CTRL;

    port->irq_mask[ctrl] |= RT_BIT(bit);
    dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, port->irq_mask[ctrl]);

    rt_spin_unlock_irqrestore(&port->lock, level);
}

static void dw_pcie_irq_unmask(struct rt_pic_irq *pirq)
{
    rt_ubase_t level;
    int hwirq = pirq->hwirq;
    rt_uint32_t res, bit, ctrl;
    struct dw_pcie_port *port = pirq->pic->priv_data;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    rt_pci_msi_unmask_irq(pirq);

    level = rt_spin_lock_irqsave(&port->lock);

    ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;
    res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
    bit = hwirq % MAX_MSI_IRQS_PER_CTRL;

    port->irq_mask[ctrl] &= ~RT_BIT(bit);
    dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, port->irq_mask[ctrl]);

    rt_spin_unlock_irqrestore(&port->lock, level);
}
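
/*
 * An MSI is raised by the endpoint writing 'data' (the hwirq) to the
 * doorbell address composed below; both halves of the 64-bit target
 * address come from the coherent buffer allocated in dw_pcie_host_init().
 */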

static void dw_pcie_compose_msi_msg(struct rt_pic_irq *pirq, struct rt_pci_msi_msg *msg)
{
    rt_uint64_t msi_target;
    struct dw_pcie_port *port = pirq->pic->priv_data;

    msi_target = (rt_uint64_t)port->msi_data_phy;

    msg->address_lo = rt_lower_32_bits(msi_target);
    msg->address_hi = rt_upper_32_bits(msi_target);
    msg->data = pirq->hwirq;
}
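
/*
 * Allocate the lowest free vector from the port bitmap; fails with
 * -RT_EEMPTY once all port->irq_count vectors are in use.
 */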

static int dw_pcie_irq_alloc_msi(struct rt_pic *pic, struct rt_pci_msi_desc *msi_desc)
{
    rt_ubase_t level;
    int irq, hwirq;
    struct rt_pic_irq *pirq;
    struct dw_pcie_port *port = pic->priv_data;

    level = rt_spin_lock_irqsave(&port->lock);

    hwirq = rt_bitmap_next_clear_bit(port->msi_map, 0, port->irq_count);

    if (hwirq >= port->irq_count)
    {
        irq = -RT_EEMPTY;
        goto _out_lock;
    }

    pirq = rt_pic_find_irq(pic, hwirq);
    irq = rt_pic_config_irq(pic, hwirq, hwirq);
    pirq->mode = RT_IRQ_MODE_EDGE_RISING;

    rt_bitmap_set_bit(port->msi_map, hwirq);

_out_lock:
    rt_spin_unlock_irqrestore(&port->lock, level);

    return irq;
}

static void dw_pcie_irq_free_msi(struct rt_pic *pic, int irq)
{
    rt_ubase_t level;
    struct rt_pic_irq *pirq;
    struct dw_pcie_port *port = pic->priv_data;

    pirq = rt_pic_find_pirq(pic, irq);

    if (!pirq)
    {
        return;
    }

    level = rt_spin_lock_irqsave(&port->lock);
    rt_bitmap_clear_bit(port->msi_map, pirq->hwirq);
    rt_spin_unlock_irqrestore(&port->lock, level);
}

static const struct rt_pic_ops dw_pci_msi_ops =
{
    .name = "DWPCI-MSI",
    .irq_ack = dw_pcie_irq_ack,
    .irq_mask = dw_pcie_irq_mask,
    .irq_unmask = dw_pcie_irq_unmask,
    .irq_compose_msi_msg = dw_pcie_compose_msi_msg,
    .irq_alloc_msi = dw_pcie_irq_alloc_msi,
    .irq_free_msi = dw_pcie_irq_free_msi,
    .flags = RT_PIC_F_IRQ_ROUTING,
};

/*
 * MSI interrupt handler: scan every controller's STATUS register and
 * dispatch each pending vector; returns -RT_EEMPTY if nothing was pending.
 */
rt_err_t dw_handle_msi_irq(struct dw_pcie_port *port)
{
    rt_err_t err;
    int i, pos;
    rt_bitmap_t status;
    rt_uint32_t num_ctrls;
    struct rt_pic_irq *pirq;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);
    struct rt_pic *msi_pic = port->msi_pic;

    err = -RT_EEMPTY;
    num_ctrls = RT_DIV_ROUND_UP(port->irq_count, MAX_MSI_IRQS_PER_CTRL);

    for (i = 0; i < num_ctrls; ++i)
    {
        status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
                (i * MSI_REG_CTRL_BLOCK_SIZE));

        if (!status)
        {
            continue;
        }

        err = RT_EOK;

        rt_bitmap_for_each_set_bit(&status, pos, MAX_MSI_IRQS_PER_CTRL)
        {
            pirq = rt_pic_find_irq(msi_pic, pos + i * MAX_MSI_IRQS_PER_CTRL);

            dw_pcie_irq_ack(pirq);
            rt_pic_handle_isr(pirq);
        }
    }

    return err;
}

static void dw_pcie_msi_isr(int irqno, void *param)
{
    struct dw_pcie_port *port = param;

    dw_handle_msi_irq(port);
}

void dw_pcie_free_msi(struct dw_pcie_port *port)
{
    if (port->msi_irq >= 0)
    {
        rt_hw_interrupt_mask(port->msi_irq);
        rt_pic_detach_irq(port->msi_irq, port);
    }

    if (port->msi_data)
    {
        struct dw_pcie *pci = to_dw_pcie_from_port(port);

        rt_dma_free_coherent(pci->dev, sizeof(rt_uint64_t), port->msi_data,
                port->msi_data_phy);
    }
}
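
/*
 * Program the physical address of the coherent buffer as the MSI target.
 * The controller is expected to latch inbound writes to this address into
 * its STATUS registers, so the buffer contents are never read; the
 * allocation only reserves a DMA-safe address.
 */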

void dw_pcie_msi_init(struct dw_pcie_port *port)
{
#ifdef RT_PCI_MSI
    struct dw_pcie *pci = to_dw_pcie_from_port(port);
    rt_uint64_t msi_target = (rt_uint64_t)port->msi_data_phy;

    /* Program the MSI target address */
    dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, rt_lower_32_bits(msi_target));
    dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, rt_upper_32_bits(msi_target));
#endif
}

static const struct rt_pci_ops dw_child_pcie_ops;
static const struct rt_pci_ops dw_pcie_ops;

rt_err_t dw_pcie_host_init(struct dw_pcie_port *port)
{
    rt_err_t err;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);
    struct rt_device *dev = pci->dev;
    struct rt_pci_host_bridge *bridge;

    rt_spin_lock_init(&port->lock);

    rt_dm_dev_get_address_by_name(dev, "config", &port->cfg0_addr, &port->cfg0_size);

    if (port->cfg0_addr)
    {
        port->cfg0_base = rt_ioremap((void *)port->cfg0_addr, port->cfg0_size);

        if (!port->cfg0_base)
        {
            return -RT_EIO;
        }
    }
    else if (!port->cfg0_base)
    {
        LOG_E("Missing 'config' reg space");
    }

    if (!(bridge = rt_pci_host_bridge_alloc(0)))
    {
        return -RT_ENOMEM;
    }

    bridge->parent.ofw_node = dev->ofw_node;

    if ((err = rt_pci_host_bridge_init(bridge)))
    {
        goto _err_free_bridge;
    }

    port->bridge = bridge;

    for (int i = 0; i < bridge->bus_regions_nr; ++i)
    {
        struct rt_pci_bus_region *region = &bridge->bus_regions[i];

        switch (region->flags)
        {
        case PCI_BUS_REGION_F_IO:
            port->io_addr = region->cpu_addr;
            port->io_bus_addr = region->phy_addr;
            port->io_size = region->size;
            break;

        case PCI_BUS_REGION_F_NONE:
            port->cfg0_size = region->size;
            port->cfg0_addr = region->cpu_addr;

            if (!pci->dbi_base)
            {
                pci->dbi_base = rt_ioremap((void *)port->cfg0_addr, port->cfg0_size);

                if (!pci->dbi_base)
                {
                    LOG_E("Error with ioremap");
                    return -RT_ENOMEM;
                }
            }
            break;

        default:
            break;
        }
    }

    if (!port->cfg0_base && port->cfg0_addr)
    {
        port->cfg0_base = rt_ioremap((void *)port->cfg0_addr, port->cfg0_size);

        if (!port->cfg0_base)
        {
            return -RT_ENOMEM;
        }
    }

    if (rt_dm_dev_prop_read_u32(dev, "num-viewport", &pci->num_viewport))
    {
        pci->num_viewport = 2;
    }

    if (pci->link_gen < 1)
    {
        pci->link_gen = -1;
        rt_dm_dev_prop_read_u32(dev, "max-link-speed", &pci->link_gen);
    }

    /*
     * If a specific SoC driver needs to change the default number of vectors,
     * it needs to implement the set_irq_count callback.
     */
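
    /*
     * A hypothetical override might look like this (sketch only; the
     * my_soc_* names are illustrative, not part of this driver):
     *
     *     static void my_soc_set_irq_count(struct dw_pcie_port *port)
     *     {
     *         port->irq_count = 64;   // SoC wires up 64 MSI vectors
     *     }
     *
     * Hooked up through port->ops, it is still subject to the bounds
     * check against MAX_MSI_IRQS below.
     */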
    if (!port->ops->set_irq_count)
    {
        port->irq_count = MSI_DEF_NUM_VECTORS;
    }
    else
    {
        port->ops->set_irq_count(port);

        if (port->irq_count > MAX_MSI_IRQS || port->irq_count == 0)
        {
            LOG_E("Invalid MSI vector count = %d", port->irq_count);

            return -RT_EINVAL;
        }
    }

    if (!port->ops->msi_host_init)
    {
        port->msi_pic = rt_calloc(1, sizeof(*port->msi_pic));

        if (!port->msi_pic)
        {
            return -RT_ENOMEM;
        }

        port->msi_pic->priv_data = port;
        port->msi_pic->ops = &dw_pci_msi_ops;
        rt_pic_linear_irq(port->msi_pic, port->irq_count);
        rt_pic_user_extends(port->msi_pic);

        if (port->msi_irq)
        {
            rt_hw_interrupt_install(port->msi_irq, dw_pcie_msi_isr, port, "dwc-pci-msi");
            rt_hw_interrupt_umask(port->msi_irq);
        }

        port->msi_data = rt_dma_alloc_coherent(pci->dev, sizeof(rt_uint64_t),
                &port->msi_data_phy);

        if (!port->msi_data)
        {
            err = -RT_ENOMEM;

            goto _err_free_msi;
        }
    }
    else
    {
        if ((err = port->ops->msi_host_init(port)))
        {
            return err;
        }
    }

    /* Set default bus ops */
    bridge->ops = &dw_pcie_ops;
    bridge->child_ops = &dw_child_pcie_ops;

    if (port->ops->host_init && (err = port->ops->host_init(port)))
    {
        goto _err_free_msi;
    }

    bridge->sysdata = port;

    if ((err = rt_pci_host_bridge_probe(bridge)))
    {
        goto _err_free_msi;
    }

    return RT_EOK;

_err_free_msi:
    if (!port->ops->msi_host_init)
    {
        dw_pcie_free_msi(port);

        rt_pic_cancel_irq(port->msi_pic);
        rt_free(port->msi_pic);
        port->msi_pic = RT_NULL;
    }

_err_free_bridge:
    rt_pci_host_bridge_free(bridge);
    port->bridge = RT_NULL;

    return err;
}

void dw_pcie_host_deinit(struct dw_pcie_port *port)
{
    if (!port->ops->msi_host_init)
    {
        dw_pcie_free_msi(port);
    }
}

void dw_pcie_host_free(struct dw_pcie_port *port)
{
    if (!port->ops->msi_host_init)
    {
        dw_pcie_free_msi(port);

        rt_pic_cancel_irq(port->msi_pic);
        rt_free(port->msi_pic);
    }

    if (port->bridge)
    {
        rt_pci_host_bridge_free(port->bridge);
    }
}
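
/*
 * The child-bus config accessors below reuse outbound ATU viewport 0:
 * every access retargets the shared "config" window at the requested
 * bus/device/function before the read or write goes out on the link.
 */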

static void *dw_pcie_other_conf_map(struct rt_pci_bus *bus, rt_uint32_t devfn, int reg)
{
    int type;
    rt_uint32_t busdev;
    struct dw_pcie_port *port = bus->sysdata;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    /*
     * Checking whether the link is up here is a last line of defense
     * against platforms that forward errors on the system bus as
     * SError upon PCI configuration transactions issued when the link is down.
     * This check is racy by definition and does not stop the system from
     * triggering an SError if the link goes down after this check is performed.
     */
    if (!dw_pcie_link_up(pci))
    {
        return RT_NULL;
    }

    busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(RT_PCI_SLOT(devfn)) |
            PCIE_ATU_FUNC(RT_PCI_FUNC(devfn));
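
    /*
     * Type 0 config cycles target the bus directly below the root port;
     * anything deeper must go out as type 1 so that intermediate bridges
     * can forward it.
     */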
    if (rt_pci_is_root_bus(bus->parent))
    {
        type = PCIE_ATU_TYPE_CFG0;
    }
    else
    {
        type = PCIE_ATU_TYPE_CFG1;
    }

    dw_pcie_prog_outbound_atu(pci, 0, type, port->cfg0_addr, busdev, port->cfg0_size);

    return port->cfg0_base + reg;
}
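
/*
 * When CFG and IO share the last ATU viewport (DWC_IATU_IOCFG_SHARED),
 * the IO window has to be restored after every child config access,
 * which is what the two accessors below do.
 */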

static rt_err_t dw_pcie_other_read_conf(struct rt_pci_bus *bus,
        rt_uint32_t devfn, int reg, int width, rt_uint32_t *value)
{
    rt_err_t err;
    struct dw_pcie_port *port = bus->sysdata;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    err = rt_pci_bus_read_config_uxx(bus, devfn, reg, width, value);

    if (!err && (pci->iatu_unroll_enabled & DWC_IATU_IOCFG_SHARED))
    {
        dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
                port->io_addr, port->io_bus_addr, port->io_size);
    }

    return err;
}

static rt_err_t dw_pcie_other_write_conf(struct rt_pci_bus *bus,
        rt_uint32_t devfn, int reg, int width, rt_uint32_t value)
{
    rt_err_t err;
    struct dw_pcie_port *port = bus->sysdata;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    err = rt_pci_bus_write_config_uxx(bus, devfn, reg, width, value);

    if (!err && (pci->iatu_unroll_enabled & DWC_IATU_IOCFG_SHARED))
    {
        dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
                port->io_addr, port->io_bus_addr, port->io_size);
    }

    return err;
}

static const struct rt_pci_ops dw_child_pcie_ops =
{
    .map = dw_pcie_other_conf_map,
    .read = dw_pcie_other_read_conf,
    .write = dw_pcie_other_write_conf,
};
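
/*
 * Accesses on the root bus map straight onto the DBI space; only
 * device 0 exists there, so any other slot returns RT_NULL.
 */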
void *dw_pcie_own_conf_map(struct rt_pci_bus *bus, rt_uint32_t devfn, int reg)
{
    struct dw_pcie_port *port = bus->sysdata;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    if (RT_PCI_SLOT(devfn) > 0)
    {
        return RT_NULL;
    }

    return pci->dbi_base + reg;
}

static const struct rt_pci_ops dw_pcie_ops =
{
    .map = dw_pcie_own_conf_map,
    .read = rt_pci_bus_read_config_uxx,
    .write = rt_pci_bus_write_config_uxx,
};
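
/*
 * Root-complex bring-up: unlock the DBI read-only registers, program the
 * MSI controllers, RC BARs, interrupt pin, bus numbers and command
 * register, then map the outbound windows before locking the registers
 * again.
 */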
void dw_pcie_setup_rc(struct dw_pcie_port *port)
{
    rt_uint32_t val, num_ctrls;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    /*
     * Enable DBI read-only registers for writing/updating configuration.
     * Write permission gets disabled towards the end of this function.
     */
    dw_pcie_dbi_ro_writable_enable(pci, RT_TRUE);

    dw_pcie_setup(pci);

    if (!port->ops->msi_host_init)
    {
        num_ctrls = RT_DIV_ROUND_UP(port->irq_count, MAX_MSI_IRQS_PER_CTRL);

        /* Initialize MSI: mask all vectors, then enable every controller block */
        for (int ctrl = 0; ctrl < num_ctrls; ++ctrl)
        {
            port->irq_mask[ctrl] = ~0;
            dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
                    (ctrl * MSI_REG_CTRL_BLOCK_SIZE), port->irq_mask[ctrl]);
            dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
                    (ctrl * MSI_REG_CTRL_BLOCK_SIZE), ~0);
        }
    }

    /* Setup RC BARs */
    dw_pcie_writel_dbi(pci, PCIR_BAR(0), PCIM_BAR_MEM_TYPE_64);
    dw_pcie_writel_dbi(pci, PCIR_BAR(1), PCIM_BAR_MEM_TYPE_32);

    /* Setup interrupt pins: Interrupt Pin = 1 (INTA) */
    val = dw_pcie_readl_dbi(pci, PCIR_INTLINE);
    val &= 0xffff00ff;
    val |= 0x00000100;
    dw_pcie_writel_dbi(pci, PCIR_INTLINE, val);

    /* Setup bus numbers: primary = 0, secondary = 1, subordinate = 0xff */
    val = dw_pcie_readl_dbi(pci, PCIR_PRIBUS_1);
    val &= 0xff000000;
    val |= 0x00ff0100;
    dw_pcie_writel_dbi(pci, PCIR_PRIBUS_1, val);

    /* Setup command register */
    val = dw_pcie_readl_dbi(pci, PCIR_COMMAND);
    val &= 0xffff0000;
    val |= PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_SERRESPEN;
    dw_pcie_writel_dbi(pci, PCIR_COMMAND, val);

    /*
     * If the platform provides its own child bus config accesses, it means
     * the platform uses its own address translation component rather than
     * ATU, so we should not program the ATU here.
     */
    if (pci->port.bridge->child_ops == &dw_child_pcie_ops)
    {
        int atu_idx = 0;
        struct rt_pci_host_bridge *bridge = port->bridge;

        /* Get last memory resource entry */
        for (int i = 0; i < bridge->bus_regions_nr; ++i)
        {
            struct rt_pci_bus_region *region = &bridge->bus_regions[i];

            if (region->flags != PCI_BUS_REGION_F_MEM)
            {
                continue;
            }

            if (pci->num_viewport <= ++atu_idx)
            {
                break;
            }

            dw_pcie_prog_outbound_atu(pci, atu_idx,
                    PCIE_ATU_TYPE_MEM, region->cpu_addr,
                    region->phy_addr, region->size);
        }

        if (port->io_size)
        {
            if (pci->num_viewport > ++atu_idx)
            {
                dw_pcie_prog_outbound_atu(pci, atu_idx,
                        PCIE_ATU_TYPE_IO, port->io_addr,
                        port->io_bus_addr, port->io_size);
            }
            else
            {
                /* Not enough viewports: share viewport 0 between CFG and IO */
                pci->iatu_unroll_enabled |= DWC_IATU_IOCFG_SHARED;
            }
        }

        if (pci->num_viewport <= atu_idx)
        {
            LOG_W("Resources exceed number of ATU entries (%d)", pci->num_viewport);
        }
    }

    dw_pcie_writel_dbi(pci, PCIR_BAR(0), 0);

    /* Program correct class for RC */
    dw_pcie_writew_dbi(pci, PCIR_SUBCLASS, PCIS_BRIDGE_PCI);

    /* Trigger a directed speed change */
    val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
    val |= PORT_LOGIC_SPEED_CHANGE;
    dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

    dw_pcie_dbi_ro_writable_enable(pci, RT_FALSE);
}