/* pcie-dw.c — DesignWare PCIe controller driver (RT-Thread) */
  1. /*
  2. * Copyright (c) 2006-2023, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2023-09-23 GuEe-GUI first version
  9. */
  10. #define DBG_TAG "pcie.dw"
  11. #define DBG_LVL DBG_INFO
  12. #include <rtdbg.h>
  13. #include "pcie-dw.h"
  14. static rt_uint8_t __dw_pcie_find_next_cap(struct dw_pcie *pci,
  15. rt_uint8_t cap_ptr, rt_uint8_t cap)
  16. {
  17. rt_uint16_t reg;
  18. rt_uint8_t cap_id, next_cap_ptr;
  19. if (!cap_ptr)
  20. {
  21. return 0;
  22. }
  23. reg = dw_pcie_readw_dbi(pci, cap_ptr);
  24. cap_id = (reg & 0x00ff);
  25. if (cap_id > PCIY_MAX)
  26. {
  27. return 0;
  28. }
  29. if (cap_id == cap)
  30. {
  31. return cap_ptr;
  32. }
  33. next_cap_ptr = (reg & 0xff00) >> 8;
  34. return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
  35. }
  36. rt_uint8_t dw_pcie_find_capability(struct dw_pcie *pci, rt_uint8_t cap)
  37. {
  38. rt_uint16_t reg;
  39. rt_uint8_t next_cap_ptr;
  40. reg = dw_pcie_readw_dbi(pci, PCIR_CAP_PTR);
  41. next_cap_ptr = (reg & 0x00ff);
  42. return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
  43. }
  44. static rt_uint16_t dw_pcie_find_next_ext_capability(struct dw_pcie *pci,
  45. rt_uint16_t start, rt_uint8_t cap)
  46. {
  47. rt_uint32_t header;
  48. int ttl, pos = PCI_REGMAX + 1;
  49. /* minimum 8 bytes per capability */
  50. ttl = ((PCIE_REGMAX + 1) - (PCI_REGMAX + 1)) / 8;
  51. if (start)
  52. {
  53. pos = start;
  54. }
  55. header = dw_pcie_readl_dbi(pci, pos);
  56. /*
  57. * If we have no capabilities, this is indicated by cap ID,
  58. * cap version and next pointer all being 0.
  59. */
  60. if (header == 0)
  61. {
  62. return 0;
  63. }
  64. while (ttl-- > 0)
  65. {
  66. if (PCI_EXTCAP_ID(header) == cap && pos != start)
  67. {
  68. return pos;
  69. }
  70. pos = PCI_EXTCAP_NEXTPTR(header);
  71. if (pos < PCI_REGMAX + 1)
  72. {
  73. break;
  74. }
  75. header = dw_pcie_readl_dbi(pci, pos);
  76. }
  77. return 0;
  78. }
  79. rt_uint16_t dw_pcie_find_ext_capability(struct dw_pcie *pci, rt_uint8_t cap)
  80. {
  81. return dw_pcie_find_next_ext_capability(pci, 0, cap);
  82. }
  83. rt_err_t dw_pcie_read(void *addr, rt_size_t size, rt_uint32_t *out_val)
  84. {
  85. /* Check aligned */
  86. if ((rt_ubase_t)addr & ((rt_ubase_t)size - 1))
  87. {
  88. *out_val = 0;
  89. return -RT_EINVAL;
  90. }
  91. if (size == 4)
  92. {
  93. *out_val = HWREG32(addr);
  94. }
  95. else if (size == 2)
  96. {
  97. *out_val = HWREG16(addr);
  98. }
  99. else if (size == 1)
  100. {
  101. *out_val = HWREG8(addr);
  102. }
  103. else
  104. {
  105. *out_val = 0;
  106. return -RT_EINVAL;
  107. }
  108. return RT_EOK;
  109. }
  110. rt_err_t dw_pcie_write(void *addr, rt_size_t size, rt_uint32_t val)
  111. {
  112. /* Check aligned */
  113. if ((rt_ubase_t)addr & ((rt_ubase_t)size - 1))
  114. {
  115. return -RT_EINVAL;
  116. }
  117. if (size == 4)
  118. {
  119. HWREG32(addr) = val;
  120. }
  121. else if (size == 2)
  122. {
  123. HWREG16(addr) = val;
  124. }
  125. else if (size == 1)
  126. {
  127. HWREG8(addr) = val;
  128. }
  129. else
  130. {
  131. return -RT_EINVAL;
  132. }
  133. return RT_EOK;
  134. }
  135. rt_uint32_t dw_pcie_read_dbi(struct dw_pcie *pci, rt_uint32_t reg, rt_size_t size)
  136. {
  137. rt_err_t err;
  138. rt_uint32_t val = 0;
  139. if (pci->ops->read_dbi)
  140. {
  141. return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);
  142. }
  143. if ((err = dw_pcie_read(pci->dbi_base + reg, size, &val)))
  144. {
  145. LOG_E("Read DBI address error = %s", rt_strerror(err));
  146. }
  147. return val;
  148. }
  149. void dw_pcie_write_dbi(struct dw_pcie *pci, rt_uint32_t reg, rt_size_t size, rt_uint32_t val)
  150. {
  151. rt_err_t err;
  152. if (pci->ops->write_dbi)
  153. {
  154. pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
  155. return;
  156. }
  157. if ((err = dw_pcie_write(pci->dbi_base + reg, size, val)))
  158. {
  159. LOG_E("Write DBI address error = %s", rt_strerror(err));
  160. }
  161. }
  162. void dw_pcie_write_dbi2(struct dw_pcie *pci, rt_uint32_t reg, rt_size_t size, rt_uint32_t val)
  163. {
  164. rt_err_t err;
  165. if (pci->ops && pci->ops->write_dbi2)
  166. {
  167. pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
  168. return;
  169. }
  170. if ((err = dw_pcie_write(pci->dbi_base2 + reg, size, val)))
  171. {
  172. LOG_E("Write DBI2 address error = %s", rt_strerror(err));
  173. }
  174. }
  175. rt_uint32_t dw_pcie_readl_atu(struct dw_pcie *pci, rt_uint32_t reg)
  176. {
  177. rt_err_t err;
  178. rt_uint32_t val = 0;
  179. if (pci->ops->read_dbi)
  180. {
  181. return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);
  182. }
  183. if ((err = dw_pcie_read(pci->atu_base + reg, 4, &val)))
  184. {
  185. LOG_E("Read ATU address error = %s", rt_strerror(err));
  186. }
  187. return val;
  188. }
  189. void dw_pcie_writel_atu(struct dw_pcie *pci, rt_uint32_t reg, rt_uint32_t val)
  190. {
  191. rt_err_t err;
  192. if (pci->ops->write_dbi)
  193. {
  194. pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
  195. return;
  196. }
  197. if ((err = dw_pcie_write(pci->atu_base + reg, 4, val)))
  198. {
  199. LOG_E("Write ATU address error = %s", rt_strerror(err));
  200. }
  201. }
  202. static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, rt_uint8_t func_no,
  203. int index, int type, rt_uint64_t cpu_addr, rt_uint64_t pci_addr, rt_size_t size)
  204. {
  205. rt_uint64_t limit_addr = cpu_addr + size - 1;
  206. dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
  207. rt_lower_32_bits(cpu_addr));
  208. dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
  209. rt_upper_32_bits(cpu_addr));
  210. dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
  211. rt_lower_32_bits(limit_addr));
  212. dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
  213. rt_upper_32_bits(limit_addr));
  214. dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
  215. rt_lower_32_bits(pci_addr));
  216. dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
  217. rt_upper_32_bits(pci_addr));
  218. dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
  219. type | PCIE_ATU_FUNC_NUM(func_no));
  220. dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
  221. PCIE_ATU_ENABLE);
  222. /*
  223. * Make sure ATU enable takes effect before any subsequent config
  224. * and I/O accesses.
  225. */
  226. for (int retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; ++retries)
  227. {
  228. if (dw_pcie_readl_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2) & PCIE_ATU_ENABLE)
  229. {
  230. return;
  231. }
  232. rt_thread_mdelay(LINK_WAIT_IATU);
  233. }
  234. LOG_E("Outbound iATU is not being enabled");
  235. }
  236. static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, rt_uint8_t func_no,
  237. int index, int type, rt_uint64_t cpu_addr, rt_uint64_t pci_addr, rt_size_t size)
  238. {
  239. if (pci->ops->cpu_addr_fixup)
  240. {
  241. cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
  242. }
  243. if (pci->iatu_unroll_enabled & DWC_IATU_UNROLL_EN)
  244. {
  245. dw_pcie_prog_outbound_atu_unroll(pci, func_no,
  246. index, type, cpu_addr, pci_addr, size);
  247. return;
  248. }
  249. dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_OUTBOUND | index);
  250. dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE, rt_lower_32_bits(cpu_addr));
  251. dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE, rt_upper_32_bits(cpu_addr));
  252. dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT, rt_lower_32_bits(cpu_addr + size - 1));
  253. dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, rt_lower_32_bits(pci_addr));
  254. dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, rt_upper_32_bits(pci_addr));
  255. dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type | PCIE_ATU_FUNC_NUM(func_no));
  256. dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
  257. /*
  258. * Make sure ATU enable takes effect before any subsequent config
  259. * and I/O accesses.
  260. */
  261. for (int retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; ++retries)
  262. {
  263. if (dw_pcie_readl_dbi(pci, PCIE_ATU_CR2) & PCIE_ATU_ENABLE)
  264. {
  265. return;
  266. }
  267. rt_thread_mdelay(LINK_WAIT_IATU);
  268. }
  269. LOG_E("Outbound iATU is not being enabled");
  270. }
  271. void dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
  272. int index, int type, rt_uint64_t cpu_addr, rt_uint64_t pci_addr, rt_size_t size)
  273. {
  274. __dw_pcie_prog_outbound_atu(pci, 0, index, type, cpu_addr, pci_addr, size);
  275. }
  276. void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, rt_uint8_t func_no,
  277. int index, int type, rt_uint64_t cpu_addr, rt_uint64_t pci_addr, rt_size_t size)
  278. {
  279. __dw_pcie_prog_outbound_atu(pci, func_no, index, type, cpu_addr, pci_addr, size);
  280. }
  281. static rt_err_t dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci,
  282. rt_uint8_t func_no, int index, int bar, rt_uint64_t cpu_addr,
  283. enum dw_pcie_aspace_type aspace_type)
  284. {
  285. int type;
  286. dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
  287. rt_lower_32_bits(cpu_addr));
  288. dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
  289. rt_upper_32_bits(cpu_addr));
  290. switch (aspace_type)
  291. {
  292. case DW_PCIE_ASPACE_MEM:
  293. type = PCIE_ATU_TYPE_MEM;
  294. break;
  295. case DW_PCIE_ASPACE_IO:
  296. type = PCIE_ATU_TYPE_IO;
  297. break;
  298. default:
  299. return -RT_EINVAL;
  300. }
  301. dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
  302. type | PCIE_ATU_FUNC_NUM(func_no));
  303. dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
  304. PCIE_ATU_FUNC_NUM_MATCH_EN | PCIE_ATU_ENABLE |
  305. PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
  306. /*
  307. * Make sure ATU enable takes effect before any subsequent config
  308. * and I/O accesses.
  309. */
  310. for (int retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; ++retries)
  311. {
  312. if (dw_pcie_readl_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2) & PCIE_ATU_ENABLE)
  313. {
  314. return RT_EOK;
  315. }
  316. rt_thread_mdelay(LINK_WAIT_IATU);
  317. }
  318. LOG_E("Inbound iATU is not being enabled");
  319. return -RT_EBUSY;
  320. }
  321. rt_err_t dw_pcie_prog_inbound_atu(struct dw_pcie *pci,
  322. rt_uint8_t func_no, int index, int bar, rt_uint64_t cpu_addr,
  323. enum dw_pcie_aspace_type aspace_type)
  324. {
  325. int type;
  326. if (pci->iatu_unroll_enabled & DWC_IATU_UNROLL_EN)
  327. {
  328. return dw_pcie_prog_inbound_atu_unroll(pci, func_no,
  329. index, bar, cpu_addr, aspace_type);
  330. }
  331. dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | index);
  332. dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, rt_lower_32_bits(cpu_addr));
  333. dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, rt_upper_32_bits(cpu_addr));
  334. switch (aspace_type)
  335. {
  336. case DW_PCIE_ASPACE_MEM:
  337. type = PCIE_ATU_TYPE_MEM;
  338. break;
  339. case DW_PCIE_ASPACE_IO:
  340. type = PCIE_ATU_TYPE_IO;
  341. break;
  342. default:
  343. return -RT_EINVAL;
  344. }
  345. dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type | PCIE_ATU_FUNC_NUM(func_no));
  346. dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
  347. PCIE_ATU_FUNC_NUM_MATCH_EN | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
  348. /*
  349. * Make sure ATU enable takes effect before any subsequent config
  350. * and I/O accesses.
  351. */
  352. for (int retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; ++retries)
  353. {
  354. if (dw_pcie_readl_dbi(pci, PCIE_ATU_CR2) & PCIE_ATU_ENABLE)
  355. {
  356. return RT_EOK;
  357. }
  358. rt_thread_mdelay(LINK_WAIT_IATU);
  359. }
  360. LOG_E("Inbound iATU is not being enabled");
  361. return -RT_EBUSY;
  362. }
  363. void dw_pcie_disable_atu(struct dw_pcie *pci, int index, enum dw_pcie_region_type type)
  364. {
  365. rt_uint32_t region;
  366. switch (type)
  367. {
  368. case DW_PCIE_REGION_INBOUND:
  369. region = PCIE_ATU_REGION_INBOUND;
  370. break;
  371. case DW_PCIE_REGION_OUTBOUND:
  372. region = PCIE_ATU_REGION_OUTBOUND;
  373. break;
  374. default:
  375. return;
  376. }
  377. if (pci->iatu_unroll_enabled)
  378. {
  379. if (region == PCIE_ATU_REGION_INBOUND)
  380. {
  381. dw_pcie_writel_ib_unroll(pci, index,
  382. PCIE_ATU_UNR_REGION_CTRL2, ~(rt_uint32_t)PCIE_ATU_ENABLE);
  383. }
  384. else
  385. {
  386. dw_pcie_writel_ob_unroll(pci, index,
  387. PCIE_ATU_UNR_REGION_CTRL2, ~(rt_uint32_t)PCIE_ATU_ENABLE);
  388. }
  389. }
  390. else
  391. {
  392. dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
  393. dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(rt_uint32_t)PCIE_ATU_ENABLE);
  394. }
  395. }
  396. rt_err_t dw_pcie_wait_for_link(struct dw_pcie *pci)
  397. {
  398. /* Check if the link is up or not */
  399. for (int retries = 0; retries < LINK_WAIT_MAX_RETRIES; ++retries)
  400. {
  401. if (dw_pcie_link_up(pci))
  402. {
  403. LOG_I("%s: Link up", rt_dm_dev_get_name(pci->dev));
  404. return RT_EOK;
  405. }
  406. rt_hw_us_delay((LINK_WAIT_USLEEP_MIN + LINK_WAIT_USLEEP_MAX) >> 1);
  407. }
  408. LOG_I("PHY link never came up");
  409. return -RT_ETIMEOUT;
  410. }
  411. rt_bool_t dw_pcie_link_up(struct dw_pcie *pci)
  412. {
  413. rt_uint32_t val;
  414. if (pci->ops->link_up)
  415. {
  416. return pci->ops->link_up(pci);
  417. }
  418. val = HWREG32(pci->dbi_base + PCIE_PORT_DEBUG1);
  419. return (val & PCIE_PORT_DEBUG1_LINK_UP) && (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING));
  420. }
  421. void dw_pcie_upconfig_setup(struct dw_pcie *pci)
  422. {
  423. rt_uint32_t val;
  424. val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
  425. val |= PORT_MLTI_UPCFG_SUPPORT;
  426. dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
  427. }
  428. static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, rt_uint32_t link_gen)
  429. {
  430. rt_uint32_t cap, ctrl2, link_speed;
  431. rt_uint8_t offset = dw_pcie_find_capability(pci, PCIY_EXPRESS);
  432. cap = dw_pcie_readl_dbi(pci, offset + PCIER_LINK_CAP);
  433. ctrl2 = dw_pcie_readl_dbi(pci, offset + PCIER_LINK_CTL2);
  434. ctrl2 &= ~PCIEM_LNKCTL2_TLS;
  435. switch (link_gen)
  436. {
  437. case 1: link_speed = PCIEM_LNKCTL2_TLS_2_5GT; break;
  438. case 2: link_speed = PCIEM_LNKCTL2_TLS_5_0GT; break;
  439. case 3: link_speed = PCIEM_LNKCTL2_TLS_8_0GT; break;
  440. case 4: link_speed = PCIEM_LNKCTL2_TLS_16_0GT; break;
  441. default:
  442. /* Use hardware capability */
  443. link_speed = RT_FIELD_GET(PCIEM_LINK_CAP_MAX_SPEED, cap);
  444. ctrl2 &= ~PCIEM_LNKCTL2_HASD;
  445. break;
  446. }
  447. dw_pcie_writel_dbi(pci, offset + PCIER_LINK_CTL2, ctrl2 | link_speed);
  448. cap &= ~((rt_uint32_t)PCIEM_LINK_CAP_MAX_SPEED);
  449. dw_pcie_writel_dbi(pci, offset + PCIER_LINK_CAP, cap | link_speed);
  450. }
  451. void dw_pcie_setup(struct dw_pcie *pci)
  452. {
  453. rt_uint32_t val;
  454. struct rt_device *dev = pci->dev;
  455. if (pci->version >= 0x480a || (!pci->version && dw_pcie_iatu_unroll_enabled(pci)))
  456. {
  457. pci->iatu_unroll_enabled |= DWC_IATU_UNROLL_EN;
  458. if (!pci->atu_base)
  459. {
  460. pci->atu_base = rt_dm_dev_iomap_by_name(dev, "atu");
  461. }
  462. if (!pci->atu_base)
  463. {
  464. pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
  465. }
  466. }
  467. LOG_D("iATU unroll is %sabled", pci->iatu_unroll_enabled & DWC_IATU_UNROLL_EN ? "en" : "dis");
  468. if (pci->link_gen > 0)
  469. {
  470. dw_pcie_link_set_max_speed(pci, pci->link_gen);
  471. }
  472. /* Configure Gen1 N_FTS */
  473. if (pci->fts_number[0])
  474. {
  475. val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
  476. val &= ~(PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK);
  477. val |= PORT_AFR_N_FTS(pci->fts_number[0]);
  478. val |= PORT_AFR_CC_N_FTS(pci->fts_number[0]);
  479. dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
  480. }
  481. /* Configure Gen2+ N_FTS */
  482. if (pci->fts_number[1])
  483. {
  484. val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
  485. val &= ~PORT_LOGIC_N_FTS_MASK;
  486. val |= pci->fts_number[1];
  487. dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
  488. }
  489. val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
  490. val &= ~PORT_LINK_FAST_LINK_MODE;
  491. val |= PORT_LINK_DLL_LINK_EN;
  492. dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
  493. if (rt_dm_dev_prop_read_bool(dev, "snps,enable-cdm-check"))
  494. {
  495. val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
  496. val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS | PCIE_PL_CHK_REG_CHK_REG_START;
  497. dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
  498. }
  499. rt_dm_dev_prop_read_u32(dev, "num-lanes", &pci->num_lanes);
  500. if (!pci->num_lanes)
  501. {
  502. LOG_D("Using h/w default number of lanes");
  503. return;
  504. }
  505. /* Set the number of lanes */
  506. val &= ~PORT_LINK_FAST_LINK_MODE;
  507. val &= ~PORT_LINK_MODE_MASK;
  508. switch (pci->num_lanes)
  509. {
  510. case 1: val |= PORT_LINK_MODE_1_LANES; break;
  511. case 2: val |= PORT_LINK_MODE_2_LANES; break;
  512. case 4: val |= PORT_LINK_MODE_4_LANES; break;
  513. case 8: val |= PORT_LINK_MODE_8_LANES; break;
  514. default:
  515. LOG_E("Invail num-lanes = %d", pci->num_lanes);
  516. return;
  517. }
  518. dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
  519. /* Set link width speed control register */
  520. val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
  521. val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
  522. switch (pci->num_lanes)
  523. {
  524. case 1: val |= PORT_LOGIC_LINK_WIDTH_1_LANES; break;
  525. case 2: val |= PORT_LOGIC_LINK_WIDTH_2_LANES; break;
  526. case 4: val |= PORT_LOGIC_LINK_WIDTH_4_LANES; break;
  527. case 8: val |= PORT_LOGIC_LINK_WIDTH_8_LANES; break;
  528. }
  529. val |= pci->user_speed;
  530. dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
  531. }