/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-10-24     GuEe-GUI     first version
 */
  10. #include <rtthread.h>
  11. #include <rtservice.h>
  12. #define DBG_TAG "rtdm.pci"
  13. #define DBG_LVL DBG_INFO
  14. #include <rtdbg.h>
  15. #include <drivers/pci.h>
  16. #include <drivers/misc.h>
  17. #include <drivers/core/bus.h>
/* Take the raw HW spinlock inside @spinlock (no IRQ save/restore — callers
 * that need IRQ protection must use rt_spin_lock_irqsave() instead). */
rt_inline void spin_lock(struct rt_spinlock *spinlock)
{
    rt_hw_spin_lock(&spinlock->lock);
}
/* Release the raw HW spinlock taken by spin_lock(). */
rt_inline void spin_unlock(struct rt_spinlock *spinlock)
{
    rt_hw_spin_unlock(&spinlock->lock);
}
  26. rt_uint32_t rt_pci_domain(struct rt_pci_device *pdev)
  27. {
  28. struct rt_pci_host_bridge *host_bridge;
  29. if (!pdev)
  30. {
  31. return RT_UINT32_MAX;
  32. }
  33. if ((host_bridge = rt_pci_find_host_bridge(pdev->bus)))
  34. {
  35. return host_bridge->domain;
  36. }
  37. return RT_UINT32_MAX;
  38. }
  39. static rt_uint8_t pci_find_next_cap_ttl(struct rt_pci_bus *bus,
  40. rt_uint32_t devfn, rt_uint8_t pos, int cap, int *ttl)
  41. {
  42. rt_uint8_t ret = 0, id;
  43. rt_uint16_t ent;
  44. rt_pci_bus_read_config_u8(bus, devfn, pos, &pos);
  45. while ((*ttl)--)
  46. {
  47. if (pos < 0x40)
  48. {
  49. break;
  50. }
  51. pos &= ~3;
  52. rt_pci_bus_read_config_u16(bus, devfn, pos, &ent);
  53. id = ent & 0xff;
  54. if (id == 0xff)
  55. {
  56. break;
  57. }
  58. if (id == cap)
  59. {
  60. ret = pos;
  61. break;
  62. }
  63. pos = (ent >> 8);
  64. }
  65. return ret;
  66. }
  67. static rt_uint8_t pci_find_next_cap(struct rt_pci_bus *bus,
  68. rt_uint32_t devfn, rt_uint8_t pos, int cap)
  69. {
  70. int ttl = RT_PCI_FIND_CAP_TTL;
  71. return pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
  72. }
  73. static rt_uint8_t pci_bus_find_cap_start(struct rt_pci_bus *bus,
  74. rt_uint32_t devfn, rt_uint8_t hdr_type)
  75. {
  76. rt_uint8_t res = 0;
  77. rt_uint16_t status;
  78. rt_pci_bus_read_config_u16(bus, devfn, PCIR_STATUS, &status);
  79. if (status & PCIM_STATUS_CAPPRESENT)
  80. {
  81. switch (hdr_type)
  82. {
  83. case PCIM_HDRTYPE_NORMAL:
  84. case PCIM_HDRTYPE_BRIDGE:
  85. res = PCIR_CAP_PTR;
  86. break;
  87. case PCIM_HDRTYPE_CARDBUS:
  88. res = PCIR_CAP_PTR_2;
  89. break;
  90. }
  91. }
  92. return res;
  93. }
  94. rt_uint8_t rt_pci_bus_find_capability(struct rt_pci_bus *bus, rt_uint32_t devfn, int cap)
  95. {
  96. rt_uint8_t hdr_type, ret = RT_UINT8_MAX;
  97. if (bus)
  98. {
  99. rt_pci_bus_read_config_u8(bus, devfn, PCIR_HDRTYPE, &hdr_type);
  100. ret = pci_bus_find_cap_start(bus, devfn, hdr_type & PCIM_HDRTYPE);
  101. if (ret)
  102. {
  103. ret = pci_find_next_cap(bus, devfn, ret, cap);
  104. }
  105. }
  106. return ret;
  107. }
  108. rt_uint8_t rt_pci_find_capability(struct rt_pci_device *pdev, int cap)
  109. {
  110. rt_uint8_t res = RT_UINT8_MAX;
  111. if (pdev)
  112. {
  113. res = pci_bus_find_cap_start(pdev->bus, pdev->devfn, pdev->hdr_type);
  114. if (res)
  115. {
  116. res = pci_find_next_cap(pdev->bus, pdev->devfn, res, cap);
  117. }
  118. }
  119. return res;
  120. }
  121. rt_uint8_t rt_pci_find_next_capability(struct rt_pci_device *pdev, rt_uint8_t pos, int cap)
  122. {
  123. rt_uint8_t res = RT_UINT8_MAX;
  124. if (pdev)
  125. {
  126. res = pci_find_next_cap(pdev->bus, pdev->devfn, pos + PCICAP_NEXTPTR, cap);
  127. }
  128. return res;
  129. }
  130. rt_uint16_t rt_pci_find_ext_capability(struct rt_pci_device *pdev, int cap)
  131. {
  132. return rt_pci_find_ext_next_capability(pdev, 0, cap);
  133. }
  134. rt_uint16_t rt_pci_find_ext_next_capability(struct rt_pci_device *pdev, rt_uint16_t pos, int cap)
  135. {
  136. int ttl;
  137. rt_uint32_t header;
  138. rt_uint16_t start = pos;
  139. /* minimum 8 bytes per capability */
  140. ttl = ((PCIE_REGMAX + 1) - (PCI_REGMAX + 1)) / 8;
  141. if (pdev->cfg_size <= PCI_REGMAX + 1)
  142. {
  143. return 0;
  144. }
  145. if (!pos)
  146. {
  147. pos = PCI_REGMAX + 1;
  148. }
  149. if (rt_pci_read_config_u32(pdev, pos, &header))
  150. {
  151. return 0;
  152. }
  153. /*
  154. * If we have no capabilities, this is indicated by cap ID,
  155. * cap version and next pointer all being 0.
  156. */
  157. if (header == 0)
  158. {
  159. return 0;
  160. }
  161. while (ttl-- > 0)
  162. {
  163. if (PCI_EXTCAP_ID(header) == cap && pos != start)
  164. {
  165. return pos;
  166. }
  167. pos = PCI_EXTCAP_NEXTPTR(header);
  168. if (pos < PCI_REGMAX + 1)
  169. {
  170. break;
  171. }
  172. if (rt_pci_read_config_u32(pdev, pos, &header))
  173. {
  174. break;
  175. }
  176. }
  177. return 0;
  178. }
  179. static void pci_set_master(struct rt_pci_device *pdev, rt_bool_t enable)
  180. {
  181. rt_uint16_t old_cmd, cmd;
  182. rt_pci_read_config_u16(pdev, PCIR_COMMAND, &old_cmd);
  183. if (enable)
  184. {
  185. cmd = old_cmd | PCIM_CMD_BUSMASTEREN;
  186. }
  187. else
  188. {
  189. cmd = old_cmd & ~PCIM_CMD_BUSMASTEREN;
  190. }
  191. if (cmd != old_cmd)
  192. {
  193. rt_pci_write_config_u16(pdev, PCIR_COMMAND, cmd);
  194. }
  195. pdev->busmaster = !!enable;
  196. }
  197. void rt_pci_set_master(struct rt_pci_device *pdev)
  198. {
  199. if (pdev)
  200. {
  201. pci_set_master(pdev, RT_TRUE);
  202. }
  203. }
  204. void rt_pci_clear_master(struct rt_pci_device *pdev)
  205. {
  206. if (pdev)
  207. {
  208. pci_set_master(pdev, RT_FALSE);
  209. }
  210. }
  211. void rt_pci_intx(struct rt_pci_device *pdev, rt_bool_t enable)
  212. {
  213. rt_uint16_t pci_command, new;
  214. if (!pdev)
  215. {
  216. return;
  217. }
  218. rt_pci_read_config_u16(pdev, PCIR_COMMAND, &pci_command);
  219. if (enable)
  220. {
  221. new = pci_command & ~PCIM_CMD_INTxDIS;
  222. }
  223. else
  224. {
  225. new = pci_command | PCIM_CMD_INTxDIS;
  226. }
  227. if (new != pci_command)
  228. {
  229. rt_pci_write_config_u16(pdev, PCIR_COMMAND, new);
  230. }
  231. }
/*
 * Atomically mask or unmask a device's legacy INTx line, but only when the
 * pending-interrupt state read back from STATUS agrees with the caller's
 * intent.
 *
 * Holds the global rt_pci_lock with IRQs disabled so the read-modify-write
 * of COMMAND cannot race other config accesses.
 *
 * @return RT_TRUE when the state matched and COMMAND was (possibly) updated,
 *         RT_FALSE when nothing was changed.
 */
static rt_bool_t pci_check_and_set_intx_mask(struct rt_pci_device *pdev, rt_bool_t mask)
{
    rt_ubase_t level;
    rt_bool_t irq_pending;
    rt_bool_t res = RT_TRUE;
    rt_uint16_t origcmd, newcmd;
    rt_uint32_t cmd_status_dword;
    struct rt_pci_bus *bus = pdev->bus;

    level = rt_spin_lock_irqsave(&rt_pci_lock);

    /* One 32-bit read covers COMMAND (low 16 bits) and STATUS (high 16). */
    bus->ops->read(bus, pdev->devfn, PCIR_COMMAND, 4, &cmd_status_dword);

    irq_pending = (cmd_status_dword >> 16) & PCIM_STATUS_INTxSTATE;

    /*
     * Check interrupt status register to see whether our device
     * triggered the interrupt (when masking) or the next IRQ is
     * already pending (when unmasking).
     */
    if (mask != irq_pending)
    {
        res = RT_FALSE;
    }
    else
    {
        /* Low half of the dword is the COMMAND register. */
        origcmd = cmd_status_dword;
        newcmd = origcmd & ~PCIM_CMD_INTxDIS;

        if (mask)
        {
            newcmd |= PCIM_CMD_INTxDIS;
        }

        if (newcmd != origcmd)
        {
            /* Write back only the 16-bit COMMAND register. */
            bus->ops->write(bus, pdev->devfn, PCIR_COMMAND, 2, newcmd);
        }
    }

    rt_spin_unlock_irqrestore(&rt_pci_lock, level);

    return res;
}
  268. rt_bool_t rt_pci_check_and_mask_intx(struct rt_pci_device *pdev)
  269. {
  270. rt_bool_t res = RT_FALSE;
  271. if (pdev)
  272. {
  273. res = pci_check_and_set_intx_mask(pdev, RT_TRUE);
  274. }
  275. return res;
  276. }
  277. rt_bool_t rt_pci_check_and_unmask_intx(struct rt_pci_device *pdev)
  278. {
  279. rt_bool_t res = RT_FALSE;
  280. if (pdev)
  281. {
  282. res = pci_check_and_set_intx_mask(pdev, RT_FALSE);
  283. }
  284. return res;
  285. }
  286. void rt_pci_irq_mask(struct rt_pci_device *pdev)
  287. {
  288. if (pdev)
  289. {
  290. rt_bool_t unused;
  291. struct rt_pic_irq *pirq;
  292. rt_pci_intx(pdev, RT_FALSE);
  293. pirq = rt_pic_find_pirq(pdev->intx_pic, pdev->irq);
  294. RT_ASSERT(pirq != RT_NULL);
  295. rt_hw_spin_lock(&pirq->rw_lock.lock);
  296. unused = rt_list_isempty(&pirq->isr.list);
  297. rt_hw_spin_unlock(&pirq->rw_lock.lock);
  298. if (unused)
  299. {
  300. rt_hw_interrupt_mask(pdev->irq);
  301. }
  302. }
  303. }
  304. void rt_pci_irq_unmask(struct rt_pci_device *pdev)
  305. {
  306. if (pdev)
  307. {
  308. rt_hw_interrupt_umask(pdev->irq);
  309. rt_pci_intx(pdev, RT_TRUE);
  310. }
  311. }
  312. struct rt_pci_bus *rt_pci_find_root_bus(struct rt_pci_bus *bus)
  313. {
  314. if (!bus)
  315. {
  316. return RT_NULL;
  317. }
  318. while (bus->parent)
  319. {
  320. bus = bus->parent;
  321. }
  322. return bus;
  323. }
  324. struct rt_pci_host_bridge *rt_pci_find_host_bridge(struct rt_pci_bus *bus)
  325. {
  326. if (!bus)
  327. {
  328. return RT_NULL;
  329. }
  330. if ((bus = rt_pci_find_root_bus(bus)))
  331. {
  332. return rt_container_of(bus->host_bridge, struct rt_pci_host_bridge, parent);
  333. }
  334. return RT_NULL;
  335. }
  336. rt_uint8_t rt_pci_irq_intx(struct rt_pci_device *pdev, rt_uint8_t pin)
  337. {
  338. int slot = 0;
  339. if (!pdev->ari_enabled)
  340. {
  341. slot = RT_PCI_SLOT(pdev->devfn);
  342. }
  343. return (((pin - 1) + slot) % 4) + 1;
  344. }
  345. rt_uint8_t rt_pci_irq_slot(struct rt_pci_device *pdev, rt_uint8_t *pinp)
  346. {
  347. rt_uint8_t pin = *pinp;
  348. while (!rt_pci_is_root_bus(pdev->bus))
  349. {
  350. pin = rt_pci_irq_intx(pdev, pin);
  351. pdev = pdev->bus->self;
  352. }
  353. *pinp = pin;
  354. return RT_PCI_SLOT(pdev->devfn);
  355. }
  356. rt_err_t rt_pci_region_setup(struct rt_pci_host_bridge *host_bridge)
  357. {
  358. rt_err_t err = host_bridge->bus_regions_nr == 0 ? -RT_EEMPTY : RT_EOK;
  359. for (int i = 0; i < host_bridge->bus_regions_nr; ++i)
  360. {
  361. struct rt_pci_bus_region *region = &host_bridge->bus_regions[i];
  362. /*
  363. * Avoid allocating PCI resources from address 0 -- this is illegal
  364. * according to PCI 2.1 and moreover. Use a reasonable starting value of
  365. * 0x1000 instead if the bus start address is below 0x1000.
  366. */
  367. region->bus_start = rt_max_t(rt_size_t, 0x1000, region->phy_addr);
  368. LOG_I("Bus %s region(%d):",
  369. region->flags == PCI_BUS_REGION_F_MEM ? "Memory" :
  370. (region->flags == PCI_BUS_REGION_F_PREFETCH ? "Prefetchable Mem" :
  371. (region->flags == PCI_BUS_REGION_F_IO ? "I/O" : "Unknown")), i);
  372. LOG_I(" cpu: [%p, %p]", region->cpu_addr, (region->cpu_addr + region->size - 1));
  373. LOG_I(" physical: [%p, %p]", region->phy_addr, (region->phy_addr + region->size - 1));
  374. }
  375. return err;
  376. }
/*
 * Carve a bus-address range of @size bytes out of a host-bridge region whose
 * flags match @flags.
 *
 * The candidate address is the region's bus_start rounded up to the next
 * multiple of @size (assumes @size is a power of two — BAR sizing always
 * yields powers of two; TODO confirm for the ROM path).
 *
 * @param out_addr receives the allocated bus address on success.
 * @param mem64    RT_TRUE: only accept addresses with non-zero upper 32 bits;
 *                 RT_FALSE: only accept sub-4G addresses. If no region
 *                 satisfies a 64-bit request, the allocation is retried in
 *                 32-bit mode.
 *
 * @return the region the address came from, or RT_NULL on failure.
 */
struct rt_pci_bus_region *rt_pci_region_alloc(struct rt_pci_host_bridge *host_bridge,
void **out_addr, rt_size_t size, rt_ubase_t flags, rt_bool_t mem64)
{
    struct rt_pci_bus_region *bus_region, *region = RT_NULL;

    bus_region = &host_bridge->bus_regions[0];

    for (int i = 0; i < host_bridge->bus_regions_nr; ++i, ++bus_region)
    {
        if (bus_region->flags == flags && bus_region->size > 0)
        {
            void *addr;

            region = bus_region;
            /* Round bus_start up to a multiple of size (power-of-two trick). */
            addr = (void *)(((region->bus_start - 1) | (size - 1)) + 1);

            /* Does the aligned range still fit inside this region? */
            if ((rt_uint64_t)addr - region->phy_addr + size <= region->size)
            {
                rt_bool_t addr64 = !!rt_upper_32_bits((rt_ubase_t)addr);

                if (mem64)
                {
                    if (!addr64)
                    {
                        region = RT_NULL;
                        /* Try again */
                        continue;
                    }
                }
                else if (addr64)
                {
                    region = RT_NULL;
                    /* Try again */
                    continue;
                }

                /* Advance the allocation cursor past the new range. */
                region->bus_start = ((rt_uint64_t)addr + size);
                *out_addr = addr;
            }

            /*
             * NOTE(review): when the aligned range does not fit we still break
             * and return this region WITHOUT writing *out_addr — confirm
             * callers tolerate that (they appear to pre-initialize addr).
             */
            break;
        }
    }

    if (!region && mem64)
    {
        /* Retry */
        region = rt_pci_region_alloc(host_bridge, out_addr, size, flags, RT_FALSE);
    }

    return region;
}
/*
 * Size and assign the BARs (and expansion ROM) of @pdev out of the address
 * regions owned by @host_bridge, then program COMMAND, cache-line size and
 * latency timer.
 *
 * BAR sizing follows the standard probe: write all-ones, read back, derive
 * the size from the read-only low bits.
 *
 * @return RT_EOK, or -RT_ERROR when any BAR could not be placed (remaining
 *         BARs are still processed).
 */
rt_err_t rt_pci_device_alloc_resource(struct rt_pci_host_bridge *host_bridge,
struct rt_pci_device *pdev)
{
    rt_err_t err = RT_EOK;
    rt_size_t size;
    rt_ubase_t addr = 0;
    rt_uint32_t cfg;
    rt_size_t bars_nr;
    rt_uint8_t hdr_type;
    rt_bool_t prefetch = RT_FALSE;
    rt_uint16_t class, command = 0;

    /* Does the bridge expose any prefetchable window at all? */
    for (int i = 0; i < host_bridge->bus_regions_nr; ++i)
    {
        if (host_bridge->bus_regions[i].flags == PCI_BUS_REGION_F_PREFETCH)
        {
            prefetch = RT_TRUE;
            break;
        }
    }

    /* Start from the current COMMAND with decoding off, bus mastering on. */
    rt_pci_read_config_u16(pdev, PCIR_COMMAND, &command);
    command = (command & ~(PCIM_CMD_PORTEN | PCIM_CMD_MEMEN)) | PCIM_CMD_BUSMASTEREN;

    rt_pci_read_config_u8(pdev, PCIR_HDRTYPE, &hdr_type);

    if (pdev->hdr_type != hdr_type)
    {
        LOG_W("%s may not initialized", rt_dm_dev_get_name(&pdev->parent));
    }

    /* Number of BARs depends on the header layout. */
    switch (hdr_type)
    {
    case PCIM_HDRTYPE_NORMAL:
        bars_nr = PCI_STD_NUM_BARS;
        break;

    case PCIM_HDRTYPE_BRIDGE:
        bars_nr = 2;
        break;

    case PCIM_HDRTYPE_CARDBUS:
        bars_nr = 0;
        break;

    default:
        bars_nr = 0;
        break;
    }

    for (int i = 0; i < bars_nr; ++i)
    {
        rt_ubase_t flags;
        rt_ubase_t bar_base;
        rt_bool_t mem64 = RT_FALSE;
        struct rt_pci_bus_region *region;

        cfg = 0;
        bar_base = PCIR_BAR(i);

        /* Size probe: write all-ones, read back the read-only mask. */
        rt_pci_write_config_u32(pdev, bar_base, RT_UINT32_MAX);
        rt_pci_read_config_u32(pdev, bar_base, &cfg);

        if (!cfg)
        {
            /* BAR not implemented. */
            continue;
        }
        else if (cfg == RT_UINT32_MAX)
        {
            /* Probe failed (device absent?); clear and skip. */
            rt_pci_write_config_u32(pdev, bar_base, 0UL);
            continue;
        }

        if (cfg & PCIM_BAR_SPACE)
        {
            /* I/O BAR: size is the lowest set bit of the masked value. */
            mem64 = RT_FALSE;
            flags = PCI_BUS_REGION_F_IO;

            size = cfg & PCIM_BAR_IO_MASK;
            size &= ~(size - 1);
        }
        else
        {
            /* memory */
            if ((cfg & PCIM_BAR_MEM_TYPE_MASK) == PCIM_BAR_MEM_TYPE_64)
            {
                /* 64bits */
                rt_uint32_t cfg64;
                rt_uint64_t bar64;

                mem64 = RT_TRUE;

                /* Probe the upper half of the 64-bit BAR pair as well. */
                rt_pci_write_config_u32(pdev, bar_base + sizeof(rt_uint32_t), RT_UINT32_MAX);
                rt_pci_read_config_u32(pdev, bar_base + sizeof(rt_uint32_t), &cfg64);

                bar64 = ((rt_uint64_t)cfg64 << 32) | cfg;
                size = ~(bar64 & PCIM_BAR_MEM_MASK) + 1;
            }
            else
            {
                /* 32bits */
                mem64 = RT_FALSE;
                size = (rt_uint32_t)(~(cfg & PCIM_BAR_MEM_MASK) + 1);
            }

            /* Prefer a prefetchable window when both BAR and bridge support it. */
            if (prefetch && (cfg & PCIM_BAR_MEM_PREFETCH))
            {
                flags = PCI_BUS_REGION_F_PREFETCH;
            }
            else
            {
                flags = PCI_BUS_REGION_F_MEM;
            }
        }

        region = rt_pci_region_alloc(host_bridge, (void **)&addr, size, flags, mem64);

        if (region)
        {
            /* Program the (lower half of the) BAR with the assigned address. */
            rt_pci_write_config_u32(pdev, bar_base, addr);

            if (mem64)
            {
                bar_base += sizeof(rt_uint32_t);
            #ifdef RT_PCI_SYS_64BIT
                rt_pci_write_config_u32(pdev, bar_base, (rt_uint32_t)(addr >> 32));
            #else
                /*
                 * If we are a 64-bit decoder then increment to the upper 32 bits
                 * of the bar and force it to locate in the lower 4GB of memory.
                 */
                rt_pci_write_config_u32(pdev, bar_base, 0UL);
            #endif
            }

            pdev->resource[i].size = size;
            pdev->resource[i].base = region->cpu_addr + (addr - region->phy_addr);
            pdev->resource[i].flags = flags;

            if (mem64)
            {
                /* A 64-bit BAR consumes the following slot too. */
                ++i;
                pdev->resource[i].flags = PCI_BUS_REGION_F_NONE;
            }
        }
        else
        {
            err = -RT_ERROR;
            LOG_W("%s alloc bar(%d) address fail", rt_dm_dev_get_name(&pdev->parent), i);
        }

        /* Enable the matching decode type once everything is programmed. */
        command |= (cfg & PCIM_BAR_SPACE) ? PCIM_CMD_PORTEN : PCIM_CMD_MEMEN;
    }

    /* Expansion ROM BAR (normal and bridge headers only). */
    if (hdr_type == PCIM_HDRTYPE_NORMAL || hdr_type == PCIM_HDRTYPE_BRIDGE)
    {
        int rom_addr = (hdr_type == PCIM_HDRTYPE_NORMAL) ? PCIR_BIOS : PCIR_BIOS_1;

        /* Size probe with the enable bit (bit 0) kept clear. */
        rt_pci_write_config_u32(pdev, rom_addr, 0xfffffffe);
        rt_pci_read_config_u32(pdev, rom_addr, &cfg);

        if (cfg)
        {
            size = -(cfg & ~1);

            if (rt_pci_region_alloc(host_bridge, (void **)&addr, size, PCI_BUS_REGION_F_MEM, RT_FALSE))
            {
                rt_pci_write_config_u32(pdev, rom_addr, addr);
            }

            command |= PCIM_CMD_MEMEN;
        }
    }

    /* VGA devices additionally need I/O decode for legacy ports. */
    rt_pci_read_config_u16(pdev, PCIR_SUBCLASS, &class);

    if (class == PCIS_DISPLAY_VGA)
    {
        command |= PCIM_CMD_PORTEN;
    }

    rt_pci_write_config_u16(pdev, PCIR_COMMAND, command);
    rt_pci_write_config_u8(pdev, PCIR_CACHELNSZ, RT_PCI_CACHE_LINE_SIZE);
    rt_pci_write_config_u8(pdev, PCIR_LATTIMER, 0x80);

    return err;
}
  574. struct rt_pci_bus_resource *rt_pci_find_bar(struct rt_pci_device* pdev,rt_ubase_t flags,int index)
  575. {
  576. for (int i = 0; i < RT_PCI_BAR_NR_MAX; i++)
  577. {
  578. if (pdev->resource[i].flags == flags)
  579. {
  580. index--;
  581. if (index == 0)
  582. return &pdev->resource[i];
  583. }
  584. }
  585. return RT_NULL;
  586. }
/*
 * Depth-first enumeration of every PCI device under @bus (inclusive of all
 * child buses), invoking @callback on each device. Enumeration stops early
 * when @callback returns RT_TRUE.
 *
 * Locking pattern: each bus's spinlock protects its device/children lists.
 * The callback is always invoked with the lock DROPPED; a device about to be
 * handed to the callback is pinned by bumping parent.ref_count first, and
 * unpinned after the callback returns. Each list iteration therefore runs
 * one device "behind" (last_pdev) so the current list node stays valid.
 */
void rt_pci_enum_device(struct rt_pci_bus *bus,
rt_bool_t (callback(struct rt_pci_device *, void *)), void *data)
{
    rt_bool_t is_end = RT_FALSE;
    struct rt_spinlock *lock;
    struct rt_pci_bus *parent;
    struct rt_pci_device *pdev, *last_pdev = RT_NULL;

    /* Walk tree */
    while (bus && !is_end)
    {
        /* Goto bottom */
        for (;;)
        {
            lock = &bus->lock;

            spin_lock(lock);

            if (rt_list_isempty(&bus->children_nodes))
            {
                /* Leaf bus: keep the lock held for the device scan below. */
                parent = bus->parent;
                break;
            }

            /*
             * NOTE(review): this takes rt_list_entry of the list HEAD
             * (&bus->children_nodes) rather than its first element
             * (bus->children_nodes.next) — confirm against upstream; as
             * written it looks like it resolves to the wrong container.
             */
            bus = rt_list_entry(&bus->children_nodes, struct rt_pci_bus, list);

            spin_unlock(lock);
        }

        /* Scan devices on the leaf bus, calling back one device behind. */
        rt_list_for_each_entry(pdev, &bus->devices_nodes, list)
        {
            if (last_pdev)
            {
                /* Drop the lock while user code runs. */
                spin_unlock(lock);

                if (callback(last_pdev, data))
                {
                    spin_lock(lock);

                    --last_pdev->parent.ref_count;

                    is_end = RT_TRUE;

                    break;
                }

                spin_lock(lock);

                --last_pdev->parent.ref_count;
            }

            /* Pin the current device before we step past it. */
            ++pdev->parent.ref_count;
            last_pdev = pdev;
        }

        /* Flush the trailing pinned device. */
        if (!is_end && last_pdev)
        {
            spin_unlock(lock);

            if (callback(last_pdev, data))
            {
                is_end = RT_TRUE;
            }

            spin_lock(lock);

            --last_pdev->parent.ref_count;
        }
        last_pdev = RT_NULL;

        spin_unlock(lock);

        /* Up a level or goto next */
        while (!is_end)
        {
            lock = &bus->lock;

            if (!parent)
            {
                /* Root bus, is end */
                bus = RT_NULL;

                break;
            }

            spin_lock(lock);

            if (bus->list.next != &parent->children_nodes)
            {
                /* Has next sibling */
                bus = rt_list_entry(bus->list.next, struct rt_pci_bus, list);

                spin_unlock(lock);

                break;
            }

            /* All device on this buss' parent */
            rt_list_for_each_entry(pdev, &parent->devices_nodes, list)
            {
                if (last_pdev)
                {
                    spin_unlock(lock);

                    if (callback(last_pdev, data))
                    {
                        spin_lock(lock);

                        --last_pdev->parent.ref_count;

                        is_end = RT_TRUE;

                        break;
                    }

                    spin_lock(lock);

                    --last_pdev->parent.ref_count;
                }

                ++pdev->parent.ref_count;
                last_pdev = pdev;
            }

            if (!is_end && last_pdev)
            {
                spin_unlock(lock);

                if (callback(last_pdev, data))
                {
                    is_end = RT_TRUE;
                }

                spin_lock(lock);

                --last_pdev->parent.ref_count;
            }
            last_pdev = RT_NULL;

            /* Continue one level up. */
            bus = parent;
            parent = parent->parent;

            spin_unlock(lock);
        }
    }
}
  694. const struct rt_pci_device_id *rt_pci_match_id(struct rt_pci_device *pdev,
  695. const struct rt_pci_device_id *id)
  696. {
  697. if ((id->vendor == PCI_ANY_ID || id->vendor == pdev->vendor) &&
  698. (id->device == PCI_ANY_ID || id->device == pdev->device) &&
  699. (id->subsystem_vendor == PCI_ANY_ID || id->subsystem_vendor == pdev->subsystem_vendor) &&
  700. (id->subsystem_device == PCI_ANY_ID || id->subsystem_device == pdev->subsystem_device) &&
  701. !((id->class ^ pdev->class) & id->class_mask))
  702. {
  703. return id;
  704. }
  705. return RT_NULL;
  706. }
  707. const struct rt_pci_device_id *rt_pci_match_ids(struct rt_pci_device *pdev,
  708. const struct rt_pci_device_id *ids)
  709. {
  710. while (ids->vendor || ids->subsystem_vendor || ids->class_mask)
  711. {
  712. if (rt_pci_match_id(pdev, ids))
  713. {
  714. return ids;
  715. }
  716. ++ids;
  717. }
  718. return RT_NULL;
  719. }
/* Forward declaration; the bus operations are filled in near the bottom of this file. */
static struct rt_bus pci_bus;
  721. rt_err_t rt_pci_driver_register(struct rt_pci_driver *pdrv)
  722. {
  723. RT_ASSERT(pdrv != RT_NULL);
  724. pdrv->parent.bus = &pci_bus;
  725. #if RT_NAME_MAX > 0
  726. rt_strcpy(pdrv->parent.parent.name, pdrv->name);
  727. #else
  728. pdrv->parent.parent.name = pdrv->name;
  729. #endif
  730. return rt_driver_register(&pdrv->parent);
  731. }
  732. rt_err_t rt_pci_device_register(struct rt_pci_device *pdev)
  733. {
  734. rt_err_t err;
  735. RT_ASSERT(pdev != RT_NULL);
  736. if ((err = rt_bus_add_device(&pci_bus, &pdev->parent)))
  737. {
  738. return err;
  739. }
  740. return RT_EOK;
  741. }
  742. static rt_bool_t pci_match(rt_driver_t drv, rt_device_t dev)
  743. {
  744. rt_bool_t match = RT_FALSE;
  745. struct rt_pci_driver *pdrv = rt_container_of(drv, struct rt_pci_driver, parent);
  746. struct rt_pci_device *pdev = rt_container_of(dev, struct rt_pci_device, parent);
  747. if (pdrv->name && pdev->name)
  748. {
  749. match = rt_strcmp(pdrv->name, pdev->name) ? RT_FALSE : RT_TRUE;
  750. }
  751. if (!match)
  752. {
  753. pdev->id = rt_pci_match_ids(pdev, pdrv->ids);
  754. match = pdev->id ? RT_TRUE : RT_FALSE;
  755. }
  756. return match;
  757. }
  758. static rt_err_t pci_probe(rt_device_t dev)
  759. {
  760. rt_err_t err = RT_EOK;
  761. struct rt_pci_driver *pdrv = rt_container_of(dev->drv, struct rt_pci_driver, parent);
  762. struct rt_pci_device *pdev = rt_container_of(dev, struct rt_pci_device, parent);
  763. rt_pci_assign_irq(pdev);
  764. rt_pci_enable_wake(pdev, RT_PCI_D0, RT_TRUE);
  765. err = pdrv->probe(pdev);
  766. if (err)
  767. {
  768. rt_pci_enable_wake(pdev, RT_PCI_D0, RT_FALSE);
  769. }
  770. return err;
  771. }
  772. static rt_err_t pci_remove(rt_device_t dev)
  773. {
  774. rt_err_t err = RT_EOK;
  775. struct rt_pci_bus *bus;
  776. struct rt_pci_driver *pdrv = rt_container_of(dev->drv, struct rt_pci_driver, parent);
  777. struct rt_pci_device *pdev = rt_container_of(dev, struct rt_pci_device, parent);
  778. if (pdrv && pdrv->remove)
  779. {
  780. if ((err = pdrv->remove(pdev)))
  781. {
  782. return err;
  783. }
  784. }
  785. rt_pci_enable_wake(pdev, RT_PCI_D0, RT_FALSE);
  786. bus = pdev->bus;
  787. rt_pci_device_remove(pdev);
  788. /* Just try to remove */
  789. rt_pci_bus_remove(bus);
  790. return err;
  791. }
  792. static rt_err_t pci_shutdown(rt_device_t dev)
  793. {
  794. struct rt_pci_bus *bus;
  795. struct rt_pci_driver *pdrv = rt_container_of(dev->drv, struct rt_pci_driver, parent);
  796. struct rt_pci_device *pdev = rt_container_of(dev, struct rt_pci_device, parent);
  797. if (pdrv && pdrv->shutdown)
  798. {
  799. pdrv->shutdown(pdev);
  800. }
  801. rt_pci_enable_wake(pdev, RT_PCI_D0, RT_FALSE);
  802. bus = pdev->bus;
  803. rt_pci_device_remove(pdev);
  804. /* Just try to remove */
  805. rt_pci_bus_remove(bus);
  806. return RT_EOK;
  807. }
/* The "pci" bus type registered with the RT-Thread driver model. */
static struct rt_bus pci_bus =
{
    .name = "pci",
    .match = pci_match,
    .probe = pci_probe,
    .remove = pci_remove,
    .shutdown = pci_shutdown,
};
/* Register the "pci" bus with the driver core during the INIT_CORE stage. */
static int pci_bus_init(void)
{
    rt_bus_register(&pci_bus);

    return 0;
}
INIT_CORE_EXPORT(pci_bus_init);