/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-10-24     GuEe-GUI     first version
 */

#include <rtthread.h>
#include <rtservice.h>

#define DBG_TAG "rtdm.pci"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <drivers/pci.h>
#include <drivers/misc.h>
#include <drivers/core/bus.h>

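/*
 * Raw spinlock helpers used to protect the bus topology lists below;
 * they take the low-level lock directly, without IRQ save/restore.
 */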
rt_inline void spin_lock(struct rt_spinlock *spinlock)
{
    rt_hw_spin_lock(&spinlock->lock);
}

rt_inline void spin_unlock(struct rt_spinlock *spinlock)
{
    rt_hw_spin_unlock(&spinlock->lock);
}

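/*
 * Return the PCI domain (segment) number of the host bridge that owns
 * "pdev", or RT_UINT32_MAX if the device or its bridge is unknown.
 */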
rt_uint32_t rt_pci_domain(struct rt_pci_device *pdev)
{
    struct rt_pci_host_bridge *host_bridge;

    if (!pdev)
    {
        return RT_UINT32_MAX;
    }

    if ((host_bridge = rt_pci_find_host_bridge(pdev->bus)))
    {
        return host_bridge->domain;
    }

    return RT_UINT32_MAX;
}

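/*
 * Walk the legacy capability list starting at "pos", looking for "cap".
 * "ttl" bounds the number of hops so that a malformed, looping list
 * cannot hang the scan; the list lives above offset 0x40, and each
 * entry packs the capability ID and next pointer into one 16-bit read.
 */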
static rt_uint8_t pci_find_next_cap_ttl(struct rt_pci_bus *bus,
        rt_uint32_t devfn, rt_uint8_t pos, int cap, int *ttl)
{
    rt_uint8_t ret = 0, id;
    rt_uint16_t ent;

    rt_pci_bus_read_config_u8(bus, devfn, pos, &pos);

    while ((*ttl)--)
    {
        if (pos < 0x40)
        {
            break;
        }

        pos &= ~3;
        rt_pci_bus_read_config_u16(bus, devfn, pos, &ent);

        id = ent & 0xff;

        if (id == 0xff)
        {
            break;
        }

        if (id == cap)
        {
            ret = pos;
            break;
        }

        pos = (ent >> 8);
    }

    return ret;
}

static rt_uint8_t pci_find_next_cap(struct rt_pci_bus *bus,
        rt_uint32_t devfn, rt_uint8_t pos, int cap)
{
    int ttl = RT_PCI_FIND_CAP_TTL;

    return pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

static rt_uint8_t pci_bus_find_cap_start(struct rt_pci_bus *bus,
        rt_uint32_t devfn, rt_uint8_t hdr_type)
{
    rt_uint8_t res = 0;
    rt_uint16_t status;

    rt_pci_bus_read_config_u16(bus, devfn, PCIR_STATUS, &status);

    if (status & PCIM_STATUS_CAPPRESENT)
    {
        switch (hdr_type)
        {
        case PCIM_HDRTYPE_NORMAL:
        case PCIM_HDRTYPE_BRIDGE:
            res = PCIR_CAP_PTR;
            break;

        case PCIM_HDRTYPE_CARDBUS:
            res = PCIR_CAP_PTR_2;
            break;
        }
    }

    return res;
}

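/*
 * Find capability "cap" of the device at bus/devfn. Where the list
 * starts depends on the header type (normal/bridge vs. CardBus), so
 * resolve the start offset first, then follow the chain.
 */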
rt_uint8_t rt_pci_bus_find_capability(struct rt_pci_bus *bus, rt_uint32_t devfn, int cap)
{
    rt_uint8_t hdr_type, ret = RT_UINT8_MAX;

    if (bus)
    {
        rt_pci_bus_read_config_u8(bus, devfn, PCIR_HDRTYPE, &hdr_type);

        ret = pci_bus_find_cap_start(bus, devfn, hdr_type & PCIM_HDRTYPE);

        if (ret)
        {
            ret = pci_find_next_cap(bus, devfn, ret, cap);
        }
    }

    return ret;
}

rt_uint8_t rt_pci_find_capability(struct rt_pci_device *pdev, int cap)
{
    rt_uint8_t res = RT_UINT8_MAX;

    if (pdev)
    {
        res = pci_bus_find_cap_start(pdev->bus, pdev->devfn, pdev->hdr_type);

        if (res)
        {
            res = pci_find_next_cap(pdev->bus, pdev->devfn, res, cap);
        }
    }

    return res;
}

rt_uint8_t rt_pci_find_next_capability(struct rt_pci_device *pdev, rt_uint8_t pos, int cap)
{
    rt_uint8_t res = RT_UINT8_MAX;

    if (pdev)
    {
        res = pci_find_next_cap(pdev->bus, pdev->devfn, pos + PCICAP_NEXTPTR, cap);
    }

    return res;
}

rt_uint16_t rt_pci_find_ext_capability(struct rt_pci_device *pdev, int cap)
{
    return rt_pci_find_ext_next_capability(pdev, 0, cap);
}

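/*
 * PCIe extended capabilities live above the first 256 bytes of config
 * space (PCI_REGMAX + 1) as 32-bit headers chained by a next pointer.
 * The ttl bound (one hop per minimum 8-byte capability) again guards
 * against malformed loops.
 */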
rt_uint16_t rt_pci_find_ext_next_capability(struct rt_pci_device *pdev, rt_uint16_t pos, int cap)
{
    int ttl;
    rt_uint32_t header;
    rt_uint16_t start = pos;

    /* Minimum 8 bytes per capability */
    ttl = ((PCIE_REGMAX + 1) - (PCI_REGMAX + 1)) / 8;

    if (pdev->cfg_size <= PCI_REGMAX + 1)
    {
        return 0;
    }

    if (!pos)
    {
        pos = PCI_REGMAX + 1;
    }

    if (rt_pci_read_config_u32(pdev, pos, &header))
    {
        return 0;
    }

    /*
     * If we have no capabilities, this is indicated by cap ID,
     * cap version and next pointer all being 0.
     */
    if (header == 0)
    {
        return 0;
    }

    while (ttl-- > 0)
    {
        if (PCI_EXTCAP_ID(header) == cap && pos != start)
        {
            return pos;
        }

        pos = PCI_EXTCAP_NEXTPTR(header);

        if (pos < PCI_REGMAX + 1)
        {
            break;
        }

        if (rt_pci_read_config_u32(pdev, pos, &header))
        {
            break;
        }
    }

    return 0;
}

static void pci_set_master(struct rt_pci_device *pdev, rt_bool_t enable)
{
    rt_uint16_t old_cmd, cmd;

    rt_pci_read_config_u16(pdev, PCIR_COMMAND, &old_cmd);

    if (enable)
    {
        cmd = old_cmd | PCIM_CMD_BUSMASTEREN;
    }
    else
    {
        cmd = old_cmd & ~PCIM_CMD_BUSMASTEREN;
    }

    if (cmd != old_cmd)
    {
        rt_pci_write_config_u16(pdev, PCIR_COMMAND, cmd);
    }

    pdev->busmaster = !!enable;
}

void rt_pci_set_master(struct rt_pci_device *pdev)
{
    if (pdev)
    {
        pci_set_master(pdev, RT_TRUE);
    }
}

void rt_pci_clear_master(struct rt_pci_device *pdev)
{
    if (pdev)
    {
        pci_set_master(pdev, RT_FALSE);
    }
}

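/* Enable or disable the device's legacy INTx line via the command register. */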
void rt_pci_intx(struct rt_pci_device *pdev, rt_bool_t enable)
{
    rt_uint16_t pci_command, new;

    if (!pdev)
    {
        return;
    }

    rt_pci_read_config_u16(pdev, PCIR_COMMAND, &pci_command);

    if (enable)
    {
        new = pci_command & ~PCIM_CMD_INTxDIS;
    }
    else
    {
        new = pci_command | PCIM_CMD_INTxDIS;
    }

    if (new != pci_command)
    {
        rt_pci_write_config_u16(pdev, PCIR_COMMAND, new);
    }
}

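/*
 * Check the INTx status and mask or unmask the line in one critical
 * section: command and status are fetched as a single 32-bit read under
 * rt_pci_lock, so the "did our device raise this interrupt" test and
 * the command update cannot race with another CPU.
 */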
static rt_bool_t pci_check_and_set_intx_mask(struct rt_pci_device *pdev, rt_bool_t mask)
{
    rt_ubase_t level;
    rt_bool_t irq_pending;
    rt_bool_t res = RT_TRUE;
    rt_uint16_t origcmd, newcmd;
    rt_uint32_t cmd_status_dword;
    struct rt_pci_bus *bus = pdev->bus;

    level = rt_spin_lock_irqsave(&rt_pci_lock);

    bus->ops->read(bus, pdev->devfn, PCIR_COMMAND, 4, &cmd_status_dword);

    irq_pending = (cmd_status_dword >> 16) & PCIM_STATUS_INTxSTATE;

    /*
     * Check interrupt status register to see whether our device
     * triggered the interrupt (when masking) or the next IRQ is
     * already pending (when unmasking).
     */
    if (mask != irq_pending)
    {
        res = RT_FALSE;
    }
    else
    {
        origcmd = cmd_status_dword;
        newcmd = origcmd & ~PCIM_CMD_INTxDIS;

        if (mask)
        {
            newcmd |= PCIM_CMD_INTxDIS;
        }

        if (newcmd != origcmd)
        {
            bus->ops->write(bus, pdev->devfn, PCIR_COMMAND, 2, newcmd);
        }
    }

    rt_spin_unlock_irqrestore(&rt_pci_lock, level);

    return res;
}

rt_bool_t rt_pci_check_and_mask_intx(struct rt_pci_device *pdev)
{
    rt_bool_t res = RT_FALSE;

    if (pdev)
    {
        res = pci_check_and_set_intx_mask(pdev, RT_TRUE);
    }

    return res;
}

rt_bool_t rt_pci_check_and_unmask_intx(struct rt_pci_device *pdev)
{
    rt_bool_t res = RT_FALSE;

    if (pdev)
    {
        res = pci_check_and_set_intx_mask(pdev, RT_FALSE);
    }

    return res;
}

void rt_pci_irq_mask(struct rt_pci_device *pdev)
{
    if (pdev)
    {
        rt_bool_t unused;
        struct rt_pic_irq *pirq;

        rt_pci_intx(pdev, RT_FALSE);

        pirq = rt_pic_find_pirq(pdev->intx_pic, pdev->irq);
        RT_ASSERT(pirq != RT_NULL);

        rt_hw_spin_lock(&pirq->rw_lock.lock);
        unused = rt_list_isempty(&pirq->isr.list);
        rt_hw_spin_unlock(&pirq->rw_lock.lock);

        if (unused)
        {
            rt_hw_interrupt_mask(pdev->irq);
        }
    }
}

void rt_pci_irq_unmask(struct rt_pci_device *pdev)
{
    if (pdev)
    {
        rt_hw_interrupt_umask(pdev->irq);
        rt_pci_intx(pdev, RT_TRUE);
    }
}

struct rt_pci_bus *rt_pci_find_root_bus(struct rt_pci_bus *bus)
{
    if (!bus)
    {
        return RT_NULL;
    }

    while (bus->parent)
    {
        bus = bus->parent;
    }

    return bus;
}

struct rt_pci_host_bridge *rt_pci_find_host_bridge(struct rt_pci_bus *bus)
{
    if (!bus)
    {
        return RT_NULL;
    }

    if ((bus = rt_pci_find_root_bus(bus)))
    {
        return rt_container_of(bus->host_bridge, struct rt_pci_host_bridge, parent);
    }

    return RT_NULL;
}

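/*
 * Standard INTx swizzle: each bridge level rotates INTA..INTD by the
 * device's slot number, so the pin seen upstream is
 * ((pin - 1) + slot) % 4 + 1. ARI devices always contribute slot 0.
 */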
rt_uint8_t rt_pci_irq_intx(struct rt_pci_device *pdev, rt_uint8_t pin)
{
    int slot = 0;

    if (!pdev->ari_enabled)
    {
        slot = RT_PCI_SLOT(pdev->devfn);
    }

    return (((pin - 1) + slot) % 4) + 1;
}

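/*
 * Apply the swizzle at every bridge on the way up to the root bus;
 * returns the root-level slot and updates *pinp to the root-level pin.
 */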
rt_uint8_t rt_pci_irq_slot(struct rt_pci_device *pdev, rt_uint8_t *pinp)
{
    rt_uint8_t pin = *pinp;

    while (!rt_pci_is_root_bus(pdev->bus))
    {
        pin = rt_pci_irq_intx(pdev, pin);
        pdev = pdev->bus->self;
    }

    *pinp = pin;

    return RT_PCI_SLOT(pdev->devfn);
}

rt_err_t rt_pci_region_setup(struct rt_pci_host_bridge *host_bridge)
{
    rt_err_t err = host_bridge->bus_regions_nr == 0 ? -RT_EEMPTY : RT_EOK;

    for (int i = 0; i < host_bridge->bus_regions_nr; ++i)
    {
        struct rt_pci_bus_region *region = &host_bridge->bus_regions[i];

        /*
         * Avoid allocating PCI resources from address 0 -- this is illegal
         * according to PCI 2.1. Use a reasonable starting value of 0x1000
         * instead if the bus start address is below 0x1000.
         */
        region->bus_start = rt_max_t(rt_size_t, 0x1000, region->phy_addr);

        LOG_I("Bus %s region(%d):",
            region->flags == PCI_BUS_REGION_F_MEM ? "Memory" :
            (region->flags == PCI_BUS_REGION_F_PREFETCH ? "Prefetchable Mem" :
            (region->flags == PCI_BUS_REGION_F_IO ? "I/O" : "Unknown")), i);
        LOG_I("  cpu:      [%p, %p]", region->cpu_addr, (region->cpu_addr + region->size - 1));
        LOG_I("  physical: [%p, %p]", region->phy_addr, (region->phy_addr + region->size - 1));
    }

    return err;
}

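/*
 * Carve "size" bytes out of the first host-bridge window whose flags
 * match. The allocation cursor (bus_start) is rounded up to "size",
 * which gives natural alignment for the power-of-two BAR sizes probed
 * below. If no window yields a 64-bit address for a 64-bit BAR, the
 * allocation is retried as a 32-bit one.
 */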
struct rt_pci_bus_region *rt_pci_region_alloc(struct rt_pci_host_bridge *host_bridge,
        void **out_addr, rt_size_t size, rt_ubase_t flags, rt_bool_t mem64)
{
    struct rt_pci_bus_region *bus_region, *region = RT_NULL;

    bus_region = &host_bridge->bus_regions[0];

    for (int i = 0; i < host_bridge->bus_regions_nr; ++i, ++bus_region)
    {
        if (bus_region->flags == flags && bus_region->size > 0)
        {
            void *addr;

            region = bus_region;
            addr = (void *)(((region->bus_start - 1) | (size - 1)) + 1);

            if ((rt_uint64_t)addr - region->phy_addr + size <= region->size)
            {
                rt_bool_t addr64 = !!rt_upper_32_bits((rt_ubase_t)addr);

                if (mem64)
                {
                    if (!addr64)
                    {
                        region = RT_NULL;

                        /* Try again */
                        continue;
                    }
                }
                else if (addr64)
                {
                    region = RT_NULL;

                    /* Try again */
                    continue;
                }

                region->bus_start = ((rt_uint64_t)addr + size);
                *out_addr = addr;
            }

            break;
        }
    }

    if (!region && mem64)
    {
        /* Retry */
        region = rt_pci_region_alloc(host_bridge, out_addr, size, flags, RT_FALSE);
    }

    return region;
}

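/*
 * Assign bus addresses to every BAR (and the expansion ROM) of one
 * device, then program the command register with the matching I/O,
 * memory and bus-master enable bits.
 */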
rt_err_t rt_pci_device_alloc_resource(struct rt_pci_host_bridge *host_bridge,
        struct rt_pci_device *pdev)
{
    rt_err_t err = RT_EOK;
    rt_size_t size;
    rt_ubase_t addr = 0;
    rt_uint32_t cfg;
    rt_size_t bars_nr;
    rt_uint8_t hdr_type;
    rt_bool_t prefetch = RT_FALSE;
    rt_uint16_t class, command = 0;

    for (int i = 0; i < host_bridge->bus_regions_nr; ++i)
    {
        if (host_bridge->bus_regions[i].flags == PCI_BUS_REGION_F_PREFETCH)
        {
            prefetch = RT_TRUE;
            break;
        }
    }

    rt_pci_read_config_u16(pdev, PCIR_COMMAND, &command);
    command = (command & ~(PCIM_CMD_PORTEN | PCIM_CMD_MEMEN)) | PCIM_CMD_BUSMASTEREN;

    rt_pci_read_config_u8(pdev, PCIR_HDRTYPE, &hdr_type);

    if (pdev->hdr_type != hdr_type)
    {
        LOG_W("%s may not be initialized", rt_dm_dev_get_name(&pdev->parent));
    }

    switch (hdr_type)
    {
    case PCIM_HDRTYPE_NORMAL:
        bars_nr = PCI_STD_NUM_BARS;
        break;

    case PCIM_HDRTYPE_BRIDGE:
        bars_nr = 2;
        break;

    case PCIM_HDRTYPE_CARDBUS:
        bars_nr = 0;
        break;

    default:
        bars_nr = 0;
        break;
    }

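    /*
     * Size each BAR with the standard probe: write all-ones, read back
     * the mask, and take its two's complement (type bits excluded).
     */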
    for (int i = 0; i < bars_nr; ++i)
    {
        rt_ubase_t flags;
        rt_ubase_t bar_base;
        rt_bool_t mem64 = RT_FALSE;
        struct rt_pci_bus_region *region;

        cfg = 0;
        bar_base = PCIR_BAR(i);

        rt_pci_write_config_u32(pdev, bar_base, RT_UINT32_MAX);
        rt_pci_read_config_u32(pdev, bar_base, &cfg);

        if (!cfg)
        {
            continue;
        }
        else if (cfg == RT_UINT32_MAX)
        {
            rt_pci_write_config_u32(pdev, bar_base, 0UL);
            continue;
        }

        if (cfg & PCIM_BAR_SPACE)
        {
            mem64 = RT_FALSE;
            flags = PCI_BUS_REGION_F_IO;

            size = cfg & PCIM_BAR_IO_MASK;
            size &= ~(size - 1);
        }
        else
        {
            /* Memory */
            if ((cfg & PCIM_BAR_MEM_TYPE_MASK) == PCIM_BAR_MEM_TYPE_64)
            {
                /* 64 bits */
                rt_uint32_t cfg64;
                rt_uint64_t bar64;

                mem64 = RT_TRUE;

                rt_pci_write_config_u32(pdev, bar_base + sizeof(rt_uint32_t), RT_UINT32_MAX);
                rt_pci_read_config_u32(pdev, bar_base + sizeof(rt_uint32_t), &cfg64);

                bar64 = ((rt_uint64_t)cfg64 << 32) | cfg;
                size = ~(bar64 & PCIM_BAR_MEM_MASK) + 1;
            }
            else
            {
                /* 32 bits */
                mem64 = RT_FALSE;
                size = (rt_uint32_t)(~(cfg & PCIM_BAR_MEM_MASK) + 1);
            }

            if (prefetch && (cfg & PCIM_BAR_MEM_PREFETCH))
            {
                flags = PCI_BUS_REGION_F_PREFETCH;
            }
            else
            {
                flags = PCI_BUS_REGION_F_MEM;
            }
        }

        region = rt_pci_region_alloc(host_bridge, (void **)&addr, size, flags, mem64);

        if (region)
        {
            rt_pci_write_config_u32(pdev, bar_base, addr);

            if (mem64)
            {
                bar_base += sizeof(rt_uint32_t);
#ifdef RT_PCI_SYS_64BIT
                rt_pci_write_config_u32(pdev, bar_base, (rt_uint32_t)(addr >> 32));
#else
                /*
                 * If we are a 64-bit decoder then increment to the upper 32 bits
                 * of the bar and force it to locate in the lower 4GB of memory.
                 */
                rt_pci_write_config_u32(pdev, bar_base, 0UL);
#endif
            }

            pdev->resource[i].size = size;
            pdev->resource[i].base = region->cpu_addr + (addr - region->phy_addr);
            pdev->resource[i].flags = flags;

            if (mem64)
            {
                ++i;
                pdev->resource[i].flags = PCI_BUS_REGION_F_NONE;
            }
        }
        else
        {
            err = -RT_ERROR;
            LOG_W("%s alloc bar(%d) address fail", rt_dm_dev_get_name(&pdev->parent), i);
        }

        command |= (cfg & PCIM_BAR_SPACE) ? PCIM_CMD_PORTEN : PCIM_CMD_MEMEN;
    }

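    /* The expansion ROM BAR is sized the same way (bit 0 is its enable bit). */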
    if (hdr_type == PCIM_HDRTYPE_NORMAL || hdr_type == PCIM_HDRTYPE_BRIDGE)
    {
        int rom_addr = (hdr_type == PCIM_HDRTYPE_NORMAL) ? PCIR_BIOS : PCIR_BIOS_1;

        rt_pci_write_config_u32(pdev, rom_addr, 0xfffffffe);
        rt_pci_read_config_u32(pdev, rom_addr, &cfg);

        if (cfg)
        {
            size = -(cfg & ~1);

            if (rt_pci_region_alloc(host_bridge, (void **)&addr, size, PCI_BUS_REGION_F_MEM, RT_FALSE))
            {
                rt_pci_write_config_u32(pdev, rom_addr, addr);
            }

            command |= PCIM_CMD_MEMEN;
        }
    }

    rt_pci_read_config_u16(pdev, PCIR_SUBCLASS, &class);

    if (class == PCIS_DISPLAY_VGA)
    {
        command |= PCIM_CMD_PORTEN;
    }

    rt_pci_write_config_u16(pdev, PCIR_COMMAND, command);
    rt_pci_write_config_u8(pdev, PCIR_CACHELNSZ, RT_PCI_CACHE_LINE_SIZE);
    rt_pci_write_config_u8(pdev, PCIR_LATTIMER, 0x80);

    return err;
}

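/*
 * Enumerate every device reachable from "bus" with an iterative
 * depth-first walk: descend to the deepest child bus first, then climb
 * back up through siblings. "callback" runs with the bus lock dropped,
 * and returning RT_TRUE from it stops the walk; reference counts are
 * held while a device is handed out so it cannot vanish mid-visit.
 * A hypothetical usage sketch:
 *
 *     static rt_bool_t dump_one(struct rt_pci_device *pdev, void *data)
 *     {
 *         rt_kprintf("%04x:%04x\n", pdev->vendor, pdev->device);
 *         return RT_FALSE;    <- keep walking
 *     }
 *
 *     rt_pci_enum_device(rt_pci_find_root_bus(any_bus), dump_one, RT_NULL);
 */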
void rt_pci_enum_device(struct rt_pci_bus *bus,
        rt_bool_t (callback(struct rt_pci_device *, void *)), void *data)
{
    rt_bool_t is_end = RT_FALSE;
    struct rt_spinlock *lock;
    struct rt_pci_bus *parent;
    struct rt_pci_device *pdev, *last_pdev = RT_NULL;

    /* Walk tree */
    while (bus && !is_end)
    {
        /* Go to the bottom */
        for (;;)
        {
            lock = &bus->lock;

            spin_lock(lock);

            if (rt_list_isempty(&bus->children_nodes))
            {
                parent = bus->parent;
                break;
            }

            /* Descend into the first child bus */
            bus = rt_list_entry(bus->children_nodes.next, struct rt_pci_bus, list);
            spin_unlock(lock);
        }

        rt_list_for_each_entry(pdev, &bus->devices_nodes, list)
        {
            if (last_pdev)
            {
                spin_unlock(lock);

                if (callback(last_pdev, data))
                {
                    spin_lock(lock);
                    --last_pdev->parent.ref_count;

                    is_end = RT_TRUE;
                    break;
                }

                spin_lock(lock);
                --last_pdev->parent.ref_count;
            }

            ++pdev->parent.ref_count;
            last_pdev = pdev;
        }

        if (!is_end && last_pdev)
        {
            spin_unlock(lock);

            if (callback(last_pdev, data))
            {
                is_end = RT_TRUE;
            }

            spin_lock(lock);
            --last_pdev->parent.ref_count;
        }

        last_pdev = RT_NULL;
        spin_unlock(lock);

        /* Go up a level, or on to the next sibling */
        while (!is_end)
        {
            lock = &bus->lock;

            if (!parent)
            {
                /* Root bus reached, the walk is done */
                bus = RT_NULL;
                break;
            }

            spin_lock(lock);

            if (bus->list.next != &parent->children_nodes)
            {
                /* Has a next sibling */
                bus = rt_list_entry(bus->list.next, struct rt_pci_bus, list);
                spin_unlock(lock);
                break;
            }

            /* Visit all devices on this bus's parent */
            rt_list_for_each_entry(pdev, &parent->devices_nodes, list)
            {
                if (last_pdev)
                {
                    spin_unlock(lock);

                    if (callback(last_pdev, data))
                    {
                        spin_lock(lock);
                        --last_pdev->parent.ref_count;

                        is_end = RT_TRUE;
                        break;
                    }

                    spin_lock(lock);
                    --last_pdev->parent.ref_count;
                }

                ++pdev->parent.ref_count;
                last_pdev = pdev;
            }

            if (!is_end && last_pdev)
            {
                spin_unlock(lock);

                if (callback(last_pdev, data))
                {
                    is_end = RT_TRUE;
                }

                spin_lock(lock);
                --last_pdev->parent.ref_count;
            }

            last_pdev = RT_NULL;

            bus = parent;
            parent = parent->parent;
            spin_unlock(lock);
        }
    }
}

const struct rt_pci_device_id *rt_pci_match_id(struct rt_pci_device *pdev,
        const struct rt_pci_device_id *id)
{
    if ((id->vendor == PCI_ANY_ID || id->vendor == pdev->vendor) &&
        (id->device == PCI_ANY_ID || id->device == pdev->device) &&
        (id->subsystem_vendor == PCI_ANY_ID || id->subsystem_vendor == pdev->subsystem_vendor) &&
        (id->subsystem_device == PCI_ANY_ID || id->subsystem_device == pdev->subsystem_device) &&
        !((id->class ^ pdev->class) & id->class_mask))
    {
        return id;
    }

    return RT_NULL;
}

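/*
 * Match "pdev" against a table of IDs. The table must end with an
 * all-zero sentinel entry; a hypothetical table looks like:
 *
 *     static const struct rt_pci_device_id sample_ids[] =
 *     {
 *         { .vendor = 0x1af4, .device = PCI_ANY_ID },
 *         { 0 },    <- terminating sentinel
 *     };
 */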
const struct rt_pci_device_id *rt_pci_match_ids(struct rt_pci_device *pdev,
        const struct rt_pci_device_id *ids)
{
    while (ids->vendor || ids->subsystem_vendor || ids->class_mask)
    {
        if (rt_pci_match_id(pdev, ids))
        {
            return ids;
        }

        ++ids;
    }

    return RT_NULL;
}

static struct rt_bus pci_bus;

rt_err_t rt_pci_driver_register(struct rt_pci_driver *pdrv)
{
    RT_ASSERT(pdrv != RT_NULL);

    pdrv->parent.bus = &pci_bus;
#if RT_NAME_MAX > 0
    rt_strcpy(pdrv->parent.parent.name, pdrv->name);
#else
    pdrv->parent.parent.name = pdrv->name;
#endif

    return rt_driver_register(&pdrv->parent);
}

rt_err_t rt_pci_device_register(struct rt_pci_device *pdev)
{
    rt_err_t err;

    RT_ASSERT(pdev != RT_NULL);

    if ((err = rt_bus_add_device(&pci_bus, &pdev->parent)))
    {
        return err;
    }

    return RT_EOK;
}

static rt_bool_t pci_match(rt_driver_t drv, rt_device_t dev)
{
    rt_bool_t match = RT_FALSE;
    struct rt_pci_driver *pdrv = rt_container_of(drv, struct rt_pci_driver, parent);
    struct rt_pci_device *pdev = rt_container_of(dev, struct rt_pci_device, parent);

    if (pdrv->name && pdev->name)
    {
        match = rt_strcmp(pdrv->name, pdev->name) ? RT_FALSE : RT_TRUE;
    }

    if (!match)
    {
        pdev->id = rt_pci_match_ids(pdev, pdrv->ids);
        match = pdev->id ? RT_TRUE : RT_FALSE;
    }

    return match;
}

static rt_err_t pci_probe(rt_device_t dev)
{
    rt_err_t err = RT_EOK;
    struct rt_pci_driver *pdrv = rt_container_of(dev->drv, struct rt_pci_driver, parent);
    struct rt_pci_device *pdev = rt_container_of(dev, struct rt_pci_device, parent);

    rt_pci_assign_irq(pdev);
    rt_pci_enable_wake(pdev, RT_PCI_D0, RT_TRUE);

    err = pdrv->probe(pdev);

    if (err)
    {
        rt_pci_enable_wake(pdev, RT_PCI_D0, RT_FALSE);
    }

    return err;
}

static rt_err_t pci_remove(rt_device_t dev)
{
    rt_err_t err = RT_EOK;
    struct rt_pci_bus *bus;
    struct rt_pci_driver *pdrv = rt_container_of(dev->drv, struct rt_pci_driver, parent);
    struct rt_pci_device *pdev = rt_container_of(dev, struct rt_pci_device, parent);

    if (pdrv && pdrv->remove)
    {
        if ((err = pdrv->remove(pdev)))
        {
            return err;
        }
    }

    rt_pci_enable_wake(pdev, RT_PCI_D0, RT_FALSE);

    bus = pdev->bus;
    rt_pci_device_remove(pdev);
    /* Just try to remove */
    rt_pci_bus_remove(bus);

    return err;
}

static rt_err_t pci_shutdown(rt_device_t dev)
{
    struct rt_pci_bus *bus;
    struct rt_pci_driver *pdrv = rt_container_of(dev->drv, struct rt_pci_driver, parent);
    struct rt_pci_device *pdev = rt_container_of(dev, struct rt_pci_device, parent);

    if (pdrv && pdrv->shutdown)
    {
        pdrv->shutdown(pdev);
    }

    rt_pci_enable_wake(pdev, RT_PCI_D0, RT_FALSE);

    bus = pdev->bus;
    rt_pci_device_remove(pdev);
    /* Just try to remove */
    rt_pci_bus_remove(bus);

    return RT_EOK;
}

static struct rt_bus pci_bus =
{
    .name = "pci",
    .match = pci_match,
    .probe = pci_probe,
    .remove = pci_remove,
    .shutdown = pci_shutdown,
};

static int pci_bus_init(void)
{
    rt_bus_register(&pci_bus);

    return 0;
}
INIT_CORE_EXPORT(pci_bus_init);