# mypy: ignore-errors
# -*- coding: utf-8 -*-
#
# TARGET arch is: []
# WORD_SIZE is: 8
# POINTER_SIZE is: 8
# LONGDOUBLE_SIZE is: 16
#
import ctypes, os
  10. class AsDictMixin:
  11. @classmethod
  12. def as_dict(cls, self):
  13. result = {}
  14. if not isinstance(self, AsDictMixin):
  15. # not a structure, assume it's already a python object
  16. return self
  17. if not hasattr(cls, "_fields_"):
  18. return result
  19. # sys.version_info >= (3, 5)
  20. # for (field, *_) in cls._fields_: # noqa
  21. for field_tuple in cls._fields_: # noqa
  22. field = field_tuple[0]
  23. if field.startswith('PADDING_'):
  24. continue
  25. value = getattr(self, field)
  26. type_ = type(value)
  27. if hasattr(value, "_length_") and hasattr(value, "_type_"):
  28. # array
  29. if not hasattr(type_, "as_dict"):
  30. value = [v for v in value]
  31. else:
  32. type_ = type_._type_
  33. value = [type_.as_dict(v) for v in value]
  34. elif hasattr(value, "contents") and hasattr(value, "_type_"):
  35. # pointer
  36. try:
  37. if not hasattr(type_, "as_dict"):
  38. value = value.contents
  39. else:
  40. type_ = type_._type_
  41. value = type_.as_dict(value.contents)
  42. except ValueError:
  43. # nullptr
  44. value = None
  45. elif isinstance(value, AsDictMixin):
  46. # other structure
  47. value = type_.as_dict(value)
  48. result[field] = value
  49. return result
  50. class Structure(ctypes.Structure, AsDictMixin):
  51. def __init__(self, *args, **kwds):
  52. # We don't want to use positional arguments fill PADDING_* fields
  53. args = dict(zip(self.__class__._field_names_(), args))
  54. args.update(kwds)
  55. super(Structure, self).__init__(**args)
  56. @classmethod
  57. def _field_names_(cls):
  58. if hasattr(cls, '_fields_'):
  59. return (f[0] for f in cls._fields_ if not f[0].startswith('PADDING'))
  60. else:
  61. return ()
  62. @classmethod
  63. def get_type(cls, field):
  64. for f in cls._fields_:
  65. if f[0] == field:
  66. return f[1]
  67. return None
  68. @classmethod
  69. def bind(cls, bound_fields):
  70. fields = {}
  71. for name, type_ in cls._fields_:
  72. if hasattr(type_, "restype"):
  73. if name in bound_fields:
  74. if bound_fields[name] is None:
  75. fields[name] = type_()
  76. else:
  77. # use a closure to capture the callback from the loop scope
  78. fields[name] = (
  79. type_((lambda callback: lambda *args: callback(*args))(
  80. bound_fields[name]))
  81. )
  82. del bound_fields[name]
  83. else:
  84. # default callback implementation (does nothing)
  85. try:
  86. default_ = type_(0).restype().value
  87. except TypeError:
  88. default_ = None
  89. fields[name] = type_((
  90. lambda default_: lambda *args: default_)(default_))
  91. else:
  92. # not a callback function, use default initialization
  93. if name in bound_fields:
  94. fields[name] = bound_fields[name]
  95. del bound_fields[name]
  96. else:
  97. fields[name] = type_()
  98. if len(bound_fields) != 0:
  99. raise ValueError(
  100. "Cannot bind the following unknown callback(s) {}.{}".format(
  101. cls.__name__, bound_fields.keys()
  102. ))
  103. return cls(**fields)
  104. class Union(ctypes.Union, AsDictMixin):
  105. pass
  106. KFD_IOCTL_H_INCLUDED = True # macro
  107. KFD_IOCTL_MAJOR_VERSION = 1 # macro
  108. KFD_IOCTL_MINOR_VERSION = 6 # macro
  109. KFD_IOC_QUEUE_TYPE_COMPUTE = 0x0 # macro
  110. KFD_IOC_QUEUE_TYPE_SDMA = 0x1 # macro
  111. KFD_IOC_QUEUE_TYPE_COMPUTE_AQL = 0x2 # macro
  112. KFD_IOC_QUEUE_TYPE_SDMA_XGMI = 0x3 # macro
  113. KFD_MAX_QUEUE_PERCENTAGE = 100 # macro
  114. KFD_MAX_QUEUE_PRIORITY = 15 # macro
  115. KFD_IOC_CACHE_POLICY_COHERENT = 0 # macro
  116. KFD_IOC_CACHE_POLICY_NONCOHERENT = 1 # macro
  117. NUM_OF_SUPPORTED_GPUS = 7 # macro
  118. MAX_ALLOWED_NUM_POINTS = 100 # macro
  119. MAX_ALLOWED_AW_BUFF_SIZE = 4096 # macro
  120. MAX_ALLOWED_WAC_BUFF_SIZE = 128 # macro
  121. KFD_IOC_EVENT_SIGNAL = 0 # macro
  122. KFD_IOC_EVENT_NODECHANGE = 1 # macro
  123. KFD_IOC_EVENT_DEVICESTATECHANGE = 2 # macro
  124. KFD_IOC_EVENT_HW_EXCEPTION = 3 # macro
  125. KFD_IOC_EVENT_SYSTEM_EVENT = 4 # macro
  126. KFD_IOC_EVENT_DEBUG_EVENT = 5 # macro
  127. KFD_IOC_EVENT_PROFILE_EVENT = 6 # macro
  128. KFD_IOC_EVENT_QUEUE_EVENT = 7 # macro
  129. KFD_IOC_EVENT_MEMORY = 8 # macro
  130. KFD_IOC_WAIT_RESULT_COMPLETE = 0 # macro
  131. KFD_IOC_WAIT_RESULT_TIMEOUT = 1 # macro
  132. KFD_IOC_WAIT_RESULT_FAIL = 2 # macro
  133. KFD_SIGNAL_EVENT_LIMIT = 4096 # macro
  134. KFD_HW_EXCEPTION_WHOLE_GPU_RESET = 0 # macro
  135. KFD_HW_EXCEPTION_PER_ENGINE_RESET = 1 # macro
  136. KFD_HW_EXCEPTION_GPU_HANG = 0 # macro
  137. KFD_HW_EXCEPTION_ECC = 1 # macro
  138. KFD_MEM_ERR_NO_RAS = 0 # macro
  139. KFD_MEM_ERR_SRAM_ECC = 1 # macro
  140. KFD_MEM_ERR_POISON_CONSUMED = 2 # macro
  141. KFD_MEM_ERR_GPU_HANG = 3 # macro
  142. KFD_IOC_ALLOC_MEM_FLAGS_VRAM = (1<<0) # macro
  143. KFD_IOC_ALLOC_MEM_FLAGS_GTT = (1<<1) # macro
  144. KFD_IOC_ALLOC_MEM_FLAGS_USERPTR = (1<<2) # macro
  145. KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL = (1<<3) # macro
  146. KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP = (1<<4) # macro
  147. KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE = (1<<31) # macro
  148. KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE = (1<<30) # macro
  149. KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC = (1<<29) # macro
  150. KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE = (1<<28) # macro
  151. KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM = (1<<27) # macro
  152. KFD_IOC_ALLOC_MEM_FLAGS_COHERENT = (1<<26) # macro
  153. KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED = (1<<25) # macro
  154. # def KFD_SMI_EVENT_MASK_FROM_INDEX(i): # macro
  155. # return (1<<((i)-1))
  156. KFD_IOCTL_SVM_FLAG_HOST_ACCESS = 0x00000001 # macro
  157. KFD_IOCTL_SVM_FLAG_COHERENT = 0x00000002 # macro
  158. KFD_IOCTL_SVM_FLAG_HIVE_LOCAL = 0x00000004 # macro
  159. KFD_IOCTL_SVM_FLAG_GPU_RO = 0x00000008 # macro
  160. KFD_IOCTL_SVM_FLAG_GPU_EXEC = 0x00000010 # macro
  161. KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY = 0x00000020 # macro
  162. AMDKFD_IOCTL_BASE = 'K' # macro
  163. # def AMDKFD_IO(nr): # macro
  164. # return _IO('K',nr)
  165. # def AMDKFD_IOR(nr, type): # macro
  166. # return _IOR('K',nr,type)
  167. # def AMDKFD_IOW(nr, type): # macro
  168. # return _IOW('K',nr,type)
  169. # def AMDKFD_IOWR(nr, type): # macro
  170. # return _IOWR('K',nr,type)
  171. # AMDKFD_IOC_GET_VERSION = _IOR('K',nr,type) ( 0x01 , struct kfd_ioctl_get_version_args ) # macro
  172. # AMDKFD_IOC_CREATE_QUEUE = _IOWR('K',nr,type) ( 0x02 , struct kfd_ioctl_create_queue_args ) # macro
  173. # AMDKFD_IOC_DESTROY_QUEUE = _IOWR('K',nr,type) ( 0x03 , struct kfd_ioctl_destroy_queue_args ) # macro
  174. # AMDKFD_IOC_SET_MEMORY_POLICY = _IOW('K',nr,type) ( 0x04 , struct kfd_ioctl_set_memory_policy_args ) # macro
  175. # AMDKFD_IOC_GET_CLOCK_COUNTERS = _IOWR('K',nr,type) ( 0x05 , struct kfd_ioctl_get_clock_counters_args ) # macro
  176. # AMDKFD_IOC_GET_PROCESS_APERTURES = _IOR('K',nr,type) ( 0x06 , struct kfd_ioctl_get_process_apertures_args ) # macro
  177. # AMDKFD_IOC_UPDATE_QUEUE = _IOW('K',nr,type) ( 0x07 , struct kfd_ioctl_update_queue_args ) # macro
  178. # AMDKFD_IOC_CREATE_EVENT = _IOWR('K',nr,type) ( 0x08 , struct kfd_ioctl_create_event_args ) # macro
  179. # AMDKFD_IOC_DESTROY_EVENT = _IOW('K',nr,type) ( 0x09 , struct kfd_ioctl_destroy_event_args ) # macro
  180. # AMDKFD_IOC_SET_EVENT = _IOW('K',nr,type) ( 0x0A , struct kfd_ioctl_set_event_args ) # macro
  181. # AMDKFD_IOC_RESET_EVENT = _IOW('K',nr,type) ( 0x0B , struct kfd_ioctl_reset_event_args ) # macro
  182. # AMDKFD_IOC_WAIT_EVENTS = _IOWR('K',nr,type) ( 0x0C , struct kfd_ioctl_wait_events_args ) # macro
  183. # AMDKFD_IOC_DBG_REGISTER = _IOW('K',nr,type) ( 0x0D , struct kfd_ioctl_dbg_register_args ) # macro
  184. # AMDKFD_IOC_DBG_UNREGISTER = _IOW('K',nr,type) ( 0x0E , struct kfd_ioctl_dbg_unregister_args ) # macro
  185. # AMDKFD_IOC_DBG_ADDRESS_WATCH = _IOW('K',nr,type) ( 0x0F , struct kfd_ioctl_dbg_address_watch_args ) # macro
  186. # AMDKFD_IOC_DBG_WAVE_CONTROL = _IOW('K',nr,type) ( 0x10 , struct kfd_ioctl_dbg_wave_control_args ) # macro
  187. # AMDKFD_IOC_SET_SCRATCH_BACKING_VA = _IOWR('K',nr,type) ( 0x11 , struct kfd_ioctl_set_scratch_backing_va_args ) # macro
  188. # AMDKFD_IOC_GET_TILE_CONFIG = _IOWR('K',nr,type) ( 0x12 , struct kfd_ioctl_get_tile_config_args ) # macro
  189. # AMDKFD_IOC_SET_TRAP_HANDLER = _IOW('K',nr,type) ( 0x13 , struct kfd_ioctl_set_trap_handler_args ) # macro
  190. # AMDKFD_IOC_GET_PROCESS_APERTURES_NEW = _IOWR('K',nr,type) ( 0x14 , struct kfd_ioctl_get_process_apertures_new_args ) # macro
  191. # AMDKFD_IOC_ACQUIRE_VM = _IOW('K',nr,type) ( 0x15 , struct kfd_ioctl_acquire_vm_args ) # macro
  192. # AMDKFD_IOC_ALLOC_MEMORY_OF_GPU = _IOWR('K',nr,type) ( 0x16 , struct kfd_ioctl_alloc_memory_of_gpu_args ) # macro
  193. # AMDKFD_IOC_FREE_MEMORY_OF_GPU = _IOW('K',nr,type) ( 0x17 , struct kfd_ioctl_free_memory_of_gpu_args ) # macro
  194. # AMDKFD_IOC_MAP_MEMORY_TO_GPU = _IOWR('K',nr,type) ( 0x18 , struct kfd_ioctl_map_memory_to_gpu_args ) # macro
  195. # AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU = _IOWR('K',nr,type) ( 0x19 , struct kfd_ioctl_unmap_memory_from_gpu_args ) # macro
  196. # AMDKFD_IOC_SET_CU_MASK = _IOW('K',nr,type) ( 0x1A , struct kfd_ioctl_set_cu_mask_args ) # macro
  197. # AMDKFD_IOC_GET_QUEUE_WAVE_STATE = _IOWR('K',nr,type) ( 0x1B , struct kfd_ioctl_get_queue_wave_state_args ) # macro
  198. # AMDKFD_IOC_GET_DMABUF_INFO = _IOWR('K',nr,type) ( 0x1C , struct kfd_ioctl_get_dmabuf_info_args ) # macro
  199. # AMDKFD_IOC_IMPORT_DMABUF = _IOWR('K',nr,type) ( 0x1D , struct kfd_ioctl_import_dmabuf_args ) # macro
  200. # AMDKFD_IOC_ALLOC_QUEUE_GWS = _IOWR('K',nr,type) ( 0x1E , struct kfd_ioctl_alloc_queue_gws_args ) # macro
  201. # AMDKFD_IOC_SMI_EVENTS = _IOWR('K',nr,type) ( 0x1F , struct kfd_ioctl_smi_events_args ) # macro
  202. # AMDKFD_IOC_SVM = _IOWR('K',nr,type) ( 0x20 , struct kfd_ioctl_svm_args ) # macro
  203. # AMDKFD_IOC_SET_XNACK_MODE = _IOWR('K',nr,type) ( 0x21 , struct kfd_ioctl_set_xnack_mode_args ) # macro
  204. AMDKFD_COMMAND_START = 0x01 # macro
  205. AMDKFD_COMMAND_END = 0x22 # macro
  206. class struct_kfd_ioctl_get_version_args(Structure):
  207. pass
  208. struct_kfd_ioctl_get_version_args._pack_ = 1 # source:False
  209. struct_kfd_ioctl_get_version_args._fields_ = [
  210. ('major_version', ctypes.c_uint32),
  211. ('minor_version', ctypes.c_uint32),
  212. ]
  213. class struct_kfd_ioctl_create_queue_args(Structure):
  214. pass
  215. struct_kfd_ioctl_create_queue_args._pack_ = 1 # source:False
  216. struct_kfd_ioctl_create_queue_args._fields_ = [
  217. ('ring_base_address', ctypes.c_uint64),
  218. ('write_pointer_address', ctypes.c_uint64),
  219. ('read_pointer_address', ctypes.c_uint64),
  220. ('doorbell_offset', ctypes.c_uint64),
  221. ('ring_size', ctypes.c_uint32),
  222. ('gpu_id', ctypes.c_uint32),
  223. ('queue_type', ctypes.c_uint32),
  224. ('queue_percentage', ctypes.c_uint32),
  225. ('queue_priority', ctypes.c_uint32),
  226. ('queue_id', ctypes.c_uint32),
  227. ('eop_buffer_address', ctypes.c_uint64),
  228. ('eop_buffer_size', ctypes.c_uint64),
  229. ('ctx_save_restore_address', ctypes.c_uint64),
  230. ('ctx_save_restore_size', ctypes.c_uint32),
  231. ('ctl_stack_size', ctypes.c_uint32),
  232. ]
  233. class struct_kfd_ioctl_destroy_queue_args(Structure):
  234. pass
  235. struct_kfd_ioctl_destroy_queue_args._pack_ = 1 # source:False
  236. struct_kfd_ioctl_destroy_queue_args._fields_ = [
  237. ('queue_id', ctypes.c_uint32),
  238. ('pad', ctypes.c_uint32),
  239. ]
  240. class struct_kfd_ioctl_update_queue_args(Structure):
  241. pass
  242. struct_kfd_ioctl_update_queue_args._pack_ = 1 # source:False
  243. struct_kfd_ioctl_update_queue_args._fields_ = [
  244. ('ring_base_address', ctypes.c_uint64),
  245. ('queue_id', ctypes.c_uint32),
  246. ('ring_size', ctypes.c_uint32),
  247. ('queue_percentage', ctypes.c_uint32),
  248. ('queue_priority', ctypes.c_uint32),
  249. ]
  250. class struct_kfd_ioctl_set_cu_mask_args(Structure):
  251. pass
  252. struct_kfd_ioctl_set_cu_mask_args._pack_ = 1 # source:False
  253. struct_kfd_ioctl_set_cu_mask_args._fields_ = [
  254. ('queue_id', ctypes.c_uint32),
  255. ('num_cu_mask', ctypes.c_uint32),
  256. ('cu_mask_ptr', ctypes.c_uint64),
  257. ]
  258. class struct_kfd_ioctl_get_queue_wave_state_args(Structure):
  259. pass
  260. struct_kfd_ioctl_get_queue_wave_state_args._pack_ = 1 # source:False
  261. struct_kfd_ioctl_get_queue_wave_state_args._fields_ = [
  262. ('ctl_stack_address', ctypes.c_uint64),
  263. ('ctl_stack_used_size', ctypes.c_uint32),
  264. ('save_area_used_size', ctypes.c_uint32),
  265. ('queue_id', ctypes.c_uint32),
  266. ('pad', ctypes.c_uint32),
  267. ]
  268. class struct_kfd_ioctl_set_memory_policy_args(Structure):
  269. pass
  270. struct_kfd_ioctl_set_memory_policy_args._pack_ = 1 # source:False
  271. struct_kfd_ioctl_set_memory_policy_args._fields_ = [
  272. ('alternate_aperture_base', ctypes.c_uint64),
  273. ('alternate_aperture_size', ctypes.c_uint64),
  274. ('gpu_id', ctypes.c_uint32),
  275. ('default_policy', ctypes.c_uint32),
  276. ('alternate_policy', ctypes.c_uint32),
  277. ('pad', ctypes.c_uint32),
  278. ]
  279. class struct_kfd_ioctl_get_clock_counters_args(Structure):
  280. pass
  281. struct_kfd_ioctl_get_clock_counters_args._pack_ = 1 # source:False
  282. struct_kfd_ioctl_get_clock_counters_args._fields_ = [
  283. ('gpu_clock_counter', ctypes.c_uint64),
  284. ('cpu_clock_counter', ctypes.c_uint64),
  285. ('system_clock_counter', ctypes.c_uint64),
  286. ('system_clock_freq', ctypes.c_uint64),
  287. ('gpu_id', ctypes.c_uint32),
  288. ('pad', ctypes.c_uint32),
  289. ]
  290. class struct_kfd_process_device_apertures(Structure):
  291. pass
  292. struct_kfd_process_device_apertures._pack_ = 1 # source:False
  293. struct_kfd_process_device_apertures._fields_ = [
  294. ('lds_base', ctypes.c_uint64),
  295. ('lds_limit', ctypes.c_uint64),
  296. ('scratch_base', ctypes.c_uint64),
  297. ('scratch_limit', ctypes.c_uint64),
  298. ('gpuvm_base', ctypes.c_uint64),
  299. ('gpuvm_limit', ctypes.c_uint64),
  300. ('gpu_id', ctypes.c_uint32),
  301. ('pad', ctypes.c_uint32),
  302. ]
  303. class struct_kfd_ioctl_get_process_apertures_args(Structure):
  304. pass
  305. struct_kfd_ioctl_get_process_apertures_args._pack_ = 1 # source:False
  306. struct_kfd_ioctl_get_process_apertures_args._fields_ = [
  307. ('process_apertures', struct_kfd_process_device_apertures * 7),
  308. ('num_of_nodes', ctypes.c_uint32),
  309. ('pad', ctypes.c_uint32),
  310. ]
  311. class struct_kfd_ioctl_get_process_apertures_new_args(Structure):
  312. pass
  313. struct_kfd_ioctl_get_process_apertures_new_args._pack_ = 1 # source:False
  314. struct_kfd_ioctl_get_process_apertures_new_args._fields_ = [
  315. ('kfd_process_device_apertures_ptr', ctypes.c_uint64),
  316. ('num_of_nodes', ctypes.c_uint32),
  317. ('pad', ctypes.c_uint32),
  318. ]
  319. class struct_kfd_ioctl_dbg_register_args(Structure):
  320. pass
  321. struct_kfd_ioctl_dbg_register_args._pack_ = 1 # source:False
  322. struct_kfd_ioctl_dbg_register_args._fields_ = [
  323. ('gpu_id', ctypes.c_uint32),
  324. ('pad', ctypes.c_uint32),
  325. ]
  326. class struct_kfd_ioctl_dbg_unregister_args(Structure):
  327. pass
  328. struct_kfd_ioctl_dbg_unregister_args._pack_ = 1 # source:False
  329. struct_kfd_ioctl_dbg_unregister_args._fields_ = [
  330. ('gpu_id', ctypes.c_uint32),
  331. ('pad', ctypes.c_uint32),
  332. ]
  333. class struct_kfd_ioctl_dbg_address_watch_args(Structure):
  334. pass
  335. struct_kfd_ioctl_dbg_address_watch_args._pack_ = 1 # source:False
  336. struct_kfd_ioctl_dbg_address_watch_args._fields_ = [
  337. ('content_ptr', ctypes.c_uint64),
  338. ('gpu_id', ctypes.c_uint32),
  339. ('buf_size_in_bytes', ctypes.c_uint32),
  340. ]
  341. class struct_kfd_ioctl_dbg_wave_control_args(Structure):
  342. pass
  343. struct_kfd_ioctl_dbg_wave_control_args._pack_ = 1 # source:False
  344. struct_kfd_ioctl_dbg_wave_control_args._fields_ = [
  345. ('content_ptr', ctypes.c_uint64),
  346. ('gpu_id', ctypes.c_uint32),
  347. ('buf_size_in_bytes', ctypes.c_uint32),
  348. ]
  349. class struct_kfd_ioctl_create_event_args(Structure):
  350. pass
  351. struct_kfd_ioctl_create_event_args._pack_ = 1 # source:False
  352. struct_kfd_ioctl_create_event_args._fields_ = [
  353. ('event_page_offset', ctypes.c_uint64),
  354. ('event_trigger_data', ctypes.c_uint32),
  355. ('event_type', ctypes.c_uint32),
  356. ('auto_reset', ctypes.c_uint32),
  357. ('node_id', ctypes.c_uint32),
  358. ('event_id', ctypes.c_uint32),
  359. ('event_slot_index', ctypes.c_uint32),
  360. ]
  361. class struct_kfd_ioctl_destroy_event_args(Structure):
  362. pass
  363. struct_kfd_ioctl_destroy_event_args._pack_ = 1 # source:False
  364. struct_kfd_ioctl_destroy_event_args._fields_ = [
  365. ('event_id', ctypes.c_uint32),
  366. ('pad', ctypes.c_uint32),
  367. ]
  368. class struct_kfd_ioctl_set_event_args(Structure):
  369. pass
  370. struct_kfd_ioctl_set_event_args._pack_ = 1 # source:False
  371. struct_kfd_ioctl_set_event_args._fields_ = [
  372. ('event_id', ctypes.c_uint32),
  373. ('pad', ctypes.c_uint32),
  374. ]
  375. class struct_kfd_ioctl_reset_event_args(Structure):
  376. pass
  377. struct_kfd_ioctl_reset_event_args._pack_ = 1 # source:False
  378. struct_kfd_ioctl_reset_event_args._fields_ = [
  379. ('event_id', ctypes.c_uint32),
  380. ('pad', ctypes.c_uint32),
  381. ]
  382. class struct_kfd_memory_exception_failure(Structure):
  383. pass
  384. struct_kfd_memory_exception_failure._pack_ = 1 # source:False
  385. struct_kfd_memory_exception_failure._fields_ = [
  386. ('NotPresent', ctypes.c_uint32),
  387. ('ReadOnly', ctypes.c_uint32),
  388. ('NoExecute', ctypes.c_uint32),
  389. ('imprecise', ctypes.c_uint32),
  390. ]
  391. class struct_kfd_hsa_memory_exception_data(Structure):
  392. pass
  393. struct_kfd_hsa_memory_exception_data._pack_ = 1 # source:False
  394. struct_kfd_hsa_memory_exception_data._fields_ = [
  395. ('failure', struct_kfd_memory_exception_failure),
  396. ('va', ctypes.c_uint64),
  397. ('gpu_id', ctypes.c_uint32),
  398. ('ErrorType', ctypes.c_uint32),
  399. ]
  400. class struct_kfd_hsa_hw_exception_data(Structure):
  401. pass
  402. struct_kfd_hsa_hw_exception_data._pack_ = 1 # source:False
  403. struct_kfd_hsa_hw_exception_data._fields_ = [
  404. ('reset_type', ctypes.c_uint32),
  405. ('reset_cause', ctypes.c_uint32),
  406. ('memory_lost', ctypes.c_uint32),
  407. ('gpu_id', ctypes.c_uint32),
  408. ]
  409. class struct_kfd_event_data(Structure):
  410. pass
  411. class union_kfd_event_data_0(Union):
  412. pass
  413. union_kfd_event_data_0._pack_ = 1 # source:False
  414. union_kfd_event_data_0._fields_ = [
  415. ('memory_exception_data', struct_kfd_hsa_memory_exception_data),
  416. ('hw_exception_data', struct_kfd_hsa_hw_exception_data),
  417. ('PADDING_0', ctypes.c_ubyte * 16),
  418. ]
  419. struct_kfd_event_data._pack_ = 1 # source:False
  420. struct_kfd_event_data._anonymous_ = ('_0',)
  421. struct_kfd_event_data._fields_ = [
  422. ('_0', union_kfd_event_data_0),
  423. ('kfd_event_data_ext', ctypes.c_uint64),
  424. ('event_id', ctypes.c_uint32),
  425. ('pad', ctypes.c_uint32),
  426. ]
  427. class struct_kfd_ioctl_wait_events_args(Structure):
  428. pass
  429. struct_kfd_ioctl_wait_events_args._pack_ = 1 # source:False
  430. struct_kfd_ioctl_wait_events_args._fields_ = [
  431. ('events_ptr', ctypes.c_uint64),
  432. ('num_events', ctypes.c_uint32),
  433. ('wait_for_all', ctypes.c_uint32),
  434. ('timeout', ctypes.c_uint32),
  435. ('wait_result', ctypes.c_uint32),
  436. ]
  437. class struct_kfd_ioctl_set_scratch_backing_va_args(Structure):
  438. pass
  439. struct_kfd_ioctl_set_scratch_backing_va_args._pack_ = 1 # source:False
  440. struct_kfd_ioctl_set_scratch_backing_va_args._fields_ = [
  441. ('va_addr', ctypes.c_uint64),
  442. ('gpu_id', ctypes.c_uint32),
  443. ('pad', ctypes.c_uint32),
  444. ]
  445. class struct_kfd_ioctl_get_tile_config_args(Structure):
  446. pass
  447. struct_kfd_ioctl_get_tile_config_args._pack_ = 1 # source:False
  448. struct_kfd_ioctl_get_tile_config_args._fields_ = [
  449. ('tile_config_ptr', ctypes.c_uint64),
  450. ('macro_tile_config_ptr', ctypes.c_uint64),
  451. ('num_tile_configs', ctypes.c_uint32),
  452. ('num_macro_tile_configs', ctypes.c_uint32),
  453. ('gpu_id', ctypes.c_uint32),
  454. ('gb_addr_config', ctypes.c_uint32),
  455. ('num_banks', ctypes.c_uint32),
  456. ('num_ranks', ctypes.c_uint32),
  457. ]
  458. class struct_kfd_ioctl_set_trap_handler_args(Structure):
  459. pass
  460. struct_kfd_ioctl_set_trap_handler_args._pack_ = 1 # source:False
  461. struct_kfd_ioctl_set_trap_handler_args._fields_ = [
  462. ('tba_addr', ctypes.c_uint64),
  463. ('tma_addr', ctypes.c_uint64),
  464. ('gpu_id', ctypes.c_uint32),
  465. ('pad', ctypes.c_uint32),
  466. ]
  467. class struct_kfd_ioctl_acquire_vm_args(Structure):
  468. pass
  469. struct_kfd_ioctl_acquire_vm_args._pack_ = 1 # source:False
  470. struct_kfd_ioctl_acquire_vm_args._fields_ = [
  471. ('drm_fd', ctypes.c_uint32),
  472. ('gpu_id', ctypes.c_uint32),
  473. ]
  474. class struct_kfd_ioctl_alloc_memory_of_gpu_args(Structure):
  475. pass
  476. struct_kfd_ioctl_alloc_memory_of_gpu_args._pack_ = 1 # source:False
  477. struct_kfd_ioctl_alloc_memory_of_gpu_args._fields_ = [
  478. ('va_addr', ctypes.c_uint64),
  479. ('size', ctypes.c_uint64),
  480. ('handle', ctypes.c_uint64),
  481. ('mmap_offset', ctypes.c_uint64),
  482. ('gpu_id', ctypes.c_uint32),
  483. ('flags', ctypes.c_uint32),
  484. ]
  485. class struct_kfd_ioctl_free_memory_of_gpu_args(Structure):
  486. pass
  487. struct_kfd_ioctl_free_memory_of_gpu_args._pack_ = 1 # source:False
  488. struct_kfd_ioctl_free_memory_of_gpu_args._fields_ = [
  489. ('handle', ctypes.c_uint64),
  490. ]
  491. class struct_kfd_ioctl_map_memory_to_gpu_args(Structure):
  492. pass
  493. struct_kfd_ioctl_map_memory_to_gpu_args._pack_ = 1 # source:False
  494. struct_kfd_ioctl_map_memory_to_gpu_args._fields_ = [
  495. ('handle', ctypes.c_uint64),
  496. ('device_ids_array_ptr', ctypes.c_uint64),
  497. ('n_devices', ctypes.c_uint32),
  498. ('n_success', ctypes.c_uint32),
  499. ]
  500. class struct_kfd_ioctl_unmap_memory_from_gpu_args(Structure):
  501. pass
  502. struct_kfd_ioctl_unmap_memory_from_gpu_args._pack_ = 1 # source:False
  503. struct_kfd_ioctl_unmap_memory_from_gpu_args._fields_ = [
  504. ('handle', ctypes.c_uint64),
  505. ('device_ids_array_ptr', ctypes.c_uint64),
  506. ('n_devices', ctypes.c_uint32),
  507. ('n_success', ctypes.c_uint32),
  508. ]
  509. class struct_kfd_ioctl_alloc_queue_gws_args(Structure):
  510. pass
  511. struct_kfd_ioctl_alloc_queue_gws_args._pack_ = 1 # source:False
  512. struct_kfd_ioctl_alloc_queue_gws_args._fields_ = [
  513. ('queue_id', ctypes.c_uint32),
  514. ('num_gws', ctypes.c_uint32),
  515. ('first_gws', ctypes.c_uint32),
  516. ('pad', ctypes.c_uint32),
  517. ]
  518. class struct_kfd_ioctl_get_dmabuf_info_args(Structure):
  519. pass
  520. struct_kfd_ioctl_get_dmabuf_info_args._pack_ = 1 # source:False
  521. struct_kfd_ioctl_get_dmabuf_info_args._fields_ = [
  522. ('size', ctypes.c_uint64),
  523. ('metadata_ptr', ctypes.c_uint64),
  524. ('metadata_size', ctypes.c_uint32),
  525. ('gpu_id', ctypes.c_uint32),
  526. ('flags', ctypes.c_uint32),
  527. ('dmabuf_fd', ctypes.c_uint32),
  528. ]
  529. class struct_kfd_ioctl_import_dmabuf_args(Structure):
  530. pass
  531. struct_kfd_ioctl_import_dmabuf_args._pack_ = 1 # source:False
  532. struct_kfd_ioctl_import_dmabuf_args._fields_ = [
  533. ('va_addr', ctypes.c_uint64),
  534. ('handle', ctypes.c_uint64),
  535. ('gpu_id', ctypes.c_uint32),
  536. ('dmabuf_fd', ctypes.c_uint32),
  537. ]
  538. # values for enumeration 'kfd_smi_event'
  539. kfd_smi_event__enumvalues = {
  540. 0: 'KFD_SMI_EVENT_NONE',
  541. 1: 'KFD_SMI_EVENT_VMFAULT',
  542. 2: 'KFD_SMI_EVENT_THERMAL_THROTTLE',
  543. 3: 'KFD_SMI_EVENT_GPU_PRE_RESET',
  544. 4: 'KFD_SMI_EVENT_GPU_POST_RESET',
  545. }
  546. KFD_SMI_EVENT_NONE = 0
  547. KFD_SMI_EVENT_VMFAULT = 1
  548. KFD_SMI_EVENT_THERMAL_THROTTLE = 2
  549. KFD_SMI_EVENT_GPU_PRE_RESET = 3
  550. KFD_SMI_EVENT_GPU_POST_RESET = 4
  551. kfd_smi_event = ctypes.c_uint32 # enum
  552. class struct_kfd_ioctl_smi_events_args(Structure):
  553. pass
  554. struct_kfd_ioctl_smi_events_args._pack_ = 1 # source:False
  555. struct_kfd_ioctl_smi_events_args._fields_ = [
  556. ('gpuid', ctypes.c_uint32),
  557. ('anon_fd', ctypes.c_uint32),
  558. ]
  559. # values for enumeration 'kfd_mmio_remap'
  560. kfd_mmio_remap__enumvalues = {
  561. 0: 'KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL',
  562. 4: 'KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL',
  563. }
  564. KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0
  565. KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4
  566. kfd_mmio_remap = ctypes.c_uint32 # enum
  567. # values for enumeration 'kfd_ioctl_svm_op'
  568. kfd_ioctl_svm_op__enumvalues = {
  569. 0: 'KFD_IOCTL_SVM_OP_SET_ATTR',
  570. 1: 'KFD_IOCTL_SVM_OP_GET_ATTR',
  571. }
  572. KFD_IOCTL_SVM_OP_SET_ATTR = 0
  573. KFD_IOCTL_SVM_OP_GET_ATTR = 1
  574. kfd_ioctl_svm_op = ctypes.c_uint32 # enum
  575. # values for enumeration 'kfd_ioctl_svm_location'
  576. kfd_ioctl_svm_location__enumvalues = {
  577. 0: 'KFD_IOCTL_SVM_LOCATION_SYSMEM',
  578. 4294967295: 'KFD_IOCTL_SVM_LOCATION_UNDEFINED',
  579. }
  580. KFD_IOCTL_SVM_LOCATION_SYSMEM = 0
  581. KFD_IOCTL_SVM_LOCATION_UNDEFINED = 4294967295
  582. kfd_ioctl_svm_location = ctypes.c_uint32 # enum
  583. # values for enumeration 'kfd_ioctl_svm_attr_type'
  584. kfd_ioctl_svm_attr_type__enumvalues = {
  585. 0: 'KFD_IOCTL_SVM_ATTR_PREFERRED_LOC',
  586. 1: 'KFD_IOCTL_SVM_ATTR_PREFETCH_LOC',
  587. 2: 'KFD_IOCTL_SVM_ATTR_ACCESS',
  588. 3: 'KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE',
  589. 4: 'KFD_IOCTL_SVM_ATTR_NO_ACCESS',
  590. 5: 'KFD_IOCTL_SVM_ATTR_SET_FLAGS',
  591. 6: 'KFD_IOCTL_SVM_ATTR_CLR_FLAGS',
  592. 7: 'KFD_IOCTL_SVM_ATTR_GRANULARITY',
  593. }
  594. KFD_IOCTL_SVM_ATTR_PREFERRED_LOC = 0
  595. KFD_IOCTL_SVM_ATTR_PREFETCH_LOC = 1
  596. KFD_IOCTL_SVM_ATTR_ACCESS = 2
  597. KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE = 3
  598. KFD_IOCTL_SVM_ATTR_NO_ACCESS = 4
  599. KFD_IOCTL_SVM_ATTR_SET_FLAGS = 5
  600. KFD_IOCTL_SVM_ATTR_CLR_FLAGS = 6
  601. KFD_IOCTL_SVM_ATTR_GRANULARITY = 7
  602. kfd_ioctl_svm_attr_type = ctypes.c_uint32 # enum
  603. class struct_kfd_ioctl_svm_attribute(Structure):
  604. pass
  605. struct_kfd_ioctl_svm_attribute._pack_ = 1 # source:False
  606. struct_kfd_ioctl_svm_attribute._fields_ = [
  607. ('type', ctypes.c_uint32),
  608. ('value', ctypes.c_uint32),
  609. ]
  610. class struct_kfd_ioctl_svm_args(Structure):
  611. pass
  612. struct_kfd_ioctl_svm_args._pack_ = 1 # source:False
  613. struct_kfd_ioctl_svm_args._fields_ = [
  614. ('start_addr', ctypes.c_uint64),
  615. ('size', ctypes.c_uint64),
  616. ('op', ctypes.c_uint32),
  617. ('nattr', ctypes.c_uint32),
  618. ('attrs', struct_kfd_ioctl_svm_attribute * 0),
  619. ]
  620. class struct_kfd_ioctl_set_xnack_mode_args(Structure):
  621. pass
  622. struct_kfd_ioctl_set_xnack_mode_args._pack_ = 1 # source:False
  623. struct_kfd_ioctl_set_xnack_mode_args._fields_ = [
  624. ('xnack_enabled', ctypes.c_int32),
  625. ]
  626. __all__ = \
  627. ['AMDKFD_COMMAND_END', 'AMDKFD_COMMAND_START',
  628. 'AMDKFD_IOCTL_BASE', 'KFD_HW_EXCEPTION_ECC',
  629. 'KFD_HW_EXCEPTION_GPU_HANG', 'KFD_HW_EXCEPTION_PER_ENGINE_RESET',
  630. 'KFD_HW_EXCEPTION_WHOLE_GPU_RESET', 'KFD_IOCTL_H_INCLUDED',
  631. 'KFD_IOCTL_MAJOR_VERSION', 'KFD_IOCTL_MINOR_VERSION',
  632. 'KFD_IOCTL_SVM_ATTR_ACCESS', 'KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE',
  633. 'KFD_IOCTL_SVM_ATTR_CLR_FLAGS', 'KFD_IOCTL_SVM_ATTR_GRANULARITY',
  634. 'KFD_IOCTL_SVM_ATTR_NO_ACCESS',
  635. 'KFD_IOCTL_SVM_ATTR_PREFERRED_LOC',
  636. 'KFD_IOCTL_SVM_ATTR_PREFETCH_LOC', 'KFD_IOCTL_SVM_ATTR_SET_FLAGS',
  637. 'KFD_IOCTL_SVM_FLAG_COHERENT', 'KFD_IOCTL_SVM_FLAG_GPU_EXEC',
  638. 'KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY', 'KFD_IOCTL_SVM_FLAG_GPU_RO',
  639. 'KFD_IOCTL_SVM_FLAG_HIVE_LOCAL', 'KFD_IOCTL_SVM_FLAG_HOST_ACCESS',
  640. 'KFD_IOCTL_SVM_LOCATION_SYSMEM',
  641. 'KFD_IOCTL_SVM_LOCATION_UNDEFINED', 'KFD_IOCTL_SVM_OP_GET_ATTR',
  642. 'KFD_IOCTL_SVM_OP_SET_ATTR',
  643. 'KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM',
  644. 'KFD_IOC_ALLOC_MEM_FLAGS_COHERENT',
  645. 'KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL',
  646. 'KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE',
  647. 'KFD_IOC_ALLOC_MEM_FLAGS_GTT',
  648. 'KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP',
  649. 'KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE',
  650. 'KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC',
  651. 'KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED',
  652. 'KFD_IOC_ALLOC_MEM_FLAGS_USERPTR', 'KFD_IOC_ALLOC_MEM_FLAGS_VRAM',
  653. 'KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE',
  654. 'KFD_IOC_CACHE_POLICY_COHERENT',
  655. 'KFD_IOC_CACHE_POLICY_NONCOHERENT', 'KFD_IOC_EVENT_DEBUG_EVENT',
  656. 'KFD_IOC_EVENT_DEVICESTATECHANGE', 'KFD_IOC_EVENT_HW_EXCEPTION',
  657. 'KFD_IOC_EVENT_MEMORY', 'KFD_IOC_EVENT_NODECHANGE',
  658. 'KFD_IOC_EVENT_PROFILE_EVENT', 'KFD_IOC_EVENT_QUEUE_EVENT',
  659. 'KFD_IOC_EVENT_SIGNAL', 'KFD_IOC_EVENT_SYSTEM_EVENT',
  660. 'KFD_IOC_QUEUE_TYPE_COMPUTE', 'KFD_IOC_QUEUE_TYPE_COMPUTE_AQL',
  661. 'KFD_IOC_QUEUE_TYPE_SDMA', 'KFD_IOC_QUEUE_TYPE_SDMA_XGMI',
  662. 'KFD_IOC_WAIT_RESULT_COMPLETE', 'KFD_IOC_WAIT_RESULT_FAIL',
  663. 'KFD_IOC_WAIT_RESULT_TIMEOUT', 'KFD_MAX_QUEUE_PERCENTAGE',
  664. 'KFD_MAX_QUEUE_PRIORITY', 'KFD_MEM_ERR_GPU_HANG',
  665. 'KFD_MEM_ERR_NO_RAS', 'KFD_MEM_ERR_POISON_CONSUMED',
  666. 'KFD_MEM_ERR_SRAM_ECC', 'KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL',
  667. 'KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL', 'KFD_SIGNAL_EVENT_LIMIT',
  668. 'KFD_SMI_EVENT_GPU_POST_RESET', 'KFD_SMI_EVENT_GPU_PRE_RESET',
  669. 'KFD_SMI_EVENT_NONE', 'KFD_SMI_EVENT_THERMAL_THROTTLE',
  670. 'KFD_SMI_EVENT_VMFAULT', 'MAX_ALLOWED_AW_BUFF_SIZE',
  671. 'MAX_ALLOWED_NUM_POINTS', 'MAX_ALLOWED_WAC_BUFF_SIZE',
  672. 'NUM_OF_SUPPORTED_GPUS', 'kfd_ioctl_svm_attr_type',
  673. 'kfd_ioctl_svm_location', 'kfd_ioctl_svm_op', 'kfd_mmio_remap',
  674. 'kfd_smi_event', 'struct_kfd_event_data',
  675. 'struct_kfd_hsa_hw_exception_data',
  676. 'struct_kfd_hsa_memory_exception_data',
  677. 'struct_kfd_ioctl_acquire_vm_args',
  678. 'struct_kfd_ioctl_alloc_memory_of_gpu_args',
  679. 'struct_kfd_ioctl_alloc_queue_gws_args',
  680. 'struct_kfd_ioctl_create_event_args',
  681. 'struct_kfd_ioctl_create_queue_args',
  682. 'struct_kfd_ioctl_dbg_address_watch_args',
  683. 'struct_kfd_ioctl_dbg_register_args',
  684. 'struct_kfd_ioctl_dbg_unregister_args',
  685. 'struct_kfd_ioctl_dbg_wave_control_args',
  686. 'struct_kfd_ioctl_destroy_event_args',
  687. 'struct_kfd_ioctl_destroy_queue_args',
  688. 'struct_kfd_ioctl_free_memory_of_gpu_args',
  689. 'struct_kfd_ioctl_get_clock_counters_args',
  690. 'struct_kfd_ioctl_get_dmabuf_info_args',
  691. 'struct_kfd_ioctl_get_process_apertures_args',
  692. 'struct_kfd_ioctl_get_process_apertures_new_args',
  693. 'struct_kfd_ioctl_get_queue_wave_state_args',
  694. 'struct_kfd_ioctl_get_tile_config_args',
  695. 'struct_kfd_ioctl_get_version_args',
  696. 'struct_kfd_ioctl_import_dmabuf_args',
  697. 'struct_kfd_ioctl_map_memory_to_gpu_args',
  698. 'struct_kfd_ioctl_reset_event_args',
  699. 'struct_kfd_ioctl_set_cu_mask_args',
  700. 'struct_kfd_ioctl_set_event_args',
  701. 'struct_kfd_ioctl_set_memory_policy_args',
  702. 'struct_kfd_ioctl_set_scratch_backing_va_args',
  703. 'struct_kfd_ioctl_set_trap_handler_args',
  704. 'struct_kfd_ioctl_set_xnack_mode_args',
  705. 'struct_kfd_ioctl_smi_events_args', 'struct_kfd_ioctl_svm_args',
  706. 'struct_kfd_ioctl_svm_attribute',
  707. 'struct_kfd_ioctl_unmap_memory_from_gpu_args',
  708. 'struct_kfd_ioctl_update_queue_args',
  709. 'struct_kfd_ioctl_wait_events_args',
  710. 'struct_kfd_memory_exception_failure',
  711. 'struct_kfd_process_device_apertures', 'union_kfd_event_data_0']