ops_nv.py

from __future__ import annotations
import os, ctypes, contextlib, pathlib, re, fcntl, functools, mmap, struct, tempfile, hashlib, subprocess, time, array
from typing import Tuple, List, Any, cast, Union, Dict
from dataclasses import dataclass
from tinygrad.device import HCQCompatCompiled, HCQCompatAllocator, HCQCompatAllocRes, HWCommandQueue, HWComputeQueue, HWCopyQueue, hcq_command, \
                            HCQCompatProgram, hcq_profile, Compiler, CompileError, BufferOptions
from tinygrad.helpers import getenv, mv_address, init_c_struct_t, to_mv, round_up, to_char_p_p, DEBUG, prod, PROFILE
from tinygrad.renderer.cstyle import NVRenderer
from tinygrad.runtime.ops_cuda import check as cuda_check, _get_bytes, CUDACompiler, PTXCompiler, PTX
import tinygrad.runtime.autogen.nv_gpu as nv_gpu
import tinygrad.runtime.autogen.nvrtc as nvrtc
from tinygrad.renderer.assembly import PTXRenderer
import tinygrad.runtime.autogen.libc as libc
from tinygrad.runtime.support.elf import elf_loader
if getenv("IOCTL"): import extra.nv_gpu_driver.nv_ioctl # noqa: F401 # pylint: disable=unused-import
if MOCKGPU:=getenv("MOCKGPU"): import extra.mockgpu.mockgpu # noqa: F401 # pylint: disable=unused-import

def nv_iowr(fd, nr, args):
  ret = fcntl.ioctl(fd, (3 << 30) | (ctypes.sizeof(args) & 0x1FFF) << 16 | (ord('F') & 0xFF) << 8 | (nr & 0xFF), args)
  if ret != 0: raise RuntimeError(f"ioctl returned {ret}")

def rm_alloc(fd, clss, root, parent, params):
  made = nv_gpu.NVOS21_PARAMETERS(hRoot=root, hObjectParent=parent, hClass=clss,
    pAllocParms=ctypes.cast(ctypes.byref(params), ctypes.POINTER(None)) if params is not None else None) # type: ignore
  nv_iowr(fd, nv_gpu.NV_ESC_RM_ALLOC, made)
  if made.status != 0: raise RuntimeError(f"rm_alloc returned {made.status}: {nv_gpu.nv_status_codes.get(made.status, 'Unknown error')}")
  return made

def rm_control(cmd, sttyp, fd, client, obj, **kwargs):
  made = nv_gpu.NVOS54_PARAMETERS(hClient=client, hObject=obj, cmd=cmd, paramsSize=ctypes.sizeof(params:=sttyp(**kwargs)),
    params=ctypes.cast(ctypes.byref(params), ctypes.POINTER(None)) if params is not None else None) # type: ignore
  nv_iowr(fd, nv_gpu.NV_ESC_RM_CONTROL, made)
  if made.status != 0: raise RuntimeError(f"rm_control returned {made.status}: {nv_gpu.nv_status_codes.get(made.status, 'Unknown error')}")
  return params

def make_rmctrl_type():
  return type("NVRMCTRL", (object,), {name[name.find("_CTRL_CMD_")+10:].lower(): functools.partial(rm_control, dt, sttyp)
    for name,dt in nv_gpu.__dict__.items() if name.find("_CTRL_CMD_")>=0 and
    (sttyp:=getattr(nv_gpu, name.replace("_CTRL_CMD_", "_CTRL_")+"_PARAMS", getattr(nv_gpu, name+"_PARAMS", None)))})
rmctrl = make_rmctrl_type()

def uvm_ioctl(cmd, sttyp, fd, **kwargs):
  ret = fcntl.ioctl(fd, cmd, made:=sttyp(**kwargs))
  if ret != 0: raise RuntimeError(f"ioctl(uvm) returned {ret}")
  if made.rmStatus != 0: raise RuntimeError(f"uvm_ioctl returned {made.rmStatus}: {nv_gpu.nv_status_codes.get(made.rmStatus, 'Unknown error')}")
  return made

def make_uvm_type():
  return type("NVUVM", (object,), {name.replace("UVM_", "").lower(): functools.partial(uvm_ioctl, dt, getattr(nv_gpu, name+"_PARAMS"))
    for name,dt in nv_gpu.__dict__.items() if name.startswith("UVM_") and nv_gpu.__dict__.get(name+"_PARAMS")})
uvm = make_uvm_type()
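
# rm_alloc/rm_control wrap the NVIDIA resource-manager ioctls, and make_rmctrl_type/make_uvm_type turn
# every *_CTRL_CMD_* and UVM_* constant in the autogenerated bindings into a method that fills the
# matching *_PARAMS struct and issues the ioctl. A minimal sketch of how a generated wrapper is used,
# mirroring the call in NVDevice.__init__ below (fd_ctl, root and gpu_id stand in for the values set
# up there):
#
#   info = rmctrl.gpu_get_id_info_v2(fd_ctl, root, root, gpuId=gpu_id)
#   print(info.deviceInstance)
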
def make_qmd_struct_type():
  fields = []
  bits = [(name,dt) for name,dt in nv_gpu.__dict__.items() if name.startswith("NVC6C0_QMDV03_00") and isinstance(dt, tuple)]
  bits += [(name+f"_{i}",dt(i)) for name,dt in nv_gpu.__dict__.items() for i in range(8) if name.startswith("NVC6C0_QMDV03_00") and callable(dt)]
  bits = sorted(bits, key=lambda x: x[1][1])
  for i,(name, data) in enumerate(bits):
    if i > 0 and (gap:=(data[1] - bits[i-1][1][0] - 1)) != 0: fields.append((f"_reserved{i}", ctypes.c_uint32, gap))
    fields.append((name.replace("NVC6C0_QMDV03_00_", "").lower(), ctypes.c_uint32, data[0]-data[1]+1))
  return init_c_struct_t(tuple(fields))
qmd_struct_t = make_qmd_struct_type()
assert ctypes.sizeof(qmd_struct_t) == 0x40 * 4

def nvmethod(subc, mthd, size, typ=2): return (typ << 28) | (size << 16) | (subc << 13) | (mthd >> 2)
def nvdata64(data): return (data >> 32, data & 0xFFFFFFFF)
def nvdata64_le(data): return (data & 0xFFFFFFFF, data >> 32)
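
# nvmethod() packs a pushbuffer method header exactly as the expression above reads:
#   bits [31:28] = typ (the default 2 appears to select the incrementing-method form),
#   bits [27:16] = data word count, bits [15:13] = subchannel, bits [12:0] = method offset / 4.
# nvdata64/nvdata64_le split a 64-bit value into (hi, lo) or (lo, hi) 32-bit words for methods that
# take the address/payload in that order, e.g. nvdata64(0x1_0000_0002) == (0x1, 0x2).
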
class NVCompiler(Compiler):
  def __init__(self, arch:str):
    self.arch, self.compile_options = arch, [f'--gpu-architecture={arch}', "-I/usr/local/cuda/include", "-I/usr/include", "-I/opt/cuda/include/"]
    cuda_check(nvrtc.nvrtcVersion((nvrtcMajor := ctypes.c_int()), (nvrtcMinor := ctypes.c_int())))
    if (nvrtcMajor.value, nvrtcMinor.value) >= (12, 4): self.compile_options.append("--minimal")
    super().__init__(f"compile_nv_{self.arch}")
  def compile(self, src:str) -> bytes:
    cuda_check(nvrtc.nvrtcCreateProgram(ctypes.byref(prog := nvrtc.nvrtcProgram()), src.encode(), "<null>".encode(), 0, None, None))
    status = nvrtc.nvrtcCompileProgram(prog, len(self.compile_options), to_char_p_p([o.encode() for o in self.compile_options]))
    if status != 0:
      raise CompileError(f"compile failed: {_get_bytes(prog, nvrtc.nvrtcGetProgramLog, nvrtc.nvrtcGetProgramLogSize, cuda_check).decode()}")
    return _get_bytes(prog, nvrtc.nvrtcGetCUBIN, nvrtc.nvrtcGetCUBINSize, cuda_check)

def jitlink_check(status):
  if status != 0: raise CompileError(f"NvJitLink Error {status}, {nvrtc.nvJitLinkResult__enumvalues.get(status, 'Unknown')}")

class NVPTXCompiler(NVCompiler):
  def compile(self, src:str) -> bytes:
    ptxsrc = src.replace("TARGET", self.arch).replace("VERSION", "7.8" if self.arch >= "sm_89" else "7.5")
    jitlink_check(nvrtc.nvJitLinkCreate(handle := nvrtc.nvJitLinkHandle(), 1, to_char_p_p([f'-arch={self.arch}'.encode()])))
    jitlink_check(nvrtc.nvJitLinkAddData(handle, nvrtc.NVJITLINK_INPUT_PTX, ptxsrc.encode(), len(ptxsrc), "<null>".encode()))
    if nvrtc.nvJitLinkComplete(handle) != 0:
      raise CompileError(f"compile failed: {_get_bytes(handle, nvrtc.nvJitLinkGetErrorLog, nvrtc.nvJitLinkGetErrorLogSize, jitlink_check).decode()}")
    return _get_bytes(handle, nvrtc.nvJitLinkGetLinkedCubin, nvrtc.nvJitLinkGetLinkedCubinSize, jitlink_check)
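
# NVCompiler drives NVRTC straight to a cubin (no CUDA driver involved); NVPTXCompiler links PTX with
# nvJitLink instead. A minimal standalone sketch, assuming an sm_89 part and an NVRTC from CUDA 12 on
# one of the include paths listed above:
#
#   cubin = NVCompiler("sm_89").compile('extern "C" __global__ void nop() {}')
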
class NVCommandQueue(HWCommandQueue): # pylint: disable=abstract-method
  def __del__(self):
    if self.binded_device is not None:
      self.binded_device.synchronize() # Synchronize to ensure the buffer is no longer in use.
      self.binded_device._gpu_free(self.hw_page)

  @hcq_command
  def setup(self, compute_class=None, copy_class=None, local_mem_window=None, shared_mem_window=None, local_mem=None, local_mem_tpc_bytes=None):
    if compute_class: self.q += [nvmethod(1, nv_gpu.NVC6C0_SET_OBJECT, 1), compute_class]
    if copy_class: self.q += [nvmethod(4, nv_gpu.NVC6C0_SET_OBJECT, 1), copy_class]
    if local_mem_window: self.q += [nvmethod(1, nv_gpu.NVC6C0_SET_SHADER_LOCAL_MEMORY_WINDOW_A, 2), *nvdata64(local_mem_window)]
    if shared_mem_window: self.q += [nvmethod(1, nv_gpu.NVC6C0_SET_SHADER_SHARED_MEMORY_WINDOW_A, 2), *nvdata64(shared_mem_window)]
    if local_mem: self.q += [nvmethod(1, nv_gpu.NVC6C0_SET_SHADER_LOCAL_MEMORY_A, 2), *nvdata64(local_mem)]
    if local_mem_tpc_bytes: self.q += [nvmethod(1, nv_gpu.NVC6C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_A, 3), *nvdata64(local_mem_tpc_bytes), 0x40]

  def _wait(self, signal, value=0):
    self.q += [nvmethod(0, nv_gpu.NVC56F_SEM_ADDR_LO, 5), *nvdata64_le(mv_address(signal)), *nvdata64_le(value),
               (3 << 0) | (1 << 24)] # ACQUIRE | PAYLOAD_SIZE_64BIT

  def _signal(self, signal, value=0, timestamp=False):
    self.q += [nvmethod(0, nv_gpu.NVC56F_SEM_ADDR_LO, 5), *nvdata64_le(mv_address(signal)), *nvdata64_le(value),
               (1 << 0) | (1 << 20) | (1 << 24) | ((1 << 25) if timestamp else 0)] # RELEASE | RELEASE_WFI | PAYLOAD_SIZE_64BIT | RELEASE_TIMESTAMP
    self.q += [nvmethod(0, nv_gpu.NVC56F_NON_STALL_INTERRUPT, 1), 0x0]

  def _timestamp(self, signal): return NVCommandQueue._signal(self, signal, timestamp=True)

  def _update_signal(self, cmd_idx, signal=None, value=None): return self._update_wait(cmd_idx, signal, value) # the same offsets and commands
  def _update_wait(self, cmd_idx, signal=None, value=None):
    if signal is not None: self.q[(sigoff:=self.cmds_offset[cmd_idx]+1):sigoff+2] = array.array('I', nvdata64_le(mv_address(signal)))
    if value is not None: self.q[(valoff:=self.cmds_offset[cmd_idx]+3):valoff+2] = array.array('I', nvdata64_le(value))

  def bind(self, device: NVDevice):
    self.binded_device = device
    self.hw_page = device._gpu_alloc(len(self.q) * 4, map_to_cpu=True)
    hw_view = to_mv(self.hw_page.va_addr, self.hw_page.size).cast("I")
    for i, value in enumerate(self.q): hw_view[i] = value

    # From now on, the queue is on the device for faster submission.
    self.q = hw_view # type: ignore

  def _submit_to_gpfifo(self, dev, gpfifo:GPFifo):
    if len(self.q) == 0: return

    if dev == self.binded_device: cmdq_addr = self.hw_page.va_addr
    else:
      if dev.cmdq_wptr + len(self.q) * 4 > dev.cmdq_page.size:
        assert (gpfifo.ring[gpfifo.controls.GPGet] & 0xFFFFFFFFFC) >= dev.cmdq_page.va_addr + len(self.q) * 4 or \
               gpfifo.controls.GPGet == gpfifo.controls.GPPut, "cmdq overrun"
        dev.cmdq_wptr = 0

      dev.cmdq[dev.cmdq_wptr//4:dev.cmdq_wptr//4+len(self.q)] = array.array('I', self.q)
      cmdq_addr = dev.cmdq_page.va_addr+dev.cmdq_wptr
      dev.cmdq_wptr += len(self.q) * 4

    gpfifo.ring[gpfifo.put_value % gpfifo.entries_count] = (cmdq_addr//4 << 2) | (len(self.q) << 42) | (1 << 41)
    gpfifo.controls.GPPut = (gpfifo.put_value + 1) % gpfifo.entries_count
    dev.gpu_mmio[0x90 // 4] = gpfifo.token
    gpfifo.put_value += 1
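
# _submit_to_gpfifo writes one 64-bit GPFIFO entry per submit: reading the expression above, it packs
# the pushbuffer GPU address (cmdq_addr//4 << 2), the pushbuffer length in 32-bit words (<< 42), and a
# flag bit (1 << 41), then advances GPPut and rings the doorbell by writing the channel's work-submit
# token into the usermode mmio region at offset 0x90.
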
class NVComputeQueue(NVCommandQueue, HWComputeQueue):
  def __init__(self):
    self.cmd_idx_to_qmd, self.cmd_idx_to_global_dims, self.cmd_idx_to_local_dims = {}, {}, {}
    super().__init__()

  def _exec(self, prg, kernargs, global_size, local_size):
    cmd_idx = len(self) - 1

    ctypes.memmove(qmd_addr:=(kernargs + round_up(prg.constbufs[0][1], 1 << 8)), ctypes.addressof(prg.qmd), 0x40 * 4)
    self.cmd_idx_to_qmd[cmd_idx] = qmd = qmd_struct_t.from_address(qmd_addr) # Save qmd for later update
    self.cmd_idx_to_global_dims[cmd_idx] = to_mv(qmd_addr + nv_gpu.NVC6C0_QMDV03_00_CTA_RASTER_WIDTH[1] // 8, 12).cast('I')
    self.cmd_idx_to_local_dims[cmd_idx] = to_mv(qmd_addr + nv_gpu.NVC6C0_QMDV03_00_CTA_THREAD_DIMENSION0[1] // 8, 6).cast('H')

    qmd.cta_raster_width, qmd.cta_raster_height, qmd.cta_raster_depth = global_size
    qmd.cta_thread_dimension0, qmd.cta_thread_dimension1, qmd.cta_thread_dimension2 = local_size
    qmd.constant_buffer_addr_upper_0, qmd.constant_buffer_addr_lower_0 = nvdata64(kernargs)

    if (prev_qmd:=self.cmd_idx_to_qmd.get(cmd_idx - 1)) is None:
      self.q += [nvmethod(1, nv_gpu.NVC6C0_INVALIDATE_SHADER_CACHES_NO_WFI, 1), (1 << 12) | (1 << 4) | (1 << 0)]
      self.q += [nvmethod(1, nv_gpu.NVC6C0_SEND_PCAS_A, 0x1), qmd_addr >> 8]
      self.q += [nvmethod(1, nv_gpu.NVC6C0_SEND_SIGNALING_PCAS2_B, 0x1), 9]
    else:
      prev_qmd.dependent_qmd0_pointer = qmd_addr >> 8
      prev_qmd.dependent_qmd0_action = 1
      prev_qmd.dependent_qmd0_prefetch = 1
      prev_qmd.dependent_qmd0_enable = 1

  def _update_exec(self, cmd_idx, global_size, local_size):
    # Patch the exec cmd with new launch dims
    self.cmd_idx_to_global_dims[cmd_idx][:] = array.array('I', global_size)
    self.cmd_idx_to_local_dims[cmd_idx][:] = array.array('H', local_size)

  def _signal(self, signal, value=0):
    if (prev_qmd:=self.cmd_idx_to_qmd.get(len(self) - 2)) is None or prev_qmd.release0_enable == 1: return super()._signal(signal, value)
    prev_qmd.release0_address_upper, prev_qmd.release0_address_lower = nvdata64(mv_address(signal))
    prev_qmd.release0_payload_upper, prev_qmd.release0_payload_lower = nvdata64(value)
    prev_qmd.release0_enable = 1
    self.cmd_idx_to_qmd[len(self) - 1] = prev_qmd # this command is embedded into qmd.

  def _update_signal(self, cmd_idx, signal=None, value=None):
    if (qmd:=self.cmd_idx_to_qmd.get(cmd_idx)) is None: return super()._update_signal(cmd_idx, signal, value)
    if signal is not None: qmd.release0_address_upper, qmd.release0_address_lower = nvdata64(mv_address(signal))
    if value is not None: qmd.release0_payload_upper, qmd.release0_payload_lower = nvdata64(value)

  def _submit(self, device): self._submit_to_gpfifo(device, cast(NVDevice, device).compute_gpfifo)

class NVCopyQueue(NVCommandQueue, HWCopyQueue):
  def _copy(self, dest, src, copy_size):
    self.q += [nvmethod(4, nv_gpu.NVC6B5_OFFSET_IN_UPPER, 4), *nvdata64(src), *nvdata64(dest)]
    self.q += [nvmethod(4, nv_gpu.NVC6B5_LINE_LENGTH_IN, 1), copy_size]
    self.q += [nvmethod(4, nv_gpu.NVC6B5_LAUNCH_DMA, 1), 0x182] # TRANSFER_TYPE_NON_PIPELINED | DST_MEMORY_LAYOUT_PITCH | SRC_MEMORY_LAYOUT_PITCH

  def _update_copy(self, cmd_idx, dest=None, src=None):
    if dest is not None: self._patch(cmd_idx, offset=3, data=nvdata64(dest))
    if src is not None: self._patch(cmd_idx, offset=1, data=nvdata64(src))

  def _signal(self, signal, value=0):
    self.q += [nvmethod(4, nv_gpu.NVC6B5_SET_SEMAPHORE_A, 4), *nvdata64(mv_address(signal)), value, 4]
    self.q += [nvmethod(4, nv_gpu.NVC6B5_LAUNCH_DMA, 1), 0x14]

  def _update_signal(self, cmd_idx, signal=None, value=None):
    if signal is not None: self._patch(cmd_idx, offset=1, data=nvdata64(mv_address(signal)))
    if value is not None: self._patch(cmd_idx, offset=3, data=[value])

  def _submit(self, device): self._submit_to_gpfifo(device, cast(NVDevice, device).dma_gpfifo)
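
# Queues are assembled by chaining the HWCommandQueue helpers and handed to a device in one go. A
# sketch mirroring how NVProgram.__call__ below drives the compute path (dev and prg stand in for an
# NVDevice and an NVProgram):
#
#   NVComputeQueue().wait(dev.timeline_signal, dev.timeline_value - 1) \
#                   .exec(prg, dev.kernargs_ptr, (1,1,1), (1,1,1)) \
#                   .signal(dev.timeline_signal, dev.timeline_value).submit(dev)
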
class NVProgram(HCQCompatProgram):
  def __init__(self, device:NVDevice, name:str, lib:bytes):
    self.device, self.name, self.lib = device, name, lib
    if DEBUG >= 6:
      try:
        fn = (pathlib.Path(tempfile.gettempdir()) / f"tinycuda_{hashlib.md5(lib).hexdigest()}").as_posix()
        with open(fn + ".cubin", "wb") as f: f.write(lib)
        print(subprocess.check_output(["nvdisasm", fn+".cubin"]).decode('utf-8'))
      except Exception as e: print("failed to disasm cubin", str(e))

    if MOCKGPU: image, sections, relocs = memoryview(bytearray(lib) + b'\x00' * (4 - len(lib)%4)).cast("I"), [], [] # type: ignore
    else: image, sections, relocs = elf_loader(self.lib, force_section_align=128)

    # NOTE: Ensure at least 4KB of space after the program to mitigate prefetch memory faults.
    self.lib_gpu = self.device.allocator.alloc(round_up(image.nbytes, 0x1000) + 0x1000, BufferOptions(cpu_access=True))

    self.program_addr, self.program_sz, self.registers_usage, self.shmem_usage = self.lib_gpu.va_addr, image.nbytes, 0, 0
    self.constbufs: Dict[int, Tuple[int, int]] = {0: (0, 0x160)} # Dict[constbuf index, Tuple[va_addr, size]]
    for sh in sections:
      if sh.name == f".nv.shared.{self.name}": self.shmem_usage = sh.header.sh_size
      if sh.name == f".text.{self.name}":
        self.program_addr, self.program_sz, self.registers_usage = self.lib_gpu.va_addr+sh.header.sh_addr, sh.header.sh_size, sh.header.sh_info>>24
      elif m:=re.match(r'\.nv\.constant(\d+)', sh.name): self.constbufs[int(m.group(1))] = (self.lib_gpu.va_addr+sh.header.sh_addr, sh.header.sh_size)
      elif sh.name == ".nv.info":
        for off in range(0, sh.header.sh_size, 12):
          typ, _, val = struct.unpack_from("III", sh.content, off)
          if typ & 0xffff == 0x1204: self.device._ensure_has_local_memory(val + 0x240)

    # Apply relocs
    for apply_image_offset, rel_sym_offset, typ, _ in relocs:
      # These types are CUDA-specific, applying them here
      if typ == 2: image[apply_image_offset:apply_image_offset+8] = struct.pack('<Q', self.lib_gpu.va_addr + rel_sym_offset) # R_CUDA_64
      elif typ == 0x38: image[apply_image_offset+4:apply_image_offset+8] = struct.pack('<I', (self.lib_gpu.va_addr + rel_sym_offset) & 0xffffffff)
      elif typ == 0x39: image[apply_image_offset+4:apply_image_offset+8] = struct.pack('<I', (self.lib_gpu.va_addr + rel_sym_offset) >> 32)
      else: raise RuntimeError(f"unknown NV reloc {typ}")

    ctypes.memmove(self.lib_gpu.va_addr, mv_address(image), image.nbytes)

    self.constbuffer_0 = [0] * 88
    self.constbuffer_0[6:12] = [*nvdata64_le(self.device.shared_mem_window), *nvdata64_le(self.device.local_mem_window), *nvdata64_le(0xfffdc0)]

    smem_config = min(shmem_conf * 1024 for shmem_conf in [32, 64, 100] if shmem_conf * 1024 >= self.shmem_usage) // 4096 + 1
    self.qmd = qmd_struct_t(qmd_group_id=0x3f, sm_global_caching_enable=1, invalidate_texture_header_cache=1, invalidate_texture_sampler_cache=1,
      invalidate_texture_data_cache=1, invalidate_shader_data_cache=1, api_visible_call_limit=1, sampler_index=1,
      cwd_membar_type=nv_gpu.NVC6C0_QMDV03_00_CWD_MEMBAR_TYPE_L1_SYSMEMBAR, qmd_major_version=3, constant_buffer_invalidate_0=1,
      shared_memory_size=max(0x400, round_up(self.shmem_usage, 0x100)), min_sm_config_shared_mem_size=smem_config,
      max_sm_config_shared_mem_size=0x1a, register_count_v=self.registers_usage, target_sm_config_shared_mem_size=smem_config,
      barrier_count=1, shader_local_memory_high_size=self.device.slm_per_thread, program_prefetch_size=self.program_sz>>8,
      program_address_lower=self.program_addr&0xffffffff, program_address_upper=self.program_addr>>32, sass_version=0x89,
      program_prefetch_addr_lower_shifted=self.program_addr>>8, program_prefetch_addr_upper_shifted=self.program_addr>>40)

    for i,(addr,sz) in self.constbufs.items():
      self.qmd.__setattr__(f'constant_buffer_addr_upper_{i}', (addr) >> 32)
      self.qmd.__setattr__(f'constant_buffer_addr_lower_{i}', (addr) & 0xffffffff)
      self.qmd.__setattr__(f'constant_buffer_size_shifted4_{i}', sz)
      self.qmd.__setattr__(f'constant_buffer_valid_{i}', 1)

    # Register allocation granularity per warp is 256; warp allocation granularity is 4. Register file size is 65536.
    self.max_threads = ((65536 // round_up(max(1, self.registers_usage) * 32, 256)) // 4) * 4 * 32

    # NV's kernargs is a constbuffer (size 0x160), followed by the arguments to the kernel. Kernargs also appends the QMD at the end.
    super().__init__(kernargs_alloc_size=round_up(self.constbufs[0][1], 1 << 8) + (8 << 8), kernargs_args_offset=0x160)

  def __del__(self):
    if hasattr(self, 'lib_gpu'): self.device.allocator.free(self.lib_gpu, self.lib_gpu.size, BufferOptions(cpu_access=True))

  def fill_kernargs(self, kernargs_ptr:int, bufs:Tuple[Any, ...], vals:Tuple[int, ...]=()):
    # HACK: Save counts of args and vars to the "unused" constbuffer for later extraction in mockgpu to pass into gpuocelot.
    if MOCKGPU: self.constbuffer_0[0:2] = [len(bufs), len(vals)]
    kernargs = [arg_half for arg in bufs for arg_half in nvdata64_le(arg.va_addr)] + list(vals)
    to_mv(kernargs_ptr, (len(self.constbuffer_0) + len(kernargs)) * 4).cast('I')[:] = array.array('I', self.constbuffer_0 + kernargs)

  def __call__(self, *args, global_size:Tuple[int,int,int]=(1,1,1), local_size:Tuple[int,int,int]=(1,1,1), vals:Tuple[int, ...]=(), wait=False):
    if prod(local_size) > 1024 or self.max_threads < prod(local_size): raise RuntimeError("Too many resources requested for launch")
    if any(cur > mx for cur,mx in zip(global_size, [2147483647, 65535, 65535])) or any(cur > mx for cur,mx in zip(local_size, [1024, 1024, 64])):
      raise RuntimeError(f"Invalid global/local dims {global_size=}, {local_size=}")

    if self.device.kernargs_ptr >= (self.device.kernargs_page.va_addr + self.device.kernargs_page.size - self.kernargs_alloc_size):
      self.device.kernargs_ptr = self.device.kernargs_page.va_addr

    self.fill_kernargs(self.device.kernargs_ptr, args, vals)

    q = NVComputeQueue().wait(self.device.timeline_signal, self.device.timeline_value - 1)

    with hcq_profile(self.device, queue=q, desc=self.name, enabled=wait or PROFILE) as (sig_st, sig_en):
      q.exec(self, self.device.kernargs_ptr, global_size, local_size)

    q.signal(self.device.timeline_signal, self.device.timeline_value).submit(self.device)
    self.device.timeline_value += 1
    self.device.kernargs_ptr += self.kernargs_alloc_size

    if wait:
      self.device._wait_signal(self.device.timeline_signal, self.device.timeline_value - 1)
      if not PROFILE: self.device.signals_pool += [sig_st, sig_en]
      return (sig_en[1] - sig_st[1]) / 1e9
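
# Per-launch kernarg layout, as implied by the constants in this class (offsets relative to the
# kernargs_ptr handed to fill_kernargs/_exec):
#   [0x000, 0x160)  copy of constbuffer_0 (slots 6..11 hold the shared/local memory window addresses)
#   [0x160, ...  )  kernel arguments: one little-endian 64-bit GPU address per buffer, then the ints in vals
#   [0x200, 0x300)  per-launch QMD copy written by NVComputeQueue._exec (round_up(0x160, 0x100) = 0x200)
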
class NVAllocator(HCQCompatAllocator):
  def __init__(self, device:NVDevice): super().__init__(device)

  def _alloc(self, size:int, options:BufferOptions) -> HCQCompatAllocRes:
    if options.host: return self.device._gpu_host_alloc(size)
    return self.device._gpu_alloc(size, map_to_cpu=options.cpu_access, huge_page=(size > (16 << 20)))

  def _free(self, opaque, options:BufferOptions):
    self.device.synchronize()
    if options.host: self.device._gpu_host_free(opaque)
    else: self.device._gpu_free(opaque)

@dataclass
class GPFifo:
  ring: memoryview
  controls: nv_gpu.AmpereAControlGPFifo
  entries_count: int
  token: int
  put_value: int = 0

MAP_FIXED, MAP_NORESERVE = 0x10, 0x400
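
# GPFifo bundles the per-channel state consumed by NVCommandQueue._submit_to_gpfifo: `ring` is the
# cpu-mapped GPFIFO entry array, `controls` the AmpereAControlGPFifo struct holding GPGet/GPPut,
# `token` the work-submit value written to the doorbell, and `put_value` a monotonically increasing
# software copy of the put pointer.
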
class NVDevice(HCQCompatCompiled):
  root = None
  fd_ctl: int = -1
  fd_uvm: int = -1
  gpus_info:Union[List, ctypes.Array] = []
  signals_page:Any = None
  signals_pool: List[Any] = []
  uvm_vaddr: int = 0x1000000000
  host_object_enumerator: int = 0x1000
  devices: List[NVDevice] = []

  def _new_gpu_fd(self):
    fd_dev = os.open(f"/dev/nvidia{self.gpu_info.deviceInstance}", os.O_RDWR | os.O_CLOEXEC)
    nv_iowr(fd_dev, nv_gpu.NV_ESC_REGISTER_FD, nv_gpu.nv_ioctl_register_fd_t(ctl_fd=self.fd_ctl))
    return fd_dev

  def _gpu_map_to_cpu(self, memory_handle, size, target=None, flags=0, system=False):
    fd_dev = self._new_gpu_fd() if not system else os.open("/dev/nvidiactl", os.O_RDWR | os.O_CLOEXEC)
    made = nv_gpu.nv_ioctl_nvos33_parameters_with_fd(fd=fd_dev,
      params=nv_gpu.NVOS33_PARAMETERS(hClient=self.root, hDevice=self.device, hMemory=memory_handle, length=size, flags=flags))
    nv_iowr(self.fd_ctl, nv_gpu.NV_ESC_RM_MAP_MEMORY, made)
    if made.params.status != 0: raise RuntimeError(f"_gpu_map_to_cpu returned {made.params.status}")
    res = libc.mmap(target, size, mmap.PROT_READ|mmap.PROT_WRITE, mmap.MAP_SHARED | (MAP_FIXED if target is not None else 0), fd_dev, 0)
    os.close(fd_dev)
    return res

  def _gpu_alloc(self, size:int, contig=False, huge_page=False, va_addr=None, map_to_cpu=False, map_flags=0):
    size = round_up(size, align:=((2 << 20) if huge_page else (4 << 10)))
    alloc_params = nv_gpu.NV_MEMORY_ALLOCATION_PARAMS(owner=self.root, alignment=align, offset=0, limit=size-1, format=6, size=size,
      attr=(((nv_gpu.NVOS32_ATTR_PAGE_SIZE_HUGE << 23) if huge_page else 0) |
            ((nv_gpu.NVOS32_ATTR_PHYSICALITY_CONTIGUOUS if contig else nv_gpu.NVOS32_ATTR_PHYSICALITY_ALLOW_NONCONTIGUOUS) << 27)),
      attr2=((nv_gpu.NVOS32_ATTR2_ZBC_PREFER_NO_ZBC << 0) | (nv_gpu.NVOS32_ATTR2_GPU_CACHEABLE_YES << 2) |
             ((nv_gpu.NVOS32_ATTR2_PAGE_SIZE_HUGE_2MB << 20) if huge_page else 0)),
      flags=(nv_gpu.NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE | nv_gpu.NVOS32_ALLOC_FLAGS_PERSISTENT_VIDMEM | nv_gpu.NVOS32_ALLOC_FLAGS_MAP_NOT_REQUIRED |
             nv_gpu.NVOS32_ALLOC_FLAGS_IGNORE_BANK_PLACEMENT | nv_gpu.NVOS32_ALLOC_FLAGS_MEMORY_HANDLE_PROVIDED))
    mem_handle = rm_alloc(self.fd_ctl, nv_gpu.NV1_MEMORY_USER, self.root, self.device, alloc_params).hObjectNew

    if va_addr is None: va_addr = self._alloc_gpu_vaddr(size, alignment=align)
    if map_to_cpu: va_addr = self._gpu_map_to_cpu(mem_handle, size, target=va_addr, flags=map_flags)
    return self._gpu_uvm_map(va_addr, size, mem_handle)

  def _gpu_system_alloc(self, size:int, va_addr=None, map_to_cpu=False, map_flags=0):
    alloc_params = nv_gpu.NV_MEMORY_ALLOCATION_PARAMS(owner=self.root, type=13,
      attr=(nv_gpu.NVOS32_ATTR_PHYSICALITY_ALLOW_NONCONTIGUOUS << 27) | (nv_gpu.NVOS32_ATTR_LOCATION_PCI << 25),
      attr2=(nv_gpu.NVOS32_ATTR2_ZBC_PREFER_NO_ZBC << 0) | (nv_gpu.NVOS32_ATTR2_GPU_CACHEABLE_NO << 2),
      flags=(nv_gpu.NVOS32_ALLOC_FLAGS_IGNORE_BANK_PLACEMENT | nv_gpu.NVOS32_ALLOC_FLAGS_MEMORY_HANDLE_PROVIDED |
             nv_gpu.NVOS32_ALLOC_FLAGS_MAP_NOT_REQUIRED), format=6, size=size, alignment=(4<<10), offset=0, limit=size-1)
    mem_handle = rm_alloc(self.fd_ctl, nv_gpu.NV1_MEMORY_SYSTEM, self.root, self.device, alloc_params).hObjectNew

    if va_addr is None: va_addr = self._alloc_gpu_vaddr(size)
    if map_to_cpu: va_addr = self._gpu_map_to_cpu(mem_handle, size, target=va_addr, flags=map_flags, system=True)
    return self._gpu_uvm_map(va_addr, size, mem_handle)

  def _gpu_host_alloc(self, size):
    va_base = self._alloc_gpu_vaddr(sz:=round_up(size, 4 << 10))
    libc.mmap(va_base, sz, mmap.PROT_READ|mmap.PROT_WRITE, MAP_FIXED|mmap.MAP_SHARED|mmap.MAP_ANONYMOUS, -1, 0)
    return self._map_to_gpu(va_base, sz)

  def _gpu_free(self, mem):
    made = nv_gpu.NVOS00_PARAMETERS(hRoot=self.root, hObjectParent=self.device, hObjectOld=mem.hMemory)
    nv_iowr(self.fd_ctl, nv_gpu.NV_ESC_RM_FREE, made)
    if made.status != 0: raise RuntimeError(f"_gpu_free returned {made.status}")
    uvm.free(self.fd_uvm, base=mem.va_addr, length=mem.size)

  def _gpu_host_free(self, mem):
    uvm.free(self.fd_uvm, base=mem.va_addr, length=mem.size)
    libc.munmap(mem.va_addr, mem.size)

  def _map_to_gpu(self, va_base, size):
    NVDevice.host_object_enumerator += 1
    flags = ((nv_gpu.NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS << 4) | (nv_gpu.NVOS02_FLAGS_COHERENCY_CACHED << 12) |
             (nv_gpu.NVOS02_FLAGS_MAPPING_NO_MAP << 30))
    made = nv_gpu.nv_ioctl_nvos02_parameters_with_fd(params=nv_gpu.NVOS02_PARAMETERS(hRoot=self.root, hObjectParent=self.device, flags=flags,
      hObjectNew=NVDevice.host_object_enumerator, hClass=nv_gpu.NV01_MEMORY_SYSTEM_OS_DESCRIPTOR, pMemory=va_base, limit=size-1), fd=-1)
    nv_iowr(self.fd_dev, nv_gpu.NV_ESC_RM_ALLOC_MEMORY, made)
    if made.params.status != 0: raise RuntimeError(f"_map_to_gpu returned {made.params.status}")
    return self._gpu_uvm_map(va_base, size, made.params.hObjectNew)

  def _gpu_uvm_map(self, va_base, size, mem_handle, create_range=True) -> nv_gpu.UVM_MAP_EXTERNAL_ALLOCATION_PARAMS:
    if create_range: uvm.create_external_range(self.fd_uvm, base=va_base, length=size)
    gpu_attrs = (nv_gpu.struct_c__SA_UvmGpuMappingAttributes*256)(
      nv_gpu.struct_c__SA_UvmGpuMappingAttributes(gpuUuid=nv_gpu.struct_nv_uuid(uuid=self.gpu_uuid), gpuMappingType = 1))

    # NOTE: va_addr is set to make rawbufs compatible with AMD.
    return uvm.map_external_allocation(self.fd_uvm, base=va_base, length=size, rmCtrlFd=self.fd_ctl, hClient=self.root, hMemory=mem_handle,
                                       gpuAttributesCount=1, perGpuAttributes=gpu_attrs, va_addr=va_base, size=size, mapped_gpu_ids=[self.gpu_uuid])

  def _gpu_map(self, mem):
    mem = mem._base if hasattr(mem, '_base') else mem
    if self.gpu_uuid in mem.mapped_gpu_ids: return
    mem.mapped_gpu_ids.append(self.gpu_uuid)
    self._gpu_uvm_map(mem.va_addr, mem.size, mem.hMemory, create_range=False)

  def _alloc_gpu_vaddr(self, size, alignment=(4 << 10)):
    NVDevice.uvm_vaddr = (res_va:=round_up(NVDevice.uvm_vaddr, alignment)) + size
    return res_va
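
  # _alloc_gpu_vaddr is a simple bump allocator over the class-wide UVM virtual address range starting
  # at NVDevice.uvm_vaddr (0x1000000000): round the cursor up to `alignment`, hand that address out,
  # and advance the cursor by `size`. Virtual addresses are never recycled here; _gpu_free/_gpu_host_free
  # only unmap them.
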
  def _setup_nvclasses(self):
    clsinfo = rmctrl.gpu_get_classlist_v2(self.fd_ctl, self.root, self.device)
    self.nvclasses = {clsinfo.classList[i] for i in range(clsinfo.numClasses)}
    self.compute_class = next(clss for clss in [nv_gpu.ADA_COMPUTE_A, nv_gpu.AMPERE_COMPUTE_B] if clss in self.nvclasses)

  def __init__(self, device:str=""):
    if NVDevice.root is None:
      NVDevice.fd_ctl = os.open("/dev/nvidiactl", os.O_RDWR | os.O_CLOEXEC)
      NVDevice.fd_uvm = os.open("/dev/nvidia-uvm", os.O_RDWR | os.O_CLOEXEC)
      fd_uvm_2 = os.open("/dev/nvidia-uvm", os.O_RDWR | os.O_CLOEXEC)
      NVDevice.root = rm_alloc(self.fd_ctl, nv_gpu.NV01_ROOT_CLIENT, 0, 0, None).hObjectNew
      uvm.initialize(self.fd_uvm)
      with contextlib.suppress(RuntimeError): uvm.mm_initialize(fd_uvm_2, uvmFd=self.fd_uvm) # this error is okay, CUDA hits it too

      nv_iowr(NVDevice.fd_ctl, nv_gpu.NV_ESC_CARD_INFO, gpus_info:=(nv_gpu.nv_ioctl_card_info_t*64)())
      visible_devices = [int(x) for x in (getenv('VISIBLE_DEVICES', getenv('CUDA_VISIBLE_DEVICES', ''))).split(',') if x.strip()]
      NVDevice.gpus_info = [gpus_info[x] for x in visible_devices] if visible_devices else gpus_info

    self.device_id = int(device.split(":")[1]) if ":" in device else 0

    if self.device_id >= len(NVDevice.gpus_info) or not NVDevice.gpus_info[self.device_id].valid:
      raise RuntimeError(f"No device found for {device}. Requesting more devices than the system has?")

    self.gpu_info = rmctrl.gpu_get_id_info_v2(self.fd_ctl, self.root, self.root, gpuId=NVDevice.gpus_info[self.device_id].gpu_id)
    self.fd_dev = self._new_gpu_fd()

    device_params = nv_gpu.NV0080_ALLOC_PARAMETERS(deviceId=self.gpu_info.deviceInstance, hClientShare=self.root,
                                                   vaMode=nv_gpu.NV_DEVICE_ALLOCATION_VAMODE_MULTIPLE_VASPACES)
    self.device = rm_alloc(self.fd_ctl, nv_gpu.NV01_DEVICE_0, self.root, self.root, device_params).hObjectNew
    self.subdevice = rm_alloc(self.fd_ctl, nv_gpu.NV20_SUBDEVICE_0, self.root, self.device, None).hObjectNew
    self.usermode = rm_alloc(self.fd_ctl, nv_gpu.TURING_USERMODE_A, self.root, self.subdevice, None).hObjectNew
    self.gpu_mmio = to_mv(self._gpu_map_to_cpu(self.usermode, mmio_sz:=0x10000, flags=2), mmio_sz).cast("I")

    self._setup_nvclasses()

    rmctrl.perf_boost(self.fd_ctl, self.root, self.subdevice, duration=0xffffffff, flags=((nv_gpu.NV2080_CTRL_PERF_BOOST_FLAGS_CUDA_YES << 4) |
      (nv_gpu.NV2080_CTRL_PERF_BOOST_FLAGS_CUDA_PRIORITY_HIGH << 6) | (nv_gpu.NV2080_CTRL_PERF_BOOST_FLAGS_CMD_BOOST_TO_MAX << 0)))

    vaspace_params = nv_gpu.NV_VASPACE_ALLOCATION_PARAMETERS(vaBase=0x1000, vaSize=0x1fffffb000000,
      flags=nv_gpu.NV_VASPACE_ALLOCATION_FLAGS_ENABLE_PAGE_FAULTING | nv_gpu.NV_VASPACE_ALLOCATION_FLAGS_IS_EXTERNALLY_OWNED)
    vaspace = rm_alloc(self.fd_ctl, nv_gpu.FERMI_VASPACE_A, self.root, self.device, vaspace_params).hObjectNew

    raw_uuid = rmctrl.gpu_get_gid_info(self.fd_ctl, self.root, self.subdevice, flags=nv_gpu.NV2080_GPU_CMD_GPU_GET_GID_FLAGS_FORMAT_BINARY, length=16)
    self.gpu_uuid = (ctypes.c_ubyte*16)(*[raw_uuid.data[i] for i in range(16)])

    uvm.register_gpu(self.fd_uvm, rmCtrlFd=-1, gpu_uuid=nv_gpu.struct_nv_uuid(uuid=self.gpu_uuid))
    uvm.register_gpu_vaspace(self.fd_uvm, gpuUuid=nv_gpu.struct_nv_uuid(uuid=self.gpu_uuid), rmCtrlFd=self.fd_ctl,
                             hClient=self.root, hVaSpace=vaspace)

    for dev in self.devices:
      uvm.enable_peer_access(self.fd_uvm, gpuUuidA=nv_gpu.struct_nv_uuid(uuid=self.gpu_uuid), gpuUuidB=nv_gpu.struct_nv_uuid(uuid=dev.gpu_uuid))

    if NVDevice.signals_page is None:
      NVDevice.signals_page = self._gpu_system_alloc(16 * 65536, map_to_cpu=True)
      NVDevice.signals_pool = [to_mv(self.signals_page.va_addr + off, 16).cast("Q") for off in range(0, NVDevice.signals_page.size, 16)]
    else: self._gpu_map(NVDevice.signals_page)

    channel_params = nv_gpu.NV_CHANNEL_GROUP_ALLOCATION_PARAMETERS(engineType=nv_gpu.NV2080_ENGINE_TYPE_GRAPHICS)
    channel_group = rm_alloc(self.fd_ctl, nv_gpu.KEPLER_CHANNEL_GROUP_A, self.root, self.device, channel_params).hObjectNew

    gpfifo_area = self._gpu_alloc(0x200000, contig=True, huge_page=True, map_to_cpu=True, map_flags=0x10d0000)

    ctxshare_params = nv_gpu.NV_CTXSHARE_ALLOCATION_PARAMETERS(hVASpace=vaspace, flags=nv_gpu.NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_ASYNC)
    ctxshare = rm_alloc(self.fd_ctl, nv_gpu.FERMI_CONTEXT_SHARE_A, self.root, channel_group, ctxshare_params).hObjectNew

    self.compute_gpfifo = self._new_gpu_fifo(gpfifo_area, ctxshare, channel_group, offset=0, entries=0x10000)
    self.dma_gpfifo = self._new_gpu_fifo(gpfifo_area, ctxshare, channel_group, offset=0x100000, entries=0x10000)

    rmctrl.gpfifo_schedule(self.fd_ctl, self.root, channel_group, bEnable=1)

    self.cmdq_page: nv_gpu.UVM_MAP_EXTERNAL_ALLOCATION_PARAMS = self._gpu_alloc(0x200000, map_to_cpu=True, huge_page=True)
    self.cmdq: memoryview = to_mv(self.cmdq_page.va_addr, 0x200000).cast("I")
    self.cmdq_wptr: int = 0 # in bytes

    self.kernargs_page: nv_gpu.UVM_MAP_EXTERNAL_ALLOCATION_PARAMS = self._gpu_alloc(0x4000000, map_to_cpu=True)
    self.kernargs_ptr: int = self.kernargs_page.va_addr

    sm_info = nv_gpu.NV2080_CTRL_GR_INFO(index=nv_gpu.NV2080_CTRL_GR_INFO_INDEX_SM_VERSION)
    rmctrl.gr_get_info(self.fd_ctl, self.root, self.subdevice, grInfoListSize=1, grInfoList=ctypes.addressof(sm_info))
    self.arch: str = f"sm_{(sm_info.data>>8)&0xff}{(val>>4) if (val:=sm_info.data&0xff) > 0xf else val}"

    compiler_t = (PTXCompiler if PTX else CUDACompiler) if MOCKGPU else (NVPTXCompiler if PTX else NVCompiler)
    super().__init__(device, NVAllocator(self), PTXRenderer(self.arch, device="NV") if PTX else NVRenderer(self.arch), compiler_t(self.arch),
                     functools.partial(NVProgram, self), NVComputeQueue, NVCopyQueue, timeline_signals=(self._alloc_signal(), self._alloc_signal()))

    self._setup_gpfifos()

    NVDevice.devices.append(self)

  @classmethod
  def _read_signal(self, signal): return signal[0]

  @classmethod
  def _read_timestamp(self, signal): return signal[1]

  @classmethod
  def _set_signal(self, signal, value): signal[0] = value

  @classmethod
  def _alloc_signal(self, value=0, **kwargs) -> memoryview:
    self._set_signal(sig := self.signals_pool.pop(), value)
    return sig

  @classmethod
  def _free_signal(self, signal): self.signals_pool.append(signal)

  @classmethod
  def _wait_signal(self, signal, value=0, timeout=10000):
    start_time = time.time() * 1000
    while time.time() * 1000 - start_time < timeout:
      if signal[0] >= value: return
    raise RuntimeError(f"wait_result: {timeout} ms TIMEOUT!")

  def _gpu2cpu_time(self, gpu_time, is_copy): return self.cpu_start_time + (gpu_time - self.gpu_start_time) / 1e3

  def synchronize(self):
    NVDevice._wait_signal(self.timeline_signal, self.timeline_value - 1)
    self.cmdq_wptr = 0

    if self.timeline_value > (1 << 31): self._wrap_timeline_signal()
    if PROFILE: self._prof_process_events()

  def _new_gpu_fifo(self, gpfifo_area, ctxshare, channel_group, offset=0, entries=0x400) -> GPFifo:
    notifier = self._gpu_system_alloc(48 << 20)
    params = nv_gpu.NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS(hObjectError=notifier.hMemory, hObjectBuffer=gpfifo_area.hMemory,
      gpFifoOffset=gpfifo_area.va_addr+offset, gpFifoEntries=entries, hContextShare=ctxshare,
      hUserdMemory=(ctypes.c_uint32*8)(gpfifo_area.hMemory), userdOffset=(ctypes.c_uint64*8)(entries*8+offset))
    gpfifo = rm_alloc(self.fd_ctl, nv_gpu.AMPERE_CHANNEL_GPFIFO_A, self.root, channel_group, params).hObjectNew
    rm_alloc(self.fd_ctl, self.compute_class, self.root, gpfifo, None)
    rm_alloc(self.fd_ctl, nv_gpu.AMPERE_DMA_COPY_B, self.root, gpfifo, None)

    ws_token_params = rmctrl.gpfifo_get_work_submit_token(self.fd_ctl, self.root, gpfifo, workSubmitToken=-1)
    assert ws_token_params.workSubmitToken != -1

    channel_base = self._alloc_gpu_vaddr(0x4000000)
    uvm.register_channel(self.fd_uvm, gpuUuid=nv_gpu.struct_nv_uuid(uuid=self.gpu_uuid), rmCtrlFd=self.fd_ctl, hClient=self.root,
                         hChannel=gpfifo, base=channel_base, length=0x4000000)

    return GPFifo(ring=to_mv(gpfifo_area.va_addr + offset, entries * 8).cast("Q"), entries_count=entries, token=ws_token_params.workSubmitToken,
                  controls=nv_gpu.AmpereAControlGPFifo.from_address(gpfifo_area.va_addr + offset + entries * 8))

  def _setup_gpfifos(self):
    # Set the window addresses so they don't collide with other allocated buffers.
    self.shared_mem_window, self.local_mem_window, self.slm_per_thread = 0xfe000000, 0xff000000, 0

    NVComputeQueue().setup(compute_class=self.compute_class, local_mem_window=self.local_mem_window, shared_mem_window=self.shared_mem_window) \
                    .signal(self.timeline_signal, self.timeline_value).submit(self)

    NVCopyQueue().wait(self.timeline_signal, self.timeline_value) \
                 .setup(copy_class=nv_gpu.AMPERE_DMA_COPY_B) \
                 .signal(self.timeline_signal, self.timeline_value + 1).submit(self)

    self.timeline_value += 2

  def _ensure_has_local_memory(self, required):
    if self.slm_per_thread >= required: return

    self.synchronize()
    if hasattr(self, 'shader_local_mem'): self._gpu_free(self.shader_local_mem) # type: ignore # pylint: disable=access-member-before-definition

    self.slm_per_thread = round_up(required, 32)
    bytes_per_warp = round_up(self.slm_per_thread * 32, 0x200)
    bytes_per_tpc = round_up(bytes_per_warp * 48 * 2, 0x8000)
    self.shader_local_mem = self._gpu_alloc(round_up(bytes_per_tpc * 64, 0x20000), huge_page=True, contig=True)

    NVComputeQueue().setup(local_mem=self.shader_local_mem.va_addr, local_mem_tpc_bytes=bytes_per_tpc) \
                    .signal(self.timeline_signal, self.timeline_value).submit(self)
    self.timeline_value += 1
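
# A minimal end-to-end sketch of exercising this backend through tinygrad's public API (a hedged
# example, assuming a supported NVIDIA GPU with the open kernel modules and that the "NV" device is
# available):
#
#   from tinygrad import Tensor
#   t = Tensor.ones(4, 4, device="NV")
#   print((t + 1).numpy())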