// gemm.c
  1. // single: clang -O2 -march=native gemm.c
  2. // multi: clang -O2 -march=native gemm.c -DNTHREADS=32 -lpthread
  3. #define _GNU_SOURCE
  4. // https://en.wikichip.org/wiki/amd/microarchitectures/zen_2
  5. #include <stdint.h>
  6. #include <time.h>
  7. #include <sched.h>
  8. #include <stdio.h>
  9. #include <assert.h>
  10. #include <math.h>
  11. #include <string.h>
  12. #include <immintrin.h>
  13. #include <pthread.h>
  14. #include <unistd.h>
  15. #include <stdatomic.h>
  16. //#define DEBUG
  17. #ifdef DEBUG
  18. #define N 8
  19. #endif
  20. #ifndef N
  21. // NOTE: if you change this you have to rerun gemm.py
  22. #define N 512
  23. #endif
  24. #ifndef NTHREADS
  25. #define NTHREADS 1
  26. #endif
// Global operand/result buffers, 64-byte aligned so every 32-byte __m256
// load/store below is aligned (and cache-line friendly).
float A[N*N] __attribute__ ((aligned (64)));
float B[N*N] __attribute__ ((aligned (64)));
float C[N*N] __attribute__ ((aligned (64)));
float val[N*N] __attribute__ ((aligned (64)));  // reference result read from /tmp/matmul
// Vector-typed views of the same storage for 8-float-at-a-time access.
// NOTE(review): float* -> __m256* punning is a strict-aliasing violation in
// ISO C; it is the conventional idiom with Intel intrinsics and works under
// clang/gcc, but _mm256_load_ps on the float arrays would be the clean form.
__m256 *Am = (__m256*)A;
__m256 *Bm = (__m256*)B;
__m256 *Cm = (__m256*)C;
  35. uint64_t nanos() {
  36. struct timespec start;
  37. clock_gettime(CLOCK_MONOTONIC_RAW, &start);
  38. return (uint64_t)start.tv_sec*1000000000 + (uint64_t)start.tv_nsec;
  39. }
// Pre-swizzled copy of B (filled once in main before the benchmark loop) so
// the inner matmul loop can do contiguous 8-float vector loads.
float Bf[N*N] __attribute__ ((aligned (64)));
__m256 *Bfm = (__m256*)Bf;
// Register tile shape: each inner iteration computes a BLOCK_Y x (BLOCK_X*8)
// patch of C, held in BLOCK_Y*BLOCK_X = 8 __m256 accumulators.
#define BLOCK 8
#define BLOCK_Y 4
#define BLOCK_X 2
// Compute rows [sy, ey) of the N x N product into C, using 4x2 register tiles
// of 8-wide FMAs against the pre-swizzled Bf. Callers split [0, N) into
// per-thread row slices; sy/ey must be multiples of BLOCK_Y.
void matmul(int sy, int ey) {
  // 136.77 GFLOPS on single core numpy
  // 4.9 GHz is max boost for 5950X
  // 32 FLOPS/cycle (16 FMAs, aka 2x 8 single wide / 32 byte FMAs)
  // theoretical max is 156.8 GFLOPS, we see 150
  // multicore theo max = 2508.8 GFLOPS, we see 1501.434299
  // Bf = (y/8, k, 8)
  for (int y = sy; y < ey; y+=BLOCK_Y) {
    for (int x = 0; x < N; x+=BLOCK*BLOCK_X) {
      // 8 accumulators covering a 4-row x 16-column patch of C; {} zero-inits.
      __m256 acc[BLOCK_Y][BLOCK_X] = {};
      for (int k = 0; k < N; k++) {
        for (int iy = 0; iy < BLOCK_Y; iy++) {
          // Splat A[y+iy][k] across all 8 lanes.
          __m256 ta = _mm256_broadcast_ss(&A[(y+iy)*N + k]);
          for (int ix = 0; ix < BLOCK_X; ix++) {
            // Loads Bf[(x+ix*8)*N + k*8 .. +7]: per the swizzle in main this
            // is an 8-element strip of B for this k, laid out contiguously.
            acc[iy][ix] = _mm256_fmadd_ps(ta, Bfm[((x+ix*BLOCK)*N + k*8)/8], acc[iy][ix]);
          }
        }
      }
      // Write the finished tile out to C (indices /8 because Cm is __m256*).
      for (int iy = 0; iy < BLOCK_Y; iy++) {
        for (int ix = 0; ix < BLOCK_X; ix++) {
          Cm[((y+iy)*N + x + ix * BLOCK)/8] = acc[iy][ix];
        }
      }
    }
  }
}
// Start-line barrier: main holds `lock` while workers spin up; each worker
// touches it once, so main's unlock releases all workers near-simultaneously.
pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
atomic_int nready = 0;  // count of workers pinned and waiting at the start line
atomic_int ndone = 0;   // count of workers that finished their row slice
  74. void *matmul_thread(void *n) {
  75. int k = (int)(int64_t)n;
  76. int sy = (N/NTHREADS) * k;
  77. int ey = (N/NTHREADS) * (k+1);
  78. cpu_set_t set;
  79. CPU_ZERO(&set);
  80. CPU_SET(k,&set);
  81. pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &set);
  82. nready++;
  83. // gotta have main lock once to signal start
  84. pthread_mutex_lock(&lock);
  85. pthread_mutex_unlock(&lock);
  86. matmul(sy, ey);
  87. // we done
  88. ndone++;
  89. return NULL;
  90. }
  91. int main() {
  92. printf("hello with %d threads\n", NTHREADS);
  93. #ifdef DEBUG
  94. for (int i = 0; i < N*N; i++) A[i] = i;
  95. for (int i = 0; i < N*N; i++) B[i] = i;
  96. #else
  97. FILE *f = fopen("/tmp/matmul", "rb");
  98. if (f == NULL) {
  99. printf("please pregenerate python /tmp/matmul file\n");
  100. return -1;
  101. }
  102. fread(A, 1, sizeof(float)*N*N, f);
  103. fread(B, 1, sizeof(float)*N*N, f);
  104. fread(val, 1, sizeof(float)*N*N, f);
  105. fclose(f);
  106. #endif
  107. // preswizzle
  108. for (int y = 0; y < N; y+=8) {
  109. for (int x = 0; x < N; x++) {
  110. for (int iy = 0; iy < 8; iy++) {
  111. Bf[y*N + x*8 + iy] = B[(y+iy)*N + x];
  112. }
  113. }
  114. }
  115. for (int i = 0; i < 10; i++) {
  116. memset(C, 0, N*N*sizeof(float));
  117. #if NTHREADS != 1
  118. nready = 0;
  119. ndone = 0;
  120. pthread_mutex_lock(&lock);
  121. pthread_t threads[NTHREADS];
  122. for (int j = 0; j < NTHREADS; j++) {
  123. pthread_create(&threads[j], NULL, matmul_thread, (void *)(uint64_t)j);
  124. }
  125. while (nready != NTHREADS) usleep(1);
  126. #endif
  127. uint64_t start = nanos();
  128. #if NTHREADS == 1
  129. matmul(0, N);
  130. #else
  131. // unlocking mutex starts threads
  132. pthread_mutex_unlock(&lock);
  133. while (ndone != NTHREADS) usleep(1);
  134. #endif
  135. uint64_t end = nanos();
  136. #if NTHREADS != 1
  137. for (int j = 0; j < NTHREADS; j++) {
  138. pthread_join(threads[j], NULL);
  139. }
  140. #endif
  141. double gflop = (2.0*N*N*N)*1e-9;
  142. double s = (end-start)*1e-9;
  143. printf("%f GFLOP/S -- %.2f ms\n", gflop/s, s*1e3);
  144. // hack around throttling
  145. //if (i%4 == 0) sleep(1);
  146. }
  147. #ifdef DEBUG
  148. for (int i = 0; i < N*N; i++) {
  149. if (i%N == 0 && i != 0) printf("\n");
  150. printf("%f ", C[i]);
  151. }
  152. printf("\n");
  153. #else
  154. for (int k = 0; k < N*N; k++) {
  155. if (fabsf(C[k] - val[k]) > 1e-3) {
  156. printf("MISMATCH AT %d, %f != %f\n", k, C[k], val[k]);
  157. return -1;
  158. }
  159. }
  160. printf("match\n");
  161. #endif
  162. return 0;
  163. }