Coverage Report

Created: 2025-12-31 06:27

/src/xnnpack/src/qu8-igemm/gen/qu8-igemm-3x4c8-minmax-fp32-sse41-ld64.c
Every instrumented line in this file has an execution count of 0: the ukernel below was never executed by the profiled run. The Line and Count columns of the original table are collapsed here and the source is reproduced as a plain listing.

// clang-format off
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c8-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#include <smmintrin.h>

#include "src/xnnpack/common.h"
#include "src/xnnpack/igemm.h"
#include "src/xnnpack/math.h"
#include "src/xnnpack/microparams.h"
#include "src/xnnpack/unaligned.h"


void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c8__sse41_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const uint8_t** restrict a,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params* restrict params) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 8 * sizeof(uint8_t));
  uint8_t* c0 = c;
  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  const __m128 vscale = _mm_set1_ps(params->fp32_scalar.scale);
  XNN_FORCE_REALIZATION(vscale);

  const __m128 voutput_max_less_zero_point = _mm_set1_ps((int32_t) params->fp32_scalar.output_max - (int32_t) params->fp32_scalar.output_zero_point);
  const __m128i voutput_zero_point = _mm_set1_epi16(params->fp32_scalar.output_zero_point);
  const __m128i voutput_min = _mm_set1_epi8(params->fp32_scalar.output_min);
  XNN_FORCE_REALIZATION(voutput_max_less_zero_point);
  XNN_FORCE_REALIZATION(voutput_zero_point);
  XNN_FORCE_REALIZATION(voutput_min);

  const __m128i vb_zero_point = _mm_set1_epi16(params->fp32_scalar.kernel_zero_point);
  XNN_FORCE_REALIZATION(vb_zero_point);

  do {
    __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
    __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
    __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
    __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
    __m128i vacc1x0 = vacc0x0;
    __m128i vacc1x1 = vacc0x1;
    __m128i vacc1x2 = vacc0x2;
    __m128i vacc1x3 = vacc0x3;
    __m128i vacc2x0 = vacc0x0;
    __m128i vacc2x1 = vacc0x1;
    __m128i vacc2x2 = vacc0x2;
    __m128i vacc2x3 = vacc0x3;
    w = (const int32_t*) w + 4;

    size_t p = ks;
    do {
      const uint8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
      }
      const uint8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
      }
      const uint8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = 0;
      while (k < kc) {
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
        a2 += 8;

        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);

        vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
        vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
        vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
        const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);

        vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
        vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
        vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
        const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
        const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);

        vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
        vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
        vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
        const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
        const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);

        vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
        vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
        vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));

        w = (const void*) ((const uint8_t*) w + 32);
        k += 8 * sizeof(uint8_t);
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

    const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
    const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
    const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
    const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
    const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
    const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);

    __m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
    __m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
    __m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);

    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);

    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);

    vout = _mm_max_epu8(vout, voutput_min);

    if (nc >= 4) {
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const uint8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      if (nc & 2) {
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c2 = (uint8_t) _mm_extract_epi8(vout, 8);
        *c1 = (uint8_t) _mm_extract_epi8(vout, 4);
        *c0 = (uint8_t) _mm_extract_epi8(vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
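
Since every count above is zero, the quickest way to get this kernel under coverage is a direct call at the smallest shape the asserts allow. The harness below is a minimal sketch under stated assumptions, not XNNPACK's own test driver: the shape (mr = 3, nc = 4, kc = 8, ks = 3 * sizeof(void*)), the bias-then-kernel-bytes weight packing, and the params field names are all read off the listing itself, while the exact params field types are deferred to src/xnnpack/microparams.h. It assumes an XNNPACK source tree on the include path, the kernel's object file to link against, and SSE4.1 enabled at compile time (e.g. -msse4.1); the main function, buffer names, and quantization values are hypothetical.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include "src/xnnpack/igemm.h"
#include "src/xnnpack/microparams.h"

int main(void) {
  // Smallest legal shape: all asserts above hold with mr = 3, nc = 4,
  // kc = 8 (already a multiple of 8 bytes), and one indirection step.
  const size_t kc = 8;
  const size_t ks = 3 * sizeof(void*);

  // One 8-byte input row per mr; row m holds the constant value m + 1.
  uint8_t a0[8], a1[8], a2[8];
  memset(a0, 1, sizeof(a0));
  memset(a1, 2, sizeof(a1));
  memset(a2, 3, sizeof(a2));
  const uint8_t* a[3] = { a0, a1, a2 };
  const uint8_t zero[8] = { 0 };  // stand-in row for padding; unused here

  // Packed weights exactly as the kernel reads them: four int32 biases,
  // then kc * 4 kernel bytes consumed as 8-byte loads at offsets 0/8/16/24.
  struct {
    int32_t bias[4];
    uint8_t k[32];
  } w;
  memset(&w, 0, sizeof(w));       // zero biases
  memset(w.k, 129, sizeof(w.k));  // one step above the kernel zero point

  uint8_t c[3][4];

  // Field names follow the accesses in the listing; the exact field types
  // are defined in src/xnnpack/microparams.h.
  union xnn_qu8_conv_minmax_params params;
  memset(&params, 0, sizeof(params));
  params.fp32_scalar.kernel_zero_point = 128;
  params.fp32_scalar.scale = 0.5f;
  params.fp32_scalar.output_zero_point = 128;
  params.fp32_scalar.output_min = 0;
  params.fp32_scalar.output_max = 255;

  xnn_qu8_igemm_minmax_fp32_ukernel_3x4c8__sse41_ld64(
      /*mr=*/3, /*nc=*/4, kc, ks,
      a, &w, &c[0][0],
      /*cm_stride=*/4, /*cn_stride=*/4,
      /*a_offset=*/0, zero, &params);

  for (int m = 0; m < 3; m++) {
    printf("%d %d %d %d\n", c[m][0], c[m][1], c[m][2], c[m][3]);
  }
  return 0;
}

With zero biases, kernel bytes one step above the 128 zero point, and scale 0.5, row m should come back as 128 + 4 * (m + 1), which makes a quick smoke check. A fuller harness would also exercise the nc & 2 and nc & 1 tail stores and route padded rows through the zero pointer, since those branches stay uncovered under this single call.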