Confusion about how the im2col_sgemm_packn_fp16sa_rvv function accelerates convolution
const int packn = csrr_vlenb() / 2; //8
const size_t vl = vsetvl_e16m1(packn); //8

// Mat bottom_im2col(size, maxk, inch, 2u * packn, packn, opt.workspace_allocator);

const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;

const int outch = top_blob.c;

const __fp16* bias = _bias;

// permute
Mat tmp;
if (size >= 8)
    tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 2u * packn, packn, opt.workspace_allocator);
else if (size >= 4)
    tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 2u * packn, packn, opt.workspace_allocator);
else if (size >= 2)
    tmp.create(2 * maxk, inch, size / 2 + size % 2, 2u * packn, packn, opt.workspace_allocator);
else
    tmp.create(maxk, inch, size, 2u * packn, packn, opt.workspace_allocator);
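// Note: tmp tiles the "size" output positions into groups of 8, then 4, then 2, then 1,
// which is where the channel count size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2 comes from.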
{
    int remain_size_start = 0;
    int nn_size = size >> 3;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int ii = 0; ii < nn_size; ii++)
    {
        int i = remain_size_start + ii * 8;

        __fp16* tmpptr = tmp.channel(i / 8);

        for (int q = 0; q < inch; q++)
        {
            const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * packn;

            for (int k = 0; k < maxk; k++)
            {
#if C906
#ifdef RVV_SPEC_0_7
                asm volatile(
                    "mv t3, %[LEN] \n\t"
                    "mv t1, %[SRC] \n\t"
                    "mv t2, %[TMP] \n\t"
                    "slli t3, t3, 1 \n\t"
                    "vle.v v0, (t1) \n\t"
                    "add t1, t1, t3 \n\t"
                    "vle.v v1, (t1) \n\t"
                    "add t1, t1, t3 \n\t"
                    "vle.v v2, (t1) \n\t"
                    "add t1, t1, t3 \n\t"
                    "vle.v v3, (t1) \n\t"
                    "add t1, t1, t3 \n\t"
                    "vle.v v4, (t1) \n\t"
                    "add t1, t1, t3 \n\t"
                    "vle.v v5, (t1) \n\t"
                    "add t1, t1, t3 \n\t"
                    "vle.v v6, (t1) \n\t"
                    "add t1, t1, t3 \n\t"
                    "vle.v v7, (t1) \n\t"
                    "add t1, t1, t3 \n\t"
                    "vsseg8e.v v0, (t2) \n\t" // segment store: interleave v0..v7 into tmpptr
                    :
                    : [LEN] "r"(packn), [SRC] "r"(img0), [TMP] "r"(tmpptr)
                    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "t1", "t2", "t3");
                img0 += size * packn;
                tmpptr += packn * 8;
#else
                for (int l = 0; l < packn; l++)
                {
                    tmpptr[0] = img0[l];
                    tmpptr[1] = img0[l + packn];
                    tmpptr[2] = img0[l + packn * 2];
                    tmpptr[3] = img0[l + packn * 3];
                    tmpptr[4] = img0[l + packn * 4];
                    tmpptr[5] = img0[l + packn * 5];
                    tmpptr[6] = img0[l + packn * 6];
                    tmpptr[7] = img0[l + packn * 7];
                    tmpptr += 8;
                }
                img0 += size * packn;
#endif
#else
                vfloat16m1_t _val0 = vle16_v_f16m1(img0, vl);
                vfloat16m1_t _val1 = vle16_v_f16m1(img0 + packn, vl);
                vfloat16m1_t _val2 = vle16_v_f16m1(img0 + packn * 2, vl);
                vfloat16m1_t _val3 = vle16_v_f16m1(img0 + packn * 3, vl);
                vfloat16m1_t _val4 = vle16_v_f16m1(img0 + packn * 4, vl);
                vfloat16m1_t _val5 = vle16_v_f16m1(img0 + packn * 5, vl);
                vfloat16m1_t _val6 = vle16_v_f16m1(img0 + packn * 6, vl);
                vfloat16m1_t _val7 = vle16_v_f16m1(img0 + packn * 7, vl);
                vsseg8e16_v_f16m1(tmpptr, _val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7, vl);
                img0 += size * packn;
                tmpptr += packn * 8;
#endif
            }
        }
    }
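    // output positions not covered by the 8-wide tiles above are packed 4 at a time below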
    remain_size_start += nn_size << 3;
    nn_size = (size - remain_size_start) >> 2;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int ii = 0; ii < nn_size; ii++)
    {
        int i = remain_size_start + ii * 4;

        __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);

        for (int q = 0; q < inch; q++)
        {
            const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * packn;

            for (int k = 0; k < maxk; k++)
            {
#if C906
#ifdef RVV_SPEC_0_7
                asm volatile(
                    "mv t3, %[LEN] \n\t"
                    "mv t1, %[SRC] \n\t"
                    "mv t2, %[TMP] \n\t"
                    "slli t3, t3, 1 \n\t"
                    "vle.v v0, (t1) \n\t"
                    "add t1, t1, t3 \n\t"
                    "vle.v v1, (t1) \n\t"
                    "add t1, t1, t3 \n\t"
                    "vle.v v2, (t1) \n\t"
                    "add t1, t1, t3 \n\t"
                    "vle.v v3, (t1) \n\t"
                    "vsseg4e.v v0, (t2) \n\t"
                    :
                    : [LEN] "r"(packn), [SRC] "r"(img0), [TMP] "r"(tmpptr)
                    : "cc", "memory", "v0", "v1", "v2", "v3", "t1", "t2", "t3");
                img0 += size * packn;
                tmpptr += packn * 4;
#else
                for (int l = 0; l < packn; l++)
                {
                    tmpptr[0] = img0[l];
                    tmpptr[1] = img0[l + packn];
                    tmpptr[2] = img0[l + packn * 2];
                    tmpptr[3] = img0[l + packn * 3];
                    tmpptr += 4;
                }
                img0 += size * packn;
#endif
#else
                vfloat16m1_t _val0 = vle16_v_f16m1(img0, vl);
                vfloat16m1_t _val1 = vle16_v_f16m1(img0 + packn, vl);
                vfloat16m1_t _val2 = vle16_v_f16m1(img0 + packn * 2, vl);
                vfloat16m1_t _val3 = vle16_v_f16m1(img0 + packn * 3, vl);
                vsseg4e16_v_f16m1(tmpptr, _val0, _val1, _val2, _val3, vl);
                img0 += size * packn;
                tmpptr += packn * 4;
#endif
            }
        }
    }
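    // the next leftover positions are packed 2 at a time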
    remain_size_start += nn_size << 2;
    nn_size = (size - remain_size_start) >> 1;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int ii = 0; ii < nn_size; ii++)
    {
        int i = remain_size_start + ii * 2;

        __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);

        for (int q = 0; q < inch; q++)
        {
            const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * packn;

            for (int k = 0; k < maxk; k++)
            {
#if C906
#ifdef RVV_SPEC_0_7
                asm volatile(
                    "mv t3, %[LEN] \n\t"
                    "mv t1, %[SRC] \n\t"
                    "mv t2, %[TMP] \n\t"
                    "slli t3, t3, 1 \n\t"
                    "vle.v v0, (t1) \n\t"
                    "add t1, t1, t3 \n\t"
                    "vle.v v1, (t1) \n\t"
                    "add t1, t1, t3 \n\t"
                    "vsseg2e.v v0, (t2) \n\t" // segment store: interleave v0/v1 into tmpptr
                    :
                    : [LEN] "r"(packn), [SRC] "r"(img0), [TMP] "r"(tmpptr)
                    : "cc", "memory", "v0", "v1", "t1", "t2", "t3");
                img0 += size * packn;
                tmpptr += packn * 2;
#else
                for (int l = 0; l < packn; l++)
                {
                    tmpptr[0] = img0[l];
                    tmpptr[1] = img0[l + packn];
                    tmpptr += 2;
                }
                img0 += size * packn;
#endif
#else
                vfloat16m1_t _val0 = vle16_v_f16m1(img0, vl);
                vfloat16m1_t _val1 = vle16_v_f16m1(img0 + packn, vl);
                vsseg2e16_v_f16m1(tmpptr, _val0, _val1, vl);
                img0 += size * packn;
                tmpptr += packn * 2;
#endif
            }
        }
    }
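    // any last odd position is copied through packn values at a time, without interleaving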
    remain_size_start += nn_size << 1;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int i = remain_size_start; i < size; i++)
    {
        __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);

        for (int q = 0; q < inch; q++)
        {
            const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * packn;

            for (int k = 0; k < maxk; k++)
            {
                vfloat16m1_t _val = vle16_v_f16m1(img0, vl);
                vse16_v_f16m1(tmpptr, _val, vl);
                img0 += size * packn;
                tmpptr += packn;
            }
        }
    }
}
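// in the original file, the GEMM loop over outch that consumes this packed tmp buffer follows right after this block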
Hi nihui! I am studying the RVV convolution acceleration code, and the snippet above is from src/layer/riscv/convolution_sgemm_packn_fp16s.h. Isn't bottom_im2col already the matrix flattened by im2col? If so, it should only have two dimensions, so why does the code still need

const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;

Also, how are the dimensions of tmp determined? Is the code above reordering memory to speed up the computation? Could you recommend any material on this kind of memory reordering (data packing)? Thanks!
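To make the question more concrete, here is how I currently read the 8-wide packing loop, rewritten as a plain scalar sketch. The function name pack8_columns and its signature are only my own illustration (not ncnn API); it assumes bottom_im2col stores packn __fp16 values per output position (elempack = packn), so w = size output positions, h = maxk kernel taps, c = inch packed input channel groups. It should do the same reordering as the vsseg8e16_v_f16m1 path above:

// hypothetical scalar illustration, mirroring the scalar fallback in the snippet
static void pack8_columns(const __fp16* channel, __fp16* tmpptr, int size, int maxk, int packn, int i)
{
    const __fp16* img0 = channel + i * packn; // start of output position i in this channel group

    for (int k = 0; k < maxk; k++)
    {
        for (int l = 0; l < packn; l++)
        {
            // interleave lane l of output positions i .. i+7 into 8 consecutive fp16 values
            for (int j = 0; j < 8; j++)
                tmpptr[j] = img0[l + packn * j];
            tmpptr += 8;
        }
        img0 += size * packn; // next kernel tap = next row of the im2col matrix
    }
}

If that reading is right, the permute is purely a data-layout change, so that the GEMM kernel afterwards can stream 8 output positions from one contiguous block instead of gathering them with strided loads.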