Add and enable u32x4_extend_to_u64x2_high for aarch64 NEON intrinsics.

This is the high-half counterpart of u32x4_extend_to_u64x2. It does in
one helper what the SSE path needs a shuffle plus an extend for, and it
enables the NEON code path for buffer index to buffer pointer
translation.
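
For illustration only (not part of this change), a minimal standalone
aarch64 sketch of what a "high" widening step computes; the input values
and the printing are arbitrary, and only the NEON intrinsics themselves
are taken as given:

#include <arm_neon.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint32_t bi[4] = { 10, 20, 30, 40 };
  uint32x4_t v = vld1q_u32 (bi);

  /* Widen lanes 0-1 of v to 64 bits (UXTL). */
  uint64x2_t lo = vmovl_u32 (vget_low_u32 (v));
  /* Widen lanes 2-3 directly (UXTL2); the SSE path in the diff below has
     to shuffle the upper lanes down before it can extend them. */
  uint64x2_t hi = vmovl_high_u32 (v);

  printf ("%" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
          vgetq_lane_u64 (lo, 0), vgetq_lane_u64 (lo, 1),
          vgetq_lane_u64 (hi, 0), vgetq_lane_u64 (hi, 1));
  return 0;
}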

Change-Id: I52d7bbf3d76ba69c9acb0e518ff4bc6abf3bbbd4
Signed-off-by: Sirshak Das <sirshak.das@arm.com>
Reviewed-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Yi He <yi.he@arm.com>
Verified-by: Lijian Zhang <lijian.zhang@arm.com>
commit 759226e668 (parent dbecf18b8a)
Author:    Sirshak Das
Date:      2018-08-22 08:46:52 +08:00
Committer: Damjan Marion
2 changed files with 11 additions and 1 deletion

@@ -97,12 +97,16 @@ vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
   u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
   /* shift and add to get vlib_buffer_t pointer */
   u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
-#elif defined (CLIB_HAVE_VEC128) && defined (__x86_64__)
+#elif defined (CLIB_HAVE_VEC128)
   u64x2 off = u64x2_splat (buffer_main.buffer_mem_start + offset);
   u32x4 bi4 = u32x4_load_unaligned (bi);
   u64x2 b0 = u32x4_extend_to_u64x2 ((u32x4) bi4);
+#if defined (__aarch64__)
+  u64x2 b1 = u32x4_extend_to_u64x2_high ((u32x4) bi4);
+#else
   bi4 = u32x4_shuffle (bi4, 2, 3, 0, 1);
   u64x2 b1 = u32x4_extend_to_u64x2 ((u32x4) bi4);
+#endif
   u64x2_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
   u64x2_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 2);
 #else
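
As a sketch of what the vector code above computes, here is a scalar
reference; the function name and simplified types are illustrative, the
cache-line shift value 6 is an assumption, and only buffer_mem_start,
offset, bi, and b correspond to names in the hunk:

#include <stdint.h>

/* Hypothetical scalar equivalent of the SIMD index-to-pointer
   translation: scale each buffer index by the cache-line size and
   rebase it onto the start of buffer memory, mirroring
   (bi << CLIB_LOG2_CACHE_LINE_BYTES) + off above. */
static inline void
buffer_index_to_pointer_scalar (uintptr_t buffer_mem_start, intptr_t offset,
                                const uint32_t *bi, void **b, int count)
{
  for (int i = 0; i < count; i++)
    b[i] = (void *) (buffer_mem_start + ((uintptr_t) bi[i] << 6) + offset);
}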

@@ -142,6 +142,12 @@ u32x4_extend_to_u64x2 (u32x4 v)
   return vmovl_u32 (vget_low_u32 (v));
 }
 
+static_always_inline u64x2
+u32x4_extend_to_u64x2_high (u32x4 v)
+{
+  return vmovl_high_u32 (vrev64q_u32 (v));
+}
+
 #define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
 #define CLIB_VEC128_SPLAT_DEFINED
 #endif /* included_vector_neon_h */