SHA-256 x86
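/* SHA-256 implemented with the x86 SHA extensions (SHA-NI). */
/* @MarekKnapek, September 19, 2025. */
/* sha2_block_* compresses one 64-byte block at a time; the sha2_stream_* */
/* wrapper buffers arbitrary-length input on top of it. */
/* Requires the SSE2, SSSE3, SSE4.1 and SHA instruction sets. */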
#include <assert.h> /* assert */
#include <stdint.h> /* uintptr_t */
#include <string.h> /* memset memcpy */
#include <stdalign.h> /* alignas */
#include <emmintrin.h> /* SSE2 _mm_add_epi32 _mm_load_si128 _mm_set_epi64x _mm_shuffle_epi32 _mm_store_si128 */
#include <tmmintrin.h> /* SSSE3 _mm_alignr_epi8 _mm_shuffle_epi8 */
#include <smmintrin.h> /* SSE4.1 _mm_blend_epi16 */
#include <immintrin.h> /* SHA _mm_sha256msg1_epu32 _mm_sha256msg2_epu32 _mm_sha256rnds2_epu32 */
#if defined _MSC_VER
#if _MSC_VER >= 1900
#pragma intrinsic(_mm_add_epi32)
#pragma intrinsic(_mm_alignr_epi8)
#pragma intrinsic(_mm_blend_epi16)
#pragma intrinsic(_mm_load_si128)
#pragma intrinsic(_mm_set_epi64x)
#pragma intrinsic(_mm_sha256msg1_epu32)
#pragma intrinsic(_mm_sha256msg2_epu32)
#pragma intrinsic(_mm_sha256rnds2_epu32)
#pragma intrinsic(_mm_shuffle_epi32)
#pragma intrinsic(_mm_shuffle_epi8)
#pragma intrinsic(_mm_store_si128)
#endif
#endif
typedef unsigned char u8_t;
typedef unsigned int u32_t; /* assumed to be exactly 32 bits */
typedef unsigned long long int u64_t; /* at least 64 bits */
void u32_to_u8s_be(u32_t const* const u32, u8_t* const u8s)
{
assert(u32);
assert(u8s);
u8s[0] = ((u8_t)(((*u32) >> (3 * 8)) & 0xff));
u8s[1] = ((u8_t)(((*u32) >> (2 * 8)) & 0xff));
u8s[2] = ((u8_t)(((*u32) >> (1 * 8)) & 0xff));
u8s[3] = ((u8_t)(((*u32) >> (0 * 8)) & 0xff));
}
void u64_to_u8s_be(u64_t const* const u64, u8_t* const u8s)
{
assert(u64);
assert(u8s);
u8s[0] = ((u8_t)(((*u64) >> (7 * 8)) & 0xff));
u8s[1] = ((u8_t)(((*u64) >> (6 * 8)) & 0xff));
u8s[2] = ((u8_t)(((*u64) >> (5 * 8)) & 0xff));
u8s[3] = ((u8_t)(((*u64) >> (4 * 8)) & 0xff));
u8s[4] = ((u8_t)(((*u64) >> (3 * 8)) & 0xff));
u8s[5] = ((u8_t)(((*u64) >> (2 * 8)) & 0xff));
u8s[6] = ((u8_t)(((*u64) >> (1 * 8)) & 0xff));
u8s[7] = ((u8_t)(((*u64) >> (0 * 8)) & 0xff));
}
int mk_min(int const a, int const b)
{
return b < a ? b : a;
}
static u32_t const iv[] =
{
((u32_t)(0x6a09e667ul)),
((u32_t)(0xbb67ae85ul)),
((u32_t)(0x3c6ef372ul)),
((u32_t)(0xa54ff53aul)),
((u32_t)(0x510e527ful)),
((u32_t)(0x9b05688cul)),
((u32_t)(0x1f83d9abul)),
((u32_t)(0x5be0cd19ul)),
};
alignas(16) static u32_t const table[] =
{
((u32_t)(0x428a2f98ul)), ((u32_t)(0x71374491ul)), ((u32_t)(0xb5c0fbcful)), ((u32_t)(0xe9b5dba5ul)),
((u32_t)(0x3956c25bul)), ((u32_t)(0x59f111f1ul)), ((u32_t)(0x923f82a4ul)), ((u32_t)(0xab1c5ed5ul)),
((u32_t)(0xd807aa98ul)), ((u32_t)(0x12835b01ul)), ((u32_t)(0x243185beul)), ((u32_t)(0x550c7dc3ul)),
((u32_t)(0x72be5d74ul)), ((u32_t)(0x80deb1feul)), ((u32_t)(0x9bdc06a7ul)), ((u32_t)(0xc19bf174ul)),
((u32_t)(0xe49b69c1ul)), ((u32_t)(0xefbe4786ul)), ((u32_t)(0x0fc19dc6ul)), ((u32_t)(0x240ca1ccul)),
((u32_t)(0x2de92c6ful)), ((u32_t)(0x4a7484aaul)), ((u32_t)(0x5cb0a9dcul)), ((u32_t)(0x76f988daul)),
((u32_t)(0x983e5152ul)), ((u32_t)(0xa831c66dul)), ((u32_t)(0xb00327c8ul)), ((u32_t)(0xbf597fc7ul)),
((u32_t)(0xc6e00bf3ul)), ((u32_t)(0xd5a79147ul)), ((u32_t)(0x06ca6351ul)), ((u32_t)(0x14292967ul)),
((u32_t)(0x27b70a85ul)), ((u32_t)(0x2e1b2138ul)), ((u32_t)(0x4d2c6dfcul)), ((u32_t)(0x53380d13ul)),
((u32_t)(0x650a7354ul)), ((u32_t)(0x766a0abbul)), ((u32_t)(0x81c2c92eul)), ((u32_t)(0x92722c85ul)),
((u32_t)(0xa2bfe8a1ul)), ((u32_t)(0xa81a664bul)), ((u32_t)(0xc24b8b70ul)), ((u32_t)(0xc76c51a3ul)),
((u32_t)(0xd192e819ul)), ((u32_t)(0xd6990624ul)), ((u32_t)(0xf40e3585ul)), ((u32_t)(0x106aa070ul)),
((u32_t)(0x19a4c116ul)), ((u32_t)(0x1e376c08ul)), ((u32_t)(0x2748774cul)), ((u32_t)(0x34b0bcb5ul)),
((u32_t)(0x391c0cb3ul)), ((u32_t)(0x4ed8aa4aul)), ((u32_t)(0x5b9cca4ful)), ((u32_t)(0x682e6ff3ul)),
((u32_t)(0x748f82eeul)), ((u32_t)(0x78a5636ful)), ((u32_t)(0x84c87814ul)), ((u32_t)(0x8cc70208ul)),
((u32_t)(0x90befffaul)), ((u32_t)(0xa4506cebul)), ((u32_t)(0xbef9a3f7ul)), ((u32_t)(0xc67178f2ul)),
};
struct sha2_block_s
{
alignas(16) u32_t m_state[8];
u64_t m_blocks;
};
typedef struct sha2_block_s sha2_block_t;
void sha2_block_init(sha2_block_t* const sha2)
{
int n;
int i;
assert(sha2);
assert((((uintptr_t)(&sha2->m_state[0])) & 0xf) == 0);
n = 8;
for(i = 0; i != n; ++i)
{
sha2->m_state[i] = iv[i];
}
sha2->m_blocks = 0;
}
void sha2_block_append_block(sha2_block_t* const sha2, u8_t const* const block)
{
#define x86_shuffle_epi32(a, b, c, d) ((((a) & 0x3) << (3 * 2)) | (((b) & 0x3) << (2 * 2)) | (((c) & 0x3) << (1 * 2)) | (((d) & 0x3) << (0 * 2)))
#define x86_blend_epi32(a, b, c, d) ((((a) & 0x1) << (3 * 1)) | (((b) & 0x1) << (2 * 1)) | (((c) & 0x1) << (1 * 1)) | (((d) & 0x1) << (0 * 1)))
#define x86_alignr_epi8(a) (((a) & 0x3) * 4)
#define x86_blend_epi16(a, b, c, d, e, f, g, h) ((((a) & 0x1) << 7) | (((b) & 0x1) << 6) | (((c) & 0x1) << 5) | (((d) & 0x1) << 4) | (((e) & 0x1) << 3) | (((f) & 0x1) << 2) | (((g) & 0x1) << 1) | (((h) & 0x1) << 0))
#define mk_mm_blend_epi32(a, b, c) _mm_blend_epi16((a), (b), x86_blend_epi16((((c) >> 3) & 0x1), (((c) >> 3) & 0x1), (((c) >> 2) & 0x1), (((c) >> 2) & 0x1), (((c) >> 1) & 0x1), (((c) >> 1) & 0x1), (((c) >> 0) & 0x1), (((c) >> 0) & 0x1)))
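/* The x86_* macros build the 8-bit immediate operands of the shuffle, blend and alignr intrinsics. */
/* mk_mm_blend_epi32 emulates the AVX2 _mm_blend_epi32 with the SSE4.1 _mm_blend_epi16 */
/* by duplicating each 32-bit mask bit across the two 16-bit lanes it covers. */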
u8_t const* ptr;
__m128i reverse;
__m128i state_0;
__m128i state_1;
__m128i tmp;
__m128i old_0;
__m128i old_1;
__m128i msg_0;
__m128i msg;
__m128i msg_1;
__m128i msg_2;
__m128i msg_3;
assert(sha2);
assert(block);
assert((((uintptr_t)(&sha2->m_state[0])) & 0xf) == 0);
assert((((uintptr_t)(block)) & 0xf) == 0);
assert((((uintptr_t)(&table[0])) & 0xf) == 0);
ptr = block;
sha2->m_blocks += 1;
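/* shuffle mask that byte swaps each 32-bit lane: the message words are big-endian */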
reverse = _mm_set_epi64x(0x0c0d0e0f08090a0bull, 0x0405060700010203ull);
state_0 = _mm_load_si128(((__m128i const*)(&sha2->m_state[0])));
state_1 = _mm_load_si128(((__m128i const*)(&sha2->m_state[4])));
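/* repack the state from the h0..h7 word order into the ABEF / CDGH register layout expected by sha256rnds2 */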
tmp = _mm_shuffle_epi32(state_0, x86_shuffle_epi32(0x2, 0x3, 0x0, 0x1));
state_1 = _mm_shuffle_epi32(state_1, x86_shuffle_epi32(0x0, 0x1, 0x2, 0x3));
state_0 = _mm_alignr_epi8(tmp, state_1, x86_alignr_epi8(2));
state_1 = mk_mm_blend_epi32(state_1, tmp, x86_blend_epi32(0x1, 0x1, 0x0, 0x0));
old_0 = state_0;
old_1 = state_1;
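/* rounds 0 to 15: the message words come straight from the byte swapped input block */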
msg_0 = _mm_load_si128(((__m128i const*)(&ptr[0 * 16])));
msg_0 = _mm_shuffle_epi8(msg_0, reverse);
tmp = _mm_load_si128(((__m128i const*)(&table[0 * 4])));
msg = _mm_add_epi32(msg_0, tmp);
state_1 = _mm_sha256rnds2_epu32(state_1, state_0, msg);
msg = _mm_shuffle_epi32(msg, x86_shuffle_epi32(0x1, 0x0, 0x3, 0x2));
state_0 = _mm_sha256rnds2_epu32(state_0, state_1, msg);
msg_1 = _mm_load_si128(((__m128i const*)(&ptr[1 * 16])));
msg_1 = _mm_shuffle_epi8(msg_1, reverse);
tmp = _mm_load_si128(((__m128i const*)(&table[1 * 4])));
msg = _mm_add_epi32(msg_1, tmp);
state_1 = _mm_sha256rnds2_epu32(state_1, state_0, msg);
msg = _mm_shuffle_epi32(msg, x86_shuffle_epi32(0x1, 0x0, 0x3, 0x2));
state_0 = _mm_sha256rnds2_epu32(state_0, state_1, msg);
msg_2 = _mm_load_si128(((__m128i const*)(&ptr[2 * 16])));
msg_2 = _mm_shuffle_epi8(msg_2, reverse);
tmp = _mm_load_si128(((__m128i const*)(&table[2 * 4])));
msg = _mm_add_epi32(msg_2, tmp);
state_1 = _mm_sha256rnds2_epu32(state_1, state_0, msg);
msg = _mm_shuffle_epi32(msg, x86_shuffle_epi32(0x1, 0x0, 0x3, 0x2));
state_0 = _mm_sha256rnds2_epu32(state_0, state_1, msg);
msg_3 = _mm_load_si128(((__m128i const*)(&ptr[3 * 16])));
msg_3 = _mm_shuffle_epi8(msg_3, reverse);
tmp = _mm_load_si128(((__m128i const*)(&table[3 * 4])));
msg = _mm_add_epi32(msg_3, tmp);
state_1 = _mm_sha256rnds2_epu32(state_1, state_0, msg);
msg = _mm_shuffle_epi32(msg, x86_shuffle_epi32(0x1, 0x0, 0x3, 0x2));
state_0 = _mm_sha256rnds2_epu32(state_0, state_1, msg);
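/* rounds 16 to 63: before each group of four rounds, the next four schedule words */
/* are derived with sha256msg1, an alignr/add step and sha256msg2 */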
msg_0 = _mm_sha256msg1_epu32(msg_0, msg_1);
tmp = _mm_alignr_epi8(msg_3, msg_2, x86_alignr_epi8(1));
msg_0 = _mm_add_epi32(msg_0, tmp);
msg_0 = _mm_sha256msg2_epu32(msg_0, msg_3);
tmp = _mm_load_si128(((__m128i const*)(&table[4 * 4])));
msg = _mm_add_epi32(msg_0, tmp);
state_1 = _mm_sha256rnds2_epu32(state_1, state_0, msg);
msg = _mm_shuffle_epi32(msg, x86_shuffle_epi32(0x1, 0x0, 0x3, 0x2));
state_0 = _mm_sha256rnds2_epu32(state_0, state_1, msg);
msg_1 = _mm_sha256msg1_epu32(msg_1, msg_2);
tmp = _mm_alignr_epi8(msg_0, msg_3, x86_alignr_epi8(1));
msg_1 = _mm_add_epi32(msg_1, tmp);
msg_1 = _mm_sha256msg2_epu32(msg_1, msg_0);
tmp = _mm_load_si128(((__m128i const*)(&table[5 * 4])));
msg = _mm_add_epi32(msg_1, tmp);
state_1 = _mm_sha256rnds2_epu32(state_1, state_0, msg);
msg = _mm_shuffle_epi32(msg, x86_shuffle_epi32(0x1, 0x0, 0x3, 0x2));
state_0 = _mm_sha256rnds2_epu32(state_0, state_1, msg);
msg_2 = _mm_sha256msg1_epu32(msg_2, msg_3);
tmp = _mm_alignr_epi8(msg_1, msg_0, x86_alignr_epi8(1));
msg_2 = _mm_add_epi32(msg_2, tmp);
msg_2 = _mm_sha256msg2_epu32(msg_2, msg_1);
tmp = _mm_load_si128(((__m128i const*)(&table[6 * 4])));
msg = _mm_add_epi32(msg_2, tmp);
state_1 = _mm_sha256rnds2_epu32(state_1, state_0, msg);
msg = _mm_shuffle_epi32(msg, x86_shuffle_epi32(0x1, 0x0, 0x3, 0x2));
state_0 = _mm_sha256rnds2_epu32(state_0, state_1, msg);
msg_3 = _mm_sha256msg1_epu32(msg_3, msg_0);
tmp = _mm_alignr_epi8(msg_2, msg_1, x86_alignr_epi8(1));
msg_3 = _mm_add_epi32(msg_3, tmp);
msg_3 = _mm_sha256msg2_epu32(msg_3, msg_2);
tmp = _mm_load_si128(((__m128i const*)(&table[7 * 4])));
msg = _mm_add_epi32(msg_3, tmp);
state_1 = _mm_sha256rnds2_epu32(state_1, state_0, msg);
msg = _mm_shuffle_epi32(msg, x86_shuffle_epi32(0x1, 0x0, 0x3, 0x2));
state_0 = _mm_sha256rnds2_epu32(state_0, state_1, msg);
msg_0 = _mm_sha256msg1_epu32(msg_0, msg_1);
tmp = _mm_alignr_epi8(msg_3, msg_2, x86_alignr_epi8(1));
msg_0 = _mm_add_epi32(msg_0, tmp);
msg_0 = _mm_sha256msg2_epu32(msg_0, msg_3);
tmp = _mm_load_si128(((__m128i const*)(&table[8 * 4])));
msg = _mm_add_epi32(msg_0, tmp);
state_1 = _mm_sha256rnds2_epu32(state_1, state_0, msg);
msg = _mm_shuffle_epi32(msg, x86_shuffle_epi32(0x1, 0x0, 0x3, 0x2));
state_0 = _mm_sha256rnds2_epu32(state_0, state_1, msg);
msg_1 = _mm_sha256msg1_epu32(msg_1, msg_2);
tmp = _mm_alignr_epi8(msg_0, msg_3, x86_alignr_epi8(1));
msg_1 = _mm_add_epi32(msg_1, tmp);
msg_1 = _mm_sha256msg2_epu32(msg_1, msg_0);
tmp = _mm_load_si128(((__m128i const*)(&table[9 * 4])));
msg = _mm_add_epi32(msg_1, tmp);
state_1 = _mm_sha256rnds2_epu32(state_1, state_0, msg);
msg = _mm_shuffle_epi32(msg, x86_shuffle_epi32(0x1, 0x0, 0x3, 0x2));
state_0 = _mm_sha256rnds2_epu32(state_0, state_1, msg);
msg_2 = _mm_sha256msg1_epu32(msg_2, msg_3);
tmp = _mm_alignr_epi8(msg_1, msg_0, x86_alignr_epi8(1));
msg_2 = _mm_add_epi32(msg_2, tmp);
msg_2 = _mm_sha256msg2_epu32(msg_2, msg_1);
tmp = _mm_load_si128(((__m128i const*)(&table[10 * 4])));
msg = _mm_add_epi32(msg_2, tmp);
state_1 = _mm_sha256rnds2_epu32(state_1, state_0, msg);
msg = _mm_shuffle_epi32(msg, x86_shuffle_epi32(0x1, 0x0, 0x3, 0x2));
state_0 = _mm_sha256rnds2_epu32(state_0, state_1, msg);
msg_3 = _mm_sha256msg1_epu32(msg_3, msg_0);
tmp = _mm_alignr_epi8(msg_2, msg_1, x86_alignr_epi8(1));
msg_3 = _mm_add_epi32(msg_3, tmp);
msg_3 = _mm_sha256msg2_epu32(msg_3, msg_2);
tmp = _mm_load_si128(((__m128i const*)(&table[11 * 4])));
msg = _mm_add_epi32(msg_3, tmp);
state_1 = _mm_sha256rnds2_epu32(state_1, state_0, msg);
msg = _mm_shuffle_epi32(msg, x86_shuffle_epi32(0x1, 0x0, 0x3, 0x2));
state_0 = _mm_sha256rnds2_epu32(state_0, state_1, msg);
msg_0 = _mm_sha256msg1_epu32(msg_0, msg_1);
tmp = _mm_alignr_epi8(msg_3, msg_2, x86_alignr_epi8(1));
msg_0 = _mm_add_epi32(msg_0, tmp);
msg_0 = _mm_sha256msg2_epu32(msg_0, msg_3);
tmp = _mm_load_si128(((__m128i const*)(&table[12 * 4])));
msg = _mm_add_epi32(msg_0, tmp);
state_1 = _mm_sha256rnds2_epu32(state_1, state_0, msg);
msg = _mm_shuffle_epi32(msg, x86_shuffle_epi32(0x1, 0x0, 0x3, 0x2));
state_0 = _mm_sha256rnds2_epu32(state_0, state_1, msg);
msg_1 = _mm_sha256msg1_epu32(msg_1, msg_2);
tmp = _mm_alignr_epi8(msg_0, msg_3, x86_alignr_epi8(1));
msg_1 = _mm_add_epi32(msg_1, tmp);
msg_1 = _mm_sha256msg2_epu32(msg_1, msg_0);
tmp = _mm_load_si128(((__m128i const*)(&table[13 * 4])));
msg = _mm_add_epi32(msg_1, tmp);
state_1 = _mm_sha256rnds2_epu32(state_1, state_0, msg);
msg = _mm_shuffle_epi32(msg, x86_shuffle_epi32(0x1, 0x0, 0x3, 0x2));
state_0 = _mm_sha256rnds2_epu32(state_0, state_1, msg);
msg_2 = _mm_sha256msg1_epu32(msg_2, msg_3);
tmp = _mm_alignr_epi8(msg_1, msg_0, x86_alignr_epi8(1));
msg_2 = _mm_add_epi32(msg_2, tmp);
msg_2 = _mm_sha256msg2_epu32(msg_2, msg_1);
tmp = _mm_load_si128(((__m128i const*)(&table[14 * 4])));
msg = _mm_add_epi32(msg_2, tmp);
state_1 = _mm_sha256rnds2_epu32(state_1, state_0, msg);
msg = _mm_shuffle_epi32(msg, x86_shuffle_epi32(0x1, 0x0, 0x3, 0x2));
state_0 = _mm_sha256rnds2_epu32(state_0, state_1, msg);
msg_3 = _mm_sha256msg1_epu32(msg_3, msg_0);
tmp = _mm_alignr_epi8(msg_2, msg_1, x86_alignr_epi8(1));
msg_3 = _mm_add_epi32(msg_3, tmp);
msg_3 = _mm_sha256msg2_epu32(msg_3, msg_2);
tmp = _mm_load_si128(((__m128i const*)(&table[15 * 4])));
msg = _mm_add_epi32(msg_3, tmp);
state_1 = _mm_sha256rnds2_epu32(state_1, state_0, msg);
msg = _mm_shuffle_epi32(msg, x86_shuffle_epi32(0x1, 0x0, 0x3, 0x2));
state_0 = _mm_sha256rnds2_epu32(state_0, state_1, msg);
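/* feed forward: add the compressed block into the chaining state */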
state_0 = _mm_add_epi32(state_0, old_0);
state_1 = _mm_add_epi32(state_1, old_1);
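/* undo the ABEF / CDGH repacking before storing the state */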
tmp = _mm_shuffle_epi32(state_0, x86_shuffle_epi32(0x0, 0x1, 0x2, 0x3));
state_1 = _mm_shuffle_epi32(state_1, x86_shuffle_epi32(0x2, 0x3, 0x0, 0x1));
state_0 = mk_mm_blend_epi32(tmp, state_1, x86_blend_epi32(0x1, 0x1, 0x0, 0x0));
state_1 = _mm_alignr_epi8(state_1, tmp, x86_alignr_epi8(2));
_mm_store_si128(((__m128i*)(&sha2->m_state[0])), state_0);
_mm_store_si128(((__m128i*)(&sha2->m_state[4])), state_1);
}
void sha2_block_finish(sha2_block_t* const sha2, u8_t* const partial_block, int const len, u8_t* const digest)
{
u64_t bytes;
u64_t bits;
int rem;
u8_t* beg;
int n;
int i;
assert(sha2);
assert(partial_block);
assert(len >= 0);
assert(len < 64);
assert(digest);
assert((((uintptr_t)(&sha2->m_state[0])) & 0xf) == 0);
assert((((uintptr_t)(partial_block)) & 0xf) == 0);
bytes = (sha2->m_blocks << 6) + len;
bits = bytes << 3;
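/* standard SHA-2 padding: a 0x80 terminator, zeros, then the message length in bits; */
/* if fewer than 8 bytes remain after the terminator, the length spills into an extra block */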
partial_block[len] = 0x80;
rem = 64 - len - 1;
if(rem >= 8)
{
beg = &partial_block[len + 1];
n = rem - 8;
memset(beg, 0x00, n);
}
else
{
beg = &partial_block[len + 1];
n = rem;
memset(beg, 0x00, n);
sha2_block_append_block(sha2, partial_block);
beg = &partial_block[0];
n = 64 - 8;
memset(beg, 0x00, n);
}
u64_to_u8s_be(&bits, &partial_block[64 - 8]);
sha2_block_append_block(sha2, partial_block);
n = 8;
for(i = 0; i != n; ++i)
{
u32_to_u8s_be(&sha2->m_state[i], &digest[i * 4]);
}
}
struct sha2_stream_s
{
sha2_block_t m_base;
int m_len;
alignas(16) u8_t m_block[64];
};
typedef struct sha2_stream_s sha2_stream_t;
void sha2_stream_init(sha2_stream_t* const sha2)
{
sha2_block_init(&sha2->m_base);
sha2->m_len = 0;
}
void sha2_stream_append_bytes(sha2_stream_t* const sha2, u8_t const* const data_ptr, int const data_len)
{
u8_t const* ptr;
int rem;
int space;
int to_copy;
assert(sha2);
assert(data_ptr || data_len == 0);
assert(data_len >= 0);
ptr = data_ptr;
rem = data_len;
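/* buffer input into the 64-byte block and compress every time the block fills */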
while(rem != 0)
{
space = 64 - sha2->m_len;
to_copy = mk_min(rem, space);
memcpy(&sha2->m_block[sha2->m_len], ptr, to_copy);
ptr += to_copy;
rem -= to_copy;
sha2->m_len += to_copy;
if(sha2->m_len == 64)
{
sha2->m_len = 0;
sha2_block_append_block(&sha2->m_base, &sha2->m_block[0]);
}
}
}
void sha2_stream_finish(sha2_stream_t* const sha2, u8_t* const digest)
{
assert(sha2);
assert(digest);
sha2_block_finish(&sha2->m_base, &sha2->m_block[0], sha2->m_len, digest);
}
int main(void)
{
sha2_stream_t sha2;
u8_t digest[32];
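/* hash "1234567890" repeated eight times (80 bytes), fed in five chunks to exercise the buffering */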
sha2_stream_init(&sha2);
sha2_stream_append_bytes(&sha2, ((u8_t const*)("1234567890")), 10);
sha2_stream_append_bytes(&sha2, ((u8_t const*)("1234567890")), 10);
sha2_stream_append_bytes(&sha2, ((u8_t const*)("1234567890")), 10);
sha2_stream_append_bytes(&sha2, ((u8_t const*)("1234567890")), 10);
sha2_stream_append_bytes(&sha2, ((u8_t const*)("1234567890123456789012345678901234567890")), 40);
sha2_stream_finish(&sha2, &digest[0]);
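/* expected digest: f371bc4a311f2b009eef952dd83ca80e2b60026c8e935592d0f9c308453c813e */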
assert(digest[ 0] == 0xf3);
assert(digest[ 1] == 0x71);
assert(digest[ 2] == 0xbc);
assert(digest[ 3] == 0x4a);
assert(digest[ 4] == 0x31);
assert(digest[ 5] == 0x1f);
assert(digest[ 6] == 0x2b);
assert(digest[ 7] == 0x00);
assert(digest[ 8] == 0x9e);
assert(digest[ 9] == 0xef);
assert(digest[10] == 0x95);
assert(digest[11] == 0x2d);
assert(digest[12] == 0xd8);
assert(digest[13] == 0x3c);
assert(digest[14] == 0xa8);
assert(digest[15] == 0x0e);
assert(digest[16] == 0x2b);
assert(digest[17] == 0x60);
assert(digest[18] == 0x02);
assert(digest[19] == 0x6c);
assert(digest[20] == 0x8e);
assert(digest[21] == 0x93);
assert(digest[22] == 0x55);
assert(digest[23] == 0x92);
assert(digest[24] == 0xd0);
assert(digest[25] == 0xf9);
assert(digest[26] == 0xc3);
assert(digest[27] == 0x08);
assert(digest[28] == 0x45);
assert(digest[29] == 0x3c);
assert(digest[30] == 0x81);
assert(digest[31] == 0x3e);
}