#define ghash_start(buf) memset((buf), 0, 16)

void ghash(aes_ctx *ctx, uint8_t *out_buf, const uint8_t *data, size_t len)
{
    uint8_t tbuf[AES_BLOCK_SIZE];
    size_t data_offset = 0;

    // the cache allows incomplete blocks to be queued;
    // the next call to update will concatenate the queue with the new aad
    if(ctx->mode.gcm.aad_cache_len){
        size_t cache_len = ctx->mode.gcm.aad_cache_len;
        data_offset = MIN(len, AES_BLOCK_SIZE - cache_len);
        if(data_offset + cache_len < AES_BLOCK_SIZE){
            // if the new aad is not enough to fill a block, update the queue and stop without processing
            memcpy(&ctx->mode.gcm.aad_cache[cache_len], data, data_offset);
            ctx->mode.gcm.aad_cache_len += data_offset;
            return;
        }
        else {
            // if the new aad is enough to fill a block, concatenate the queue with the rest of the block,
            // then update the hash
            memcpy(tbuf, ctx->mode.gcm.aad_cache, cache_len);
            memcpy(&tbuf[cache_len], data, data_offset);
            xor_buf(tbuf, out_buf, AES_BLOCK_SIZE);
            aes_gf2_mul(out_buf, out_buf, ctx->mode.gcm.ghash_key);
            ctx->mode.gcm.aad_cache_len = 0;
        }
    }

    // now process any remaining aad data
    for(uint24_t idx = data_offset; idx < len; idx += AES_BLOCK_SIZE){
        size_t bytes_copy = MIN(AES_BLOCK_SIZE, len - idx);
        if(bytes_copy < AES_BLOCK_SIZE){
            // partial block: write the bytes to the queue.
            // no return here because this condition will exit the loop on the next iteration anyway
            memcpy(ctx->mode.gcm.aad_cache, &data[idx], bytes_copy);
            ctx->mode.gcm.aad_cache_len = bytes_copy;
        }
        else {
            // full block: fold it into the hash
            memcpy(tbuf, &data[idx], AES_BLOCK_SIZE);
            xor_buf(tbuf, out_buf, AES_BLOCK_SIZE);
            aes_gf2_mul(out_buf, out_buf, ctx->mode.gcm.ghash_key);
        }
    }
}
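
/*
 * Illustrative sketch, not part of the library: the aad_cache above lets GHASH
 * input arrive in arbitrary chunk sizes. Feeding the same 24 bytes in one call
 * or in a 10-byte + 14-byte split should produce the same running hash, provided
 * both contexts share the same ghash_key. Assumes <string.h> is available and
 * that ctx_a and ctx_b were initialized identically.
 */
static int ghash_split_matches(aes_ctx *ctx_a, aes_ctx *ctx_b, const uint8_t data[24])
{
    uint8_t h1[AES_BLOCK_SIZE], h2[AES_BLOCK_SIZE];
    ghash_start(h1);
    ghash_start(h2);
    ghash(ctx_a, h1, data, 24);         // one shot: one full block hashed, 8 bytes cached
    ghash(ctx_b, h2, data, 10);         // split: 10 bytes go straight to the cache
    ghash(ctx_b, h2, &data[10], 14);    // cache + 6 new bytes complete a block, 8 bytes cached
    return memcmp(h1, h2, AES_BLOCK_SIZE) == 0;
}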

void aes_gcm_prepare_iv(aes_ctx *ctx, const uint8_t *iv, size_t iv_len)
{
    uint8_t tbuf[AES_BLOCK_SIZE];

    if (iv_len == 12) {
        /* Prepare block J_0 = IV || 0^31 || 1    [len(IV) = 96] */
        // the IV itself is already in ctx->iv (copied and zero-padded by hashlib_AESLoadKey)
        ctx->iv[AES_BLOCK_SIZE - 1] = 0x01;
    } else {
        /*
         * s = 128 * ceil(len(IV)/128) - len(IV)
         * J_0 = GHASH_H(IV || 0^(s+64) || [len(IV)]_64)
         */
        // GHASH must start from a zero block; hashlib_AESLoadKey has already
        // copied the caller's IV into ctx->iv, so clear it first
        memset(ctx->iv, 0, AES_BLOCK_SIZE);

        // hash the IV, padded to the block size
        memset(tbuf, 0, AES_BLOCK_SIZE);
        memcpy(tbuf, iv, iv_len);
        ghash(ctx, ctx->iv, tbuf, sizeof(tbuf));

        // append the length block: 0^64 || [len(IV)]_64
        memset(tbuf, 0, AES_BLOCK_SIZE>>1);
        bytelen_to_bitlen(iv_len, &tbuf[8]);    // outputs in BE
        ll_byte_swap(&tbuf[8]);

        ghash(ctx, ctx->iv, tbuf, sizeof(tbuf));
    }
}
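
/*
 * Illustrative sketch, not part of the library: for the common 96-bit nonce,
 * the J_0 block derived above is simply IV || 0^31 || 1. This helper builds
 * that block directly; the nonce argument and <string.h> are assumptions.
 */
static void gcm_j0_from_96bit_nonce(const uint8_t nonce[12], uint8_t j0[AES_BLOCK_SIZE])
{
    memcpy(j0, nonce, 12);              // first 12 bytes are the nonce itself
    memset(&j0[12], 0, 3);              // 31 zero bits...
    j0[AES_BLOCK_SIZE - 1] = 0x01;      // ...followed by a 1: the starting counter value
}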

// flag definition: [ unused ][ 4 bits: counter len ][ 4 bits: counter start pos ][ 2 bits: padding mode ][ 2 bits: cipher mode (LSBs) ]

// Generates the key schedule used in every round of encryption.
// "key" is the user-supplied input key and "keysize" is its length in bytes (16, 24, or 32);
// "iv"/"iv_len" supply the initialization vector, and "cipher_flags" selects the cipher mode
// and its options per the layout above.
aes_error_t hashlib_AESLoadKey(aes_ctx* ctx, const BYTE key[], size_t keysize, uint8_t* iv, size_t iv_len, uint24_t cipher_flags)
{
    int Nb=4, Nr, Nk, idx;
    WORD temp, Rcon[]={0x01000000,0x02000000,0x04000000,0x08000000,0x10000000,0x20000000,
                       0x40000000,0x80000000,0x1b000000,0x36000000,0x6c000000,0xd8000000,
                       0xab000000,0x4d000000,0x9a000000};
    uint8_t mode = (cipher_flags & 3);

    if(mode > AES_GCM) return AES_INVALID_CIPHERMODE;
    if(iv_len > AES_BLOCK_SIZE) return AES_INVALID_ARG;
    memset(ctx, 0, sizeof(aes_ctx));
    ctx->cipher_mode = mode;
    keysize <<= 3;      // convert key length from bytes to bits
    switch (keysize) {
        case 128: Nr = 10; Nk = 4; break;
        case 192: Nr = 12; Nk = 6; break;
        case 256: Nr = 14; Nk = 8; break;
        default: return AES_INVALID_ARG;
    }

    memcpy(ctx->iv, iv, iv_len);
    memset(&ctx->iv[iv_len], 0, 16-iv_len);
    ctx->keysize = keysize;
    for (idx=0; idx < Nk; ++idx) {
        ctx->round_keys[idx] = ((uint32_t)(key[4 * idx]) << 24) | ((uint32_t)(key[4 * idx + 1]) << 16) |
                               ((uint32_t)(key[4 * idx + 2]) << 8) | ((uint32_t)(key[4 * idx + 3]));
    }

    for (idx = Nk; idx < Nb * (Nr+1); ++idx) {
        temp = ctx->round_keys[idx - 1];
        if ((idx % Nk) == 0)
            temp = aes_SubWord(KE_ROTWORD(temp)) ^ Rcon[(idx-1)/Nk];
        else if (Nk > 6 && (idx % Nk) == 4)
            temp = aes_SubWord(temp);
        ctx->round_keys[idx] = ctx->round_keys[idx-Nk] ^ temp;
    }

    if(mode == AES_CBC) ctx->mode.cbc.padding_mode = ((uint8_t)(cipher_flags>>2) & FLAGS_GET_LSB2);
    if(mode == AES_CTR) {
        uint8_t ctr_pos = ((uint8_t)(cipher_flags>>4) & FLAGS_GET_LSB4);
        uint8_t ctr_len = ((uint8_t)(cipher_flags>>8) & FLAGS_GET_LSB4);
        if((!ctr_len) && (!ctr_pos)) {ctr_pos = 8; ctr_len = 8;}
        else if(ctr_len && (!ctr_pos)) ctr_pos = AES_BLOCK_SIZE - ctr_len;
        else if(ctr_pos && (!ctr_len)) ctr_len = AES_BLOCK_SIZE - ctr_pos;
        if(ctr_len + ctr_pos > 16) return AES_INVALID_ARG;
        ctx->mode.ctr.counter_pos_start = ctr_pos;
        ctx->mode.ctr.counter_len = ctr_len;
    }
    if(mode == AES_GCM){
        // generate the GHASH key: H = E_K(0^128)
        uint8_t tmp[16] = {0};
        aes_encrypt_block(tmp, ctx->mode.gcm.ghash_key, ctx);

        // derive the J_0 block for GCM mode from the supplied IV
        aes_gcm_prepare_iv(ctx, iv, iv_len);
        memset(ctx->mode.gcm.auth_tag, 0, AES_BLOCK_SIZE);
    }

    return AES_OK;
}
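
/*
 * Usage sketch, not part of the library: loading a 256-bit key for GCM with a
 * 12-byte nonce, and for CBC with PKCS#7 padding. The flag packing mirrors the
 * parsing above (cipher mode in bits 0-1, padding mode in bits 2-3); the key,
 * nonce and IV buffers are placeholders supplied by the caller.
 */
static aes_error_t load_key_examples(aes_ctx *gcm_ctx, aes_ctx *cbc_ctx,
                                     const BYTE key[32], uint8_t nonce[12], uint8_t iv[16])
{
    aes_error_t err;

    // GCM: only the cipher-mode bits are needed; keysize is given in bytes
    err = hashlib_AESLoadKey(gcm_ctx, key, 32, nonce, 12, AES_GCM);
    if(err != AES_OK) return err;

    // CBC: pack the padding scheme into bits 2-3 of cipher_flags
    return hashlib_AESLoadKey(cbc_ctx, key, 32, iv, 16, AES_CBC | (SCHM_PKCS7 << 2));
}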

aes_error_t cryptx_aes_update_aad(aes_ctx* ctx, uint8_t *aad, size_t aad_len){
    if(ctx->cipher_mode != AES_GCM) return AES_INVALID_OPERATION;

    // update the tag for full blocks of aad in the input, caching any partial block
    ghash(ctx, ctx->mode.gcm.auth_tag, aad, aad_len);
    ctx->mode.gcm.aad_len += aad_len;
    return AES_OK;
}

aes_error_t cryptx_aes_digest(aes_ctx* ctx, uint8_t* digest){
    if( (ctx == NULL) || (digest == NULL)) return AES_INVALID_ARG;
    if(ctx->cipher_mode != AES_GCM) return AES_INVALID_CIPHERMODE;
    memcpy(digest, ctx->mode.gcm.auth_digest, AES_BLOCK_SIZE);
    return AES_OK;
}


aes_error_t cryptx_aes_encrypt_and_digest(
    aes_ctx *ctx,
    uint8_t *plaintxt, size_t plaintxt_len,
    uint8_t *aad, size_t aad_len,
    uint8_t *ciphertxt, uint8_t *digest
){
    if(
        (ctx==NULL) ||
        (plaintxt==NULL) ||
        (plaintxt_len==0) ||
        (ciphertxt==NULL) ||
        (digest==NULL)) return AES_INVALID_ARG;

    if(ctx->cipher_mode != AES_GCM) return AES_INVALID_CIPHERMODE;
    aes_error_t internal_resp;

    // hash any associated data into the tag (skipped when no aad is supplied)
    if(aad != NULL && aad_len)
        cryptx_aes_update_aad(ctx, aad, aad_len);
    internal_resp = hashlib_AESEncrypt(ctx, plaintxt, plaintxt_len, ciphertxt);
    memcpy(digest, ctx->mode.gcm.auth_digest, AES_BLOCK_SIZE);

    return internal_resp;
}
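
/*
 * Usage sketch, not part of the library: authenticated encryption with a GCM
 * context loaded by hashlib_AESLoadKey. AAD can be supplied incrementally via
 * cryptx_aes_update_aad before the final encrypt-and-digest call; the tag then
 * covers all AAD seen so far plus the ciphertext. Buffer names and sizes here
 * are placeholders; ct_out needs at least msg_len bytes.
 */
static aes_error_t gcm_seal_example(aes_ctx *gcm_ctx,
                                    uint8_t *hdr, size_t hdr_len,       /* first AAD chunk  */
                                    uint8_t *meta, size_t meta_len,     /* second AAD chunk */
                                    uint8_t *msg, size_t msg_len,
                                    uint8_t *ct_out, uint8_t tag_out[AES_BLOCK_SIZE])
{
    // queue the first AAD chunk; any partial block is cached inside the context
    aes_error_t err = cryptx_aes_update_aad(gcm_ctx, hdr, hdr_len);
    if(err != AES_OK) return err;

    // hash the remaining AAD, encrypt msg into ct_out, and write the 16-byte tag
    return cryptx_aes_encrypt_and_digest(gcm_ctx, msg, msg_len, meta, meta_len, ct_out, tag_out);
}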


#define AES_BLOCKSIZE 16
static const char iso_pad[] = {0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00};
aes_error_t hashlib_AESEncrypt(aes_ctx* ctx, const BYTE in[], size_t in_len, BYTE out[])
{
    BYTE buf[AES_BLOCK_SIZE];
    uint8_t* iv;
    int blocks, idx;

    // validate arguments before dereferencing the context
    if(in==NULL || out==NULL || ctx==NULL) return AES_INVALID_ARG;
    if(in_len == 0) return AES_INVALID_MSG;
    if(ctx->op_assoc == AES_OP_DECRYPT) return AES_INVALID_OPERATION;
    ctx->op_assoc = AES_OP_ENCRYPT;

    iv = ctx->iv;
    blocks = (in_len / AES_BLOCK_SIZE);

    switch(ctx->cipher_mode){
        case AES_CBC:
        {
            size_t bytes_to_copy, bytes_to_pad;
            for(idx = 0; idx <= blocks; idx++){
                bytes_to_copy = MIN(AES_BLOCK_SIZE, in_len - (idx * AES_BLOCK_SIZE));
                bytes_to_pad = AES_BLOCK_SIZE - bytes_to_copy;
                memcpy(buf, &in[idx*AES_BLOCK_SIZE], bytes_to_copy);
                if(idx==blocks){
                    if(ctx->mode.cbc.padding_mode == SCHM_PKCS7) memset(&buf[bytes_to_copy], bytes_to_pad, bytes_to_pad);   // applies PKCS#7 padding scheme
                    if(ctx->mode.cbc.padding_mode == SCHM_ISO2) memcpy(&buf[bytes_to_copy], iso_pad, bytes_to_pad);         // applies ISO-9797 M2 padding scheme
                }
                xor_buf(iv, buf, AES_BLOCK_SIZE);
                aes_encrypt_block(buf, &out[idx * AES_BLOCK_SIZE], ctx);
                memcpy(iv, &out[idx * AES_BLOCK_SIZE], AES_BLOCK_SIZE);     // the IV must be updated for continued use
            }
            break;
        }
        case AES_CTR:
        {
            size_t bytes_to_copy = ctx->mode.ctr.last_block_stop;
            size_t bytes_offset = 0;

            // xor the unused keystream bytes from the last encrypt operation into the new plaintext
            if(bytes_to_copy % AES_BLOCK_SIZE){
                bytes_offset = AES_BLOCK_SIZE - bytes_to_copy;
                memcpy(out, in, bytes_offset);
                xor_buf(&ctx->mode.ctr.last_block[bytes_to_copy], out, bytes_offset);
                blocks = ((in_len - bytes_offset) / AES_BLOCK_SIZE);
            }
            // encrypt the message in CTR mode
            for(idx = 0; idx <= blocks; idx++){
                // remaining bytes after the already-consumed keystream offset
                bytes_to_copy = MIN(AES_BLOCK_SIZE, in_len - bytes_offset - (idx * AES_BLOCK_SIZE));
                memcpy(&out[idx*AES_BLOCK_SIZE+bytes_offset], &in[idx*AES_BLOCK_SIZE+bytes_offset], bytes_to_copy);
                aes_encrypt_block(iv, buf, ctx);
                xor_buf(buf, &out[idx*AES_BLOCK_SIZE+bytes_offset], bytes_to_copy);
                increment_iv(iv, ctx->mode.ctr.counter_pos_start, ctx->mode.ctr.counter_len);   // increment the counter for continued use
                if(idx==blocks){
                    // cache the keystream of the final block for the next call
                    memcpy(ctx->mode.ctr.last_block, buf, AES_BLOCK_SIZE);
                    ctx->mode.ctr.last_block_stop = bytes_to_copy;
                }
            }
            break;
        }
        case AES_GCM:
        {
#define AES_GCM_NONCE_LEN 12
#define AES_GCM_CTR_LEN 4
            uint8_t tbuf[AES_BLOCK_SIZE];
            uint8_t c0[AES_BLOCK_SIZE];
            uint8_t *tag = ctx->mode.gcm.auth_tag;
            size_t bytes_to_copy = ctx->mode.gcm.last_block_stop;
            size_t bytes_offset = 0;

            // save a copy of CTR0
            memcpy(c0, iv, AES_BLOCK_SIZE);

            // if a partial aad block is cached, pad it with 0s and fold it into the tag
            if(ctx->mode.gcm.aad_cache_len){
                memset(tbuf, 0, AES_BLOCK_SIZE);
                ghash(ctx, tag, tbuf, AES_BLOCK_SIZE - ctx->mode.gcm.aad_cache_len);
            }

            // xor the unused keystream bytes from the last encrypt operation with the new plaintext
            if(bytes_to_copy % AES_BLOCK_SIZE){
                bytes_offset = AES_BLOCK_SIZE - bytes_to_copy;
                memcpy(out, in, bytes_offset);
                xor_buf(&ctx->mode.gcm.last_block[bytes_to_copy], out, bytes_offset);
                blocks = ((in_len - bytes_offset) / AES_BLOCK_SIZE);
            }

            // start at CTR1
            increment_iv(iv, AES_GCM_NONCE_LEN, AES_GCM_CTR_LEN);

            // encrypt the remaining plaintext
            for(idx = 0; idx <= blocks; idx++){
                bytes_to_copy = MIN(AES_BLOCK_SIZE, in_len - bytes_offset - (idx * AES_BLOCK_SIZE));
                memcpy(&out[idx*AES_BLOCK_SIZE+bytes_offset], &in[idx*AES_BLOCK_SIZE+bytes_offset], bytes_to_copy);
                aes_encrypt_block(iv, tbuf, ctx);
                xor_buf(tbuf, &out[idx*AES_BLOCK_SIZE+bytes_offset], bytes_to_copy);
                increment_iv(iv, AES_GCM_NONCE_LEN, AES_GCM_CTR_LEN);   // increment the counter for continued use
                if(idx==blocks){
                    // cache the keystream of the final block (tbuf holds the keystream in GCM mode, not buf)
                    memcpy(ctx->mode.gcm.last_block, tbuf, AES_BLOCK_SIZE);
                    ctx->mode.gcm.last_block_stop = bytes_to_copy;
                }
            }

            // authenticate the ciphertext
            ghash(ctx, tag, out, in_len);

            // if a partial ciphertext block is cached, pad it with 0s and fold it into the tag
            if(ctx->mode.gcm.aad_cache_len){
                memset(tbuf, 0, AES_BLOCK_SIZE);
                ghash(ctx, tag, tbuf, AES_BLOCK_SIZE - ctx->mode.gcm.aad_cache_len);
            }
            // at this point, tag = GHASH(0-padded aad || 0-padded ciphertext)

            // final tag is GHASH(0-padded aad || 0-padded ciphertext || u64-be-aad-bitlen || u64-be-ciphertext-bitlen)
            bytelen_to_bitlen(ctx->mode.gcm.aad_len, tbuf);
            ll_byte_swap(tbuf);
            bytelen_to_bitlen(in_len, &tbuf[8]);
            ll_byte_swap(&tbuf[8]);
            ghash(ctx, tag, tbuf, AES_BLOCK_SIZE);

            // encrypt the auth tag with CTR0
            aes_encrypt_block(c0, c0, ctx);
            memcpy(ctx->mode.gcm.auth_digest, tag, AES_BLOCK_SIZE);
            xor_buf(c0, ctx->mode.gcm.auth_digest, AES_BLOCK_SIZE);

            break;
        }

        default: return AES_INVALID_CIPHERMODE;
    }

    return AES_OK;
}
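
/*
 * Usage sketch, not part of the library: CBC encryption with a context loaded
 * for AES_CBC and PKCS#7 padding. With padding enabled the output is rounded up
 * to the next full block, so ct_out must hold at least
 * (msg_len / AES_BLOCK_SIZE + 1) * AES_BLOCK_SIZE bytes. The buffer names are
 * placeholders; the chaining IV is updated inside the context between calls.
 */
static aes_error_t cbc_encrypt_example(aes_ctx *cbc_ctx, const BYTE *msg, size_t msg_len, BYTE *ct_out)
{
    return hashlib_AESEncrypt(cbc_ctx, msg, msg_len, ct_out);
}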