00001
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037 #ifndef POLARSSL_BN_MUL_H
00038 #define POLARSSL_BN_MUL_H
00039
00040 #include "polarssl/config.h"
00041
00042 #if defined(POLARSSL_HAVE_ASM)
00043
00044 #if defined(__GNUC__)
00045 #if defined(__i386__)
00046
/*
 * i386 (GCC inline assembly).
 *
 * MULADDC_INIT opens a single asm() statement; MULADDC_CORE /
 * MULADDC_HUIT and MULADDC_STOP are plain string fragments pasted into
 * that same statement, so these macros can only be used together, in
 * order, inside one expression.
 *
 * Register roles: esi = s (source limbs), edi = d (destination limbs),
 * ecx = c (carry), ebx = b (multiplier).  The caller's ebx (the PIC
 * register, which may not appear in the clobber list) is saved into
 * t via %0 here and restored from %4 in MULADDC_STOP.
 */
00047 #define MULADDC_INIT \
00048 asm( " \
00049 movl %%ebx, %0; \
00050 movl %5, %%esi; \
00051 movl %6, %%edi; \
00052 movl %7, %%ecx; \
00053 movl %8, %%ebx; \
00054 "
00055
/*
 * One limb step: edx:eax = *s++ * b, add carry (ecx) and *d,
 * propagate the carry into edx, store the low word to *d++
 * (lodsl/stosl advance esi/edi), keep the new carry in ecx.
 */
00056 #define MULADDC_CORE \
00057 " \
00058 lodsl; \
00059 mull %%ebx; \
00060 addl %%ecx, %%eax; \
00061 adcl $0, %%edx; \
00062 addl (%%edi), %%eax; \
00063 adcl $0, %%edx; \
00064 movl %%edx, %%ecx; \
00065 stosl; \
00066 "
00067
00068 #if defined(POLARSSL_HAVE_SSE2)
00069
/*
 * Eight limb steps at once using SSE2 64-bit multiplies (pmuludq) on
 * MMX registers.  mm0 holds b; mm1 accumulates a 64-bit running sum
 * whose low 32 bits are stored to d[i] and whose high 32 bits
 * (psrlq $32) carry into the next limb; the final carry is moved back
 * into ecx for MULADDC_CORE/STOP.
 */
00070 #define MULADDC_HUIT \
00071 " \
00072 movd %%ecx, %%mm1; \
00073 movd %%ebx, %%mm0; \
00074 movd (%%edi), %%mm3; \
00075 paddq %%mm3, %%mm1; \
00076 movd (%%esi), %%mm2; \
00077 pmuludq %%mm0, %%mm2; \
00078 movd 4(%%esi), %%mm4; \
00079 pmuludq %%mm0, %%mm4; \
00080 movd 8(%%esi), %%mm6; \
00081 pmuludq %%mm0, %%mm6; \
00082 movd 12(%%esi), %%mm7; \
00083 pmuludq %%mm0, %%mm7; \
00084 paddq %%mm2, %%mm1; \
00085 movd 4(%%edi), %%mm3; \
00086 paddq %%mm4, %%mm3; \
00087 movd 8(%%edi), %%mm5; \
00088 paddq %%mm6, %%mm5; \
00089 movd 12(%%edi), %%mm4; \
00090 paddq %%mm4, %%mm7; \
00091 movd %%mm1, (%%edi); \
00092 movd 16(%%esi), %%mm2; \
00093 pmuludq %%mm0, %%mm2; \
00094 psrlq $32, %%mm1; \
00095 movd 20(%%esi), %%mm4; \
00096 pmuludq %%mm0, %%mm4; \
00097 paddq %%mm3, %%mm1; \
00098 movd 24(%%esi), %%mm6; \
00099 pmuludq %%mm0, %%mm6; \
00100 movd %%mm1, 4(%%edi); \
00101 psrlq $32, %%mm1; \
00102 movd 28(%%esi), %%mm3; \
00103 pmuludq %%mm0, %%mm3; \
00104 paddq %%mm5, %%mm1; \
00105 movd 16(%%edi), %%mm5; \
00106 paddq %%mm5, %%mm2; \
00107 movd %%mm1, 8(%%edi); \
00108 psrlq $32, %%mm1; \
00109 paddq %%mm7, %%mm1; \
00110 movd 20(%%edi), %%mm5; \
00111 paddq %%mm5, %%mm4; \
00112 movd %%mm1, 12(%%edi); \
00113 psrlq $32, %%mm1; \
00114 paddq %%mm2, %%mm1; \
00115 movd 24(%%edi), %%mm5; \
00116 paddq %%mm5, %%mm6; \
00117 movd %%mm1, 16(%%edi); \
00118 psrlq $32, %%mm1; \
00119 paddq %%mm4, %%mm1; \
00120 movd 28(%%edi), %%mm5; \
00121 paddq %%mm5, %%mm3; \
00122 movd %%mm1, 20(%%edi); \
00123 psrlq $32, %%mm1; \
00124 paddq %%mm6, %%mm1; \
00125 movd %%mm1, 24(%%edi); \
00126 psrlq $32, %%mm1; \
00127 paddq %%mm3, %%mm1; \
00128 movd %%mm1, 28(%%edi); \
00129 addl $32, %%edi; \
00130 addl $32, %%esi; \
00131 psrlq $32, %%mm1; \
00132 movd %%mm1, %%ecx; \
00133 "
00134
/*
 * Close the asm statement (SSE2 variant): emms leaves MMX state so the
 * FPU is usable again, ebx is restored from t (%4), and the carry and
 * advanced pointers are written back to c, d, s.
 */
00135 #define MULADDC_STOP \
00136 " \
00137 emms; \
00138 movl %4, %%ebx; \
00139 movl %%ecx, %1; \
00140 movl %%edi, %2; \
00141 movl %%esi, %3; \
00142 " \
00143 : "=m" (t), "=m" (c), "=m" (d), "=m" (s) \
00144 : "m" (t), "m" (s), "m" (d), "m" (c), "m" (b) \
00145 : "eax", "ecx", "edx", "esi", "edi" \
00146 );
00147
00148 #else
00149
/* Non-SSE2 variant of MULADDC_STOP: identical but without emms. */
00150 #define MULADDC_STOP \
00151 " \
00152 movl %4, %%ebx; \
00153 movl %%ecx, %1; \
00154 movl %%edi, %2; \
00155 movl %%esi, %3; \
00156 " \
00157 : "=m" (t), "=m" (c), "=m" (d), "=m" (s) \
00158 : "m" (t), "m" (s), "m" (d), "m" (c), "m" (b) \
00159 : "eax", "ecx", "edx", "esi", "edi" \
00160 );
00161 #endif
00162 #endif
00163
00164 #if defined(__amd64__) || defined (__x86_64__)
00165
/*
 * AMD64 / x86-64: rsi = s, rdi = d, rcx = c, rbx = b; r8 is kept at
 * zero so the carry can be reset with a plain movq inside CORE.
 *
 * NOTE(review): unlike the i386 code above, every instruction is a
 * separate asm() statement communicating through fixed registers, and
 * the clobber list appears only on the final statement in
 * MULADDC_STOP.  The compiler is not told that the intermediate
 * statements use rax/rbx/rcx/rdx/rsi/rdi/r8, so it is in principle
 * free to reuse them in between -- verify against the target
 * toolchain (later versions of this code merge the sequence into one
 * asm block with proper constraints).
 */
00166 #define MULADDC_INIT \
00167 asm( "movq %0, %%rsi " :: "m" (s)); \
00168 asm( "movq %0, %%rdi " :: "m" (d)); \
00169 asm( "movq %0, %%rcx " :: "m" (c)); \
00170 asm( "movq %0, %%rbx " :: "m" (b)); \
00171 asm( "xorq %r8, %r8 " );
00172
/*
 * One limb: rdx:rax = *s * b; add carry, add into *d in memory,
 * fold the carries into rcx, advance both pointers by 8.
 */
00173 #define MULADDC_CORE \
00174 asm( "movq (%rsi),%rax " ); \
00175 asm( "mulq %rbx " ); \
00176 asm( "addq $8, %rsi " ); \
00177 asm( "addq %rcx, %rax " ); \
00178 asm( "movq %r8, %rcx " ); \
00179 asm( "adcq $0, %rdx " ); \
00180 asm( "nop " ); \
00181 asm( "addq %rax, (%rdi) " ); \
00182 asm( "adcq %rdx, %rcx " ); \
00183 asm( "addq $8, %rdi " );
00184
/* Write carry and advanced pointers back; declare all clobbers here. */
00185 #define MULADDC_STOP \
00186 asm( "movq %%rcx, %0 " : "=m" (c)); \
00187 asm( "movq %%rdi, %0 " : "=m" (d)); \
00188 asm( "movq %%rsi, %0 " : "=m" (s) :: \
00189 "rax", "rcx", "rdx", "rbx", "rsi", "rdi", "r8" );
00190
00191 #endif
00192
00193 #if defined(__mc68020__) || defined(__mcpu32__)
00194
/*
 * Motorola 68020+ / CPU32: a2 = s, a3 = d, d3 = c (carry), d2 = b;
 * d0 is kept at zero for use with addxl (add with extend/carry).
 */
00195 #define MULADDC_INIT \
00196 asm( "movl %0, %%a2 " :: "m" (s)); \
00197 asm( "movl %0, %%a3 " :: "m" (d)); \
00198 asm( "movl %0, %%d3 " :: "m" (c)); \
00199 asm( "movl %0, %%d2 " :: "m" (b)); \
00200 asm( "moveq #0, %d0 " );
00201
/*
 * One limb: mulul produces the 64-bit product in the d4:d1 pair;
 * add the old carry and *d (post-incrementing both pointers), and
 * collect the new carry in d3 via addxl against the zero in d0.
 */
00202 #define MULADDC_CORE \
00203 asm( "movel %a2@+, %d1 " ); \
00204 asm( "mulul %d2, %d4:%d1 " ); \
00205 asm( "addl %d3, %d1 " ); \
00206 asm( "addxl %d0, %d4 " ); \
00207 asm( "moveq #0, %d3 " ); \
00208 asm( "addl %d1, %a3@+ " ); \
00209 asm( "addxl %d4, %d3 " );
00210
/* Write carry and advanced pointers back to c, d, s. */
00211 #define MULADDC_STOP \
00212 asm( "movl %%d3, %0 " : "=m" (c)); \
00213 asm( "movl %%a3, %0 " : "=m" (d)); \
00214 asm( "movl %%a2, %0 " : "=m" (s) :: \
00215 "d0", "d1", "d2", "d3", "d4", "a2", "a3" );
00216
/*
 * Eight unrolled limb steps; the carry alternates between d3 and d4
 * so no extra moves are needed between iterations.  (Defined after
 * MULADDC_STOP here, but used between INIT and STOP by the caller.)
 */
00217 #define MULADDC_HUIT \
00218 asm( "movel %a2@+, %d1 " ); \
00219 asm( "mulul %d2, %d4:%d1 " ); \
00220 asm( "addxl %d3, %d1 " ); \
00221 asm( "addxl %d0, %d4 " ); \
00222 asm( "addl %d1, %a3@+ " ); \
00223 asm( "movel %a2@+, %d1 " ); \
00224 asm( "mulul %d2, %d3:%d1 " ); \
00225 asm( "addxl %d4, %d1 " ); \
00226 asm( "addxl %d0, %d3 " ); \
00227 asm( "addl %d1, %a3@+ " ); \
00228 asm( "movel %a2@+, %d1 " ); \
00229 asm( "mulul %d2, %d4:%d1 " ); \
00230 asm( "addxl %d3, %d1 " ); \
00231 asm( "addxl %d0, %d4 " ); \
00232 asm( "addl %d1, %a3@+ " ); \
00233 asm( "movel %a2@+, %d1 " ); \
00234 asm( "mulul %d2, %d3:%d1 " ); \
00235 asm( "addxl %d4, %d1 " ); \
00236 asm( "addxl %d0, %d3 " ); \
00237 asm( "addl %d1, %a3@+ " ); \
00238 asm( "movel %a2@+, %d1 " ); \
00239 asm( "mulul %d2, %d4:%d1 " ); \
00240 asm( "addxl %d3, %d1 " ); \
00241 asm( "addxl %d0, %d4 " ); \
00242 asm( "addl %d1, %a3@+ " ); \
00243 asm( "movel %a2@+, %d1 " ); \
00244 asm( "mulul %d2, %d3:%d1 " ); \
00245 asm( "addxl %d4, %d1 " ); \
00246 asm( "addxl %d0, %d3 " ); \
00247 asm( "addl %d1, %a3@+ " ); \
00248 asm( "movel %a2@+, %d1 " ); \
00249 asm( "mulul %d2, %d4:%d1 " ); \
00250 asm( "addxl %d3, %d1 " ); \
00251 asm( "addxl %d0, %d4 " ); \
00252 asm( "addl %d1, %a3@+ " ); \
00253 asm( "movel %a2@+, %d1 " ); \
00254 asm( "mulul %d2, %d3:%d1 " ); \
00255 asm( "addxl %d4, %d1 " ); \
00256 asm( "addxl %d0, %d3 " ); \
00257 asm( "addl %d1, %a3@+ " ); \
00258 asm( "addxl %d0, %d3 " );
00259
00260 #endif
00261
00262 #if defined(__powerpc__) || defined(__ppc__)
/*
 * PowerPC, 32- and 64-bit.  The four variants below differ only in
 * word size (ld/std/mulld/mulhdu with an 8-byte step on ppc64 vs
 * lwz/stw/mullw/mulhwu with a 4-byte step on ppc32) and in register
 * syntax: Apple's assembler wants bare "r3" while ELF assemblers want
 * "%r3" (written "%%r3" where it would clash with asm operand
 * numbering).
 *
 * Common scheme: r3 = s, r4 = d, r5 = c (carry), r6 = b.  Both
 * pointers are pre-decremented by one word so the update-form
 * loads/stores (ldu/stdu, lwzu/stwu) can advance them each step, and
 * "addic r5, r5, 0" clears the carry (CA) bit before the first CORE,
 * which then chains carries through adde/addze.
 */
00263 #if defined(__powerpc64__) || defined(__ppc64__)
00264
00265 #if defined(__MACH__) && defined(__APPLE__)
00266
/* 64-bit, Apple assembler syntax. */
00267 #define MULADDC_INIT \
00268 asm( "ld r3, %0 " :: "m" (s)); \
00269 asm( "ld r4, %0 " :: "m" (d)); \
00270 asm( "ld r5, %0 " :: "m" (c)); \
00271 asm( "ld r6, %0 " :: "m" (b)); \
00272 asm( "addi r3, r3, -8 " ); \
00273 asm( "addi r4, r4, -8 " ); \
00274 asm( "addic r5, r5, 0 " );
00275
/* One limb: r9:r8 = *++s * b; add carry and *d, store, carry -> r5. */
00276 #define MULADDC_CORE \
00277 asm( "ldu r7, 8(r3) " ); \
00278 asm( "mulld r8, r7, r6 " ); \
00279 asm( "mulhdu r9, r7, r6 " ); \
00280 asm( "adde r8, r8, r5 " ); \
00281 asm( "ld r7, 8(r4) " ); \
00282 asm( "addze r5, r9 " ); \
00283 asm( "addc r8, r8, r7 " ); \
00284 asm( "stdu r8, 8(r4) " );
00285
/* Fold the last CA bit into r5, undo the pre-decrement, write back. */
00286 #define MULADDC_STOP \
00287 asm( "addze r5, r5 " ); \
00288 asm( "addi r4, r4, 8 " ); \
00289 asm( "addi r3, r3, 8 " ); \
00290 asm( "std r5, %0 " : "=m" (c)); \
00291 asm( "std r4, %0 " : "=m" (d)); \
00292 asm( "std r3, %0 " : "=m" (s) :: \
00293 "r3", "r4", "r5", "r6", "r7", "r8", "r9" );
00294
00295 #else
00296
/* 64-bit, ELF assembler syntax (%rN register names). */
00297 #define MULADDC_INIT \
00298 asm( "ld %%r3, %0 " :: "m" (s)); \
00299 asm( "ld %%r4, %0 " :: "m" (d)); \
00300 asm( "ld %%r5, %0 " :: "m" (c)); \
00301 asm( "ld %%r6, %0 " :: "m" (b)); \
00302 asm( "addi %r3, %r3, -8 " ); \
00303 asm( "addi %r4, %r4, -8 " ); \
00304 asm( "addic %r5, %r5, 0 " );
00305
00306 #define MULADDC_CORE \
00307 asm( "ldu %r7, 8(%r3) " ); \
00308 asm( "mulld %r8, %r7, %r6 " ); \
00309 asm( "mulhdu %r9, %r7, %r6 " ); \
00310 asm( "adde %r8, %r8, %r5 " ); \
00311 asm( "ld %r7, 8(%r4) " ); \
00312 asm( "addze %r5, %r9 " ); \
00313 asm( "addc %r8, %r8, %r7 " ); \
00314 asm( "stdu %r8, 8(%r4) " );
00315
00316 #define MULADDC_STOP \
00317 asm( "addze %r5, %r5 " ); \
00318 asm( "addi %r4, %r4, 8 " ); \
00319 asm( "addi %r3, %r3, 8 " ); \
00320 asm( "std %%r5, %0 " : "=m" (c)); \
00321 asm( "std %%r4, %0 " : "=m" (d)); \
00322 asm( "std %%r3, %0 " : "=m" (s) :: \
00323 "r3", "r4", "r5", "r6", "r7", "r8", "r9" );
00324
00325 #endif
00326
00327 #else
00328
00329 #if defined(__MACH__) && defined(__APPLE__)
00330
/* 32-bit, Apple assembler syntax. */
00331 #define MULADDC_INIT \
00332 asm( "lwz r3, %0 " :: "m" (s)); \
00333 asm( "lwz r4, %0 " :: "m" (d)); \
00334 asm( "lwz r5, %0 " :: "m" (c)); \
00335 asm( "lwz r6, %0 " :: "m" (b)); \
00336 asm( "addi r3, r3, -4 " ); \
00337 asm( "addi r4, r4, -4 " ); \
00338 asm( "addic r5, r5, 0 " );
00339
00340 #define MULADDC_CORE \
00341 asm( "lwzu r7, 4(r3) " ); \
00342 asm( "mullw r8, r7, r6 " ); \
00343 asm( "mulhwu r9, r7, r6 " ); \
00344 asm( "adde r8, r8, r5 " ); \
00345 asm( "lwz r7, 4(r4) " ); \
00346 asm( "addze r5, r9 " ); \
00347 asm( "addc r8, r8, r7 " ); \
00348 asm( "stwu r8, 4(r4) " );
00349
00350 #define MULADDC_STOP \
00351 asm( "addze r5, r5 " ); \
00352 asm( "addi r4, r4, 4 " ); \
00353 asm( "addi r3, r3, 4 " ); \
00354 asm( "stw r5, %0 " : "=m" (c)); \
00355 asm( "stw r4, %0 " : "=m" (d)); \
00356 asm( "stw r3, %0 " : "=m" (s) :: \
00357 "r3", "r4", "r5", "r6", "r7", "r8", "r9" );
00358
00359 #else
00360
/* 32-bit, ELF assembler syntax. */
00361 #define MULADDC_INIT \
00362 asm( "lwz %%r3, %0 " :: "m" (s)); \
00363 asm( "lwz %%r4, %0 " :: "m" (d)); \
00364 asm( "lwz %%r5, %0 " :: "m" (c)); \
00365 asm( "lwz %%r6, %0 " :: "m" (b)); \
00366 asm( "addi %r3, %r3, -4 " ); \
00367 asm( "addi %r4, %r4, -4 " ); \
00368 asm( "addic %r5, %r5, 0 " );
00369
00370 #define MULADDC_CORE \
00371 asm( "lwzu %r7, 4(%r3) " ); \
00372 asm( "mullw %r8, %r7, %r6 " ); \
00373 asm( "mulhwu %r9, %r7, %r6 " ); \
00374 asm( "adde %r8, %r8, %r5 " ); \
00375 asm( "lwz %r7, 4(%r4) " ); \
00376 asm( "addze %r5, %r9 " ); \
00377 asm( "addc %r8, %r8, %r7 " ); \
00378 asm( "stwu %r8, 4(%r4) " );
00379
00380 #define MULADDC_STOP \
00381 asm( "addze %r5, %r5 " ); \
00382 asm( "addi %r4, %r4, 4 " ); \
00383 asm( "addi %r3, %r3, 4 " ); \
00384 asm( "stw %%r5, %0 " : "=m" (c)); \
00385 asm( "stw %%r4, %0 " : "=m" (d)); \
00386 asm( "stw %%r3, %0 " : "=m" (s) :: \
00387 "r3", "r4", "r5", "r6", "r7", "r8", "r9" );
00388
00389 #endif
00390
00391 #endif
00392 #endif
00393
00394 #if defined(__sparc__)
00395
/*
 * SPARC V8 (32-bit): o0 = s, o1 = d, o2 = c (carry), o3 = b.
 * umul leaves the low 32 bits of the product in the destination and
 * the high 32 bits in the %y register (read back with "rd %y");
 * carries are folded in with addcc/addx.
 *
 * NOTE(review): this uses 32-bit ld/st and umul only -- __sparc__ is
 * also defined on sparc64 targets, where this would be wrong; confirm
 * that 64-bit SPARC builds do not reach this path.
 */
00396 #define MULADDC_INIT \
00397 asm( "ld %0, %%o0 " :: "m" (s)); \
00398 asm( "ld %0, %%o1 " :: "m" (d)); \
00399 asm( "ld %0, %%o2 " :: "m" (c)); \
00400 asm( "ld %0, %%o3 " :: "m" (b));
00401
/* One limb: o4 = *s++ * b + c + *d; store to *d++, new carry in o2. */
00402 #define MULADDC_CORE \
00403 asm( "ld [%o0], %o4 " ); \
00404 asm( "inc 4, %o0 " ); \
00405 asm( "ld [%o1], %o5 " ); \
00406 asm( "umul %o3, %o4, %o4 " ); \
00407 asm( "addcc %o4, %o2, %o4 " ); \
00408 asm( "rd %y, %g1 " ); \
00409 asm( "addx %g1, 0, %g1 " ); \
00410 asm( "addcc %o4, %o5, %o4 " ); \
00411 asm( "st %o4, [%o1] " ); \
00412 asm( "addx %g1, 0, %o2 " ); \
00413 asm( "inc 4, %o1 " );
00414
/* Write carry and advanced pointers back to c, d, s. */
00415 #define MULADDC_STOP \
00416 asm( "st %%o2, %0 " : "=m" (c)); \
00417 asm( "st %%o1, %0 " : "=m" (d)); \
00418 asm( "st %%o0, %0 " : "=m" (s) :: \
00419 "g1", "o0", "o1", "o2", "o3", "o4", "o5" );
00420
00421 #endif
00422
00423 #if defined(__microblaze__) || defined(microblaze)
00424
/*
 * Xilinx MicroBlaze: r3 = s, r4 = d, r5 = c (carry), and b is split
 * into halves -- r7 = b & 0xffff (low 16), r6 = b >> 16 (high 16) --
 * because "mul" only yields the low 32 bits of a product, so the full
 * 32x32->64 multiply is assembled from four 16x16 partial products.
 * r0 is the hardwired zero register, used with addc to collect
 * carries.
 */
00425 #define MULADDC_INIT \
00426 asm( "lwi r3, %0 " :: "m" (s)); \
00427 asm( "lwi r4, %0 " :: "m" (d)); \
00428 asm( "lwi r5, %0 " :: "m" (c)); \
00429 asm( "lwi r6, %0 " :: "m" (b)); \
00430 asm( "andi r7, r6, 0xffff" ); \
00431 asm( "bsrli r6, r6, 16 " );
00432
/*
 * One limb: load *s as two halfwords (r8 then r9), form the four
 * partial products (r10/r11 cross terms, r12 low, r13 high), combine
 * them into the 64-bit pair r13:r12, add *d and the carry, store the
 * low word and keep the high word as the new carry in r5.
 *
 * NOTE(review): r9 -- the halfword at offset 2 -- is used as the LOW
 * half of the limb (r12 = r9 * r7), i.e. a big-endian halfword
 * layout; confirm for little-endian MicroBlaze configurations.
 */
00433 #define MULADDC_CORE \
00434 asm( "lhui r8, r3, 0 " ); \
00435 asm( "addi r3, r3, 2 " ); \
00436 asm( "lhui r9, r3, 0 " ); \
00437 asm( "addi r3, r3, 2 " ); \
00438 asm( "mul r10, r9, r6 " ); \
00439 asm( "mul r11, r8, r7 " ); \
00440 asm( "mul r12, r9, r7 " ); \
00441 asm( "mul r13, r8, r6 " ); \
00442 asm( "bsrli r8, r10, 16 " ); \
00443 asm( "bsrli r9, r11, 16 " ); \
00444 asm( "add r13, r13, r8 " ); \
00445 asm( "add r13, r13, r9 " ); \
00446 asm( "bslli r10, r10, 16 " ); \
00447 asm( "bslli r11, r11, 16 " ); \
00448 asm( "add r12, r12, r10 " ); \
00449 asm( "addc r13, r13, r0 " ); \
00450 asm( "add r12, r12, r11 " ); \
00451 asm( "addc r13, r13, r0 " ); \
00452 asm( "lwi r10, r4, 0 " ); \
00453 asm( "add r12, r12, r10 " ); \
00454 asm( "addc r13, r13, r0 " ); \
00455 asm( "add r12, r12, r5 " ); \
00456 asm( "addc r5, r13, r0 " ); \
00457 asm( "swi r12, r4, 0 " ); \
00458 asm( "addi r4, r4, 4 " );
00459
/* Write carry and advanced pointers back to c, d, s. */
00460 #define MULADDC_STOP \
00461 asm( "swi r5, %0 " : "=m" (c)); \
00462 asm( "swi r4, %0 " : "=m" (d)); \
00463 asm( "swi r3, %0 " : "=m" (s) :: \
00464 "r3", "r4" , "r5" , "r6" , "r7" , "r8" , \
00465 "r9", "r10", "r11", "r12", "r13" );
00466
00467 #endif
00468
00469 #if defined(__tricore__)
00470
/*
 * Infineon TriCore: a2 = s, a3 = d, d4 = c (carry), d1 = b; d5 is
 * zeroed so that the register pair e4 (= d4:d5) holds the carry as a
 * 64-bit value for madd.u.
 */
00471 #define MULADDC_INIT \
00472 asm( "ld.a %%a2, %0 " :: "m" (s)); \
00473 asm( "ld.a %%a3, %0 " :: "m" (d)); \
00474 asm( "ld.w %%d4, %0 " :: "m" (c)); \
00475 asm( "ld.w %%d1, %0 " :: "m" (b)); \
00476 asm( "xor %d5, %d5 " );
00477
/*
 * One limb: madd.u computes e2 (d2:d3) = e4 + *s++ * b as a 64-bit
 * multiply-accumulate; *d is then added with carry (addx/addc), the
 * high word becomes the new carry in d4, and the low word is stored
 * to *d++.
 */
00478 #define MULADDC_CORE \
00479 asm( "ld.w %d0, [%a2+] " ); \
00480 asm( "madd.u %e2, %e4, %d0, %d1 " ); \
00481 asm( "ld.w %d0, [%a3] " ); \
00482 asm( "addx %d2, %d2, %d0 " ); \
00483 asm( "addc %d3, %d3, 0 " ); \
00484 asm( "mov %d4, %d3 " ); \
00485 asm( "st.w [%a3+], %d2 " );
00486
/* Write carry and advanced pointers back to c, d, s. */
00487 #define MULADDC_STOP \
00488 asm( "st.w %0, %%d4 " : "=m" (c)); \
00489 asm( "st.a %0, %%a3 " : "=m" (d)); \
00490 asm( "st.a %0, %%a2 " : "=m" (s) :: \
00491 "d0", "d1", "e2", "d4", "a2", "a3" );
00492
00493 #endif
00494
00495 #if defined(__arm__)
00496
/*
 * ARM (32-bit): r0 = s, r1 = d, r2 = c (carry), r3 = b.
 */
00497 #define MULADDC_INIT \
00498 asm( "ldr r0, %0 " :: "m" (s)); \
00499 asm( "ldr r1, %0 " :: "m" (d)); \
00500 asm( "ldr r2, %0 " :: "m" (c)); \
00501 asm( "ldr r3, %0 " :: "m" (b));
00502
/*
 * One limb: umlal accumulates r3 * r4 into the 64-bit pair r5:r2
 * (r2 already holding the carry, r5 zeroed first); *d is then added
 * to the low word, the carry from that add is folded into r2, and
 * the result is stored to *d with post-increment addressing.
 */
00503 #define MULADDC_CORE \
00504 asm( "ldr r4, [r0], #4 " ); \
00505 asm( "mov r5, #0 " ); \
00506 asm( "ldr r6, [r1] " ); \
00507 asm( "umlal r2, r5, r3, r4 " ); \
00508 asm( "adds r7, r6, r2 " ); \
00509 asm( "adc r2, r5, #0 " ); \
00510 asm( "str r7, [r1], #4 " );
00511
/* Write carry and advanced pointers back to c, d, s. */
00512 #define MULADDC_STOP \
00513 asm( "str r2, %0 " : "=m" (c)); \
00514 asm( "str r1, %0 " : "=m" (d)); \
00515 asm( "str r0, %0 " : "=m" (s) :: \
00516 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7" );
00517
00518 #endif
00519
00521 #if defined(__alpha__)
00522
/*
 * DEC Alpha: $1 = s, $2 = d, $3 = c (carry), $4 = b.  Alpha has no
 * carry flag, so carries are reconstructed explicitly with cmpult
 * (compare unsigned less-than) after each addq; umulh supplies the
 * high 64 bits of the product.
 */
00523 #define MULADDC_INIT \
00524 asm( "ldq $1, %0 " :: "m" (s)); \
00525 asm( "ldq $2, %0 " :: "m" (d)); \
00526 asm( "ldq $3, %0 " :: "m" (c)); \
00527 asm( "ldq $4, %0 " :: "m" (b));
00528
/*
 * One limb: $6:$7 = *s++ * b; add carry and *d, detecting overflow of
 * each addq via cmpult, then fold the high word and both carry bits
 * into $3 for the next iteration.
 */
00529 #define MULADDC_CORE \
00530 asm( "ldq $6, 0($1) " ); \
00531 asm( "addq $1, 8, $1 " ); \
00532 asm( "mulq $6, $4, $7 " ); \
00533 asm( "umulh $6, $4, $6 " ); \
00534 asm( "addq $7, $3, $7 " ); \
00535 asm( "cmpult $7, $3, $3 " ); \
00536 asm( "ldq $5, 0($2) " ); \
00537 asm( "addq $7, $5, $7 " ); \
00538 asm( "cmpult $7, $5, $5 " ); \
00539 asm( "stq $7, 0($2) " ); \
00540 asm( "addq $2, 8, $2 " ); \
00541 asm( "addq $6, $3, $3 " ); \
00542 asm( "addq $5, $3, $3 " );
00543
/* Write carry and advanced pointers back to c, d, s. */
00544 #define MULADDC_STOP \
00545 asm( "stq $3, %0 " : "=m" (c)); \
00546 asm( "stq $2, %0 " : "=m" (d)); \
00547 asm( "stq $1, %0 " : "=m" (s) :: \
00548 "$1", "$2", "$3", "$4", "$5", "$6", "$7" );
00549
00550 #endif
00550
00551 #if defined(__mips__)
00552
/*
 * MIPS (32-bit): $10 = s, $11 = d, $12 = c (carry), $13 = b.
 * multu leaves the 64-bit product in the HI/LO pair (read with
 * mfhi/mflo); like Alpha, MIPS has no carry flag, so carries are
 * reconstructed with sltu (set-on-less-than-unsigned).
 */
00553 #define MULADDC_INIT \
00554 asm( "lw $10, %0 " :: "m" (s)); \
00555 asm( "lw $11, %0 " :: "m" (d)); \
00556 asm( "lw $12, %0 " :: "m" (c)); \
00557 asm( "lw $13, %0 " :: "m" (b));
00558
/*
 * One limb: $9:$14 = *s++ * b; add carry and *d, collect the two
 * add-overflow bits plus the high word into $12 as the new carry.
 */
00559 #define MULADDC_CORE \
00560 asm( "lw $14, 0($10) " ); \
00561 asm( "multu $13, $14 " ); \
00562 asm( "addi $10, $10, 4 " ); \
00563 asm( "mflo $14 " ); \
00564 asm( "mfhi $9 " ); \
00565 asm( "addu $14, $12, $14 " ); \
00566 asm( "lw $15, 0($11) " ); \
00567 asm( "sltu $12, $14, $12 " ); \
00568 asm( "addu $15, $14, $15 " ); \
00569 asm( "sltu $14, $15, $14 " ); \
00570 asm( "addu $12, $12, $9 " ); \
00571 asm( "sw $15, 0($11) " ); \
00572 asm( "addu $12, $12, $14 " ); \
00573 asm( "addi $11, $11, 4 " );
00574
/* Write carry and advanced pointers back to c, d, s. */
00575 #define MULADDC_STOP \
00576 asm( "sw $12, %0 " : "=m" (c)); \
00577 asm( "sw $11, %0 " : "=m" (d)); \
00578 asm( "sw $10, %0 " : "=m" (s) :: \
00579 "$9", "$10", "$11", "$12", "$13", "$14", "$15" );
00580
00581 #endif
00582 #endif /* __GNUC__ */
00583
00584 #if (defined(_MSC_VER) && defined(_M_IX86)) || defined(__WATCOMC__)
00585
/*
 * MSVC / Watcom, 32-bit x86 inline assembler (__asm statement-per-
 * line syntax).  Same register roles as the GCC i386 variant:
 * esi = s, edi = d, ecx = c (carry), ebx = b.
 */
00586 #define MULADDC_INIT \
00587 __asm mov esi, s \
00588 __asm mov edi, d \
00589 __asm mov ecx, c \
00590 __asm mov ebx, b
00591
/*
 * One limb: edx:eax = *s++ * b; add carry and *d, keep the new carry
 * in ecx, store the low word to *d++ (lodsd/stosd advance esi/edi).
 */
00592 #define MULADDC_CORE \
00593 __asm lodsd \
00594 __asm mul ebx \
00595 __asm add eax, ecx \
00596 __asm adc edx, 0 \
00597 __asm add eax, [edi] \
00598 __asm adc edx, 0 \
00599 __asm mov ecx, edx \
00600 __asm stosd
00601
00602 #if defined(POLARSSL_HAVE_SSE2)
00603
/* Emit one raw opcode byte; this assembler has no SSE2 mnemonics. */
00604 #define EMIT __asm _emit
00605
/*
 * Eight unrolled limb steps as hand-assembled SSE2 opcode bytes
 * (movd/pmuludq/paddq/psrlq on mm0-mm7).  Presumably mirrors the
 * instruction sequence of the GCC MULADDC_HUIT in this file -- the
 * bytes have not been re-verified here; disassemble to confirm before
 * modifying.
 */
00606 #define MULADDC_HUIT \
00607 EMIT 0x0F EMIT 0x6E EMIT 0xC9 \
00608 EMIT 0x0F EMIT 0x6E EMIT 0xC3 \
00609 EMIT 0x0F EMIT 0x6E EMIT 0x1F \
00610 EMIT 0x0F EMIT 0xD4 EMIT 0xCB \
00611 EMIT 0x0F EMIT 0x6E EMIT 0x16 \
00612 EMIT 0x0F EMIT 0xF4 EMIT 0xD0 \
00613 EMIT 0x0F EMIT 0x6E EMIT 0x66 EMIT 0x04 \
00614 EMIT 0x0F EMIT 0xF4 EMIT 0xE0 \
00615 EMIT 0x0F EMIT 0x6E EMIT 0x76 EMIT 0x08 \
00616 EMIT 0x0F EMIT 0xF4 EMIT 0xF0 \
00617 EMIT 0x0F EMIT 0x6E EMIT 0x7E EMIT 0x0C \
00618 EMIT 0x0F EMIT 0xF4 EMIT 0xF8 \
00619 EMIT 0x0F EMIT 0xD4 EMIT 0xCA \
00620 EMIT 0x0F EMIT 0x6E EMIT 0x5F EMIT 0x04 \
00621 EMIT 0x0F EMIT 0xD4 EMIT 0xDC \
00622 EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x08 \
00623 EMIT 0x0F EMIT 0xD4 EMIT 0xEE \
00624 EMIT 0x0F EMIT 0x6E EMIT 0x67 EMIT 0x0C \
00625 EMIT 0x0F EMIT 0xD4 EMIT 0xFC \
00626 EMIT 0x0F EMIT 0x7E EMIT 0x0F \
00627 EMIT 0x0F EMIT 0x6E EMIT 0x56 EMIT 0x10 \
00628 EMIT 0x0F EMIT 0xF4 EMIT 0xD0 \
00629 EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
00630 EMIT 0x0F EMIT 0x6E EMIT 0x66 EMIT 0x14 \
00631 EMIT 0x0F EMIT 0xF4 EMIT 0xE0 \
00632 EMIT 0x0F EMIT 0xD4 EMIT 0xCB \
00633 EMIT 0x0F EMIT 0x6E EMIT 0x76 EMIT 0x18 \
00634 EMIT 0x0F EMIT 0xF4 EMIT 0xF0 \
00635 EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x04 \
00636 EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
00637 EMIT 0x0F EMIT 0x6E EMIT 0x5E EMIT 0x1C \
00638 EMIT 0x0F EMIT 0xF4 EMIT 0xD8 \
00639 EMIT 0x0F EMIT 0xD4 EMIT 0xCD \
00640 EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x10 \
00641 EMIT 0x0F EMIT 0xD4 EMIT 0xD5 \
00642 EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x08 \
00643 EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
00644 EMIT 0x0F EMIT 0xD4 EMIT 0xCF \
00645 EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x14 \
00646 EMIT 0x0F EMIT 0xD4 EMIT 0xE5 \
00647 EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x0C \
00648 EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
00649 EMIT 0x0F EMIT 0xD4 EMIT 0xCA \
00650 EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x18 \
00651 EMIT 0x0F EMIT 0xD4 EMIT 0xF5 \
00652 EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x10 \
00653 EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
00654 EMIT 0x0F EMIT 0xD4 EMIT 0xCC \
00655 EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x1C \
00656 EMIT 0x0F EMIT 0xD4 EMIT 0xDD \
00657 EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x14 \
00658 EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
00659 EMIT 0x0F EMIT 0xD4 EMIT 0xCE \
00660 EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x18 \
00661 EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
00662 EMIT 0x0F EMIT 0xD4 EMIT 0xCB \
00663 EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x1C \
00664 EMIT 0x83 EMIT 0xC7 EMIT 0x20 \
00665 EMIT 0x83 EMIT 0xC6 EMIT 0x20 \
00666 EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
00667 EMIT 0x0F EMIT 0x7E EMIT 0xC9
00668
/* SSE2 variant: 0x0F 0x77 is emms, then write back carry/pointers. */
00669 #define MULADDC_STOP \
00670 EMIT 0x0F EMIT 0x77 \
00671 __asm mov c, ecx \
00672 __asm mov d, edi \
00673 __asm mov s, esi \
00674
00675 #else
00676
/* Non-SSE2 variant of MULADDC_STOP: no emms needed. */
00677 #define MULADDC_STOP \
00678 __asm mov c, ecx \
00679 __asm mov d, edi \
00680 __asm mov s, esi \
00681
00682 #endif
00683 #endif
00684
00685 #endif
00686
00687 #if !defined(MULADDC_CORE)
00688 #if defined(POLARSSL_HAVE_LONGLONG)
00689
/*
 * Portable C fallback, used when no assembly variant matched above.
 * t_int is the limb type and t_dbl a double-width type; biL / biH are
 * shift counts used as the number of bits in a limb / half-limb
 * (defined in the bignum module -- not visible in this header).
 * MULADDC_INIT opens a C block declaring the temporaries and
 * MULADDC_STOP closes it, so the three macros must be used together.
 *
 * This variant relies on t_dbl: one wide multiply per limb, with the
 * carries out of the narrow additions detected by the unsigned
 * wrap-around test (r0 < x).
 */
00690 #define MULADDC_INIT \
00691 { \
00692 t_dbl r; \
00693 t_int r0, r1;
00694
00695 #define MULADDC_CORE \
00696 r = *(s++) * (t_dbl) b; \
00697 r0 = r; \
00698 r1 = r >> biL; \
00699 r0 += c; r1 += (r0 < c); \
00700 r0 += *d; r1 += (r0 < *d); \
00701 c = r1; *(d++) = r0;
00702
00703 #define MULADDC_STOP \
00704 }
00705
00706 #else
/*
 * No double-width type available: split b and each *s into half-limb
 * halves ((x << biH) >> biH masks the low half) and build the full
 * product from four half-width multiplies -- r0 low, r1 high, rx/ry
 * the cross terms whose upper halves carry into r1.
 */
00707 #define MULADDC_INIT \
00708 { \
00709 t_int s0, s1, b0, b1; \
00710 t_int r0, r1, rx, ry; \
00711 b0 = ( b << biH ) >> biH; \
00712 b1 = ( b >> biH );
00713
00714 #define MULADDC_CORE \
00715 s0 = ( *s << biH ) >> biH; \
00716 s1 = ( *s >> biH ); s++; \
00717 rx = s0 * b1; r0 = s0 * b0; \
00718 ry = s1 * b0; r1 = s1 * b1; \
00719 r1 += ( rx >> biH ); \
00720 r1 += ( ry >> biH ); \
00721 rx <<= biH; ry <<= biH; \
00722 r0 += rx; r1 += (r0 < rx); \
00723 r0 += ry; r1 += (r0 < ry); \
00724 r0 += c; r1 += (r0 < c); \
00725 r0 += *d; r1 += (r0 < *d); \
00726 c = r1; *(d++) = r0;
00727
00728 #define MULADDC_STOP \
00729 }
00730
00731 #endif
00732 #endif
00733
00734 #endif