Lines Matching refs:dtmp0
228 ! *(float*)&dtmp0 = *(float*)((char*)parr0 + i);
229 ! y0 = vis_fpsub32(dtmp0, y0);
230 ! dtmp0 = div0 * y0;
231 ! dtmp0 = DTWO - dtmp0;
232 ! y0 *= dtmp0;
243 ! dtmp0 = *(double*)((char*)sign_arr + ux);
244 ! res *= dtmp0;
247 ! dtmp0 = K2 * x2;
248 ! dtmp0 += K1;
249 ! dtmp0 *= x2;
250 ! dtmp0 += K0;
251 ! dtmp0 *= xx;
252 ! res += dtmp0;
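Read together, the matched pseudocode lines above describe two separate pieces of per-element work: a single Newton-Raphson step that refines a reciprocal estimate y0 of div0 (the rough initial estimate comes from a table entry at parr0 combined with vis_fpsub32), and a Horner evaluation of a degree-2 polynomial in x2 that is scaled by xx and accumulated into res after the sign loaded from sign_arr has been applied. Below is a minimal scalar C sketch of that data flow, assuming DTWO is the constant 2.0; the values of K0..K2 and the contents of parr0/sign_arr are not part of this listing and are taken as given.

    /* Scalar sketch of the per-element work shown in the matched lines.
       Names follow the listing; K0..K2 and the tables are assumed to be
       supplied elsewhere. */
    double recip_refine(double div0, double y0, double DTWO)
    {
        /* One Newton-Raphson iteration: y0 <- y0 * (DTWO - div0 * y0),
           which roughly doubles the accuracy of the estimate of 1.0/div0. */
        double dtmp0 = div0 * y0;
        dtmp0 = DTWO - dtmp0;
        return y0 * dtmp0;
    }

    double poly_accum(double res, double x2, double xx,
                      double K0, double K1, double K2)
    {
        /* Horner form of (K2 * x2 + K1) * x2 + K0, scaled by xx and
           added to res (whose sign was already taken from sign_arr). */
        double dtmp0 = K2 * x2;
        dtmp0 += K1;
        dtmp0 *= x2;
        dtmp0 += K0;
        dtmp0 *= xx;
        return res + dtmp0;
    }

In the assembly lines that follow, the same operations reappear once per in-flight element, and the single dtmp0 of the pseudocode is spread across several FP registers (%f4, %f42, %f44, %f48) so that work for different elements can overlap.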
394 ld [%i4+%l6],%f0 ! (0_0) *(float*)&dtmp0 = *(float*)((char*)parr0 + i);
401 fpsub32 %f0,%f24,%f40 ! (0_0) y0 = vis_fpsub32(dtmp0, y0);
412 fmuld %f32,%f40,%f42 ! (0_0) dtmp0 = div0 * y0;
426 fsubd DTWO,%f42,%f44 ! (0_0) dtmp0 = dtwo - dtmp0;
431 ld [%i4+%l6],%f0 ! (1_0) *(float*)&dtmp0 = *(float*)((char*)parr0 + i);
438 fpsub32 %f0,%f24,%f38 ! (1_0) y0 = vis_fpsub32(dtmp0, y0);
441 fmuld %f40,%f44,%f40 ! (0_0) y0 *= dtmp0;
450 fmuld %f30,%f38,%f42 ! (1_0) dtmp0 = div0 * y0;
465 fsubd DTWO,%f42,%f44 ! (1_0) dtmp0 = dtwo - dtmp0;
471 ld [%i4+%l6],%f0 ! (2_0) *(float*)&dtmp0 = *(float*)((char*)parr0 + i);
480 fpsub32 %f0,%f24,%f40 ! (2_0) y0 = vis_fpsub32(dtmp0, y0);
482 fmuld %f38,%f44,%f38 ! (1_0) y0 *= dtmp0;
491 fmuld %f28,%f40,%f42 ! (2_0) dtmp0 = div0 * y0;
509 fsubd DTWO,%f42,%f44 ! (2_0) dtmp0 = dtwo - dtmp0;
515 ld [%i4+%l6],%f0 ! (3_0) *(float*)&dtmp0 = *(float*)((char*)parr0 + i);
521 fmuld K2,%f50,%f4 ! (0_0) dtmp0 = K2 * x2;
529 fpsub32 %f0,%f24,%f38 ! (3_0) y0 = vis_fpsub32(dtmp0, y0);
531 fmuld %f40,%f44,%f40 ! (2_0) y0 *= dtmp0;
540 fmuld %f34,%f38,%f42 ! (3_0) dtmp0 = div0 * y0;
543 faddd %f4,K1,%f4 ! (0_0) dtmp0 += K1;
557 ldd [%l0+%g1],%f48 ! (0_0) dtmp0 = *(double*)((char*)sign_arr + ux);
559 fmuld %f4,%f50,%f4 ! (0_0) dtmp0 *= x2;
567 fsubd DTWO,%f42,%f44 ! (3_0) dtmp0 = dtwo - dtmp0;
571 fmuld %f0,%f48,%f48 ! (0_0) res *= dtmp0;
573 ld [%i4+%l6],%f0 ! (4_0) *(float*)&dtmp0 = *(float*)((char*)parr0 + i);
576 faddd %f4,K0,%f42 ! (0_0) dtmp0 += K0;
593 fmuld K2,%f50,%f4 ! (1_1) dtmp0 = K2 * x2;
598 fpsub32 %f0,%f24,%f40 ! (4_1) y0 = vis_fpsub32(dtmp0, y0);
600 fmuld %f38,%f44,%f38 ! (3_1) y0 *= dtmp0;
605 fmuld %f42,%f22,%f44 ! (0_1) dtmp0 *= xx;
610 fmuld %f32,%f40,%f42 ! (4_1) dtmp0 = div0 * y0;
613 faddd %f4,K1,%f4 ! (1_1) dtmp0 += K1;
628 faddd %f48,%f44,%f12 ! (0_1) res += dtmp0;
630 fmuld %f4,%f50,%f4 ! (1_1) dtmp0 *= x2;
632 ldd [%l0+%o7],%f48 ! (1_1) dtmp0 = *(double*)((char*)sign_arr + ux);
638 fsubd DTWO,%f42,%f44 ! (4_1) dtmp0 = dtwo - dtmp0;
645 fmuld %f0,%f48,%f48 ! (1_1) res *= dtmp0;
647 ld [%i4+%l6],%f0 ! (5_1) *(float*)&dtmp0 = *(float*)((char*)parr0 + i);
653 faddd %f4,K0,%f42 ! (1_1) dtmp0 += K0;
655 fmuld K2,%f50,%f4 ! (2_1) dtmp0 = K2 * x2;
663 fpsub32 %f0,%f24,%f38 ! (5_1) y0 = vis_fpsub32(dtmp0, y0);
665 fmuld %f40,%f44,%f40 ! (4_1) y0 *= dtmp0;
670 fmuld %f42,%f20,%f44 ! (1_1) dtmp0 *= xx;
675 fmuld %f30,%f38,%f42 ! (5_1) dtmp0 = div0 * y0;
678 faddd %f4,K1,%f4 ! (2_1) dtmp0 += K1;
690 faddd %f48,%f44,%f12 ! (1_1) res += dtmp0;
692 ldd [%l0+%o1],%f48 ! (2_1) dtmp0 = *(double*)((char*)sign_arr + ux);
697 fmuld %f4,%f50,%f4 ! (2_1) dtmp0 *= x2;
703 fsubd DTWO,%f42,%f44 ! (5_1) dtmp0 = dtwo - dtmp0;
710 fmuld %f0,%f48,%f48 ! (2_1) res *= dtmp0;
712 ld [%i4+%l6],%f0 ! (6_1) *(float*)&dtmp0 = *(float*)((char*)parr0 + i);
718 faddd %f4,K0,%f42 ! (2_1) dtmp0 += K0;
720 fmuld K2,%f50,%f4 ! (3_1) dtmp0 = K2 * x2;
728 fpsub32 %f0,%f24,%f40 ! (6_1) y0 = vis_fpsub32(dtmp0, y0);
730 fmuld %f38,%f44,%f38 ! (5_1) y0 *= dtmp0;
735 fmuld %f42,%f18,%f44 ! (2_1) dtmp0 *= xx;
740 fmuld %f28,%f40,%f42 ! (6_1) dtmp0 = div0 * y0;
743 faddd %f4,K1,%f4 ! (3_1) dtmp0 += K1;
755 faddd %f48,%f44,%f12 ! (2_1) res += dtmp0;
757 ldd [%l0+%o1],%f48 ! (3_1) dtmp0 = *(double*)((char*)sign_arr + ux);
760 fmuld %f4,%f50,%f4 ! (3_1) dtmp0 *= x2;
768 fsubd DTWO,%f42,%f44 ! (6_1) dtmp0 = dtwo - dtmp0;
775 fmuld %f0,%f48,%f48 ! (3_1) res *= dtmp0;
777 ld [%i4+%l6],%f0 ! (7_1) *(float*)&dtmp0 = *(float*)((char*)parr0 + i);
783 faddd %f4,K0,%f42 ! (3_1) dtmp0 += K0;
785 fmuld K2,%f50,%f4 ! (4_1) dtmp0 = K2 * x2;
793 fpsub32 %f0,%f24,%f38 ! (7_1) y0 = vis_fpsub32(dtmp0, y0);
795 fmuld %f40,%f44,%f40 ! (6_1) y0 *= dtmp0;
800 fmuld %f42,%f16,%f44 ! (3_1) dtmp0 *= xx;
805 fmuld %f34,%f38,%f42 ! (7_1) dtmp0 = div0 * y0;
808 faddd %f4,K1,%f4 ! (4_1) dtmp0 += K1;
820 faddd %f48,%f44,%f12 ! (3_1) res += dtmp0;
822 ldd [%l0+%o1],%f48 ! (4_1) dtmp0 = *(double*)((char*)sign_arr + ux);
827 fmuld %f4,%f50,%f4 ! (4_1) dtmp0 *= x2;
833 fsubd DTWO,%f42,%f44 ! (7_1) dtmp0 = dtwo - dtmp0;
840 fmuld %f0,%f48,%f48 ! (4_1) res *= dtmp0;
842 ld [%i4+%l6],%f0 ! (0_0) *(float*)&dtmp0 = *(float*)((char*)parr0 + i);
848 faddd %f4,K0,%f42 ! (4_1) dtmp0 += K0;
850 fmuld K2,%f50,%f4 ! (5_1) dtmp0 = K2 * x2;
858 fpsub32 %f0,%f24,%f40 ! (0_0) y0 = vis_fpsub32(dtmp0, y0);
860 fmuld %f38,%f44,%f38 ! (7_1) y0 *= dtmp0;
865 fmuld %f42,%f14,%f44 ! (4_1) dtmp0 *= xx;
870 fmuld %f32,%f40,%f42 ! (0_0) dtmp0 = div0 * y0;
873 faddd %f4,K1,%f4 ! (5_1) dtmp0 += K1;
885 faddd %f48,%f44,%f12 ! (4_1) res += dtmp0;
887 ldd [%l0+%o1],%f48 ! (5_1) dtmp0 = *(double*)((char*)sign_arr + ux);
892 fmuld %f4,%f50,%f4 ! (5_1) dtmp0 *= x2;
898 fsubd DTWO,%f42,%f44 ! (0_0) dtmp0 = dtwo - dtmp0;
905 fmuld %f0,%f48,%f48 ! (5_1) res *= dtmp0;
907 ld [%i4+%l6],%f0 ! (1_0) *(float*)&dtmp0 = *(float*)((char*)parr0 + i);
913 faddd %f4,K0,%f42 ! (5_1) dtmp0 += K0;
915 fmuld K2,%f50,%f4 ! (6_1) dtmp0 = K2 * x2;
923 fpsub32 %f0,%f24,%f38 ! (1_0) y0 = vis_fpsub32(dtmp0, y0);
925 fmuld %f40,%f44,%f40 ! (0_0) y0 *= dtmp0;
930 fmuld %f42,%f36,%f44 ! (5_1) dtmp0 *= xx;
935 fmuld %f30,%f38,%f42 ! (1_0) dtmp0 = div0 * y0;
938 faddd %f4,K1,%f4 ! (6_1) dtmp0 += K1;
950 faddd %f48,%f44,%f12 ! (5_1) res += dtmp0;
952 ldd [%l0+%o1],%f48 ! (6_1) dtmp0 = *(double*)((char*)sign_arr + ux);
957 fmuld %f4,%f50,%f4 ! (6_1) dtmp0 *= x2;
963 fsubd DTWO,%f42,%f44 ! (1_0) dtmp0 = dtwo - dtmp0;
970 fmuld %f0,%f48,%f48 ! (6_1) res *= dtmp0;
972 ld [%i4+%l6],%f0 ! (2_0) *(float*)&dtmp0 = *(float*)((char*)parr0 + i);
978 faddd %f4,K0,%f42 ! (6_1) dtmp0 += K0;
980 fmuld K2,%f50,%f4 ! (7_1) dtmp0 = K2 * x2;
988 fpsub32 %f0,%f24,%f40 ! (2_0) y0 = vis_fpsub32(dtmp0, y0);
990 fmuld %f38,%f44,%f38 ! (1_0) y0 *= dtmp0;
995 fmuld %f42,%f10,%f44 ! (6_1) dtmp0 *= xx;
1000 fmuld %f28,%f40,%f42 ! (2_0) dtmp0 = div0 * y0;
1003 faddd %f4,K1,%f4 ! (7_1) dtmp0 += K1;
1015 faddd %f48,%f44,%f12 ! (6_1) res += dtmp0;
1017 ldd [%l0+%o1],%f48 ! (7_1) dtmp0 = *(double*)((char*)sign_arr + ux);
1022 fmuld %f4,%f50,%f4 ! (7_1) dtmp0 *= x2;
1028 fsubd DTWO,%f42,%f44 ! (2_0) dtmp0 = dtwo - dtmp0;
1035 fmuld %f0,%f48,%f48 ! (7_1) res *= dtmp0;
1037 ld [%i4+%l6],%f0 ! (3_0) *(float*)&dtmp0 = *(float*)((char*)parr0 + i);
1043 faddd %f4,K0,%f42 ! (7_1) dtmp0 += K0;
1045 fmuld K2,%f50,%f4 ! (0_0) dtmp0 = K2 * x2;
1053 fpsub32 %f0,%f24,%f38 ! (3_0) y0 = vis_fpsub32(dtmp0, y0);
1055 fmuld %f40,%f44,%f40 ! (2_0) y0 *= dtmp0;
1060 fmuld %f42,%f8,%f44 ! (7_1) dtmp0 *= xx;
1065 fmuld %f34,%f38,%f42 ! (3_0) dtmp0 = div0 * y0;
1068 faddd %f4,K1,%f4 ! (0_0) dtmp0 += K1;
1080 faddd %f48,%f44,%f12 ! (7_1) res += dtmp0;
1082 ldd [%l0+%o1],%f48 ! (0_0) dtmp0 = *(double*)((char*)sign_arr + ux);
1085 fmuld %f4,%f50,%f4 ! (0_0) dtmp0 *= x2;
1093 fsubd DTWO,%f42,%f44 ! (3_0) dtmp0 = dtwo - dtmp0;
1100 fmuld %f0,%f48,%f48 ! (0_0) res *= dtmp0;
1101 ld [%i4+%l6],%f0 ! (4_0) *(float*)&dtmp0 = *(float*)((char*)parr0 + i);
1108 faddd %f4,K0,%f42 ! (0_0) dtmp0 += K0;
1120 fmuld K2,%f50,%f4 ! (1_1) dtmp0 = K2 * x2;
1125 fpsub32 %f0,%f24,%f40 ! (4_1) y0 = vis_fpsub32(dtmp0, y0);
1127 fmuld %f38,%f44,%f38 ! (3_1) y0 *= dtmp0;
1129 fmuld %f42,%f22,%f44 ! (0_1) dtmp0 *= xx;
1132 fmuld %f32,%f40,%f42 ! (4_1) dtmp0 = div0 * y0;
1133 faddd %f4,K1,%f4 ! (1_1) dtmp0 += K1;
1143 faddd %f48,%f44,%f12 ! (0_1) res += dtmp0;
1145 ldd [%l0+%l5],%f48 ! (1_1) dtmp0 = *(double*)((char*)sign_arr + ux);
1147 fmuld %f4,%f50,%f4 ! (1_1) dtmp0 *= x2;
1152 fsubd DTWO,%f42,%f44 ! (4_1) dtmp0 = dtwo - dtmp0;
1159 fmuld %f0,%f48,%f48 ! (1_1) res *= dtmp0;
1161 ld [%i4+%l6],%f0 ! (5_1) *(float*)&dtmp0 = *(float*)((char*)parr0 + i);
1166 faddd %f4,K0,%f42 ! (1_1) dtmp0 += K0;
1172 fmuld K2,%f50,%f4 ! (2_1) dtmp0 = K2 * x2;
1178 fpsub32 %f0,%f24,%f38 ! (5_1) y0 = vis_fpsub32(dtmp0, y0);
1180 fmuld %f40,%f44,%f40 ! (4_1) y0 *= dtmp0;
1182 fmuld %f42,%f20,%f44 ! (1_1) dtmp0 *= xx;
1184 fmuld %f30,%f38,%f42 ! (5_1) dtmp0 = div0 * y0;
1185 faddd %f4,K1,%f4 ! (2_1) dtmp0 += K1;
1194 faddd %f48,%f44,%f12 ! (1_1) res += dtmp0;
1196 ldd [%l0+%l4],%f48 ! (2_1) dtmp0 = *(double*)((char*)sign_arr + ux);
1198 fmuld %f4,%f50,%f4 ! (2_1) dtmp0 *= x2;
1203 fsubd DTWO,%f42,%f44 ! (5_1) dtmp0 = dtwo - dtmp0;
1210 fmuld %f0,%f48,%f48 ! (2_1) res *= dtmp0;
1212 ld [%i4+%l6],%f0 ! (6_1) *(float*)&dtmp0 = *(float*)((char*)parr0 + i);
1217 faddd %f4,K0,%f42 ! (2_1) dtmp0 += K0;
1223 fmuld K2,%f50,%f4 ! (3_1) dtmp0 = K2 * x2;
1229 fpsub32 %f0,%f24,%f40 ! (6_1) y0 = vis_fpsub32(dtmp0, y0);
1231 fmuld %f38,%f44,%f38 ! (5_1) y0 *= dtmp0;
1233 fmuld %f42,%f18,%f44 ! (2_1) dtmp0 *= xx;
1235 fmuld %f28,%f40,%f42 ! (6_1) dtmp0 = div0 * y0;
1236 faddd %f4,K1,%f4 ! (3_1) dtmp0 += K1;
1244 faddd %f48,%f44,%f12 ! (2_1) res += dtmp0;
1246 ldd [%l0+%l3],%f48 ! (3_1) dtmp0 = *(double*)((char*)sign_arr + ux);
1248 fmuld %f4,%f50,%f4 ! (3_1) dtmp0 *= x2;
1252 fsubd DTWO,%f42,%f44 ! (6_1) dtmp0 = dtwo - dtmp0;
1257 fmuld %f0,%f48,%f48 ! (3_1) res *= dtmp0;
1263 faddd %f4,K0,%f42 ! (3_1) dtmp0 += K0;
1269 fmuld K2,%f50,%f4 ! (4_1) dtmp0 = K2 * x2;
1276 fmuld %f40,%f44,%f40 ! (6_1) y0 *= dtmp0;
1278 fmuld %f42,%f16,%f44 ! (3_1) dtmp0 *= xx;
1280 faddd %f4,K1,%f4 ! (4_1) dtmp0 += K1;
1288 faddd %f48,%f44,%f12 ! (3_1) res += dtmp0;
1290 ldd [%l0+%i0],%f48 ! (4_1) dtmp0 = *(double*)((char*)sign_arr + ux);
1292 fmuld %f4,%f50,%f4 ! (4_1) dtmp0 *= x2;
1300 fmuld %f0,%f48,%f48 ! (4_1) res *= dtmp0;
1306 faddd %f4,K0,%f42 ! (4_1) dtmp0 += K0;
1312 fmuld K2,%f50,%f4 ! (5_1) dtmp0 = K2 * x2;
1319 fmuld %f42,%f14,%f44 ! (4_1) dtmp0 *= xx;
1321 faddd %f4,K1,%f4 ! (5_1) dtmp0 += K1;
1328 faddd %f48,%f44,%f12 ! (4_1) res += dtmp0;
1330 ldd [%l0+%i2],%f48 ! (5_1) dtmp0 = *(double*)((char*)sign_arr + ux);
1332 fmuld %f4,%f50,%f4 ! (5_1) dtmp0 *= x2;
1339 fmuld %f0,%f48,%f48 ! (5_1) res *= dtmp0;
1345 faddd %f4,K0,%f42 ! (5_1) dtmp0 += K0;
1351 fmuld K2,%f50,%f4 ! (6_1) dtmp0 = K2 * x2;
1357 fmuld %f42,%f36,%f44 ! (5_1) dtmp0 *= xx;
1359 faddd %f4,K1,%f4 ! (6_1) dtmp0 += K1;
1365 faddd %f48,%f44,%f12 ! (5_1) res += dtmp0;
1367 ldd [%l0+%l2],%f48 ! (6_1) dtmp0 = *(double*)((char*)sign_arr + ux);
1369 fmuld %f4,%f50,%f4 ! (6_1) dtmp0 *= x2;
1372 fmuld %f0,%f48,%f48 ! (6_1) res *= dtmp0;
1377 faddd %f4,K0,%f42 ! (6_1) dtmp0 += K0;
1383 fmuld %f42,%f10,%f44 ! (6_1) dtmp0 *= xx;
1385 faddd %f48,%f44,%f12 ! (6_1) res += dtmp0;