| Loop Id: 143 | Module: exec | Source: advec_cell_kernel.f90:83-248 [...] | Coverage: 3.49% |
|---|
| Loop Id: 143 | Module: exec | Source: advec_cell_kernel.f90:83-248 [...] | Coverage: 3.49% |
|---|
0x42a640 VCMPPD $0xe,0xdb275(%RIP),%ZMM23,%K1 [10] |
0x42a64b VADDPD %ZMM31,%ZMM21,%ZMM21{%K1} |
0x42a651 VMULPD %ZMM18,%ZMM21,%ZMM5 |
0x42a657 VMOVUPD %ZMM5,(%R10,%R14,8) [9] |
0x42a65e ADD $0x8,%R14 |
0x42a662 CMP %R9,%R14 |
0x42a665 JA 42a940 |
0x42a66b LEA (%RCX,%R14,1),%R12 |
0x42a66f VMOVUPD (%RAX,%R14,8),%ZMM18 [12] |
0x42a676 VXORPD %XMM31,%XMM31,%XMM31 |
0x42a67c VCMPPD $0x1,%ZMM18,%ZMM31,%K1 |
0x42a683 VPBLENDMD %YMM27,%YMM28,%YMM5{%K1} |
0x42a689 VPMOVSXDQ %YMM5,%ZMM5 |
0x42a68f VPSUBQ %ZMM0,%ZMM5,%ZMM21 |
0x42a695 VPXOR %XMM5,%XMM5,%XMM5 |
0x42a699 VPMULLQ %ZMM21,%ZMM1,%ZMM5 |
0x42a69f VPBROADCASTQ %R12,%ZMM16 |
0x42a6a5 VPSUBQ %ZMM2,%ZMM16,%ZMM16 |
0x42a6ab VPSLLQ $0x3,%ZMM16,%ZMM16 |
0x42a6b2 VPADDQ 0xdb1c4(%RIP),%ZMM16,%ZMM23 [10] |
0x42a6bc VPADDQ %ZMM23,%ZMM7,%ZMM16 |
0x42a6c2 VPADDQ %ZMM5,%ZMM16,%ZMM16 |
0x42a6c8 VPXOR %XMM5,%XMM5,%XMM5 |
0x42a6cc KXNORW %K0,%K0,%K2 |
0x42a6d0 VGATHERQPD (,%ZMM16,1),%ZMM5{%K2} [2] |
0x42a6db VPBLENDMD %YMM29,%YMM26,%YMM16{%K1} |
0x42a6e1 VPBLENDMD %YMM27,%YMM26,%YMM17{%K1} |
0x42a6e7 VPMOVSXDQ %YMM17,%ZMM17 |
0x42a6ed VPSUBQ %ZMM0,%ZMM17,%ZMM17 |
0x42a6f3 VXORPD %XMM22,%XMM22,%XMM22 |
0x42a6f9 KXNORW %K0,%K0,%K2 |
0x42a6fd VGATHERQPD (%R11,%ZMM17,8),%ZMM22{%K2} [7] |
0x42a704 VPXORD %XMM17,%XMM17,%XMM17 |
0x42a70a VPMULLQ %ZMM21,%ZMM3,%ZMM17 |
0x42a710 VPADDQ %ZMM23,%ZMM11,%ZMM19 |
0x42a716 VPADDQ %ZMM17,%ZMM19,%ZMM17 |
0x42a71c VXORPD %XMM20,%XMM20,%XMM20 |
0x42a722 KXNORW %K0,%K0,%K2 |
0x42a726 VGATHERQPD (,%ZMM17,1),%ZMM20{%K2} [8] |
0x42a731 VPBLENDMD %YMM28,%YMM27,%YMM25{%K1} |
0x42a737 VANDPD %ZMM6,%ZMM18,%ZMM17 |
0x42a73d VDIVPD %ZMM5,%ZMM17,%ZMM15 |
0x42a743 VMOVAPD %ZMM30,%ZMM17 |
0x42a749 VPMOVSXDQ %YMM16,%ZMM16 |
0x42a74f VPSUBQ %ZMM0,%ZMM16,%ZMM16 |
0x42a755 VPXORD %XMM24,%XMM24,%XMM24 |
0x42a75b VPMULLQ %ZMM16,%ZMM3,%ZMM24 |
0x42a761 VPADDQ %ZMM24,%ZMM19,%ZMM24 |
0x42a767 VXORPD %XMM8,%XMM8,%XMM8 |
0x42a76c KXNORW %K0,%K0,%K1 |
0x42a770 VGATHERQPD (,%ZMM24,1),%ZMM8{%K1} [3] |
0x42a77b VFMADD213PD %ZMM30,%ZMM15,%ZMM17 |
0x42a781 VDIVPD %ZMM22,%ZMM17,%ZMM17 |
0x42a787 VSUBPD %ZMM15,%ZMM10,%ZMM24 |
0x42a78d VPMOVSXDQ %YMM25,%ZMM22 |
0x42a793 VPSUBQ %ZMM0,%ZMM22,%ZMM22 |
0x42a799 VPXORD %XMM25,%XMM25,%XMM25 |
0x42a79f VPMULLQ %ZMM22,%ZMM3,%ZMM25 |
0x42a7a5 VPADDQ %ZMM25,%ZMM19,%ZMM19 |
0x42a7ab VPXORD %XMM25,%XMM25,%XMM25 |
0x42a7b1 KXNORW %K0,%K0,%K1 |
0x42a7b5 VGATHERQPD (,%ZMM19,1),%ZMM25{%K1} [6] |
0x42a7c0 VSUBPD %ZMM8,%ZMM20,%ZMM8 |
0x42a7c6 VSUBPD %ZMM20,%ZMM25,%ZMM19 |
0x42a7cc VMULPD %ZMM8,%ZMM19,%ZMM25 |
0x42a7d2 VCMPPD $0x1,%ZMM25,%ZMM31,%K1 |
0x42a7d9 VFPCLASSPD $0x56,%ZMM19,%K2 |
0x42a7e0 VANDPD %ZMM6,%ZMM8,%ZMM8 |
0x42a7e6 VANDPD %ZMM6,%ZMM19,%ZMM19 |
0x42a7ec VMULPD %ZMM17,%ZMM8,%ZMM25 |
0x42a7f2 VFMADD231PD %ZMM24,%ZMM19,%ZMM25 |
0x42a7f8 VMULPD %ZMM13,%ZMM25,%ZMM25 |
0x42a7fe VMINPD %ZMM25,%ZMM19,%ZMM19 |
0x42a804 VSUBPD %ZMM15,%ZMM9,%ZMM15 |
0x42a80a VXORPD %ZMM12,%ZMM15,%ZMM15{%K2} |
0x42a810 VMINPD %ZMM19,%ZMM8,%ZMM8 |
0x42a816 VMOVAPD %ZMM20,%ZMM19 |
0x42a81c VFMADD231PD %ZMM15,%ZMM8,%ZMM19{%K1} |
0x42a822 VMULPD %ZMM18,%ZMM19,%ZMM18 |
0x42a828 VMOVUPD %ZMM18,(%R8,%R14,8) [4] |
0x42a82f VPXOR %XMM8,%XMM8,%XMM8 |
0x42a834 VPMULLQ %ZMM21,%ZMM4,%ZMM8 |
0x42a83a VPADDQ %ZMM23,%ZMM14,%ZMM15 |
0x42a840 VPADDQ %ZMM8,%ZMM15,%ZMM8 |
0x42a846 VPXORD %XMM21,%XMM21,%XMM21 |
0x42a84c KXNORW %K0,%K0,%K1 |
0x42a850 VGATHERQPD (,%ZMM8,1),%ZMM21{%K1} [11] |
0x42a85b VPXOR %XMM8,%XMM8,%XMM8 |
0x42a860 VPMULLQ %ZMM16,%ZMM4,%ZMM8 |
0x42a866 VPADDQ %ZMM8,%ZMM15,%ZMM8 |
0x42a86c VPXORD %XMM16,%XMM16,%XMM16 |
0x42a872 KXNORW %K0,%K0,%K1 |
0x42a876 VGATHERQPD (,%ZMM8,1),%ZMM16{%K1} [5] |
0x42a881 VPXOR %XMM8,%XMM8,%XMM8 |
0x42a886 VPMULLQ %ZMM22,%ZMM4,%ZMM8 |
0x42a88c VPADDQ %ZMM8,%ZMM15,%ZMM8 |
0x42a892 VPXOR %XMM15,%XMM15,%XMM15 |
0x42a897 KXNORW %K0,%K0,%K1 |
0x42a89b VGATHERQPD (,%ZMM8,1),%ZMM15{%K1} [1] |
0x42a8a6 VSUBPD %ZMM16,%ZMM21,%ZMM16 |
0x42a8ac VSUBPD %ZMM21,%ZMM15,%ZMM22 |
0x42a8b2 VMULPD %ZMM16,%ZMM22,%ZMM23 |
0x42a8b8 VCMPPD $0x1,%ZMM23,%ZMM31,%K0 |
0x42a8bf KORTESTB %K0,%K0 |
0x42a8c3 JE 42a640 |
0x42a8c9 VANDPD %ZMM6,%ZMM18,%ZMM8 |
0x42a8cf VMULPD %ZMM5,%ZMM20,%ZMM5 |
0x42a8d5 VDIVPD %ZMM5,%ZMM8,%ZMM5 |
0x42a8db VSUBPD %ZMM5,%ZMM9,%ZMM5 |
0x42a8e1 VFPCLASSPD $0x56,%ZMM22,%K1 |
0x42a8e8 VXORPD %ZMM12,%ZMM5,%ZMM5{%K1} |
0x42a8ee VANDPD %ZMM6,%ZMM16,%ZMM8 |
0x42a8f4 VANDPD %ZMM6,%ZMM22,%ZMM15 |
0x42a8fa VMULPD %ZMM17,%ZMM8,%ZMM16 |
0x42a900 VFMADD213PD %ZMM16,%ZMM15,%ZMM24 |
0x42a906 VMULPD %ZMM13,%ZMM24,%ZMM16 |
0x42a90c VMINPD %ZMM16,%ZMM15,%ZMM15 |
0x42a912 VMINPD %ZMM15,%ZMM8,%ZMM8 |
0x42a918 VMULPD %ZMM5,%ZMM8,%ZMM31 |
0x42a91e JMP 42a640 |
/scratch_na/users/xoserete/qaas_runs/171-415-7919/intel/CloverLeafFC/build/CloverLeafFC/CloverLeaf_ref/kernels/advec_cell_kernel.f90: 83 - 248 |
-------------------------------------------------------------------------------- |
83: IF(dir.EQ.g_xdir) THEN |
[...] |
204: IF(vol_flux_y(j,k).GT.0.0)THEN |
[...] |
216: sigmat=ABS(vol_flux_y(j,k))/pre_vol(j,donor) |
217: sigma3=(1.0_8+sigmat)*(vertexdy(k)/vertexdy(dif)) |
218: sigma4=2.0_8-sigmat |
219: |
220: sigma=sigmat |
221: sigmav=sigmat |
222: |
223: diffuw=density1(j,donor)-density1(j,upwind) |
224: diffdw=density1(j,downwind)-density1(j,donor) |
225: wind=1.0_8 |
226: IF(diffdw.LE.0.0) wind=-1.0_8 |
227: IF(diffuw*diffdw.GT.0.0)THEN |
228: limiter=(1.0_8-sigmav)*wind*MIN(ABS(diffuw),ABS(diffdw)& |
229: ,one_by_six*(sigma3*ABS(diffuw)+sigma4*ABS(diffdw))) |
230: ELSE |
231: limiter=0.0 |
232: ENDIF |
233: mass_flux_y(j,k)=vol_flux_y(j,k)*(density1(j,donor)+limiter) |
234: |
235: sigmam=ABS(mass_flux_y(j,k))/(density1(j,donor)*pre_vol(j,donor)) |
236: diffuw=energy1(j,donor)-energy1(j,upwind) |
237: diffdw=energy1(j,downwind)-energy1(j,donor) |
238: wind=1.0_8 |
239: IF(diffdw.LE.0.0) wind=-1.0_8 |
240: IF(diffuw*diffdw.GT.0.0)THEN |
241: limiter=(1.0_8-sigmam)*wind*MIN(ABS(diffuw),ABS(diffdw)& |
242: ,one_by_six*(sigma3*ABS(diffuw)+sigma4*ABS(diffdw))) |
243: ELSE |
244: limiter=0.0 |
245: ENDIF |
246: ener_flux(j,k)=mass_flux_y(j,k)*(energy1(j,donor)+limiter) |
247: |
248: ENDDO |
| Coverage (%) | Name | Source Location | Module |
|---|---|---|---|
| ►100.00+ | __kmp_invoke_microtask | libiomp5.so | |
| ○ | __kmp_invoke_task_func | libiomp5.so |
| Path / |
| Metric | Value |
|---|---|
| CQA speedup if no scalar integer | 1.08 |
| CQA speedup if FP arith vectorized | 1.00 |
| CQA speedup if fully vectorized | 1.01 |
| CQA speedup if no inter-iteration dependency | NA |
| CQA speedup if next bottleneck killed | NA |
| Bottlenecks | |
| Function | advec_cell_kernel_.DIR.OMP.PARALLEL.2 |
| Source | advec_cell_kernel.f90:83-83,advec_cell_kernel.f90:204-204,advec_cell_kernel.f90:216-248 |
| Source loop unroll info | not unrolled or unrolled with no peel/tail loop |
| Source loop unroll confidence level | max |
| Unroll/vectorization loop type | NA |
| Unroll factor | NA |
| CQA cycles | 58.50 |
| CQA cycles if no scalar integer | 54.00 |
| CQA cycles if FP arith vectorized | 58.50 |
| CQA cycles if fully vectorized | 58.13 |
| Front-end cycles | 29.75 |
| DIV/SQRT cycles | 58.50 |
| P0 cycles | 10.50 |
| P1 cycles | 22.33 |
| P2 cycles | 22.33 |
| P3 cycles | 1.00 |
| P4 cycles | 58.50 |
| P5 cycles | 2.00 |
| P6 cycles | 1.00 |
| P7 cycles | 1.00 |
| P8 cycles | 1.00 |
| P9 cycles | 1.00 |
| P10 cycles | 22.33 |
| P11 cycles | 40.00 |
| Inter-iter dependencies cycles | 0 |
| FE+BE cycles (UFS) | 60.14 - 91.58 |
| Stall cycles (UFS) | 34.72 - 66.12 |
| Nb insns | 112.50 |
| Nb uops | 178.50 |
| Nb loads | 11.00 |
| Nb stores | 2.00 |
| Nb stack references | 0.00 |
| FLOP/cycle | 3.42 |
| Nb FLOP add-sub | 60.00 |
| Nb FLOP mul | 64.00 |
| Nb FLOP fma | 28.00 |
| Nb FLOP div | 20.00 |
| Nb FLOP rcp | 0.00 |
| Nb FLOP sqrt | 0.00 |
| Nb FLOP rsqrt | 0.00 |
| Bytes/cycle | 14.29 |
| Bytes prefetched | 0.00 |
| Bytes loaded | 704.00 |
| Bytes stored | 128.00 |
| Stride 0 | 1.00 |
| Stride 1 | 3.00 |
| Stride n | 0.00 |
| Stride unknown | 0.00 |
| Stride indirect | 6.00 |
| Vectorization ratio all | 98.97 |
| Vectorization ratio load | 100.00 |
| Vectorization ratio store | 100.00 |
| Vectorization ratio mul | 100.00 |
| Vectorization ratio add_sub | 100.00 |
| Vectorization ratio fma | 100.00 |
| Vectorization ratio div_sqrt | 100.00 |
| Vectorization ratio other | 98.01 |
| Vector-efficiency ratio all | 82.69 |
| Vector-efficiency ratio load | 100.00 |
| Vector-efficiency ratio store | 100.00 |
| Vector-efficiency ratio mul | 100.00 |
| Vector-efficiency ratio add_sub | 100.00 |
| Vector-efficiency ratio fma | 100.00 |
| Vector-efficiency ratio div_sqrt | 100.00 |
| Vector-efficiency ratio other | 66.42 |
| Metric | Value |
|---|---|
| CQA speedup if no scalar integer | 1.08 |
| CQA speedup if FP arith vectorized | 1.00 |
| CQA speedup if fully vectorized | 1.01 |
| CQA speedup if no inter-iteration dependency | NA |
| CQA speedup if next bottleneck killed | NA |
| Bottlenecks | P0, P5 |
| Function | advec_cell_kernel_.DIR.OMP.PARALLEL.2 |
| Source | advec_cell_kernel.f90:83-83,advec_cell_kernel.f90:204-204,advec_cell_kernel.f90:216-248 |
| Source loop unroll info | not unrolled or unrolled with no peel/tail loop |
| Source loop unroll confidence level | max |
| Unroll/vectorization loop type | NA |
| Unroll factor | NA |
| CQA cycles | 62.50 |
| CQA cycles if no scalar integer | 58.00 |
| CQA cycles if FP arith vectorized | 62.50 |
| CQA cycles if fully vectorized | 62.13 |
| Front-end cycles | 31.17 |
| DIV/SQRT cycles | 62.50 |
| P0 cycles | 11.00 |
| P1 cycles | 22.33 |
| P2 cycles | 22.33 |
| P3 cycles | 1.00 |
| P4 cycles | 62.50 |
| P5 cycles | 2.00 |
| P6 cycles | 1.00 |
| P7 cycles | 1.00 |
| P8 cycles | 1.00 |
| P9 cycles | 1.00 |
| P10 cycles | 22.33 |
| P11 cycles | 48.00 |
| Inter-iter dependencies cycles | 0 |
| FE+BE cycles (UFS) | 64.85 - 94.25 |
| Stall cycles (UFS) | 37.97 - 67.37 |
| Nb insns | 120.00 |
| Nb uops | 187.00 |
| Nb loads | 11.00 |
| Nb stores | 2.00 |
| Nb stack references | 0.00 |
| FLOP/cycle | 3.71 |
| Nb FLOP add-sub | 64.00 |
| Nb FLOP mul | 80.00 |
| Nb FLOP fma | 32.00 |
| Nb FLOP div | 24.00 |
| Nb FLOP rcp | 0.00 |
| Nb FLOP sqrt | 0.00 |
| Nb FLOP rsqrt | 0.00 |
| Bytes/cycle | 13.31 |
| Bytes prefetched | 0.00 |
| Bytes loaded | 704.00 |
| Bytes stored | 128.00 |
| Stride 0 | 1.00 |
| Stride 1 | 3.00 |
| Stride n | 0.00 |
| Stride unknown | 0.00 |
| Stride indirect | 6.00 |
| Vectorization ratio all | 99.05 |
| Vectorization ratio load | 100.00 |
| Vectorization ratio store | 100.00 |
| Vectorization ratio mul | 100.00 |
| Vectorization ratio add_sub | 100.00 |
| Vectorization ratio fma | 100.00 |
| Vectorization ratio div_sqrt | 100.00 |
| Vectorization ratio other | 98.15 |
| Vector-efficiency ratio all | 83.93 |
| Vector-efficiency ratio load | 100.00 |
| Vector-efficiency ratio store | 100.00 |
| Vector-efficiency ratio mul | 100.00 |
| Vector-efficiency ratio add_sub | 100.00 |
| Vector-efficiency ratio fma | 100.00 |
| Vector-efficiency ratio div_sqrt | 100.00 |
| Vector-efficiency ratio other | 68.75 |
| Metric | Value |
|---|---|
| CQA speedup if no scalar integer | 1.09 |
| CQA speedup if FP arith vectorized | 1.00 |
| CQA speedup if fully vectorized | 1.01 |
| CQA speedup if no inter-iteration dependency | NA |
| CQA speedup if next bottleneck killed | NA |
| Bottlenecks | P0, P5 |
| Function | advec_cell_kernel_.DIR.OMP.PARALLEL.2 |
| Source | advec_cell_kernel.f90:83-83,advec_cell_kernel.f90:204-204,advec_cell_kernel.f90:216-248 |
| Source loop unroll info | not unrolled or unrolled with no peel/tail loop |
| Source loop unroll confidence level | max |
| Unroll/vectorization loop type | NA |
| Unroll factor | NA |
| CQA cycles | 54.50 |
| CQA cycles if no scalar integer | 50.00 |
| CQA cycles if FP arith vectorized | 54.50 |
| CQA cycles if fully vectorized | 54.13 |
| Front-end cycles | 28.33 |
| DIV/SQRT cycles | 54.50 |
| P0 cycles | 10.00 |
| P1 cycles | 22.33 |
| P2 cycles | 22.33 |
| P3 cycles | 1.00 |
| P4 cycles | 54.50 |
| P5 cycles | 2.00 |
| P6 cycles | 1.00 |
| P7 cycles | 1.00 |
| P8 cycles | 1.00 |
| P9 cycles | 1.00 |
| P10 cycles | 22.33 |
| P11 cycles | 32.00 |
| Inter-iter dependencies cycles | 0 |
| FE+BE cycles (UFS) | 55.43 - 88.91 |
| Stall cycles (UFS) | 31.46 - 64.87 |
| Nb insns | 105.00 |
| Nb uops | 170.00 |
| Nb loads | 11.00 |
| Nb stores | 2.00 |
| Nb stack references | 0.00 |
| FLOP/cycle | 3.08 |
| Nb FLOP add-sub | 56.00 |
| Nb FLOP mul | 48.00 |
| Nb FLOP fma | 24.00 |
| Nb FLOP div | 16.00 |
| Nb FLOP rcp | 0.00 |
| Nb FLOP sqrt | 0.00 |
| Nb FLOP rsqrt | 0.00 |
| Bytes/cycle | 15.27 |
| Bytes prefetched | 0.00 |
| Bytes loaded | 704.00 |
| Bytes stored | 128.00 |
| Stride 0 | 1.00 |
| Stride 1 | 3.00 |
| Stride n | 0.00 |
| Stride unknown | 0.00 |
| Stride indirect | 6.00 |
| Vectorization ratio all | 98.90 |
| Vectorization ratio load | 100.00 |
| Vectorization ratio store | 100.00 |
| Vectorization ratio mul | 100.00 |
| Vectorization ratio add_sub | 100.00 |
| Vectorization ratio fma | 100.00 |
| Vectorization ratio div_sqrt | 100.00 |
| Vectorization ratio other | 97.87 |
| Vector-efficiency ratio all | 81.46 |
| Vector-efficiency ratio load | 100.00 |
| Vector-efficiency ratio store | 100.00 |
| Vector-efficiency ratio mul | 100.00 |
| Vector-efficiency ratio add_sub | 100.00 |
| Vector-efficiency ratio fma | 100.00 |
| Vector-efficiency ratio div_sqrt | 100.00 |
| Vector-efficiency ratio other | 64.10 |
| Path / |
| Function | advec_cell_kernel_.DIR.OMP.PARALLEL.2 |
| Source file and lines | advec_cell_kernel.f90:83-248 |
| Module | exec |
| nb instructions | 112.50 |
| nb uops | 178.50 |
| loop length | 694 |
| used x86 registers | 8 |
| used mmx registers | 0 |
| used xmm registers | 11 |
| used ymm registers | 8 |
| used zmm registers | 28 |
| nb stack references | 0 |
| ADD-SUB / MUL ratio | 0.98 |
| micro-operation queue | 29.75 cycles |
| front end | 29.75 cycles |
| P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | P8 | P9 | P10 | P11 | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| uops | 58.50 | 10.00 | 22.33 | 22.33 | 1.00 | 58.50 | 2.00 | 1.00 | 1.00 | 1.00 | 1.00 | 22.33 |
| cycles | 58.50 | 10.50 | 22.33 | 22.33 | 1.00 | 58.50 | 2.00 | 1.00 | 1.00 | 1.00 | 1.00 | 22.33 |
| Cycles executing div or sqrt instructions | 40.00 |
| Longest recurrence chain latency (RecMII) | 0.00 |
| FE+BE cycles | 60.14-91.58 |
| Stall cycles | 34.72-66.12 |
| RS full (events) | 43.06-0.89 |
| PRF_FLOAT full (events) | 10.98-70.06 |
| Front-end | 29.75 |
| Dispatch | 58.50 |
| DIV/SQRT | 40.00 |
| Data deps. | 0.00 |
| Overall L1 | 58.50 |
| all | 97% |
| load | 100% |
| store | NA (no store vectorizable/vectorized instructions) |
| mul | 100% |
| add-sub | 100% |
| fma | NA (no fma vectorizable/vectorized instructions) |
| other | 95% |
| all | 100% |
| load | 100% |
| store | 100% |
| mul | 100% |
| add-sub | 100% |
| fma | 100% |
| div/sqrt | 100% |
| other | 100% |
| all | 98% |
| load | 100% |
| store | 100% |
| mul | 100% |
| add-sub | 100% |
| fma | 100% |
| div/sqrt | 100% |
| other | 98% |
| all | 69% |
| load | 100% |
| store | NA (no store vectorizable/vectorized instructions) |
| mul | 100% |
| add-sub | 100% |
| fma | NA (no fma vectorizable/vectorized instructions) |
| other | 36% |
| all | 94% |
| load | 100% |
| store | 100% |
| mul | 100% |
| add-sub | 100% |
| fma | 100% |
| div/sqrt | 100% |
| other | 89% |
| all | 82% |
| load | 100% |
| store | 100% |
| mul | 100% |
| add-sub | 100% |
| fma | 100% |
| div/sqrt | 100% |
| other | 66% |
| Function | advec_cell_kernel_.DIR.OMP.PARALLEL.2 |
| Source file and lines | advec_cell_kernel.f90:83-248 |
| Module | exec |
| nb instructions | 120 |
| nb uops | 187 |
| loop length | 739 |
| used x86 registers | 8 |
| used mmx registers | 0 |
| used xmm registers | 11 |
| used ymm registers | 8 |
| used zmm registers | 28 |
| nb stack references | 0 |
| ADD-SUB / MUL ratio | 0.80 |
| micro-operation queue | 31.17 cycles |
| front end | 31.17 cycles |
| P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | P8 | P9 | P10 | P11 | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| uops | 62.50 | 10.00 | 22.33 | 22.33 | 1.00 | 62.50 | 2.00 | 1.00 | 1.00 | 1.00 | 1.00 | 22.33 |
| cycles | 62.50 | 11.00 | 22.33 | 22.33 | 1.00 | 62.50 | 2.00 | 1.00 | 1.00 | 1.00 | 1.00 | 22.33 |
| Cycles executing div or sqrt instructions | 48.00 |
| Longest recurrence chain latency (RecMII) | 0.00 |
| FE+BE cycles | 64.85-94.25 |
| Stall cycles | 37.97-67.37 |
| RS full (events) | 33.92-1.42 |
| PRF_FLOAT full (events) | 20.59-72.13 |
| Front-end | 31.17 |
| Dispatch | 62.50 |
| DIV/SQRT | 48.00 |
| Data deps. | 0.00 |
| Overall L1 | 62.50 |
| all | 97% |
| load | 100% |
| store | NA (no store vectorizable/vectorized instructions) |
| mul | 100% |
| add-sub | 100% |
| fma | NA (no fma vectorizable/vectorized instructions) |
| other | 95% |
| all | 100% |
| load | 100% |
| store | 100% |
| mul | 100% |
| add-sub | 100% |
| fma | 100% |
| div/sqrt | 100% |
| other | 100% |
| all | 99% |
| load | 100% |
| store | 100% |
| mul | 100% |
| add-sub | 100% |
| fma | 100% |
| div/sqrt | 100% |
| other | 98% |
| all | 69% |
| load | 100% |
| store | NA (no store vectorizable/vectorized instructions) |
| mul | 100% |
| add-sub | 100% |
| fma | NA (no fma vectorizable/vectorized instructions) |
| other | 36% |
| all | 95% |
| load | 100% |
| store | 100% |
| mul | 100% |
| add-sub | 100% |
| fma | 100% |
| div/sqrt | 100% |
| other | 90% |
| all | 83% |
| load | 100% |
| store | 100% |
| mul | 100% |
| add-sub | 100% |
| fma | 100% |
| div/sqrt | 100% |
| other | 68% |
| Instruction | Nb FU | P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | P8 | P9 | P10 | P11 | Latency | Recip. throughput |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| VCMPPD $0xe,0xdb275(%RIP),%ZMM23,%K1 | 2 | 0 | 0 | 0.33 | 0.33 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 3 | 1 |
| VADDPD %ZMM31,%ZMM21,%ZMM21{%K1} | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
| VMULPD %ZMM18,%ZMM21,%ZMM5 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMOVUPD %ZMM5,(%R10,%R14,8) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 0-1 | 1 |
| ADD $0x8,%R14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17 |
| CMP %R9,%R14 | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
| JA 42a940 <advec_cell_kernel_module_mp_advec_cell_kernel_.DIR.OMP.PARALLEL.2+0x1f00> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 |
| LEA (%RCX,%R14,1),%R12 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| VMOVUPD (%RAX,%R14,8),%ZMM18 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.50 |
| VXORPD %XMM31,%XMM31,%XMM31 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| VCMPPD $0x1,%ZMM18,%ZMM31,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| VPBLENDMD %YMM27,%YMM28,%YMM5{%K1} | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.33 |
| VPMOVSXDQ %YMM5,%ZMM5 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| VPSUBQ %ZMM0,%ZMM5,%ZMM21 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
| VPXOR %XMM5,%XMM5,%XMM5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| VPMULLQ %ZMM21,%ZMM1,%ZMM5 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
| VPBROADCASTQ %R12,%ZMM16 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| VPSUBQ %ZMM2,%ZMM16,%ZMM16 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
| VPSLLQ $0x3,%ZMM16,%ZMM16 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2-4 | 1 |
| VPADDQ 0xdb1c4(%RIP),%ZMM16,%ZMM23 | 1 | 0.50 | 0 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.67 |
| VPADDQ %ZMM23,%ZMM7,%ZMM16 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VPADDQ %ZMM5,%ZMM16,%ZMM16 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VPXOR %XMM5,%XMM5,%XMM5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| KXNORW %K0,%K0,%K2 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
| VGATHERQPD (,%ZMM16,1),%ZMM5{%K2} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
| VPBLENDMD %YMM29,%YMM26,%YMM16{%K1} | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.33 |
| VPBLENDMD %YMM27,%YMM26,%YMM17{%K1} | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.33 |
| VPMOVSXDQ %YMM17,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| VPSUBQ %ZMM0,%ZMM17,%ZMM17 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
| VXORPD %XMM22,%XMM22,%XMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| KXNORW %K0,%K0,%K2 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
| VGATHERQPD (%R11,%ZMM17,8),%ZMM22{%K2} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
| VPXORD %XMM17,%XMM17,%XMM17 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.33 |
| VPMULLQ %ZMM21,%ZMM3,%ZMM17 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
| VPADDQ %ZMM23,%ZMM11,%ZMM19 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VPADDQ %ZMM17,%ZMM19,%ZMM17 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VXORPD %XMM20,%XMM20,%XMM20 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| KXNORW %K0,%K0,%K2 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
| VGATHERQPD (,%ZMM17,1),%ZMM20{%K2} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
| VPBLENDMD %YMM28,%YMM27,%YMM25{%K1} | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.33 |
| VANDPD %ZMM6,%ZMM18,%ZMM17 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VDIVPD %ZMM5,%ZMM17,%ZMM15 | 3 | 2.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 22-24 | 16 |
| VMOVAPD %ZMM30,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
| VPMOVSXDQ %YMM16,%ZMM16 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| VPSUBQ %ZMM0,%ZMM16,%ZMM16 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
| VPXORD %XMM24,%XMM24,%XMM24 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.33 |
| VPMULLQ %ZMM16,%ZMM3,%ZMM24 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
| VPADDQ %ZMM24,%ZMM19,%ZMM24 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VXORPD %XMM8,%XMM8,%XMM8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| KXNORW %K0,%K0,%K1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
| VGATHERQPD (,%ZMM24,1),%ZMM8{%K1} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
| VFMADD213PD %ZMM30,%ZMM15,%ZMM17 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VDIVPD %ZMM22,%ZMM17,%ZMM17 | 3 | 2.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 22-24 | 16 |
| VSUBPD %ZMM15,%ZMM10,%ZMM24 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
| VPMOVSXDQ %YMM25,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| VPSUBQ %ZMM0,%ZMM22,%ZMM22 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
| VPXORD %XMM25,%XMM25,%XMM25 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.33 |
| VPMULLQ %ZMM22,%ZMM3,%ZMM25 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
| VPADDQ %ZMM25,%ZMM19,%ZMM19 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VPXORD %XMM25,%XMM25,%XMM25 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.33 |
| KXNORW %K0,%K0,%K1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
| VGATHERQPD (,%ZMM19,1),%ZMM25{%K1} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
| VSUBPD %ZMM8,%ZMM20,%ZMM8 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
| VSUBPD %ZMM20,%ZMM25,%ZMM19 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
| VMULPD %ZMM8,%ZMM19,%ZMM25 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VCMPPD $0x1,%ZMM25,%ZMM31,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| VFPCLASSPD $0x56,%ZMM19,%K2 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| VANDPD %ZMM6,%ZMM8,%ZMM8 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VANDPD %ZMM6,%ZMM19,%ZMM19 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VMULPD %ZMM17,%ZMM8,%ZMM25 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VFMADD231PD %ZMM24,%ZMM19,%ZMM25 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMULPD %ZMM13,%ZMM25,%ZMM25 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMINPD %ZMM25,%ZMM19,%ZMM19 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VSUBPD %ZMM15,%ZMM9,%ZMM15 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
| VXORPD %ZMM12,%ZMM15,%ZMM15{%K2} | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
| VMINPD %ZMM19,%ZMM8,%ZMM8 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMOVAPD %ZMM20,%ZMM19 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
| VFMADD231PD %ZMM15,%ZMM8,%ZMM19{%K1} | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMULPD %ZMM18,%ZMM19,%ZMM18 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMOVUPD %ZMM18,(%R8,%R14,8) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 0-1 | 1 |
| VPXOR %XMM8,%XMM8,%XMM8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| VPMULLQ %ZMM21,%ZMM4,%ZMM8 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
| VPADDQ %ZMM23,%ZMM14,%ZMM15 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VPADDQ %ZMM8,%ZMM15,%ZMM8 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VPXORD %XMM21,%XMM21,%XMM21 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.33 |
| KXNORW %K0,%K0,%K1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
| VGATHERQPD (,%ZMM8,1),%ZMM21{%K1} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
| VPXOR %XMM8,%XMM8,%XMM8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| VPMULLQ %ZMM16,%ZMM4,%ZMM8 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
| VPADDQ %ZMM8,%ZMM15,%ZMM8 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VPXORD %XMM16,%XMM16,%XMM16 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.33 |
| KXNORW %K0,%K0,%K1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
| VGATHERQPD (,%ZMM8,1),%ZMM16{%K1} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
| VPXOR %XMM8,%XMM8,%XMM8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| VPMULLQ %ZMM22,%ZMM4,%ZMM8 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
| VPADDQ %ZMM8,%ZMM15,%ZMM8 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VPXOR %XMM15,%XMM15,%XMM15 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| KXNORW %K0,%K0,%K1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
| VGATHERQPD (,%ZMM8,1),%ZMM15{%K1} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
| VSUBPD %ZMM16,%ZMM21,%ZMM16 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
| VSUBPD %ZMM21,%ZMM15,%ZMM22 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
| VMULPD %ZMM16,%ZMM22,%ZMM23 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VCMPPD $0x1,%ZMM23,%ZMM31,%K0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| KORTESTB %K0,%K0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
| JE 42a640 <advec_cell_kernel_module_mp_advec_cell_kernel_.DIR.OMP.PARALLEL.2+0x1c00> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 |
| VANDPD %ZMM6,%ZMM18,%ZMM8 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VMULPD %ZMM5,%ZMM20,%ZMM5 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VDIVPD %ZMM5,%ZMM8,%ZMM5 | 3 | 2.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 22-24 | 16 |
| VSUBPD %ZMM5,%ZMM9,%ZMM5 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
| VFPCLASSPD $0x56,%ZMM22,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| VXORPD %ZMM12,%ZMM5,%ZMM5{%K1} | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
| VANDPD %ZMM6,%ZMM16,%ZMM8 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VANDPD %ZMM6,%ZMM22,%ZMM15 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VMULPD %ZMM17,%ZMM8,%ZMM16 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VFMADD213PD %ZMM16,%ZMM15,%ZMM24 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMULPD %ZMM13,%ZMM24,%ZMM16 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMINPD %ZMM16,%ZMM15,%ZMM15 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMINPD %ZMM15,%ZMM8,%ZMM8 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMULPD %ZMM5,%ZMM8,%ZMM31 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| JMP 42a640 <advec_cell_kernel_module_mp_advec_cell_kernel_.DIR.OMP.PARALLEL.2+0x1c00> | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2.08 |
| Function | advec_cell_kernel_.DIR.OMP.PARALLEL.2 |
| Source file and lines | advec_cell_kernel.f90:83-248 |
| Module | exec |
| nb instructions | 105 |
| nb uops | 170 |
| loop length | 649 |
| used x86 registers | 8 |
| used mmx registers | 0 |
| used xmm registers | 11 |
| used ymm registers | 8 |
| used zmm registers | 28 |
| nb stack references | 0 |
| ADD-SUB / MUL ratio | 1.17 |
| micro-operation queue | 28.33 cycles |
| front end | 28.33 cycles |
| P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | P8 | P9 | P10 | P11 | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| uops | 54.50 | 10.00 | 22.33 | 22.33 | 1.00 | 54.50 | 2.00 | 1.00 | 1.00 | 1.00 | 1.00 | 22.33 |
| cycles | 54.50 | 10.00 | 22.33 | 22.33 | 1.00 | 54.50 | 2.00 | 1.00 | 1.00 | 1.00 | 1.00 | 22.33 |
| Cycles executing div or sqrt instructions | 32.00 |
| Longest recurrence chain latency (RecMII) | 0.00 |
| FE+BE cycles | 55.43-88.91 |
| Stall cycles | 31.46-64.87 |
| RS full (events) | 52.20-0.35 |
| PRF_FLOAT full (events) | 1.37-68.00 |
| Front-end | 28.33 |
| Dispatch | 54.50 |
| DIV/SQRT | 32.00 |
| Data deps. | 0.00 |
| Overall L1 | 54.50 |
| all | 97% |
| load | 100% |
| store | NA (no store vectorizable/vectorized instructions) |
| mul | 100% |
| add-sub | 100% |
| fma | NA (no fma vectorizable/vectorized instructions) |
| other | 95% |
| all | 100% |
| load | 100% |
| store | 100% |
| mul | 100% |
| add-sub | 100% |
| fma | 100% |
| div/sqrt | 100% |
| other | 100% |
| all | 98% |
| load | 100% |
| store | 100% |
| mul | 100% |
| add-sub | 100% |
| fma | 100% |
| div/sqrt | 100% |
| other | 97% |
| all | 69% |
| load | 100% |
| store | NA (no store vectorizable/vectorized instructions) |
| mul | 100% |
| add-sub | 100% |
| fma | NA (no fma vectorizable/vectorized instructions) |
| other | 36% |
| all | 93% |
| load | 100% |
| store | 100% |
| mul | 100% |
| add-sub | 100% |
| fma | 100% |
| div/sqrt | 100% |
| other | 88% |
| all | 81% |
| load | 100% |
| store | 100% |
| mul | 100% |
| add-sub | 100% |
| fma | 100% |
| div/sqrt | 100% |
| other | 64% |
| Instruction | Nb FU | P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | P8 | P9 | P10 | P11 | Latency | Recip. throughput |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| VCMPPD $0xe,0xdb275(%RIP),%ZMM23,%K1 | 2 | 0 | 0 | 0.33 | 0.33 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 3 | 1 |
| VADDPD %ZMM31,%ZMM21,%ZMM21{%K1} | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
| VMULPD %ZMM18,%ZMM21,%ZMM5 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMOVUPD %ZMM5,(%R10,%R14,8) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 0-1 | 1 |
| ADD $0x8,%R14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17 |
| CMP %R9,%R14 | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
| JA 42a940 <advec_cell_kernel_module_mp_advec_cell_kernel_.DIR.OMP.PARALLEL.2+0x1f00> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 |
| LEA (%RCX,%R14,1),%R12 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| VMOVUPD (%RAX,%R14,8),%ZMM18 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.50 |
| VXORPD %XMM31,%XMM31,%XMM31 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| VCMPPD $0x1,%ZMM18,%ZMM31,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| VPBLENDMD %YMM27,%YMM28,%YMM5{%K1} | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.33 |
| VPMOVSXDQ %YMM5,%ZMM5 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| VPSUBQ %ZMM0,%ZMM5,%ZMM21 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
| VPXOR %XMM5,%XMM5,%XMM5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| VPMULLQ %ZMM21,%ZMM1,%ZMM5 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
| VPBROADCASTQ %R12,%ZMM16 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| VPSUBQ %ZMM2,%ZMM16,%ZMM16 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
| VPSLLQ $0x3,%ZMM16,%ZMM16 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2-4 | 1 |
| VPADDQ 0xdb1c4(%RIP),%ZMM16,%ZMM23 | 1 | 0.50 | 0 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.67 |
| VPADDQ %ZMM23,%ZMM7,%ZMM16 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VPADDQ %ZMM5,%ZMM16,%ZMM16 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VPXOR %XMM5,%XMM5,%XMM5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| KXNORW %K0,%K0,%K2 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
| VGATHERQPD (,%ZMM16,1),%ZMM5{%K2} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
| VPBLENDMD %YMM29,%YMM26,%YMM16{%K1} | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.33 |
| VPBLENDMD %YMM27,%YMM26,%YMM17{%K1} | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.33 |
| VPMOVSXDQ %YMM17,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| VPSUBQ %ZMM0,%ZMM17,%ZMM17 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
| VXORPD %XMM22,%XMM22,%XMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| KXNORW %K0,%K0,%K2 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
| VGATHERQPD (%R11,%ZMM17,8),%ZMM22{%K2} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
| VPXORD %XMM17,%XMM17,%XMM17 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.33 |
| VPMULLQ %ZMM21,%ZMM3,%ZMM17 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
| VPADDQ %ZMM23,%ZMM11,%ZMM19 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VPADDQ %ZMM17,%ZMM19,%ZMM17 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VXORPD %XMM20,%XMM20,%XMM20 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| KXNORW %K0,%K0,%K2 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
| VGATHERQPD (,%ZMM17,1),%ZMM20{%K2} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
| VPBLENDMD %YMM28,%YMM27,%YMM25{%K1} | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.33 |
| VANDPD %ZMM6,%ZMM18,%ZMM17 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VDIVPD %ZMM5,%ZMM17,%ZMM15 | 3 | 2.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 22-24 | 16 |
| VMOVAPD %ZMM30,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
| VPMOVSXDQ %YMM16,%ZMM16 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| VPSUBQ %ZMM0,%ZMM16,%ZMM16 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
| VPXORD %XMM24,%XMM24,%XMM24 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.33 |
| VPMULLQ %ZMM16,%ZMM3,%ZMM24 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
| VPADDQ %ZMM24,%ZMM19,%ZMM24 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VXORPD %XMM8,%XMM8,%XMM8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| KXNORW %K0,%K0,%K1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
| VGATHERQPD (,%ZMM24,1),%ZMM8{%K1} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
| VFMADD213PD %ZMM30,%ZMM15,%ZMM17 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VDIVPD %ZMM22,%ZMM17,%ZMM17 | 3 | 2.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 22-24 | 16 |
| VSUBPD %ZMM15,%ZMM10,%ZMM24 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
| VPMOVSXDQ %YMM25,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| VPSUBQ %ZMM0,%ZMM22,%ZMM22 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
| VPXORD %XMM25,%XMM25,%XMM25 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.33 |
| VPMULLQ %ZMM22,%ZMM3,%ZMM25 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
| VPADDQ %ZMM25,%ZMM19,%ZMM19 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VPXORD %XMM25,%XMM25,%XMM25 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.33 |
| KXNORW %K0,%K0,%K1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
| VGATHERQPD (,%ZMM19,1),%ZMM25{%K1} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
| VSUBPD %ZMM8,%ZMM20,%ZMM8 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
| VSUBPD %ZMM20,%ZMM25,%ZMM19 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
| VMULPD %ZMM8,%ZMM19,%ZMM25 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VCMPPD $0x1,%ZMM25,%ZMM31,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| VFPCLASSPD $0x56,%ZMM19,%K2 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| VANDPD %ZMM6,%ZMM8,%ZMM8 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VANDPD %ZMM6,%ZMM19,%ZMM19 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VMULPD %ZMM17,%ZMM8,%ZMM25 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VFMADD231PD %ZMM24,%ZMM19,%ZMM25 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMULPD %ZMM13,%ZMM25,%ZMM25 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMINPD %ZMM25,%ZMM19,%ZMM19 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VSUBPD %ZMM15,%ZMM9,%ZMM15 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
| VXORPD %ZMM12,%ZMM15,%ZMM15{%K2} | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
| VMINPD %ZMM19,%ZMM8,%ZMM8 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMOVAPD %ZMM20,%ZMM19 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
| VFMADD231PD %ZMM15,%ZMM8,%ZMM19{%K1} | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMULPD %ZMM18,%ZMM19,%ZMM18 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VMOVUPD %ZMM18,(%R8,%R14,8) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 0-1 | 1 |
| VPXOR %XMM8,%XMM8,%XMM8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| VPMULLQ %ZMM21,%ZMM4,%ZMM8 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
| VPADDQ %ZMM23,%ZMM14,%ZMM15 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VPADDQ %ZMM8,%ZMM15,%ZMM8 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VPXORD %XMM21,%XMM21,%XMM21 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.33 |
| KXNORW %K0,%K0,%K1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
| VGATHERQPD (,%ZMM8,1),%ZMM21{%K1} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
| VPXOR %XMM8,%XMM8,%XMM8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| VPMULLQ %ZMM16,%ZMM4,%ZMM8 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
| VPADDQ %ZMM8,%ZMM15,%ZMM8 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VPXORD %XMM16,%XMM16,%XMM16 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.33 |
| KXNORW %K0,%K0,%K1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
| VGATHERQPD (,%ZMM8,1),%ZMM16{%K1} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
| VPXOR %XMM8,%XMM8,%XMM8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| VPMULLQ %ZMM22,%ZMM4,%ZMM8 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
| VPADDQ %ZMM8,%ZMM15,%ZMM8 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
| VPXOR %XMM15,%XMM15,%XMM15 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
| KXNORW %K0,%K0,%K1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
| VGATHERQPD (,%ZMM8,1),%ZMM15{%K1} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
| VSUBPD %ZMM16,%ZMM21,%ZMM16 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
| VSUBPD %ZMM21,%ZMM15,%ZMM22 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
| VMULPD %ZMM16,%ZMM22,%ZMM23 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
| VCMPPD $0x1,%ZMM23,%ZMM31,%K0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
| KORTESTB %K0,%K0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
| JE 42a640 <advec_cell_kernel_module_mp_advec_cell_kernel_.DIR.OMP.PARALLEL.2+0x1c00> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 |
