1
2
3 package ssa
4
5 import "internal/buildcfg"
6 import "math"
7 import "cmd/internal/obj"
8 import "cmd/compile/internal/types"
9
10 func rewriteValueAMD64(v *Value) bool {
11 switch v.Op {
12 case OpAMD64ADCQ:
13 return rewriteValueAMD64_OpAMD64ADCQ(v)
14 case OpAMD64ADCQconst:
15 return rewriteValueAMD64_OpAMD64ADCQconst(v)
16 case OpAMD64ADDL:
17 return rewriteValueAMD64_OpAMD64ADDL(v)
18 case OpAMD64ADDLconst:
19 return rewriteValueAMD64_OpAMD64ADDLconst(v)
20 case OpAMD64ADDLconstmodify:
21 return rewriteValueAMD64_OpAMD64ADDLconstmodify(v)
22 case OpAMD64ADDLload:
23 return rewriteValueAMD64_OpAMD64ADDLload(v)
24 case OpAMD64ADDLmodify:
25 return rewriteValueAMD64_OpAMD64ADDLmodify(v)
26 case OpAMD64ADDQ:
27 return rewriteValueAMD64_OpAMD64ADDQ(v)
28 case OpAMD64ADDQcarry:
29 return rewriteValueAMD64_OpAMD64ADDQcarry(v)
30 case OpAMD64ADDQconst:
31 return rewriteValueAMD64_OpAMD64ADDQconst(v)
32 case OpAMD64ADDQconstmodify:
33 return rewriteValueAMD64_OpAMD64ADDQconstmodify(v)
34 case OpAMD64ADDQload:
35 return rewriteValueAMD64_OpAMD64ADDQload(v)
36 case OpAMD64ADDQmodify:
37 return rewriteValueAMD64_OpAMD64ADDQmodify(v)
38 case OpAMD64ADDSD:
39 return rewriteValueAMD64_OpAMD64ADDSD(v)
40 case OpAMD64ADDSDload:
41 return rewriteValueAMD64_OpAMD64ADDSDload(v)
42 case OpAMD64ADDSS:
43 return rewriteValueAMD64_OpAMD64ADDSS(v)
44 case OpAMD64ADDSSload:
45 return rewriteValueAMD64_OpAMD64ADDSSload(v)
46 case OpAMD64ANDL:
47 return rewriteValueAMD64_OpAMD64ANDL(v)
48 case OpAMD64ANDLconst:
49 return rewriteValueAMD64_OpAMD64ANDLconst(v)
50 case OpAMD64ANDLconstmodify:
51 return rewriteValueAMD64_OpAMD64ANDLconstmodify(v)
52 case OpAMD64ANDLload:
53 return rewriteValueAMD64_OpAMD64ANDLload(v)
54 case OpAMD64ANDLmodify:
55 return rewriteValueAMD64_OpAMD64ANDLmodify(v)
56 case OpAMD64ANDNL:
57 return rewriteValueAMD64_OpAMD64ANDNL(v)
58 case OpAMD64ANDNQ:
59 return rewriteValueAMD64_OpAMD64ANDNQ(v)
60 case OpAMD64ANDQ:
61 return rewriteValueAMD64_OpAMD64ANDQ(v)
62 case OpAMD64ANDQconst:
63 return rewriteValueAMD64_OpAMD64ANDQconst(v)
64 case OpAMD64ANDQconstmodify:
65 return rewriteValueAMD64_OpAMD64ANDQconstmodify(v)
66 case OpAMD64ANDQload:
67 return rewriteValueAMD64_OpAMD64ANDQload(v)
68 case OpAMD64ANDQmodify:
69 return rewriteValueAMD64_OpAMD64ANDQmodify(v)
70 case OpAMD64BSFQ:
71 return rewriteValueAMD64_OpAMD64BSFQ(v)
72 case OpAMD64BSWAPL:
73 return rewriteValueAMD64_OpAMD64BSWAPL(v)
74 case OpAMD64BSWAPQ:
75 return rewriteValueAMD64_OpAMD64BSWAPQ(v)
76 case OpAMD64BTCQconst:
77 return rewriteValueAMD64_OpAMD64BTCQconst(v)
78 case OpAMD64BTLconst:
79 return rewriteValueAMD64_OpAMD64BTLconst(v)
80 case OpAMD64BTQconst:
81 return rewriteValueAMD64_OpAMD64BTQconst(v)
82 case OpAMD64BTRQconst:
83 return rewriteValueAMD64_OpAMD64BTRQconst(v)
84 case OpAMD64BTSQconst:
85 return rewriteValueAMD64_OpAMD64BTSQconst(v)
86 case OpAMD64CMOVLCC:
87 return rewriteValueAMD64_OpAMD64CMOVLCC(v)
88 case OpAMD64CMOVLCS:
89 return rewriteValueAMD64_OpAMD64CMOVLCS(v)
90 case OpAMD64CMOVLEQ:
91 return rewriteValueAMD64_OpAMD64CMOVLEQ(v)
92 case OpAMD64CMOVLGE:
93 return rewriteValueAMD64_OpAMD64CMOVLGE(v)
94 case OpAMD64CMOVLGT:
95 return rewriteValueAMD64_OpAMD64CMOVLGT(v)
96 case OpAMD64CMOVLHI:
97 return rewriteValueAMD64_OpAMD64CMOVLHI(v)
98 case OpAMD64CMOVLLE:
99 return rewriteValueAMD64_OpAMD64CMOVLLE(v)
100 case OpAMD64CMOVLLS:
101 return rewriteValueAMD64_OpAMD64CMOVLLS(v)
102 case OpAMD64CMOVLLT:
103 return rewriteValueAMD64_OpAMD64CMOVLLT(v)
104 case OpAMD64CMOVLNE:
105 return rewriteValueAMD64_OpAMD64CMOVLNE(v)
106 case OpAMD64CMOVQCC:
107 return rewriteValueAMD64_OpAMD64CMOVQCC(v)
108 case OpAMD64CMOVQCS:
109 return rewriteValueAMD64_OpAMD64CMOVQCS(v)
110 case OpAMD64CMOVQEQ:
111 return rewriteValueAMD64_OpAMD64CMOVQEQ(v)
112 case OpAMD64CMOVQGE:
113 return rewriteValueAMD64_OpAMD64CMOVQGE(v)
114 case OpAMD64CMOVQGT:
115 return rewriteValueAMD64_OpAMD64CMOVQGT(v)
116 case OpAMD64CMOVQHI:
117 return rewriteValueAMD64_OpAMD64CMOVQHI(v)
118 case OpAMD64CMOVQLE:
119 return rewriteValueAMD64_OpAMD64CMOVQLE(v)
120 case OpAMD64CMOVQLS:
121 return rewriteValueAMD64_OpAMD64CMOVQLS(v)
122 case OpAMD64CMOVQLT:
123 return rewriteValueAMD64_OpAMD64CMOVQLT(v)
124 case OpAMD64CMOVQNE:
125 return rewriteValueAMD64_OpAMD64CMOVQNE(v)
126 case OpAMD64CMOVWCC:
127 return rewriteValueAMD64_OpAMD64CMOVWCC(v)
128 case OpAMD64CMOVWCS:
129 return rewriteValueAMD64_OpAMD64CMOVWCS(v)
130 case OpAMD64CMOVWEQ:
131 return rewriteValueAMD64_OpAMD64CMOVWEQ(v)
132 case OpAMD64CMOVWGE:
133 return rewriteValueAMD64_OpAMD64CMOVWGE(v)
134 case OpAMD64CMOVWGT:
135 return rewriteValueAMD64_OpAMD64CMOVWGT(v)
136 case OpAMD64CMOVWHI:
137 return rewriteValueAMD64_OpAMD64CMOVWHI(v)
138 case OpAMD64CMOVWLE:
139 return rewriteValueAMD64_OpAMD64CMOVWLE(v)
140 case OpAMD64CMOVWLS:
141 return rewriteValueAMD64_OpAMD64CMOVWLS(v)
142 case OpAMD64CMOVWLT:
143 return rewriteValueAMD64_OpAMD64CMOVWLT(v)
144 case OpAMD64CMOVWNE:
145 return rewriteValueAMD64_OpAMD64CMOVWNE(v)
146 case OpAMD64CMPB:
147 return rewriteValueAMD64_OpAMD64CMPB(v)
148 case OpAMD64CMPBconst:
149 return rewriteValueAMD64_OpAMD64CMPBconst(v)
150 case OpAMD64CMPBconstload:
151 return rewriteValueAMD64_OpAMD64CMPBconstload(v)
152 case OpAMD64CMPBload:
153 return rewriteValueAMD64_OpAMD64CMPBload(v)
154 case OpAMD64CMPL:
155 return rewriteValueAMD64_OpAMD64CMPL(v)
156 case OpAMD64CMPLconst:
157 return rewriteValueAMD64_OpAMD64CMPLconst(v)
158 case OpAMD64CMPLconstload:
159 return rewriteValueAMD64_OpAMD64CMPLconstload(v)
160 case OpAMD64CMPLload:
161 return rewriteValueAMD64_OpAMD64CMPLload(v)
162 case OpAMD64CMPQ:
163 return rewriteValueAMD64_OpAMD64CMPQ(v)
164 case OpAMD64CMPQconst:
165 return rewriteValueAMD64_OpAMD64CMPQconst(v)
166 case OpAMD64CMPQconstload:
167 return rewriteValueAMD64_OpAMD64CMPQconstload(v)
168 case OpAMD64CMPQload:
169 return rewriteValueAMD64_OpAMD64CMPQload(v)
170 case OpAMD64CMPW:
171 return rewriteValueAMD64_OpAMD64CMPW(v)
172 case OpAMD64CMPWconst:
173 return rewriteValueAMD64_OpAMD64CMPWconst(v)
174 case OpAMD64CMPWconstload:
175 return rewriteValueAMD64_OpAMD64CMPWconstload(v)
176 case OpAMD64CMPWload:
177 return rewriteValueAMD64_OpAMD64CMPWload(v)
178 case OpAMD64CMPXCHGLlock:
179 return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v)
180 case OpAMD64CMPXCHGQlock:
181 return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v)
182 case OpAMD64DIVSD:
183 return rewriteValueAMD64_OpAMD64DIVSD(v)
184 case OpAMD64DIVSDload:
185 return rewriteValueAMD64_OpAMD64DIVSDload(v)
186 case OpAMD64DIVSS:
187 return rewriteValueAMD64_OpAMD64DIVSS(v)
188 case OpAMD64DIVSSload:
189 return rewriteValueAMD64_OpAMD64DIVSSload(v)
190 case OpAMD64HMULL:
191 return rewriteValueAMD64_OpAMD64HMULL(v)
192 case OpAMD64HMULLU:
193 return rewriteValueAMD64_OpAMD64HMULLU(v)
194 case OpAMD64HMULQ:
195 return rewriteValueAMD64_OpAMD64HMULQ(v)
196 case OpAMD64HMULQU:
197 return rewriteValueAMD64_OpAMD64HMULQU(v)
198 case OpAMD64LEAL:
199 return rewriteValueAMD64_OpAMD64LEAL(v)
200 case OpAMD64LEAL1:
201 return rewriteValueAMD64_OpAMD64LEAL1(v)
202 case OpAMD64LEAL2:
203 return rewriteValueAMD64_OpAMD64LEAL2(v)
204 case OpAMD64LEAL4:
205 return rewriteValueAMD64_OpAMD64LEAL4(v)
206 case OpAMD64LEAL8:
207 return rewriteValueAMD64_OpAMD64LEAL8(v)
208 case OpAMD64LEAQ:
209 return rewriteValueAMD64_OpAMD64LEAQ(v)
210 case OpAMD64LEAQ1:
211 return rewriteValueAMD64_OpAMD64LEAQ1(v)
212 case OpAMD64LEAQ2:
213 return rewriteValueAMD64_OpAMD64LEAQ2(v)
214 case OpAMD64LEAQ4:
215 return rewriteValueAMD64_OpAMD64LEAQ4(v)
216 case OpAMD64LEAQ8:
217 return rewriteValueAMD64_OpAMD64LEAQ8(v)
218 case OpAMD64LoweredPanicBoundsCR:
219 return rewriteValueAMD64_OpAMD64LoweredPanicBoundsCR(v)
220 case OpAMD64LoweredPanicBoundsRC:
221 return rewriteValueAMD64_OpAMD64LoweredPanicBoundsRC(v)
222 case OpAMD64LoweredPanicBoundsRR:
223 return rewriteValueAMD64_OpAMD64LoweredPanicBoundsRR(v)
224 case OpAMD64MOVBELstore:
225 return rewriteValueAMD64_OpAMD64MOVBELstore(v)
226 case OpAMD64MOVBEQstore:
227 return rewriteValueAMD64_OpAMD64MOVBEQstore(v)
228 case OpAMD64MOVBEWstore:
229 return rewriteValueAMD64_OpAMD64MOVBEWstore(v)
230 case OpAMD64MOVBQSX:
231 return rewriteValueAMD64_OpAMD64MOVBQSX(v)
232 case OpAMD64MOVBQSXload:
233 return rewriteValueAMD64_OpAMD64MOVBQSXload(v)
234 case OpAMD64MOVBQZX:
235 return rewriteValueAMD64_OpAMD64MOVBQZX(v)
236 case OpAMD64MOVBatomicload:
237 return rewriteValueAMD64_OpAMD64MOVBatomicload(v)
238 case OpAMD64MOVBload:
239 return rewriteValueAMD64_OpAMD64MOVBload(v)
240 case OpAMD64MOVBstore:
241 return rewriteValueAMD64_OpAMD64MOVBstore(v)
242 case OpAMD64MOVBstoreconst:
243 return rewriteValueAMD64_OpAMD64MOVBstoreconst(v)
244 case OpAMD64MOVLQSX:
245 return rewriteValueAMD64_OpAMD64MOVLQSX(v)
246 case OpAMD64MOVLQSXload:
247 return rewriteValueAMD64_OpAMD64MOVLQSXload(v)
248 case OpAMD64MOVLQZX:
249 return rewriteValueAMD64_OpAMD64MOVLQZX(v)
250 case OpAMD64MOVLatomicload:
251 return rewriteValueAMD64_OpAMD64MOVLatomicload(v)
252 case OpAMD64MOVLf2i:
253 return rewriteValueAMD64_OpAMD64MOVLf2i(v)
254 case OpAMD64MOVLi2f:
255 return rewriteValueAMD64_OpAMD64MOVLi2f(v)
256 case OpAMD64MOVLload:
257 return rewriteValueAMD64_OpAMD64MOVLload(v)
258 case OpAMD64MOVLstore:
259 return rewriteValueAMD64_OpAMD64MOVLstore(v)
260 case OpAMD64MOVLstoreconst:
261 return rewriteValueAMD64_OpAMD64MOVLstoreconst(v)
262 case OpAMD64MOVOload:
263 return rewriteValueAMD64_OpAMD64MOVOload(v)
264 case OpAMD64MOVOstore:
265 return rewriteValueAMD64_OpAMD64MOVOstore(v)
266 case OpAMD64MOVOstoreconst:
267 return rewriteValueAMD64_OpAMD64MOVOstoreconst(v)
268 case OpAMD64MOVQatomicload:
269 return rewriteValueAMD64_OpAMD64MOVQatomicload(v)
270 case OpAMD64MOVQf2i:
271 return rewriteValueAMD64_OpAMD64MOVQf2i(v)
272 case OpAMD64MOVQi2f:
273 return rewriteValueAMD64_OpAMD64MOVQi2f(v)
274 case OpAMD64MOVQload:
275 return rewriteValueAMD64_OpAMD64MOVQload(v)
276 case OpAMD64MOVQstore:
277 return rewriteValueAMD64_OpAMD64MOVQstore(v)
278 case OpAMD64MOVQstoreconst:
279 return rewriteValueAMD64_OpAMD64MOVQstoreconst(v)
280 case OpAMD64MOVSDload:
281 return rewriteValueAMD64_OpAMD64MOVSDload(v)
282 case OpAMD64MOVSDstore:
283 return rewriteValueAMD64_OpAMD64MOVSDstore(v)
284 case OpAMD64MOVSSload:
285 return rewriteValueAMD64_OpAMD64MOVSSload(v)
286 case OpAMD64MOVSSstore:
287 return rewriteValueAMD64_OpAMD64MOVSSstore(v)
288 case OpAMD64MOVWQSX:
289 return rewriteValueAMD64_OpAMD64MOVWQSX(v)
290 case OpAMD64MOVWQSXload:
291 return rewriteValueAMD64_OpAMD64MOVWQSXload(v)
292 case OpAMD64MOVWQZX:
293 return rewriteValueAMD64_OpAMD64MOVWQZX(v)
294 case OpAMD64MOVWload:
295 return rewriteValueAMD64_OpAMD64MOVWload(v)
296 case OpAMD64MOVWstore:
297 return rewriteValueAMD64_OpAMD64MOVWstore(v)
298 case OpAMD64MOVWstoreconst:
299 return rewriteValueAMD64_OpAMD64MOVWstoreconst(v)
300 case OpAMD64MULL:
301 return rewriteValueAMD64_OpAMD64MULL(v)
302 case OpAMD64MULLconst:
303 return rewriteValueAMD64_OpAMD64MULLconst(v)
304 case OpAMD64MULQ:
305 return rewriteValueAMD64_OpAMD64MULQ(v)
306 case OpAMD64MULQconst:
307 return rewriteValueAMD64_OpAMD64MULQconst(v)
308 case OpAMD64MULSD:
309 return rewriteValueAMD64_OpAMD64MULSD(v)
310 case OpAMD64MULSDload:
311 return rewriteValueAMD64_OpAMD64MULSDload(v)
312 case OpAMD64MULSS:
313 return rewriteValueAMD64_OpAMD64MULSS(v)
314 case OpAMD64MULSSload:
315 return rewriteValueAMD64_OpAMD64MULSSload(v)
316 case OpAMD64NEGL:
317 return rewriteValueAMD64_OpAMD64NEGL(v)
318 case OpAMD64NEGQ:
319 return rewriteValueAMD64_OpAMD64NEGQ(v)
320 case OpAMD64NOTL:
321 return rewriteValueAMD64_OpAMD64NOTL(v)
322 case OpAMD64NOTQ:
323 return rewriteValueAMD64_OpAMD64NOTQ(v)
324 case OpAMD64ORL:
325 return rewriteValueAMD64_OpAMD64ORL(v)
326 case OpAMD64ORLconst:
327 return rewriteValueAMD64_OpAMD64ORLconst(v)
328 case OpAMD64ORLconstmodify:
329 return rewriteValueAMD64_OpAMD64ORLconstmodify(v)
330 case OpAMD64ORLload:
331 return rewriteValueAMD64_OpAMD64ORLload(v)
332 case OpAMD64ORLmodify:
333 return rewriteValueAMD64_OpAMD64ORLmodify(v)
334 case OpAMD64ORQ:
335 return rewriteValueAMD64_OpAMD64ORQ(v)
336 case OpAMD64ORQconst:
337 return rewriteValueAMD64_OpAMD64ORQconst(v)
338 case OpAMD64ORQconstmodify:
339 return rewriteValueAMD64_OpAMD64ORQconstmodify(v)
340 case OpAMD64ORQload:
341 return rewriteValueAMD64_OpAMD64ORQload(v)
342 case OpAMD64ORQmodify:
343 return rewriteValueAMD64_OpAMD64ORQmodify(v)
344 case OpAMD64ROLB:
345 return rewriteValueAMD64_OpAMD64ROLB(v)
346 case OpAMD64ROLBconst:
347 return rewriteValueAMD64_OpAMD64ROLBconst(v)
348 case OpAMD64ROLL:
349 return rewriteValueAMD64_OpAMD64ROLL(v)
350 case OpAMD64ROLLconst:
351 return rewriteValueAMD64_OpAMD64ROLLconst(v)
352 case OpAMD64ROLQ:
353 return rewriteValueAMD64_OpAMD64ROLQ(v)
354 case OpAMD64ROLQconst:
355 return rewriteValueAMD64_OpAMD64ROLQconst(v)
356 case OpAMD64ROLW:
357 return rewriteValueAMD64_OpAMD64ROLW(v)
358 case OpAMD64ROLWconst:
359 return rewriteValueAMD64_OpAMD64ROLWconst(v)
360 case OpAMD64RORB:
361 return rewriteValueAMD64_OpAMD64RORB(v)
362 case OpAMD64RORL:
363 return rewriteValueAMD64_OpAMD64RORL(v)
364 case OpAMD64RORQ:
365 return rewriteValueAMD64_OpAMD64RORQ(v)
366 case OpAMD64RORW:
367 return rewriteValueAMD64_OpAMD64RORW(v)
368 case OpAMD64SARB:
369 return rewriteValueAMD64_OpAMD64SARB(v)
370 case OpAMD64SARBconst:
371 return rewriteValueAMD64_OpAMD64SARBconst(v)
372 case OpAMD64SARL:
373 return rewriteValueAMD64_OpAMD64SARL(v)
374 case OpAMD64SARLconst:
375 return rewriteValueAMD64_OpAMD64SARLconst(v)
376 case OpAMD64SARQ:
377 return rewriteValueAMD64_OpAMD64SARQ(v)
378 case OpAMD64SARQconst:
379 return rewriteValueAMD64_OpAMD64SARQconst(v)
380 case OpAMD64SARW:
381 return rewriteValueAMD64_OpAMD64SARW(v)
382 case OpAMD64SARWconst:
383 return rewriteValueAMD64_OpAMD64SARWconst(v)
384 case OpAMD64SARXLload:
385 return rewriteValueAMD64_OpAMD64SARXLload(v)
386 case OpAMD64SARXQload:
387 return rewriteValueAMD64_OpAMD64SARXQload(v)
388 case OpAMD64SBBLcarrymask:
389 return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
390 case OpAMD64SBBQ:
391 return rewriteValueAMD64_OpAMD64SBBQ(v)
392 case OpAMD64SBBQcarrymask:
393 return rewriteValueAMD64_OpAMD64SBBQcarrymask(v)
394 case OpAMD64SBBQconst:
395 return rewriteValueAMD64_OpAMD64SBBQconst(v)
396 case OpAMD64SETA:
397 return rewriteValueAMD64_OpAMD64SETA(v)
398 case OpAMD64SETAE:
399 return rewriteValueAMD64_OpAMD64SETAE(v)
400 case OpAMD64SETAEstore:
401 return rewriteValueAMD64_OpAMD64SETAEstore(v)
402 case OpAMD64SETAstore:
403 return rewriteValueAMD64_OpAMD64SETAstore(v)
404 case OpAMD64SETB:
405 return rewriteValueAMD64_OpAMD64SETB(v)
406 case OpAMD64SETBE:
407 return rewriteValueAMD64_OpAMD64SETBE(v)
408 case OpAMD64SETBEstore:
409 return rewriteValueAMD64_OpAMD64SETBEstore(v)
410 case OpAMD64SETBstore:
411 return rewriteValueAMD64_OpAMD64SETBstore(v)
412 case OpAMD64SETEQ:
413 return rewriteValueAMD64_OpAMD64SETEQ(v)
414 case OpAMD64SETEQstore:
415 return rewriteValueAMD64_OpAMD64SETEQstore(v)
416 case OpAMD64SETG:
417 return rewriteValueAMD64_OpAMD64SETG(v)
418 case OpAMD64SETGE:
419 return rewriteValueAMD64_OpAMD64SETGE(v)
420 case OpAMD64SETGEstore:
421 return rewriteValueAMD64_OpAMD64SETGEstore(v)
422 case OpAMD64SETGstore:
423 return rewriteValueAMD64_OpAMD64SETGstore(v)
424 case OpAMD64SETL:
425 return rewriteValueAMD64_OpAMD64SETL(v)
426 case OpAMD64SETLE:
427 return rewriteValueAMD64_OpAMD64SETLE(v)
428 case OpAMD64SETLEstore:
429 return rewriteValueAMD64_OpAMD64SETLEstore(v)
430 case OpAMD64SETLstore:
431 return rewriteValueAMD64_OpAMD64SETLstore(v)
432 case OpAMD64SETNE:
433 return rewriteValueAMD64_OpAMD64SETNE(v)
434 case OpAMD64SETNEstore:
435 return rewriteValueAMD64_OpAMD64SETNEstore(v)
436 case OpAMD64SHLL:
437 return rewriteValueAMD64_OpAMD64SHLL(v)
438 case OpAMD64SHLLconst:
439 return rewriteValueAMD64_OpAMD64SHLLconst(v)
440 case OpAMD64SHLQ:
441 return rewriteValueAMD64_OpAMD64SHLQ(v)
442 case OpAMD64SHLQconst:
443 return rewriteValueAMD64_OpAMD64SHLQconst(v)
444 case OpAMD64SHLXLload:
445 return rewriteValueAMD64_OpAMD64SHLXLload(v)
446 case OpAMD64SHLXQload:
447 return rewriteValueAMD64_OpAMD64SHLXQload(v)
448 case OpAMD64SHRB:
449 return rewriteValueAMD64_OpAMD64SHRB(v)
450 case OpAMD64SHRBconst:
451 return rewriteValueAMD64_OpAMD64SHRBconst(v)
452 case OpAMD64SHRL:
453 return rewriteValueAMD64_OpAMD64SHRL(v)
454 case OpAMD64SHRLconst:
455 return rewriteValueAMD64_OpAMD64SHRLconst(v)
456 case OpAMD64SHRQ:
457 return rewriteValueAMD64_OpAMD64SHRQ(v)
458 case OpAMD64SHRQconst:
459 return rewriteValueAMD64_OpAMD64SHRQconst(v)
460 case OpAMD64SHRW:
461 return rewriteValueAMD64_OpAMD64SHRW(v)
462 case OpAMD64SHRWconst:
463 return rewriteValueAMD64_OpAMD64SHRWconst(v)
464 case OpAMD64SHRXLload:
465 return rewriteValueAMD64_OpAMD64SHRXLload(v)
466 case OpAMD64SHRXQload:
467 return rewriteValueAMD64_OpAMD64SHRXQload(v)
468 case OpAMD64SUBL:
469 return rewriteValueAMD64_OpAMD64SUBL(v)
470 case OpAMD64SUBLconst:
471 return rewriteValueAMD64_OpAMD64SUBLconst(v)
472 case OpAMD64SUBLload:
473 return rewriteValueAMD64_OpAMD64SUBLload(v)
474 case OpAMD64SUBLmodify:
475 return rewriteValueAMD64_OpAMD64SUBLmodify(v)
476 case OpAMD64SUBQ:
477 return rewriteValueAMD64_OpAMD64SUBQ(v)
478 case OpAMD64SUBQborrow:
479 return rewriteValueAMD64_OpAMD64SUBQborrow(v)
480 case OpAMD64SUBQconst:
481 return rewriteValueAMD64_OpAMD64SUBQconst(v)
482 case OpAMD64SUBQload:
483 return rewriteValueAMD64_OpAMD64SUBQload(v)
484 case OpAMD64SUBQmodify:
485 return rewriteValueAMD64_OpAMD64SUBQmodify(v)
486 case OpAMD64SUBSD:
487 return rewriteValueAMD64_OpAMD64SUBSD(v)
488 case OpAMD64SUBSDload:
489 return rewriteValueAMD64_OpAMD64SUBSDload(v)
490 case OpAMD64SUBSS:
491 return rewriteValueAMD64_OpAMD64SUBSS(v)
492 case OpAMD64SUBSSload:
493 return rewriteValueAMD64_OpAMD64SUBSSload(v)
494 case OpAMD64TESTB:
495 return rewriteValueAMD64_OpAMD64TESTB(v)
496 case OpAMD64TESTBconst:
497 return rewriteValueAMD64_OpAMD64TESTBconst(v)
498 case OpAMD64TESTL:
499 return rewriteValueAMD64_OpAMD64TESTL(v)
500 case OpAMD64TESTLconst:
501 return rewriteValueAMD64_OpAMD64TESTLconst(v)
502 case OpAMD64TESTQ:
503 return rewriteValueAMD64_OpAMD64TESTQ(v)
504 case OpAMD64TESTQconst:
505 return rewriteValueAMD64_OpAMD64TESTQconst(v)
506 case OpAMD64TESTW:
507 return rewriteValueAMD64_OpAMD64TESTW(v)
508 case OpAMD64TESTWconst:
509 return rewriteValueAMD64_OpAMD64TESTWconst(v)
510 case OpAMD64XADDLlock:
511 return rewriteValueAMD64_OpAMD64XADDLlock(v)
512 case OpAMD64XADDQlock:
513 return rewriteValueAMD64_OpAMD64XADDQlock(v)
514 case OpAMD64XCHGL:
515 return rewriteValueAMD64_OpAMD64XCHGL(v)
516 case OpAMD64XCHGQ:
517 return rewriteValueAMD64_OpAMD64XCHGQ(v)
518 case OpAMD64XORL:
519 return rewriteValueAMD64_OpAMD64XORL(v)
520 case OpAMD64XORLconst:
521 return rewriteValueAMD64_OpAMD64XORLconst(v)
522 case OpAMD64XORLconstmodify:
523 return rewriteValueAMD64_OpAMD64XORLconstmodify(v)
524 case OpAMD64XORLload:
525 return rewriteValueAMD64_OpAMD64XORLload(v)
526 case OpAMD64XORLmodify:
527 return rewriteValueAMD64_OpAMD64XORLmodify(v)
528 case OpAMD64XORQ:
529 return rewriteValueAMD64_OpAMD64XORQ(v)
530 case OpAMD64XORQconst:
531 return rewriteValueAMD64_OpAMD64XORQconst(v)
532 case OpAMD64XORQconstmodify:
533 return rewriteValueAMD64_OpAMD64XORQconstmodify(v)
534 case OpAMD64XORQload:
535 return rewriteValueAMD64_OpAMD64XORQload(v)
536 case OpAMD64XORQmodify:
537 return rewriteValueAMD64_OpAMD64XORQmodify(v)
538 case OpAdd16:
539 v.Op = OpAMD64ADDL
540 return true
541 case OpAdd32:
542 v.Op = OpAMD64ADDL
543 return true
544 case OpAdd32F:
545 v.Op = OpAMD64ADDSS
546 return true
547 case OpAdd64:
548 v.Op = OpAMD64ADDQ
549 return true
550 case OpAdd64F:
551 v.Op = OpAMD64ADDSD
552 return true
553 case OpAdd8:
554 v.Op = OpAMD64ADDL
555 return true
556 case OpAddPtr:
557 v.Op = OpAMD64ADDQ
558 return true
559 case OpAddr:
560 return rewriteValueAMD64_OpAddr(v)
561 case OpAnd16:
562 v.Op = OpAMD64ANDL
563 return true
564 case OpAnd32:
565 v.Op = OpAMD64ANDL
566 return true
567 case OpAnd64:
568 v.Op = OpAMD64ANDQ
569 return true
570 case OpAnd8:
571 v.Op = OpAMD64ANDL
572 return true
573 case OpAndB:
574 v.Op = OpAMD64ANDL
575 return true
576 case OpAtomicAdd32:
577 return rewriteValueAMD64_OpAtomicAdd32(v)
578 case OpAtomicAdd64:
579 return rewriteValueAMD64_OpAtomicAdd64(v)
580 case OpAtomicAnd32:
581 return rewriteValueAMD64_OpAtomicAnd32(v)
582 case OpAtomicAnd32value:
583 return rewriteValueAMD64_OpAtomicAnd32value(v)
584 case OpAtomicAnd64value:
585 return rewriteValueAMD64_OpAtomicAnd64value(v)
586 case OpAtomicAnd8:
587 return rewriteValueAMD64_OpAtomicAnd8(v)
588 case OpAtomicCompareAndSwap32:
589 return rewriteValueAMD64_OpAtomicCompareAndSwap32(v)
590 case OpAtomicCompareAndSwap64:
591 return rewriteValueAMD64_OpAtomicCompareAndSwap64(v)
592 case OpAtomicExchange32:
593 return rewriteValueAMD64_OpAtomicExchange32(v)
594 case OpAtomicExchange64:
595 return rewriteValueAMD64_OpAtomicExchange64(v)
596 case OpAtomicExchange8:
597 return rewriteValueAMD64_OpAtomicExchange8(v)
598 case OpAtomicLoad32:
599 return rewriteValueAMD64_OpAtomicLoad32(v)
600 case OpAtomicLoad64:
601 return rewriteValueAMD64_OpAtomicLoad64(v)
602 case OpAtomicLoad8:
603 return rewriteValueAMD64_OpAtomicLoad8(v)
604 case OpAtomicLoadPtr:
605 return rewriteValueAMD64_OpAtomicLoadPtr(v)
606 case OpAtomicOr32:
607 return rewriteValueAMD64_OpAtomicOr32(v)
608 case OpAtomicOr32value:
609 return rewriteValueAMD64_OpAtomicOr32value(v)
610 case OpAtomicOr64value:
611 return rewriteValueAMD64_OpAtomicOr64value(v)
612 case OpAtomicOr8:
613 return rewriteValueAMD64_OpAtomicOr8(v)
614 case OpAtomicStore32:
615 return rewriteValueAMD64_OpAtomicStore32(v)
616 case OpAtomicStore64:
617 return rewriteValueAMD64_OpAtomicStore64(v)
618 case OpAtomicStore8:
619 return rewriteValueAMD64_OpAtomicStore8(v)
620 case OpAtomicStorePtrNoWB:
621 return rewriteValueAMD64_OpAtomicStorePtrNoWB(v)
622 case OpAvg64u:
623 v.Op = OpAMD64AVGQU
624 return true
625 case OpBitLen16:
626 return rewriteValueAMD64_OpBitLen16(v)
627 case OpBitLen32:
628 return rewriteValueAMD64_OpBitLen32(v)
629 case OpBitLen64:
630 return rewriteValueAMD64_OpBitLen64(v)
631 case OpBitLen8:
632 return rewriteValueAMD64_OpBitLen8(v)
633 case OpBswap16:
634 return rewriteValueAMD64_OpBswap16(v)
635 case OpBswap32:
636 v.Op = OpAMD64BSWAPL
637 return true
638 case OpBswap64:
639 v.Op = OpAMD64BSWAPQ
640 return true
641 case OpCeil:
642 return rewriteValueAMD64_OpCeil(v)
643 case OpClosureCall:
644 v.Op = OpAMD64CALLclosure
645 return true
646 case OpCom16:
647 v.Op = OpAMD64NOTL
648 return true
649 case OpCom32:
650 v.Op = OpAMD64NOTL
651 return true
652 case OpCom64:
653 v.Op = OpAMD64NOTQ
654 return true
655 case OpCom8:
656 v.Op = OpAMD64NOTL
657 return true
658 case OpCondSelect:
659 return rewriteValueAMD64_OpCondSelect(v)
660 case OpConst16:
661 return rewriteValueAMD64_OpConst16(v)
662 case OpConst32:
663 v.Op = OpAMD64MOVLconst
664 return true
665 case OpConst32F:
666 v.Op = OpAMD64MOVSSconst
667 return true
668 case OpConst64:
669 v.Op = OpAMD64MOVQconst
670 return true
671 case OpConst64F:
672 v.Op = OpAMD64MOVSDconst
673 return true
674 case OpConst8:
675 return rewriteValueAMD64_OpConst8(v)
676 case OpConstBool:
677 return rewriteValueAMD64_OpConstBool(v)
678 case OpConstNil:
679 return rewriteValueAMD64_OpConstNil(v)
680 case OpCtz16:
681 return rewriteValueAMD64_OpCtz16(v)
682 case OpCtz16NonZero:
683 return rewriteValueAMD64_OpCtz16NonZero(v)
684 case OpCtz32:
685 return rewriteValueAMD64_OpCtz32(v)
686 case OpCtz32NonZero:
687 return rewriteValueAMD64_OpCtz32NonZero(v)
688 case OpCtz64:
689 return rewriteValueAMD64_OpCtz64(v)
690 case OpCtz64NonZero:
691 return rewriteValueAMD64_OpCtz64NonZero(v)
692 case OpCtz8:
693 return rewriteValueAMD64_OpCtz8(v)
694 case OpCtz8NonZero:
695 return rewriteValueAMD64_OpCtz8NonZero(v)
696 case OpCvt32Fto32:
697 v.Op = OpAMD64CVTTSS2SL
698 return true
699 case OpCvt32Fto64:
700 v.Op = OpAMD64CVTTSS2SQ
701 return true
702 case OpCvt32Fto64F:
703 v.Op = OpAMD64CVTSS2SD
704 return true
705 case OpCvt32to32F:
706 v.Op = OpAMD64CVTSL2SS
707 return true
708 case OpCvt32to64F:
709 v.Op = OpAMD64CVTSL2SD
710 return true
711 case OpCvt64Fto32:
712 v.Op = OpAMD64CVTTSD2SL
713 return true
714 case OpCvt64Fto32F:
715 v.Op = OpAMD64CVTSD2SS
716 return true
717 case OpCvt64Fto64:
718 v.Op = OpAMD64CVTTSD2SQ
719 return true
720 case OpCvt64to32F:
721 v.Op = OpAMD64CVTSQ2SS
722 return true
723 case OpCvt64to64F:
724 v.Op = OpAMD64CVTSQ2SD
725 return true
726 case OpCvtBoolToUint8:
727 v.Op = OpCopy
728 return true
729 case OpDiv128u:
730 v.Op = OpAMD64DIVQU2
731 return true
732 case OpDiv16:
733 return rewriteValueAMD64_OpDiv16(v)
734 case OpDiv16u:
735 return rewriteValueAMD64_OpDiv16u(v)
736 case OpDiv32:
737 return rewriteValueAMD64_OpDiv32(v)
738 case OpDiv32F:
739 v.Op = OpAMD64DIVSS
740 return true
741 case OpDiv32u:
742 return rewriteValueAMD64_OpDiv32u(v)
743 case OpDiv64:
744 return rewriteValueAMD64_OpDiv64(v)
745 case OpDiv64F:
746 v.Op = OpAMD64DIVSD
747 return true
748 case OpDiv64u:
749 return rewriteValueAMD64_OpDiv64u(v)
750 case OpDiv8:
751 return rewriteValueAMD64_OpDiv8(v)
752 case OpDiv8u:
753 return rewriteValueAMD64_OpDiv8u(v)
754 case OpEq16:
755 return rewriteValueAMD64_OpEq16(v)
756 case OpEq32:
757 return rewriteValueAMD64_OpEq32(v)
758 case OpEq32F:
759 return rewriteValueAMD64_OpEq32F(v)
760 case OpEq64:
761 return rewriteValueAMD64_OpEq64(v)
762 case OpEq64F:
763 return rewriteValueAMD64_OpEq64F(v)
764 case OpEq8:
765 return rewriteValueAMD64_OpEq8(v)
766 case OpEqB:
767 return rewriteValueAMD64_OpEqB(v)
768 case OpEqPtr:
769 return rewriteValueAMD64_OpEqPtr(v)
770 case OpFMA:
771 return rewriteValueAMD64_OpFMA(v)
772 case OpFloor:
773 return rewriteValueAMD64_OpFloor(v)
774 case OpGetCallerPC:
775 v.Op = OpAMD64LoweredGetCallerPC
776 return true
777 case OpGetCallerSP:
778 v.Op = OpAMD64LoweredGetCallerSP
779 return true
780 case OpGetClosurePtr:
781 v.Op = OpAMD64LoweredGetClosurePtr
782 return true
783 case OpGetG:
784 return rewriteValueAMD64_OpGetG(v)
785 case OpHasCPUFeature:
786 return rewriteValueAMD64_OpHasCPUFeature(v)
787 case OpHmul32:
788 v.Op = OpAMD64HMULL
789 return true
790 case OpHmul32u:
791 v.Op = OpAMD64HMULLU
792 return true
793 case OpHmul64:
794 v.Op = OpAMD64HMULQ
795 return true
796 case OpHmul64u:
797 v.Op = OpAMD64HMULQU
798 return true
799 case OpInterCall:
800 v.Op = OpAMD64CALLinter
801 return true
802 case OpIsInBounds:
803 return rewriteValueAMD64_OpIsInBounds(v)
804 case OpIsNonNil:
805 return rewriteValueAMD64_OpIsNonNil(v)
806 case OpIsSliceInBounds:
807 return rewriteValueAMD64_OpIsSliceInBounds(v)
808 case OpLeq16:
809 return rewriteValueAMD64_OpLeq16(v)
810 case OpLeq16U:
811 return rewriteValueAMD64_OpLeq16U(v)
812 case OpLeq32:
813 return rewriteValueAMD64_OpLeq32(v)
814 case OpLeq32F:
815 return rewriteValueAMD64_OpLeq32F(v)
816 case OpLeq32U:
817 return rewriteValueAMD64_OpLeq32U(v)
818 case OpLeq64:
819 return rewriteValueAMD64_OpLeq64(v)
820 case OpLeq64F:
821 return rewriteValueAMD64_OpLeq64F(v)
822 case OpLeq64U:
823 return rewriteValueAMD64_OpLeq64U(v)
824 case OpLeq8:
825 return rewriteValueAMD64_OpLeq8(v)
826 case OpLeq8U:
827 return rewriteValueAMD64_OpLeq8U(v)
828 case OpLess16:
829 return rewriteValueAMD64_OpLess16(v)
830 case OpLess16U:
831 return rewriteValueAMD64_OpLess16U(v)
832 case OpLess32:
833 return rewriteValueAMD64_OpLess32(v)
834 case OpLess32F:
835 return rewriteValueAMD64_OpLess32F(v)
836 case OpLess32U:
837 return rewriteValueAMD64_OpLess32U(v)
838 case OpLess64:
839 return rewriteValueAMD64_OpLess64(v)
840 case OpLess64F:
841 return rewriteValueAMD64_OpLess64F(v)
842 case OpLess64U:
843 return rewriteValueAMD64_OpLess64U(v)
844 case OpLess8:
845 return rewriteValueAMD64_OpLess8(v)
846 case OpLess8U:
847 return rewriteValueAMD64_OpLess8U(v)
848 case OpLoad:
849 return rewriteValueAMD64_OpLoad(v)
850 case OpLocalAddr:
851 return rewriteValueAMD64_OpLocalAddr(v)
852 case OpLsh16x16:
853 return rewriteValueAMD64_OpLsh16x16(v)
854 case OpLsh16x32:
855 return rewriteValueAMD64_OpLsh16x32(v)
856 case OpLsh16x64:
857 return rewriteValueAMD64_OpLsh16x64(v)
858 case OpLsh16x8:
859 return rewriteValueAMD64_OpLsh16x8(v)
860 case OpLsh32x16:
861 return rewriteValueAMD64_OpLsh32x16(v)
862 case OpLsh32x32:
863 return rewriteValueAMD64_OpLsh32x32(v)
864 case OpLsh32x64:
865 return rewriteValueAMD64_OpLsh32x64(v)
866 case OpLsh32x8:
867 return rewriteValueAMD64_OpLsh32x8(v)
868 case OpLsh64x16:
869 return rewriteValueAMD64_OpLsh64x16(v)
870 case OpLsh64x32:
871 return rewriteValueAMD64_OpLsh64x32(v)
872 case OpLsh64x64:
873 return rewriteValueAMD64_OpLsh64x64(v)
874 case OpLsh64x8:
875 return rewriteValueAMD64_OpLsh64x8(v)
876 case OpLsh8x16:
877 return rewriteValueAMD64_OpLsh8x16(v)
878 case OpLsh8x32:
879 return rewriteValueAMD64_OpLsh8x32(v)
880 case OpLsh8x64:
881 return rewriteValueAMD64_OpLsh8x64(v)
882 case OpLsh8x8:
883 return rewriteValueAMD64_OpLsh8x8(v)
884 case OpMax32F:
885 return rewriteValueAMD64_OpMax32F(v)
886 case OpMax64F:
887 return rewriteValueAMD64_OpMax64F(v)
888 case OpMin32F:
889 return rewriteValueAMD64_OpMin32F(v)
890 case OpMin64F:
891 return rewriteValueAMD64_OpMin64F(v)
892 case OpMod16:
893 return rewriteValueAMD64_OpMod16(v)
894 case OpMod16u:
895 return rewriteValueAMD64_OpMod16u(v)
896 case OpMod32:
897 return rewriteValueAMD64_OpMod32(v)
898 case OpMod32u:
899 return rewriteValueAMD64_OpMod32u(v)
900 case OpMod64:
901 return rewriteValueAMD64_OpMod64(v)
902 case OpMod64u:
903 return rewriteValueAMD64_OpMod64u(v)
904 case OpMod8:
905 return rewriteValueAMD64_OpMod8(v)
906 case OpMod8u:
907 return rewriteValueAMD64_OpMod8u(v)
908 case OpMove:
909 return rewriteValueAMD64_OpMove(v)
910 case OpMul16:
911 v.Op = OpAMD64MULL
912 return true
913 case OpMul32:
914 v.Op = OpAMD64MULL
915 return true
916 case OpMul32F:
917 v.Op = OpAMD64MULSS
918 return true
919 case OpMul64:
920 v.Op = OpAMD64MULQ
921 return true
922 case OpMul64F:
923 v.Op = OpAMD64MULSD
924 return true
925 case OpMul64uhilo:
926 v.Op = OpAMD64MULQU2
927 return true
928 case OpMul8:
929 v.Op = OpAMD64MULL
930 return true
931 case OpNeg16:
932 v.Op = OpAMD64NEGL
933 return true
934 case OpNeg32:
935 v.Op = OpAMD64NEGL
936 return true
937 case OpNeg32F:
938 return rewriteValueAMD64_OpNeg32F(v)
939 case OpNeg64:
940 v.Op = OpAMD64NEGQ
941 return true
942 case OpNeg64F:
943 return rewriteValueAMD64_OpNeg64F(v)
944 case OpNeg8:
945 v.Op = OpAMD64NEGL
946 return true
947 case OpNeq16:
948 return rewriteValueAMD64_OpNeq16(v)
949 case OpNeq32:
950 return rewriteValueAMD64_OpNeq32(v)
951 case OpNeq32F:
952 return rewriteValueAMD64_OpNeq32F(v)
953 case OpNeq64:
954 return rewriteValueAMD64_OpNeq64(v)
955 case OpNeq64F:
956 return rewriteValueAMD64_OpNeq64F(v)
957 case OpNeq8:
958 return rewriteValueAMD64_OpNeq8(v)
959 case OpNeqB:
960 return rewriteValueAMD64_OpNeqB(v)
961 case OpNeqPtr:
962 return rewriteValueAMD64_OpNeqPtr(v)
963 case OpNilCheck:
964 v.Op = OpAMD64LoweredNilCheck
965 return true
966 case OpNot:
967 return rewriteValueAMD64_OpNot(v)
968 case OpOffPtr:
969 return rewriteValueAMD64_OpOffPtr(v)
970 case OpOr16:
971 v.Op = OpAMD64ORL
972 return true
973 case OpOr32:
974 v.Op = OpAMD64ORL
975 return true
976 case OpOr64:
977 v.Op = OpAMD64ORQ
978 return true
979 case OpOr8:
980 v.Op = OpAMD64ORL
981 return true
982 case OpOrB:
983 v.Op = OpAMD64ORL
984 return true
985 case OpPanicBounds:
986 v.Op = OpAMD64LoweredPanicBoundsRR
987 return true
988 case OpPopCount16:
989 return rewriteValueAMD64_OpPopCount16(v)
990 case OpPopCount32:
991 v.Op = OpAMD64POPCNTL
992 return true
993 case OpPopCount64:
994 v.Op = OpAMD64POPCNTQ
995 return true
996 case OpPopCount8:
997 return rewriteValueAMD64_OpPopCount8(v)
998 case OpPrefetchCache:
999 v.Op = OpAMD64PrefetchT0
1000 return true
1001 case OpPrefetchCacheStreamed:
1002 v.Op = OpAMD64PrefetchNTA
1003 return true
1004 case OpRotateLeft16:
1005 v.Op = OpAMD64ROLW
1006 return true
1007 case OpRotateLeft32:
1008 v.Op = OpAMD64ROLL
1009 return true
1010 case OpRotateLeft64:
1011 v.Op = OpAMD64ROLQ
1012 return true
1013 case OpRotateLeft8:
1014 v.Op = OpAMD64ROLB
1015 return true
1016 case OpRound32F:
1017 v.Op = OpAMD64LoweredRound32F
1018 return true
1019 case OpRound64F:
1020 v.Op = OpAMD64LoweredRound64F
1021 return true
1022 case OpRoundToEven:
1023 return rewriteValueAMD64_OpRoundToEven(v)
1024 case OpRsh16Ux16:
1025 return rewriteValueAMD64_OpRsh16Ux16(v)
1026 case OpRsh16Ux32:
1027 return rewriteValueAMD64_OpRsh16Ux32(v)
1028 case OpRsh16Ux64:
1029 return rewriteValueAMD64_OpRsh16Ux64(v)
1030 case OpRsh16Ux8:
1031 return rewriteValueAMD64_OpRsh16Ux8(v)
1032 case OpRsh16x16:
1033 return rewriteValueAMD64_OpRsh16x16(v)
1034 case OpRsh16x32:
1035 return rewriteValueAMD64_OpRsh16x32(v)
1036 case OpRsh16x64:
1037 return rewriteValueAMD64_OpRsh16x64(v)
1038 case OpRsh16x8:
1039 return rewriteValueAMD64_OpRsh16x8(v)
1040 case OpRsh32Ux16:
1041 return rewriteValueAMD64_OpRsh32Ux16(v)
1042 case OpRsh32Ux32:
1043 return rewriteValueAMD64_OpRsh32Ux32(v)
1044 case OpRsh32Ux64:
1045 return rewriteValueAMD64_OpRsh32Ux64(v)
1046 case OpRsh32Ux8:
1047 return rewriteValueAMD64_OpRsh32Ux8(v)
1048 case OpRsh32x16:
1049 return rewriteValueAMD64_OpRsh32x16(v)
1050 case OpRsh32x32:
1051 return rewriteValueAMD64_OpRsh32x32(v)
1052 case OpRsh32x64:
1053 return rewriteValueAMD64_OpRsh32x64(v)
1054 case OpRsh32x8:
1055 return rewriteValueAMD64_OpRsh32x8(v)
1056 case OpRsh64Ux16:
1057 return rewriteValueAMD64_OpRsh64Ux16(v)
1058 case OpRsh64Ux32:
1059 return rewriteValueAMD64_OpRsh64Ux32(v)
1060 case OpRsh64Ux64:
1061 return rewriteValueAMD64_OpRsh64Ux64(v)
1062 case OpRsh64Ux8:
1063 return rewriteValueAMD64_OpRsh64Ux8(v)
1064 case OpRsh64x16:
1065 return rewriteValueAMD64_OpRsh64x16(v)
1066 case OpRsh64x32:
1067 return rewriteValueAMD64_OpRsh64x32(v)
1068 case OpRsh64x64:
1069 return rewriteValueAMD64_OpRsh64x64(v)
1070 case OpRsh64x8:
1071 return rewriteValueAMD64_OpRsh64x8(v)
1072 case OpRsh8Ux16:
1073 return rewriteValueAMD64_OpRsh8Ux16(v)
1074 case OpRsh8Ux32:
1075 return rewriteValueAMD64_OpRsh8Ux32(v)
1076 case OpRsh8Ux64:
1077 return rewriteValueAMD64_OpRsh8Ux64(v)
1078 case OpRsh8Ux8:
1079 return rewriteValueAMD64_OpRsh8Ux8(v)
1080 case OpRsh8x16:
1081 return rewriteValueAMD64_OpRsh8x16(v)
1082 case OpRsh8x32:
1083 return rewriteValueAMD64_OpRsh8x32(v)
1084 case OpRsh8x64:
1085 return rewriteValueAMD64_OpRsh8x64(v)
1086 case OpRsh8x8:
1087 return rewriteValueAMD64_OpRsh8x8(v)
1088 case OpSelect0:
1089 return rewriteValueAMD64_OpSelect0(v)
1090 case OpSelect1:
1091 return rewriteValueAMD64_OpSelect1(v)
1092 case OpSelectN:
1093 return rewriteValueAMD64_OpSelectN(v)
1094 case OpSignExt16to32:
1095 v.Op = OpAMD64MOVWQSX
1096 return true
1097 case OpSignExt16to64:
1098 v.Op = OpAMD64MOVWQSX
1099 return true
1100 case OpSignExt32to64:
1101 v.Op = OpAMD64MOVLQSX
1102 return true
1103 case OpSignExt8to16:
1104 v.Op = OpAMD64MOVBQSX
1105 return true
1106 case OpSignExt8to32:
1107 v.Op = OpAMD64MOVBQSX
1108 return true
1109 case OpSignExt8to64:
1110 v.Op = OpAMD64MOVBQSX
1111 return true
1112 case OpSlicemask:
1113 return rewriteValueAMD64_OpSlicemask(v)
1114 case OpSpectreIndex:
1115 return rewriteValueAMD64_OpSpectreIndex(v)
1116 case OpSpectreSliceIndex:
1117 return rewriteValueAMD64_OpSpectreSliceIndex(v)
1118 case OpSqrt:
1119 v.Op = OpAMD64SQRTSD
1120 return true
1121 case OpSqrt32:
1122 v.Op = OpAMD64SQRTSS
1123 return true
1124 case OpStaticCall:
1125 v.Op = OpAMD64CALLstatic
1126 return true
1127 case OpStore:
1128 return rewriteValueAMD64_OpStore(v)
1129 case OpSub16:
1130 v.Op = OpAMD64SUBL
1131 return true
1132 case OpSub32:
1133 v.Op = OpAMD64SUBL
1134 return true
1135 case OpSub32F:
1136 v.Op = OpAMD64SUBSS
1137 return true
1138 case OpSub64:
1139 v.Op = OpAMD64SUBQ
1140 return true
1141 case OpSub64F:
1142 v.Op = OpAMD64SUBSD
1143 return true
1144 case OpSub8:
1145 v.Op = OpAMD64SUBL
1146 return true
1147 case OpSubPtr:
1148 v.Op = OpAMD64SUBQ
1149 return true
1150 case OpTailCall:
1151 v.Op = OpAMD64CALLtail
1152 return true
1153 case OpTrunc:
1154 return rewriteValueAMD64_OpTrunc(v)
1155 case OpTrunc16to8:
1156 v.Op = OpCopy
1157 return true
1158 case OpTrunc32to16:
1159 v.Op = OpCopy
1160 return true
1161 case OpTrunc32to8:
1162 v.Op = OpCopy
1163 return true
1164 case OpTrunc64to16:
1165 v.Op = OpCopy
1166 return true
1167 case OpTrunc64to32:
1168 v.Op = OpCopy
1169 return true
1170 case OpTrunc64to8:
1171 v.Op = OpCopy
1172 return true
1173 case OpWB:
1174 v.Op = OpAMD64LoweredWB
1175 return true
1176 case OpXor16:
1177 v.Op = OpAMD64XORL
1178 return true
1179 case OpXor32:
1180 v.Op = OpAMD64XORL
1181 return true
1182 case OpXor64:
1183 v.Op = OpAMD64XORQ
1184 return true
1185 case OpXor8:
1186 v.Op = OpAMD64XORL
1187 return true
1188 case OpZero:
1189 return rewriteValueAMD64_OpZero(v)
1190 case OpZeroExt16to32:
1191 v.Op = OpAMD64MOVWQZX
1192 return true
1193 case OpZeroExt16to64:
1194 v.Op = OpAMD64MOVWQZX
1195 return true
1196 case OpZeroExt32to64:
1197 v.Op = OpAMD64MOVLQZX
1198 return true
1199 case OpZeroExt8to16:
1200 v.Op = OpAMD64MOVBQZX
1201 return true
1202 case OpZeroExt8to32:
1203 v.Op = OpAMD64MOVBQZX
1204 return true
1205 case OpZeroExt8to64:
1206 v.Op = OpAMD64MOVBQZX
1207 return true
1208 }
1209 return false
1210 }
1211 func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool {
1212 v_2 := v.Args[2]
1213 v_1 := v.Args[1]
1214 v_0 := v.Args[0]
1215
1216
1217
1218 for {
1219 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1220 x := v_0
1221 if v_1.Op != OpAMD64MOVQconst {
1222 continue
1223 }
1224 c := auxIntToInt64(v_1.AuxInt)
1225 carry := v_2
1226 if !(is32Bit(c)) {
1227 continue
1228 }
1229 v.reset(OpAMD64ADCQconst)
1230 v.AuxInt = int32ToAuxInt(int32(c))
1231 v.AddArg2(x, carry)
1232 return true
1233 }
1234 break
1235 }
1236
1237
1238 for {
1239 x := v_0
1240 y := v_1
1241 if v_2.Op != OpAMD64FlagEQ {
1242 break
1243 }
1244 v.reset(OpAMD64ADDQcarry)
1245 v.AddArg2(x, y)
1246 return true
1247 }
1248 return false
1249 }
1250 func rewriteValueAMD64_OpAMD64ADCQconst(v *Value) bool {
1251 v_1 := v.Args[1]
1252 v_0 := v.Args[0]
1253
1254
1255 for {
1256 c := auxIntToInt32(v.AuxInt)
1257 x := v_0
1258 if v_1.Op != OpAMD64FlagEQ {
1259 break
1260 }
1261 v.reset(OpAMD64ADDQconstcarry)
1262 v.AuxInt = int32ToAuxInt(c)
1263 v.AddArg(x)
1264 return true
1265 }
1266 return false
1267 }
1268 func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
1269 v_1 := v.Args[1]
1270 v_0 := v.Args[0]
1271
1272
1273 for {
1274 if v_0.Op != OpAMD64SHRLconst || auxIntToInt8(v_0.AuxInt) != 1 {
1275 break
1276 }
1277 x := v_0.Args[0]
1278 if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 1 || x != v_1.Args[0] {
1279 break
1280 }
1281 v.reset(OpAMD64ANDLconst)
1282 v.AuxInt = int32ToAuxInt(-2)
1283 v.AddArg(x)
1284 return true
1285 }
1286
1287
1288 for {
1289 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1290 x := v_0
1291 if v_1.Op != OpAMD64MOVLconst {
1292 continue
1293 }
1294 c := auxIntToInt32(v_1.AuxInt)
1295 v.reset(OpAMD64ADDLconst)
1296 v.AuxInt = int32ToAuxInt(c)
1297 v.AddArg(x)
1298 return true
1299 }
1300 break
1301 }
1302
1303
1304 for {
1305 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1306 x := v_0
1307 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
1308 continue
1309 }
1310 y := v_1.Args[0]
1311 v.reset(OpAMD64LEAL8)
1312 v.AddArg2(x, y)
1313 return true
1314 }
1315 break
1316 }
1317
1318
1319 for {
1320 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1321 x := v_0
1322 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
1323 continue
1324 }
1325 y := v_1.Args[0]
1326 v.reset(OpAMD64LEAL4)
1327 v.AddArg2(x, y)
1328 return true
1329 }
1330 break
1331 }
1332
1333
1334 for {
1335 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1336 x := v_0
1337 if v_1.Op != OpAMD64ADDL {
1338 continue
1339 }
1340 y := v_1.Args[1]
1341 if y != v_1.Args[0] {
1342 continue
1343 }
1344 v.reset(OpAMD64LEAL2)
1345 v.AddArg2(x, y)
1346 return true
1347 }
1348 break
1349 }
1350
1351
1352 for {
1353 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1354 x := v_0
1355 if v_1.Op != OpAMD64ADDL {
1356 continue
1357 }
1358 _ = v_1.Args[1]
1359 v_1_0 := v_1.Args[0]
1360 v_1_1 := v_1.Args[1]
1361 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
1362 if x != v_1_0 {
1363 continue
1364 }
1365 y := v_1_1
1366 v.reset(OpAMD64LEAL2)
1367 v.AddArg2(y, x)
1368 return true
1369 }
1370 }
1371 break
1372 }
1373
1374
1375 for {
1376 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1377 if v_0.Op != OpAMD64ADDLconst {
1378 continue
1379 }
1380 c := auxIntToInt32(v_0.AuxInt)
1381 x := v_0.Args[0]
1382 y := v_1
1383 v.reset(OpAMD64LEAL1)
1384 v.AuxInt = int32ToAuxInt(c)
1385 v.AddArg2(x, y)
1386 return true
1387 }
1388 break
1389 }
1390
1391
1392
1393 for {
1394 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1395 x := v_0
1396 if v_1.Op != OpAMD64LEAL {
1397 continue
1398 }
1399 c := auxIntToInt32(v_1.AuxInt)
1400 s := auxToSym(v_1.Aux)
1401 y := v_1.Args[0]
1402 if !(x.Op != OpSB && y.Op != OpSB) {
1403 continue
1404 }
1405 v.reset(OpAMD64LEAL1)
1406 v.AuxInt = int32ToAuxInt(c)
1407 v.Aux = symToAux(s)
1408 v.AddArg2(x, y)
1409 return true
1410 }
1411 break
1412 }
1413
1414
1415 for {
1416 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1417 x := v_0
1418 if v_1.Op != OpAMD64NEGL {
1419 continue
1420 }
1421 y := v_1.Args[0]
1422 v.reset(OpAMD64SUBL)
1423 v.AddArg2(x, y)
1424 return true
1425 }
1426 break
1427 }
1428
1429
1430
1431 for {
1432 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1433 x := v_0
1434 l := v_1
1435 if l.Op != OpAMD64MOVLload {
1436 continue
1437 }
1438 off := auxIntToInt32(l.AuxInt)
1439 sym := auxToSym(l.Aux)
1440 mem := l.Args[1]
1441 ptr := l.Args[0]
1442 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
1443 continue
1444 }
1445 v.reset(OpAMD64ADDLload)
1446 v.AuxInt = int32ToAuxInt(off)
1447 v.Aux = symToAux(sym)
1448 v.AddArg3(x, ptr, mem)
1449 return true
1450 }
1451 break
1452 }
1453 return false
1454 }
1455 func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
1456 v_0 := v.Args[0]
1457
1458
1459 for {
1460 c := auxIntToInt32(v.AuxInt)
1461 if v_0.Op != OpAMD64ADDL {
1462 break
1463 }
1464 y := v_0.Args[1]
1465 x := v_0.Args[0]
1466 v.reset(OpAMD64LEAL1)
1467 v.AuxInt = int32ToAuxInt(c)
1468 v.AddArg2(x, y)
1469 return true
1470 }
1471
1472
1473 for {
1474 c := auxIntToInt32(v.AuxInt)
1475 if v_0.Op != OpAMD64ADDL {
1476 break
1477 }
1478 x := v_0.Args[1]
1479 if x != v_0.Args[0] {
1480 break
1481 }
1482 v.reset(OpAMD64LEAL1)
1483 v.AuxInt = int32ToAuxInt(c)
1484 v.AddArg2(x, x)
1485 return true
1486 }
1487
1488
1489
1490 for {
1491 c := auxIntToInt32(v.AuxInt)
1492 if v_0.Op != OpAMD64LEAL {
1493 break
1494 }
1495 d := auxIntToInt32(v_0.AuxInt)
1496 s := auxToSym(v_0.Aux)
1497 x := v_0.Args[0]
1498 if !(is32Bit(int64(c) + int64(d))) {
1499 break
1500 }
1501 v.reset(OpAMD64LEAL)
1502 v.AuxInt = int32ToAuxInt(c + d)
1503 v.Aux = symToAux(s)
1504 v.AddArg(x)
1505 return true
1506 }
1507
1508
1509
1510 for {
1511 c := auxIntToInt32(v.AuxInt)
1512 if v_0.Op != OpAMD64LEAL1 {
1513 break
1514 }
1515 d := auxIntToInt32(v_0.AuxInt)
1516 s := auxToSym(v_0.Aux)
1517 y := v_0.Args[1]
1518 x := v_0.Args[0]
1519 if !(is32Bit(int64(c) + int64(d))) {
1520 break
1521 }
1522 v.reset(OpAMD64LEAL1)
1523 v.AuxInt = int32ToAuxInt(c + d)
1524 v.Aux = symToAux(s)
1525 v.AddArg2(x, y)
1526 return true
1527 }
1528
1529
1530
1531 for {
1532 c := auxIntToInt32(v.AuxInt)
1533 if v_0.Op != OpAMD64LEAL2 {
1534 break
1535 }
1536 d := auxIntToInt32(v_0.AuxInt)
1537 s := auxToSym(v_0.Aux)
1538 y := v_0.Args[1]
1539 x := v_0.Args[0]
1540 if !(is32Bit(int64(c) + int64(d))) {
1541 break
1542 }
1543 v.reset(OpAMD64LEAL2)
1544 v.AuxInt = int32ToAuxInt(c + d)
1545 v.Aux = symToAux(s)
1546 v.AddArg2(x, y)
1547 return true
1548 }
1549
1550
1551
1552 for {
1553 c := auxIntToInt32(v.AuxInt)
1554 if v_0.Op != OpAMD64LEAL4 {
1555 break
1556 }
1557 d := auxIntToInt32(v_0.AuxInt)
1558 s := auxToSym(v_0.Aux)
1559 y := v_0.Args[1]
1560 x := v_0.Args[0]
1561 if !(is32Bit(int64(c) + int64(d))) {
1562 break
1563 }
1564 v.reset(OpAMD64LEAL4)
1565 v.AuxInt = int32ToAuxInt(c + d)
1566 v.Aux = symToAux(s)
1567 v.AddArg2(x, y)
1568 return true
1569 }
1570
1571
1572
1573 for {
1574 c := auxIntToInt32(v.AuxInt)
1575 if v_0.Op != OpAMD64LEAL8 {
1576 break
1577 }
1578 d := auxIntToInt32(v_0.AuxInt)
1579 s := auxToSym(v_0.Aux)
1580 y := v_0.Args[1]
1581 x := v_0.Args[0]
1582 if !(is32Bit(int64(c) + int64(d))) {
1583 break
1584 }
1585 v.reset(OpAMD64LEAL8)
1586 v.AuxInt = int32ToAuxInt(c + d)
1587 v.Aux = symToAux(s)
1588 v.AddArg2(x, y)
1589 return true
1590 }
1591
1592
1593
1594 for {
1595 c := auxIntToInt32(v.AuxInt)
1596 x := v_0
1597 if !(c == 0) {
1598 break
1599 }
1600 v.copyOf(x)
1601 return true
1602 }
1603
1604
1605 for {
1606 c := auxIntToInt32(v.AuxInt)
1607 if v_0.Op != OpAMD64MOVLconst {
1608 break
1609 }
1610 d := auxIntToInt32(v_0.AuxInt)
1611 v.reset(OpAMD64MOVLconst)
1612 v.AuxInt = int32ToAuxInt(c + d)
1613 return true
1614 }
1615
1616
1617 for {
1618 c := auxIntToInt32(v.AuxInt)
1619 if v_0.Op != OpAMD64ADDLconst {
1620 break
1621 }
1622 d := auxIntToInt32(v_0.AuxInt)
1623 x := v_0.Args[0]
1624 v.reset(OpAMD64ADDLconst)
1625 v.AuxInt = int32ToAuxInt(c + d)
1626 v.AddArg(x)
1627 return true
1628 }
1629
1630
1631 for {
1632 off := auxIntToInt32(v.AuxInt)
1633 x := v_0
1634 if x.Op != OpSP {
1635 break
1636 }
1637 v.reset(OpAMD64LEAL)
1638 v.AuxInt = int32ToAuxInt(off)
1639 v.AddArg(x)
1640 return true
1641 }
1642 return false
1643 }
1644 func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool {
1645 v_1 := v.Args[1]
1646 v_0 := v.Args[0]
1647
1648
1649
1650 for {
1651 valoff1 := auxIntToValAndOff(v.AuxInt)
1652 sym := auxToSym(v.Aux)
1653 if v_0.Op != OpAMD64ADDQconst {
1654 break
1655 }
1656 off2 := auxIntToInt32(v_0.AuxInt)
1657 base := v_0.Args[0]
1658 mem := v_1
1659 if !(ValAndOff(valoff1).canAdd32(off2)) {
1660 break
1661 }
1662 v.reset(OpAMD64ADDLconstmodify)
1663 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
1664 v.Aux = symToAux(sym)
1665 v.AddArg2(base, mem)
1666 return true
1667 }
1668
1669
1670
1671 for {
1672 valoff1 := auxIntToValAndOff(v.AuxInt)
1673 sym1 := auxToSym(v.Aux)
1674 if v_0.Op != OpAMD64LEAQ {
1675 break
1676 }
1677 off2 := auxIntToInt32(v_0.AuxInt)
1678 sym2 := auxToSym(v_0.Aux)
1679 base := v_0.Args[0]
1680 mem := v_1
1681 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
1682 break
1683 }
1684 v.reset(OpAMD64ADDLconstmodify)
1685 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
1686 v.Aux = symToAux(mergeSym(sym1, sym2))
1687 v.AddArg2(base, mem)
1688 return true
1689 }
1690 return false
1691 }
1692 func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
1693 v_2 := v.Args[2]
1694 v_1 := v.Args[1]
1695 v_0 := v.Args[0]
1696 b := v.Block
1697 typ := &b.Func.Config.Types
1698
1699
1700
1701 for {
1702 off1 := auxIntToInt32(v.AuxInt)
1703 sym := auxToSym(v.Aux)
1704 val := v_0
1705 if v_1.Op != OpAMD64ADDQconst {
1706 break
1707 }
1708 off2 := auxIntToInt32(v_1.AuxInt)
1709 base := v_1.Args[0]
1710 mem := v_2
1711 if !(is32Bit(int64(off1) + int64(off2))) {
1712 break
1713 }
1714 v.reset(OpAMD64ADDLload)
1715 v.AuxInt = int32ToAuxInt(off1 + off2)
1716 v.Aux = symToAux(sym)
1717 v.AddArg3(val, base, mem)
1718 return true
1719 }
1720
1721
1722
1723 for {
1724 off1 := auxIntToInt32(v.AuxInt)
1725 sym1 := auxToSym(v.Aux)
1726 val := v_0
1727 if v_1.Op != OpAMD64LEAQ {
1728 break
1729 }
1730 off2 := auxIntToInt32(v_1.AuxInt)
1731 sym2 := auxToSym(v_1.Aux)
1732 base := v_1.Args[0]
1733 mem := v_2
1734 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
1735 break
1736 }
1737 v.reset(OpAMD64ADDLload)
1738 v.AuxInt = int32ToAuxInt(off1 + off2)
1739 v.Aux = symToAux(mergeSym(sym1, sym2))
1740 v.AddArg3(val, base, mem)
1741 return true
1742 }
1743
1744
1745 for {
1746 off := auxIntToInt32(v.AuxInt)
1747 sym := auxToSym(v.Aux)
1748 x := v_0
1749 ptr := v_1
1750 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
1751 break
1752 }
1753 y := v_2.Args[1]
1754 if ptr != v_2.Args[0] {
1755 break
1756 }
1757 v.reset(OpAMD64ADDL)
1758 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
1759 v0.AddArg(y)
1760 v.AddArg2(x, v0)
1761 return true
1762 }
1763 return false
1764 }
1765 func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
1766 v_2 := v.Args[2]
1767 v_1 := v.Args[1]
1768 v_0 := v.Args[0]
1769
1770
1771
1772 for {
1773 off1 := auxIntToInt32(v.AuxInt)
1774 sym := auxToSym(v.Aux)
1775 if v_0.Op != OpAMD64ADDQconst {
1776 break
1777 }
1778 off2 := auxIntToInt32(v_0.AuxInt)
1779 base := v_0.Args[0]
1780 val := v_1
1781 mem := v_2
1782 if !(is32Bit(int64(off1) + int64(off2))) {
1783 break
1784 }
1785 v.reset(OpAMD64ADDLmodify)
1786 v.AuxInt = int32ToAuxInt(off1 + off2)
1787 v.Aux = symToAux(sym)
1788 v.AddArg3(base, val, mem)
1789 return true
1790 }
1791
1792
1793
1794 for {
1795 off1 := auxIntToInt32(v.AuxInt)
1796 sym1 := auxToSym(v.Aux)
1797 if v_0.Op != OpAMD64LEAQ {
1798 break
1799 }
1800 off2 := auxIntToInt32(v_0.AuxInt)
1801 sym2 := auxToSym(v_0.Aux)
1802 base := v_0.Args[0]
1803 val := v_1
1804 mem := v_2
1805 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
1806 break
1807 }
1808 v.reset(OpAMD64ADDLmodify)
1809 v.AuxInt = int32ToAuxInt(off1 + off2)
1810 v.Aux = symToAux(mergeSym(sym1, sym2))
1811 v.AddArg3(base, val, mem)
1812 return true
1813 }
1814 return false
1815 }
1816 func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
1817 v_1 := v.Args[1]
1818 v_0 := v.Args[0]
1819
1820
1821 for {
1822 if v_0.Op != OpAMD64SHRQconst || auxIntToInt8(v_0.AuxInt) != 1 {
1823 break
1824 }
1825 x := v_0.Args[0]
1826 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 1 || x != v_1.Args[0] {
1827 break
1828 }
1829 v.reset(OpAMD64ANDQconst)
1830 v.AuxInt = int32ToAuxInt(-2)
1831 v.AddArg(x)
1832 return true
1833 }
1834
1835
1836
1837 for {
1838 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1839 x := v_0
1840 if v_1.Op != OpAMD64MOVQconst {
1841 continue
1842 }
1843 t := v_1.Type
1844 c := auxIntToInt64(v_1.AuxInt)
1845 if !(is32Bit(c) && !t.IsPtr()) {
1846 continue
1847 }
1848 v.reset(OpAMD64ADDQconst)
1849 v.AuxInt = int32ToAuxInt(int32(c))
1850 v.AddArg(x)
1851 return true
1852 }
1853 break
1854 }
1855
1856
1857 for {
1858 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1859 x := v_0
1860 if v_1.Op != OpAMD64MOVLconst {
1861 continue
1862 }
1863 c := auxIntToInt32(v_1.AuxInt)
1864 v.reset(OpAMD64ADDQconst)
1865 v.AuxInt = int32ToAuxInt(c)
1866 v.AddArg(x)
1867 return true
1868 }
1869 break
1870 }
1871
1872
1873 for {
1874 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1875 x := v_0
1876 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
1877 continue
1878 }
1879 y := v_1.Args[0]
1880 v.reset(OpAMD64LEAQ8)
1881 v.AddArg2(x, y)
1882 return true
1883 }
1884 break
1885 }
1886
1887
1888 for {
1889 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1890 x := v_0
1891 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
1892 continue
1893 }
1894 y := v_1.Args[0]
1895 v.reset(OpAMD64LEAQ4)
1896 v.AddArg2(x, y)
1897 return true
1898 }
1899 break
1900 }
1901
1902
1903 for {
1904 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1905 x := v_0
1906 if v_1.Op != OpAMD64ADDQ {
1907 continue
1908 }
1909 y := v_1.Args[1]
1910 if y != v_1.Args[0] {
1911 continue
1912 }
1913 v.reset(OpAMD64LEAQ2)
1914 v.AddArg2(x, y)
1915 return true
1916 }
1917 break
1918 }
1919
1920
1921 for {
1922 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1923 x := v_0
1924 if v_1.Op != OpAMD64ADDQ {
1925 continue
1926 }
1927 _ = v_1.Args[1]
1928 v_1_0 := v_1.Args[0]
1929 v_1_1 := v_1.Args[1]
1930 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
1931 if x != v_1_0 {
1932 continue
1933 }
1934 y := v_1_1
1935 v.reset(OpAMD64LEAQ2)
1936 v.AddArg2(y, x)
1937 return true
1938 }
1939 }
1940 break
1941 }
1942
1943
1944 for {
1945 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1946 if v_0.Op != OpAMD64ADDQconst {
1947 continue
1948 }
1949 c := auxIntToInt32(v_0.AuxInt)
1950 x := v_0.Args[0]
1951 y := v_1
1952 v.reset(OpAMD64LEAQ1)
1953 v.AuxInt = int32ToAuxInt(c)
1954 v.AddArg2(x, y)
1955 return true
1956 }
1957 break
1958 }
1959
1960
1961
1962 for {
1963 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1964 x := v_0
1965 if v_1.Op != OpAMD64LEAQ {
1966 continue
1967 }
1968 c := auxIntToInt32(v_1.AuxInt)
1969 s := auxToSym(v_1.Aux)
1970 y := v_1.Args[0]
1971 if !(x.Op != OpSB && y.Op != OpSB) {
1972 continue
1973 }
1974 v.reset(OpAMD64LEAQ1)
1975 v.AuxInt = int32ToAuxInt(c)
1976 v.Aux = symToAux(s)
1977 v.AddArg2(x, y)
1978 return true
1979 }
1980 break
1981 }
1982
1983
1984 for {
1985 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1986 x := v_0
1987 if v_1.Op != OpAMD64NEGQ {
1988 continue
1989 }
1990 y := v_1.Args[0]
1991 v.reset(OpAMD64SUBQ)
1992 v.AddArg2(x, y)
1993 return true
1994 }
1995 break
1996 }
1997
1998
1999
2000 for {
2001 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2002 x := v_0
2003 l := v_1
2004 if l.Op != OpAMD64MOVQload {
2005 continue
2006 }
2007 off := auxIntToInt32(l.AuxInt)
2008 sym := auxToSym(l.Aux)
2009 mem := l.Args[1]
2010 ptr := l.Args[0]
2011 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2012 continue
2013 }
2014 v.reset(OpAMD64ADDQload)
2015 v.AuxInt = int32ToAuxInt(off)
2016 v.Aux = symToAux(sym)
2017 v.AddArg3(x, ptr, mem)
2018 return true
2019 }
2020 break
2021 }
2022 return false
2023 }
2024 func rewriteValueAMD64_OpAMD64ADDQcarry(v *Value) bool {
2025 v_1 := v.Args[1]
2026 v_0 := v.Args[0]
2027
2028
2029
2030 for {
2031 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2032 x := v_0
2033 if v_1.Op != OpAMD64MOVQconst {
2034 continue
2035 }
2036 c := auxIntToInt64(v_1.AuxInt)
2037 if !(is32Bit(c)) {
2038 continue
2039 }
2040 v.reset(OpAMD64ADDQconstcarry)
2041 v.AuxInt = int32ToAuxInt(int32(c))
2042 v.AddArg(x)
2043 return true
2044 }
2045 break
2046 }
2047 return false
2048 }
2049 func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
2050 v_0 := v.Args[0]
2051
2052
2053 for {
2054 c := auxIntToInt32(v.AuxInt)
2055 if v_0.Op != OpAMD64ADDQ {
2056 break
2057 }
2058 y := v_0.Args[1]
2059 x := v_0.Args[0]
2060 v.reset(OpAMD64LEAQ1)
2061 v.AuxInt = int32ToAuxInt(c)
2062 v.AddArg2(x, y)
2063 return true
2064 }
2065
2066
2067 for {
2068 c := auxIntToInt32(v.AuxInt)
2069 if v_0.Op != OpAMD64ADDQ {
2070 break
2071 }
2072 x := v_0.Args[1]
2073 if x != v_0.Args[0] {
2074 break
2075 }
2076 v.reset(OpAMD64LEAQ1)
2077 v.AuxInt = int32ToAuxInt(c)
2078 v.AddArg2(x, x)
2079 return true
2080 }
2081
2082
2083
2084 for {
2085 c := auxIntToInt32(v.AuxInt)
2086 if v_0.Op != OpAMD64LEAQ {
2087 break
2088 }
2089 d := auxIntToInt32(v_0.AuxInt)
2090 s := auxToSym(v_0.Aux)
2091 x := v_0.Args[0]
2092 if !(is32Bit(int64(c) + int64(d))) {
2093 break
2094 }
2095 v.reset(OpAMD64LEAQ)
2096 v.AuxInt = int32ToAuxInt(c + d)
2097 v.Aux = symToAux(s)
2098 v.AddArg(x)
2099 return true
2100 }
2101
2102
2103
2104 for {
2105 c := auxIntToInt32(v.AuxInt)
2106 if v_0.Op != OpAMD64LEAQ1 {
2107 break
2108 }
2109 d := auxIntToInt32(v_0.AuxInt)
2110 s := auxToSym(v_0.Aux)
2111 y := v_0.Args[1]
2112 x := v_0.Args[0]
2113 if !(is32Bit(int64(c) + int64(d))) {
2114 break
2115 }
2116 v.reset(OpAMD64LEAQ1)
2117 v.AuxInt = int32ToAuxInt(c + d)
2118 v.Aux = symToAux(s)
2119 v.AddArg2(x, y)
2120 return true
2121 }
2122
2123
2124
2125 for {
2126 c := auxIntToInt32(v.AuxInt)
2127 if v_0.Op != OpAMD64LEAQ2 {
2128 break
2129 }
2130 d := auxIntToInt32(v_0.AuxInt)
2131 s := auxToSym(v_0.Aux)
2132 y := v_0.Args[1]
2133 x := v_0.Args[0]
2134 if !(is32Bit(int64(c) + int64(d))) {
2135 break
2136 }
2137 v.reset(OpAMD64LEAQ2)
2138 v.AuxInt = int32ToAuxInt(c + d)
2139 v.Aux = symToAux(s)
2140 v.AddArg2(x, y)
2141 return true
2142 }
2143
2144
2145
2146 for {
2147 c := auxIntToInt32(v.AuxInt)
2148 if v_0.Op != OpAMD64LEAQ4 {
2149 break
2150 }
2151 d := auxIntToInt32(v_0.AuxInt)
2152 s := auxToSym(v_0.Aux)
2153 y := v_0.Args[1]
2154 x := v_0.Args[0]
2155 if !(is32Bit(int64(c) + int64(d))) {
2156 break
2157 }
2158 v.reset(OpAMD64LEAQ4)
2159 v.AuxInt = int32ToAuxInt(c + d)
2160 v.Aux = symToAux(s)
2161 v.AddArg2(x, y)
2162 return true
2163 }
2164
2165
2166
2167 for {
2168 c := auxIntToInt32(v.AuxInt)
2169 if v_0.Op != OpAMD64LEAQ8 {
2170 break
2171 }
2172 d := auxIntToInt32(v_0.AuxInt)
2173 s := auxToSym(v_0.Aux)
2174 y := v_0.Args[1]
2175 x := v_0.Args[0]
2176 if !(is32Bit(int64(c) + int64(d))) {
2177 break
2178 }
2179 v.reset(OpAMD64LEAQ8)
2180 v.AuxInt = int32ToAuxInt(c + d)
2181 v.Aux = symToAux(s)
2182 v.AddArg2(x, y)
2183 return true
2184 }
2185
2186
2187 for {
2188 if auxIntToInt32(v.AuxInt) != 0 {
2189 break
2190 }
2191 x := v_0
2192 v.copyOf(x)
2193 return true
2194 }
2195
2196
2197 for {
2198 c := auxIntToInt32(v.AuxInt)
2199 if v_0.Op != OpAMD64MOVQconst {
2200 break
2201 }
2202 d := auxIntToInt64(v_0.AuxInt)
2203 v.reset(OpAMD64MOVQconst)
2204 v.AuxInt = int64ToAuxInt(int64(c) + d)
2205 return true
2206 }
2207
2208
2209
2210 for {
2211 c := auxIntToInt32(v.AuxInt)
2212 if v_0.Op != OpAMD64ADDQconst {
2213 break
2214 }
2215 d := auxIntToInt32(v_0.AuxInt)
2216 x := v_0.Args[0]
2217 if !(is32Bit(int64(c) + int64(d))) {
2218 break
2219 }
2220 v.reset(OpAMD64ADDQconst)
2221 v.AuxInt = int32ToAuxInt(c + d)
2222 v.AddArg(x)
2223 return true
2224 }
2225
2226
2227 for {
2228 off := auxIntToInt32(v.AuxInt)
2229 x := v_0
2230 if x.Op != OpSP {
2231 break
2232 }
2233 v.reset(OpAMD64LEAQ)
2234 v.AuxInt = int32ToAuxInt(off)
2235 v.AddArg(x)
2236 return true
2237 }
2238 return false
2239 }
2240 func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool {
2241 v_1 := v.Args[1]
2242 v_0 := v.Args[0]
2243
2244
2245
2246 for {
2247 valoff1 := auxIntToValAndOff(v.AuxInt)
2248 sym := auxToSym(v.Aux)
2249 if v_0.Op != OpAMD64ADDQconst {
2250 break
2251 }
2252 off2 := auxIntToInt32(v_0.AuxInt)
2253 base := v_0.Args[0]
2254 mem := v_1
2255 if !(ValAndOff(valoff1).canAdd32(off2)) {
2256 break
2257 }
2258 v.reset(OpAMD64ADDQconstmodify)
2259 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2260 v.Aux = symToAux(sym)
2261 v.AddArg2(base, mem)
2262 return true
2263 }
2264
2265
2266
2267 for {
2268 valoff1 := auxIntToValAndOff(v.AuxInt)
2269 sym1 := auxToSym(v.Aux)
2270 if v_0.Op != OpAMD64LEAQ {
2271 break
2272 }
2273 off2 := auxIntToInt32(v_0.AuxInt)
2274 sym2 := auxToSym(v_0.Aux)
2275 base := v_0.Args[0]
2276 mem := v_1
2277 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
2278 break
2279 }
2280 v.reset(OpAMD64ADDQconstmodify)
2281 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2282 v.Aux = symToAux(mergeSym(sym1, sym2))
2283 v.AddArg2(base, mem)
2284 return true
2285 }
2286 return false
2287 }
2288 func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
2289 v_2 := v.Args[2]
2290 v_1 := v.Args[1]
2291 v_0 := v.Args[0]
2292 b := v.Block
2293 typ := &b.Func.Config.Types
2294
2295
2296
2297 for {
2298 off1 := auxIntToInt32(v.AuxInt)
2299 sym := auxToSym(v.Aux)
2300 val := v_0
2301 if v_1.Op != OpAMD64ADDQconst {
2302 break
2303 }
2304 off2 := auxIntToInt32(v_1.AuxInt)
2305 base := v_1.Args[0]
2306 mem := v_2
2307 if !(is32Bit(int64(off1) + int64(off2))) {
2308 break
2309 }
2310 v.reset(OpAMD64ADDQload)
2311 v.AuxInt = int32ToAuxInt(off1 + off2)
2312 v.Aux = symToAux(sym)
2313 v.AddArg3(val, base, mem)
2314 return true
2315 }
2316
2317
2318
2319 for {
2320 off1 := auxIntToInt32(v.AuxInt)
2321 sym1 := auxToSym(v.Aux)
2322 val := v_0
2323 if v_1.Op != OpAMD64LEAQ {
2324 break
2325 }
2326 off2 := auxIntToInt32(v_1.AuxInt)
2327 sym2 := auxToSym(v_1.Aux)
2328 base := v_1.Args[0]
2329 mem := v_2
2330 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2331 break
2332 }
2333 v.reset(OpAMD64ADDQload)
2334 v.AuxInt = int32ToAuxInt(off1 + off2)
2335 v.Aux = symToAux(mergeSym(sym1, sym2))
2336 v.AddArg3(val, base, mem)
2337 return true
2338 }
2339
2340
2341 for {
2342 off := auxIntToInt32(v.AuxInt)
2343 sym := auxToSym(v.Aux)
2344 x := v_0
2345 ptr := v_1
2346 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2347 break
2348 }
2349 y := v_2.Args[1]
2350 if ptr != v_2.Args[0] {
2351 break
2352 }
2353 v.reset(OpAMD64ADDQ)
2354 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
2355 v0.AddArg(y)
2356 v.AddArg2(x, v0)
2357 return true
2358 }
2359 return false
2360 }
2361 func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool {
2362 v_2 := v.Args[2]
2363 v_1 := v.Args[1]
2364 v_0 := v.Args[0]
2365
2366
2367
2368 for {
2369 off1 := auxIntToInt32(v.AuxInt)
2370 sym := auxToSym(v.Aux)
2371 if v_0.Op != OpAMD64ADDQconst {
2372 break
2373 }
2374 off2 := auxIntToInt32(v_0.AuxInt)
2375 base := v_0.Args[0]
2376 val := v_1
2377 mem := v_2
2378 if !(is32Bit(int64(off1) + int64(off2))) {
2379 break
2380 }
2381 v.reset(OpAMD64ADDQmodify)
2382 v.AuxInt = int32ToAuxInt(off1 + off2)
2383 v.Aux = symToAux(sym)
2384 v.AddArg3(base, val, mem)
2385 return true
2386 }
2387
2388
2389
2390 for {
2391 off1 := auxIntToInt32(v.AuxInt)
2392 sym1 := auxToSym(v.Aux)
2393 if v_0.Op != OpAMD64LEAQ {
2394 break
2395 }
2396 off2 := auxIntToInt32(v_0.AuxInt)
2397 sym2 := auxToSym(v_0.Aux)
2398 base := v_0.Args[0]
2399 val := v_1
2400 mem := v_2
2401 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2402 break
2403 }
2404 v.reset(OpAMD64ADDQmodify)
2405 v.AuxInt = int32ToAuxInt(off1 + off2)
2406 v.Aux = symToAux(mergeSym(sym1, sym2))
2407 v.AddArg3(base, val, mem)
2408 return true
2409 }
2410 return false
2411 }
2412 func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool {
2413 v_1 := v.Args[1]
2414 v_0 := v.Args[0]
2415
2416
2417
2418 for {
2419 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2420 x := v_0
2421 l := v_1
2422 if l.Op != OpAMD64MOVSDload {
2423 continue
2424 }
2425 off := auxIntToInt32(l.AuxInt)
2426 sym := auxToSym(l.Aux)
2427 mem := l.Args[1]
2428 ptr := l.Args[0]
2429 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2430 continue
2431 }
2432 v.reset(OpAMD64ADDSDload)
2433 v.AuxInt = int32ToAuxInt(off)
2434 v.Aux = symToAux(sym)
2435 v.AddArg3(x, ptr, mem)
2436 return true
2437 }
2438 break
2439 }
2440
2441
2442
2443 for {
2444 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2445 if v_0.Op != OpAMD64MULSD {
2446 continue
2447 }
2448 y := v_0.Args[1]
2449 x := v_0.Args[0]
2450 z := v_1
2451 if !(buildcfg.GOAMD64 >= 3 && z.Block.Func.useFMA(v)) {
2452 continue
2453 }
2454 v.reset(OpAMD64VFMADD231SD)
2455 v.AddArg3(z, x, y)
2456 return true
2457 }
2458 break
2459 }
2460 return false
2461 }
2462 func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
2463 v_2 := v.Args[2]
2464 v_1 := v.Args[1]
2465 v_0 := v.Args[0]
2466 b := v.Block
2467 typ := &b.Func.Config.Types
2468
2469
2470
2471 for {
2472 off1 := auxIntToInt32(v.AuxInt)
2473 sym := auxToSym(v.Aux)
2474 val := v_0
2475 if v_1.Op != OpAMD64ADDQconst {
2476 break
2477 }
2478 off2 := auxIntToInt32(v_1.AuxInt)
2479 base := v_1.Args[0]
2480 mem := v_2
2481 if !(is32Bit(int64(off1) + int64(off2))) {
2482 break
2483 }
2484 v.reset(OpAMD64ADDSDload)
2485 v.AuxInt = int32ToAuxInt(off1 + off2)
2486 v.Aux = symToAux(sym)
2487 v.AddArg3(val, base, mem)
2488 return true
2489 }
2490
2491
2492
2493 for {
2494 off1 := auxIntToInt32(v.AuxInt)
2495 sym1 := auxToSym(v.Aux)
2496 val := v_0
2497 if v_1.Op != OpAMD64LEAQ {
2498 break
2499 }
2500 off2 := auxIntToInt32(v_1.AuxInt)
2501 sym2 := auxToSym(v_1.Aux)
2502 base := v_1.Args[0]
2503 mem := v_2
2504 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2505 break
2506 }
2507 v.reset(OpAMD64ADDSDload)
2508 v.AuxInt = int32ToAuxInt(off1 + off2)
2509 v.Aux = symToAux(mergeSym(sym1, sym2))
2510 v.AddArg3(val, base, mem)
2511 return true
2512 }
2513
2514
2515 for {
2516 off := auxIntToInt32(v.AuxInt)
2517 sym := auxToSym(v.Aux)
2518 x := v_0
2519 ptr := v_1
2520 if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2521 break
2522 }
2523 y := v_2.Args[1]
2524 if ptr != v_2.Args[0] {
2525 break
2526 }
2527 v.reset(OpAMD64ADDSD)
2528 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
2529 v0.AddArg(y)
2530 v.AddArg2(x, v0)
2531 return true
2532 }
2533 return false
2534 }
2535 func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool {
2536 v_1 := v.Args[1]
2537 v_0 := v.Args[0]
2538
2539
2540
2541 for {
2542 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2543 x := v_0
2544 l := v_1
2545 if l.Op != OpAMD64MOVSSload {
2546 continue
2547 }
2548 off := auxIntToInt32(l.AuxInt)
2549 sym := auxToSym(l.Aux)
2550 mem := l.Args[1]
2551 ptr := l.Args[0]
2552 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2553 continue
2554 }
2555 v.reset(OpAMD64ADDSSload)
2556 v.AuxInt = int32ToAuxInt(off)
2557 v.Aux = symToAux(sym)
2558 v.AddArg3(x, ptr, mem)
2559 return true
2560 }
2561 break
2562 }
2563
2564
2565
2566 for {
2567 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2568 if v_0.Op != OpAMD64MULSS {
2569 continue
2570 }
2571 y := v_0.Args[1]
2572 x := v_0.Args[0]
2573 z := v_1
2574 if !(buildcfg.GOAMD64 >= 3 && z.Block.Func.useFMA(v)) {
2575 continue
2576 }
2577 v.reset(OpAMD64VFMADD231SS)
2578 v.AddArg3(z, x, y)
2579 return true
2580 }
2581 break
2582 }
2583 return false
2584 }
2585 func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
2586 v_2 := v.Args[2]
2587 v_1 := v.Args[1]
2588 v_0 := v.Args[0]
2589 b := v.Block
2590 typ := &b.Func.Config.Types
2591
2592
2593
2594 for {
2595 off1 := auxIntToInt32(v.AuxInt)
2596 sym := auxToSym(v.Aux)
2597 val := v_0
2598 if v_1.Op != OpAMD64ADDQconst {
2599 break
2600 }
2601 off2 := auxIntToInt32(v_1.AuxInt)
2602 base := v_1.Args[0]
2603 mem := v_2
2604 if !(is32Bit(int64(off1) + int64(off2))) {
2605 break
2606 }
2607 v.reset(OpAMD64ADDSSload)
2608 v.AuxInt = int32ToAuxInt(off1 + off2)
2609 v.Aux = symToAux(sym)
2610 v.AddArg3(val, base, mem)
2611 return true
2612 }
2613
2614
2615
2616 for {
2617 off1 := auxIntToInt32(v.AuxInt)
2618 sym1 := auxToSym(v.Aux)
2619 val := v_0
2620 if v_1.Op != OpAMD64LEAQ {
2621 break
2622 }
2623 off2 := auxIntToInt32(v_1.AuxInt)
2624 sym2 := auxToSym(v_1.Aux)
2625 base := v_1.Args[0]
2626 mem := v_2
2627 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2628 break
2629 }
2630 v.reset(OpAMD64ADDSSload)
2631 v.AuxInt = int32ToAuxInt(off1 + off2)
2632 v.Aux = symToAux(mergeSym(sym1, sym2))
2633 v.AddArg3(val, base, mem)
2634 return true
2635 }
2636
2637
2638 for {
2639 off := auxIntToInt32(v.AuxInt)
2640 sym := auxToSym(v.Aux)
2641 x := v_0
2642 ptr := v_1
2643 if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2644 break
2645 }
2646 y := v_2.Args[1]
2647 if ptr != v_2.Args[0] {
2648 break
2649 }
2650 v.reset(OpAMD64ADDSS)
2651 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
2652 v0.AddArg(y)
2653 v.AddArg2(x, v0)
2654 return true
2655 }
2656 return false
2657 }
2658 func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
2659 v_1 := v.Args[1]
2660 v_0 := v.Args[0]
2661 b := v.Block
2662 typ := &b.Func.Config.Types
2663
2664
2665 for {
2666 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2667 if v_0.Op != OpAMD64NOTL {
2668 continue
2669 }
2670 v_0_0 := v_0.Args[0]
2671 if v_0_0.Op != OpAMD64SHLL {
2672 continue
2673 }
2674 y := v_0_0.Args[1]
2675 v_0_0_0 := v_0_0.Args[0]
2676 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
2677 continue
2678 }
2679 x := v_1
2680 v.reset(OpAMD64BTRL)
2681 v.AddArg2(x, y)
2682 return true
2683 }
2684 break
2685 }
2686
2687
2688 for {
2689 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2690 x := v_0
2691 if v_1.Op != OpAMD64MOVLconst {
2692 continue
2693 }
2694 c := auxIntToInt32(v_1.AuxInt)
2695 v.reset(OpAMD64ANDLconst)
2696 v.AuxInt = int32ToAuxInt(c)
2697 v.AddArg(x)
2698 return true
2699 }
2700 break
2701 }
2702
2703
2704 for {
2705 x := v_0
2706 if x != v_1 {
2707 break
2708 }
2709 v.copyOf(x)
2710 return true
2711 }
2712
2713
2714
2715 for {
2716 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2717 x := v_0
2718 l := v_1
2719 if l.Op != OpAMD64MOVLload {
2720 continue
2721 }
2722 off := auxIntToInt32(l.AuxInt)
2723 sym := auxToSym(l.Aux)
2724 mem := l.Args[1]
2725 ptr := l.Args[0]
2726 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2727 continue
2728 }
2729 v.reset(OpAMD64ANDLload)
2730 v.AuxInt = int32ToAuxInt(off)
2731 v.Aux = symToAux(sym)
2732 v.AddArg3(x, ptr, mem)
2733 return true
2734 }
2735 break
2736 }
2737
2738
2739
2740 for {
2741 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2742 x := v_0
2743 if v_1.Op != OpAMD64NOTL {
2744 continue
2745 }
2746 y := v_1.Args[0]
2747 if !(buildcfg.GOAMD64 >= 3) {
2748 continue
2749 }
2750 v.reset(OpAMD64ANDNL)
2751 v.AddArg2(x, y)
2752 return true
2753 }
2754 break
2755 }
2756
2757
2758
2759 for {
2760 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2761 x := v_0
2762 if v_1.Op != OpAMD64NEGL || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
2763 continue
2764 }
2765 v.reset(OpAMD64BLSIL)
2766 v.AddArg(x)
2767 return true
2768 }
2769 break
2770 }
2771
2772
2773
2774 for {
2775 t := v.Type
2776 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2777 x := v_0
2778 if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
2779 continue
2780 }
2781 v.reset(OpSelect0)
2782 v.Type = t
2783 v0 := b.NewValue0(v.Pos, OpAMD64BLSRL, types.NewTuple(typ.UInt32, types.TypeFlags))
2784 v0.AddArg(x)
2785 v.AddArg(v0)
2786 return true
2787 }
2788 break
2789 }
2790 return false
2791 }
2792 func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
2793 v_0 := v.Args[0]
2794
2795
2796 for {
2797 c := auxIntToInt32(v.AuxInt)
2798 if v_0.Op != OpAMD64ANDLconst {
2799 break
2800 }
2801 d := auxIntToInt32(v_0.AuxInt)
2802 x := v_0.Args[0]
2803 v.reset(OpAMD64ANDLconst)
2804 v.AuxInt = int32ToAuxInt(c & d)
2805 v.AddArg(x)
2806 return true
2807 }
2808
2809
2810 for {
2811 if auxIntToInt32(v.AuxInt) != 0xFF {
2812 break
2813 }
2814 x := v_0
2815 v.reset(OpAMD64MOVBQZX)
2816 v.AddArg(x)
2817 return true
2818 }
2819
2820
2821 for {
2822 if auxIntToInt32(v.AuxInt) != 0xFFFF {
2823 break
2824 }
2825 x := v_0
2826 v.reset(OpAMD64MOVWQZX)
2827 v.AddArg(x)
2828 return true
2829 }
2830
2831
2832
2833 for {
2834 c := auxIntToInt32(v.AuxInt)
2835 if !(c == 0) {
2836 break
2837 }
2838 v.reset(OpAMD64MOVLconst)
2839 v.AuxInt = int32ToAuxInt(0)
2840 return true
2841 }
2842
2843
2844
2845 for {
2846 c := auxIntToInt32(v.AuxInt)
2847 x := v_0
2848 if !(c == -1) {
2849 break
2850 }
2851 v.copyOf(x)
2852 return true
2853 }
2854
2855
2856 for {
2857 c := auxIntToInt32(v.AuxInt)
2858 if v_0.Op != OpAMD64MOVLconst {
2859 break
2860 }
2861 d := auxIntToInt32(v_0.AuxInt)
2862 v.reset(OpAMD64MOVLconst)
2863 v.AuxInt = int32ToAuxInt(c & d)
2864 return true
2865 }
2866 return false
2867 }
2868 func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool {
2869 v_1 := v.Args[1]
2870 v_0 := v.Args[0]
2871
2872
2873
2874 for {
2875 valoff1 := auxIntToValAndOff(v.AuxInt)
2876 sym := auxToSym(v.Aux)
2877 if v_0.Op != OpAMD64ADDQconst {
2878 break
2879 }
2880 off2 := auxIntToInt32(v_0.AuxInt)
2881 base := v_0.Args[0]
2882 mem := v_1
2883 if !(ValAndOff(valoff1).canAdd32(off2)) {
2884 break
2885 }
2886 v.reset(OpAMD64ANDLconstmodify)
2887 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2888 v.Aux = symToAux(sym)
2889 v.AddArg2(base, mem)
2890 return true
2891 }
2892
2893
2894
2895 for {
2896 valoff1 := auxIntToValAndOff(v.AuxInt)
2897 sym1 := auxToSym(v.Aux)
2898 if v_0.Op != OpAMD64LEAQ {
2899 break
2900 }
2901 off2 := auxIntToInt32(v_0.AuxInt)
2902 sym2 := auxToSym(v_0.Aux)
2903 base := v_0.Args[0]
2904 mem := v_1
2905 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
2906 break
2907 }
2908 v.reset(OpAMD64ANDLconstmodify)
2909 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2910 v.Aux = symToAux(mergeSym(sym1, sym2))
2911 v.AddArg2(base, mem)
2912 return true
2913 }
2914 return false
2915 }
2916 func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
2917 v_2 := v.Args[2]
2918 v_1 := v.Args[1]
2919 v_0 := v.Args[0]
2920 b := v.Block
2921 typ := &b.Func.Config.Types
2922
2923
2924
2925 for {
2926 off1 := auxIntToInt32(v.AuxInt)
2927 sym := auxToSym(v.Aux)
2928 val := v_0
2929 if v_1.Op != OpAMD64ADDQconst {
2930 break
2931 }
2932 off2 := auxIntToInt32(v_1.AuxInt)
2933 base := v_1.Args[0]
2934 mem := v_2
2935 if !(is32Bit(int64(off1) + int64(off2))) {
2936 break
2937 }
2938 v.reset(OpAMD64ANDLload)
2939 v.AuxInt = int32ToAuxInt(off1 + off2)
2940 v.Aux = symToAux(sym)
2941 v.AddArg3(val, base, mem)
2942 return true
2943 }
2944
2945
2946
2947 for {
2948 off1 := auxIntToInt32(v.AuxInt)
2949 sym1 := auxToSym(v.Aux)
2950 val := v_0
2951 if v_1.Op != OpAMD64LEAQ {
2952 break
2953 }
2954 off2 := auxIntToInt32(v_1.AuxInt)
2955 sym2 := auxToSym(v_1.Aux)
2956 base := v_1.Args[0]
2957 mem := v_2
2958 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2959 break
2960 }
2961 v.reset(OpAMD64ANDLload)
2962 v.AuxInt = int32ToAuxInt(off1 + off2)
2963 v.Aux = symToAux(mergeSym(sym1, sym2))
2964 v.AddArg3(val, base, mem)
2965 return true
2966 }
2967
2968
2969 for {
2970 off := auxIntToInt32(v.AuxInt)
2971 sym := auxToSym(v.Aux)
2972 x := v_0
2973 ptr := v_1
2974 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2975 break
2976 }
2977 y := v_2.Args[1]
2978 if ptr != v_2.Args[0] {
2979 break
2980 }
2981 v.reset(OpAMD64ANDL)
2982 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
2983 v0.AddArg(y)
2984 v.AddArg2(x, v0)
2985 return true
2986 }
2987 return false
2988 }
2989 func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
2990 v_2 := v.Args[2]
2991 v_1 := v.Args[1]
2992 v_0 := v.Args[0]
2993
2994
2995
2996 for {
2997 off1 := auxIntToInt32(v.AuxInt)
2998 sym := auxToSym(v.Aux)
2999 if v_0.Op != OpAMD64ADDQconst {
3000 break
3001 }
3002 off2 := auxIntToInt32(v_0.AuxInt)
3003 base := v_0.Args[0]
3004 val := v_1
3005 mem := v_2
3006 if !(is32Bit(int64(off1) + int64(off2))) {
3007 break
3008 }
3009 v.reset(OpAMD64ANDLmodify)
3010 v.AuxInt = int32ToAuxInt(off1 + off2)
3011 v.Aux = symToAux(sym)
3012 v.AddArg3(base, val, mem)
3013 return true
3014 }
3015
3016
3017
3018 for {
3019 off1 := auxIntToInt32(v.AuxInt)
3020 sym1 := auxToSym(v.Aux)
3021 if v_0.Op != OpAMD64LEAQ {
3022 break
3023 }
3024 off2 := auxIntToInt32(v_0.AuxInt)
3025 sym2 := auxToSym(v_0.Aux)
3026 base := v_0.Args[0]
3027 val := v_1
3028 mem := v_2
3029 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3030 break
3031 }
3032 v.reset(OpAMD64ANDLmodify)
3033 v.AuxInt = int32ToAuxInt(off1 + off2)
3034 v.Aux = symToAux(mergeSym(sym1, sym2))
3035 v.AddArg3(base, val, mem)
3036 return true
3037 }
3038 return false
3039 }
3040 func rewriteValueAMD64_OpAMD64ANDNL(v *Value) bool {
3041 v_1 := v.Args[1]
3042 v_0 := v.Args[0]
3043
3044
3045 for {
3046 x := v_0
3047 if v_1.Op != OpAMD64SHLL {
3048 break
3049 }
3050 y := v_1.Args[1]
3051 v_1_0 := v_1.Args[0]
3052 if v_1_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0.AuxInt) != 1 {
3053 break
3054 }
3055 v.reset(OpAMD64BTRL)
3056 v.AddArg2(x, y)
3057 return true
3058 }
3059 return false
3060 }
3061 func rewriteValueAMD64_OpAMD64ANDNQ(v *Value) bool {
3062 v_1 := v.Args[1]
3063 v_0 := v.Args[0]
3064
3065
3066 for {
3067 x := v_0
3068 if v_1.Op != OpAMD64SHLQ {
3069 break
3070 }
3071 y := v_1.Args[1]
3072 v_1_0 := v_1.Args[0]
3073 if v_1_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0.AuxInt) != 1 {
3074 break
3075 }
3076 v.reset(OpAMD64BTRQ)
3077 v.AddArg2(x, y)
3078 return true
3079 }
3080 return false
3081 }
3082 func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
3083 v_1 := v.Args[1]
3084 v_0 := v.Args[0]
3085 b := v.Block
3086 typ := &b.Func.Config.Types
3087
3088
3089 for {
3090 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3091 if v_0.Op != OpAMD64NOTQ {
3092 continue
3093 }
3094 v_0_0 := v_0.Args[0]
3095 if v_0_0.Op != OpAMD64SHLQ {
3096 continue
3097 }
3098 y := v_0_0.Args[1]
3099 v_0_0_0 := v_0_0.Args[0]
3100 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
3101 continue
3102 }
3103 x := v_1
3104 v.reset(OpAMD64BTRQ)
3105 v.AddArg2(x, y)
3106 return true
3107 }
3108 break
3109 }
3110
3111
3112
3113 for {
3114 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3115 if v_0.Op != OpAMD64MOVQconst {
3116 continue
3117 }
3118 c := auxIntToInt64(v_0.AuxInt)
3119 x := v_1
3120 if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31) {
3121 continue
3122 }
3123 v.reset(OpAMD64BTRQconst)
3124 v.AuxInt = int8ToAuxInt(int8(log64(^c)))
3125 v.AddArg(x)
3126 return true
3127 }
3128 break
3129 }
3130
3131
3132
3133 for {
3134 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3135 x := v_0
3136 if v_1.Op != OpAMD64MOVQconst {
3137 continue
3138 }
3139 c := auxIntToInt64(v_1.AuxInt)
3140 if !(is32Bit(c)) {
3141 continue
3142 }
3143 v.reset(OpAMD64ANDQconst)
3144 v.AuxInt = int32ToAuxInt(int32(c))
3145 v.AddArg(x)
3146 return true
3147 }
3148 break
3149 }
3150
3151
3152 for {
3153 x := v_0
3154 if x != v_1 {
3155 break
3156 }
3157 v.copyOf(x)
3158 return true
3159 }
3160
3161
3162
3163 for {
3164 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3165 x := v_0
3166 l := v_1
3167 if l.Op != OpAMD64MOVQload {
3168 continue
3169 }
3170 off := auxIntToInt32(l.AuxInt)
3171 sym := auxToSym(l.Aux)
3172 mem := l.Args[1]
3173 ptr := l.Args[0]
3174 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
3175 continue
3176 }
3177 v.reset(OpAMD64ANDQload)
3178 v.AuxInt = int32ToAuxInt(off)
3179 v.Aux = symToAux(sym)
3180 v.AddArg3(x, ptr, mem)
3181 return true
3182 }
3183 break
3184 }
3185
3186
3187
3188 for {
3189 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3190 x := v_0
3191 if v_1.Op != OpAMD64NOTQ {
3192 continue
3193 }
3194 y := v_1.Args[0]
3195 if !(buildcfg.GOAMD64 >= 3) {
3196 continue
3197 }
3198 v.reset(OpAMD64ANDNQ)
3199 v.AddArg2(x, y)
3200 return true
3201 }
3202 break
3203 }
3204
3205
3206
3207 for {
3208 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3209 x := v_0
3210 if v_1.Op != OpAMD64NEGQ || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
3211 continue
3212 }
3213 v.reset(OpAMD64BLSIQ)
3214 v.AddArg(x)
3215 return true
3216 }
3217 break
3218 }
3219
3220
3221
3222 for {
3223 t := v.Type
3224 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3225 x := v_0
3226 if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
3227 continue
3228 }
3229 v.reset(OpSelect0)
3230 v.Type = t
3231 v0 := b.NewValue0(v.Pos, OpAMD64BLSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
3232 v0.AddArg(x)
3233 v.AddArg(v0)
3234 return true
3235 }
3236 break
3237 }
3238 return false
3239 }
3240 func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
3241 v_0 := v.Args[0]
3242
3243
3244 for {
3245 c := auxIntToInt32(v.AuxInt)
3246 if v_0.Op != OpAMD64ANDQconst {
3247 break
3248 }
3249 d := auxIntToInt32(v_0.AuxInt)
3250 x := v_0.Args[0]
3251 v.reset(OpAMD64ANDQconst)
3252 v.AuxInt = int32ToAuxInt(c & d)
3253 v.AddArg(x)
3254 return true
3255 }
3256
3257
3258 for {
3259 if auxIntToInt32(v.AuxInt) != 0xFF {
3260 break
3261 }
3262 x := v_0
3263 v.reset(OpAMD64MOVBQZX)
3264 v.AddArg(x)
3265 return true
3266 }
3267
3268
3269 for {
3270 if auxIntToInt32(v.AuxInt) != 0xFFFF {
3271 break
3272 }
3273 x := v_0
3274 v.reset(OpAMD64MOVWQZX)
3275 v.AddArg(x)
3276 return true
3277 }
3278
3279
3280 for {
3281 if auxIntToInt32(v.AuxInt) != 0 {
3282 break
3283 }
3284 v.reset(OpAMD64MOVQconst)
3285 v.AuxInt = int64ToAuxInt(0)
3286 return true
3287 }
3288
3289
3290 for {
3291 if auxIntToInt32(v.AuxInt) != -1 {
3292 break
3293 }
3294 x := v_0
3295 v.copyOf(x)
3296 return true
3297 }
3298
3299
3300 for {
3301 c := auxIntToInt32(v.AuxInt)
3302 if v_0.Op != OpAMD64MOVQconst {
3303 break
3304 }
3305 d := auxIntToInt64(v_0.AuxInt)
3306 v.reset(OpAMD64MOVQconst)
3307 v.AuxInt = int64ToAuxInt(int64(c) & d)
3308 return true
3309 }
3310 return false
3311 }
3312 func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool {
3313 v_1 := v.Args[1]
3314 v_0 := v.Args[0]
3315
3316
3317
3318 for {
3319 valoff1 := auxIntToValAndOff(v.AuxInt)
3320 sym := auxToSym(v.Aux)
3321 if v_0.Op != OpAMD64ADDQconst {
3322 break
3323 }
3324 off2 := auxIntToInt32(v_0.AuxInt)
3325 base := v_0.Args[0]
3326 mem := v_1
3327 if !(ValAndOff(valoff1).canAdd32(off2)) {
3328 break
3329 }
3330 v.reset(OpAMD64ANDQconstmodify)
3331 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
3332 v.Aux = symToAux(sym)
3333 v.AddArg2(base, mem)
3334 return true
3335 }
3336
3337
3338
3339 for {
3340 valoff1 := auxIntToValAndOff(v.AuxInt)
3341 sym1 := auxToSym(v.Aux)
3342 if v_0.Op != OpAMD64LEAQ {
3343 break
3344 }
3345 off2 := auxIntToInt32(v_0.AuxInt)
3346 sym2 := auxToSym(v_0.Aux)
3347 base := v_0.Args[0]
3348 mem := v_1
3349 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
3350 break
3351 }
3352 v.reset(OpAMD64ANDQconstmodify)
3353 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
3354 v.Aux = symToAux(mergeSym(sym1, sym2))
3355 v.AddArg2(base, mem)
3356 return true
3357 }
3358 return false
3359 }
3360 func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
3361 v_2 := v.Args[2]
3362 v_1 := v.Args[1]
3363 v_0 := v.Args[0]
3364 b := v.Block
3365 typ := &b.Func.Config.Types
3366
3367
3368
3369 for {
3370 off1 := auxIntToInt32(v.AuxInt)
3371 sym := auxToSym(v.Aux)
3372 val := v_0
3373 if v_1.Op != OpAMD64ADDQconst {
3374 break
3375 }
3376 off2 := auxIntToInt32(v_1.AuxInt)
3377 base := v_1.Args[0]
3378 mem := v_2
3379 if !(is32Bit(int64(off1) + int64(off2))) {
3380 break
3381 }
3382 v.reset(OpAMD64ANDQload)
3383 v.AuxInt = int32ToAuxInt(off1 + off2)
3384 v.Aux = symToAux(sym)
3385 v.AddArg3(val, base, mem)
3386 return true
3387 }
3388
3389
3390
3391 for {
3392 off1 := auxIntToInt32(v.AuxInt)
3393 sym1 := auxToSym(v.Aux)
3394 val := v_0
3395 if v_1.Op != OpAMD64LEAQ {
3396 break
3397 }
3398 off2 := auxIntToInt32(v_1.AuxInt)
3399 sym2 := auxToSym(v_1.Aux)
3400 base := v_1.Args[0]
3401 mem := v_2
3402 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3403 break
3404 }
3405 v.reset(OpAMD64ANDQload)
3406 v.AuxInt = int32ToAuxInt(off1 + off2)
3407 v.Aux = symToAux(mergeSym(sym1, sym2))
3408 v.AddArg3(val, base, mem)
3409 return true
3410 }
3411
3412
3413 for {
3414 off := auxIntToInt32(v.AuxInt)
3415 sym := auxToSym(v.Aux)
3416 x := v_0
3417 ptr := v_1
3418 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
3419 break
3420 }
3421 y := v_2.Args[1]
3422 if ptr != v_2.Args[0] {
3423 break
3424 }
3425 v.reset(OpAMD64ANDQ)
3426 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
3427 v0.AddArg(y)
3428 v.AddArg2(x, v0)
3429 return true
3430 }
3431 return false
3432 }
3433 func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
3434 v_2 := v.Args[2]
3435 v_1 := v.Args[1]
3436 v_0 := v.Args[0]
3437
3438
3439
3440 for {
3441 off1 := auxIntToInt32(v.AuxInt)
3442 sym := auxToSym(v.Aux)
3443 if v_0.Op != OpAMD64ADDQconst {
3444 break
3445 }
3446 off2 := auxIntToInt32(v_0.AuxInt)
3447 base := v_0.Args[0]
3448 val := v_1
3449 mem := v_2
3450 if !(is32Bit(int64(off1) + int64(off2))) {
3451 break
3452 }
3453 v.reset(OpAMD64ANDQmodify)
3454 v.AuxInt = int32ToAuxInt(off1 + off2)
3455 v.Aux = symToAux(sym)
3456 v.AddArg3(base, val, mem)
3457 return true
3458 }
3459
3460
3461
3462 for {
3463 off1 := auxIntToInt32(v.AuxInt)
3464 sym1 := auxToSym(v.Aux)
3465 if v_0.Op != OpAMD64LEAQ {
3466 break
3467 }
3468 off2 := auxIntToInt32(v_0.AuxInt)
3469 sym2 := auxToSym(v_0.Aux)
3470 base := v_0.Args[0]
3471 val := v_1
3472 mem := v_2
3473 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3474 break
3475 }
3476 v.reset(OpAMD64ANDQmodify)
3477 v.AuxInt = int32ToAuxInt(off1 + off2)
3478 v.Aux = symToAux(mergeSym(sym1, sym2))
3479 v.AddArg3(base, val, mem)
3480 return true
3481 }
3482 return false
3483 }
3484 func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool {
3485 v_0 := v.Args[0]
3486 b := v.Block
3487
3488
3489 for {
3490 if v_0.Op != OpAMD64ORQconst {
3491 break
3492 }
3493 t := v_0.Type
3494 if auxIntToInt32(v_0.AuxInt) != 1<<8 {
3495 break
3496 }
3497 v_0_0 := v_0.Args[0]
3498 if v_0_0.Op != OpAMD64MOVBQZX {
3499 break
3500 }
3501 x := v_0_0.Args[0]
3502 v.reset(OpAMD64BSFQ)
3503 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
3504 v0.AuxInt = int32ToAuxInt(1 << 8)
3505 v0.AddArg(x)
3506 v.AddArg(v0)
3507 return true
3508 }
3509
3510
3511 for {
3512 if v_0.Op != OpAMD64ORQconst {
3513 break
3514 }
3515 t := v_0.Type
3516 if auxIntToInt32(v_0.AuxInt) != 1<<16 {
3517 break
3518 }
3519 v_0_0 := v_0.Args[0]
3520 if v_0_0.Op != OpAMD64MOVWQZX {
3521 break
3522 }
3523 x := v_0_0.Args[0]
3524 v.reset(OpAMD64BSFQ)
3525 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
3526 v0.AuxInt = int32ToAuxInt(1 << 16)
3527 v0.AddArg(x)
3528 v.AddArg(v0)
3529 return true
3530 }
3531 return false
3532 }
3533 func rewriteValueAMD64_OpAMD64BSWAPL(v *Value) bool {
3534 v_0 := v.Args[0]
3535 b := v.Block
3536 typ := &b.Func.Config.Types
3537
3538
3539 for {
3540 if v_0.Op != OpAMD64BSWAPL {
3541 break
3542 }
3543 p := v_0.Args[0]
3544 v.copyOf(p)
3545 return true
3546 }
3547
3548
3549
3550 for {
3551 x := v_0
3552 if x.Op != OpAMD64MOVLload {
3553 break
3554 }
3555 i := auxIntToInt32(x.AuxInt)
3556 s := auxToSym(x.Aux)
3557 mem := x.Args[1]
3558 p := x.Args[0]
3559 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
3560 break
3561 }
3562 b = x.Block
3563 v0 := b.NewValue0(x.Pos, OpAMD64MOVBELload, typ.UInt32)
3564 v.copyOf(v0)
3565 v0.AuxInt = int32ToAuxInt(i)
3566 v0.Aux = symToAux(s)
3567 v0.AddArg2(p, mem)
3568 return true
3569 }
3570
3571
3572
3573 for {
3574 x := v_0
3575 if x.Op != OpAMD64MOVBELload {
3576 break
3577 }
3578 i := auxIntToInt32(x.AuxInt)
3579 s := auxToSym(x.Aux)
3580 mem := x.Args[1]
3581 p := x.Args[0]
3582 if !(x.Uses == 1) {
3583 break
3584 }
3585 b = x.Block
3586 v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, typ.UInt32)
3587 v.copyOf(v0)
3588 v0.AuxInt = int32ToAuxInt(i)
3589 v0.Aux = symToAux(s)
3590 v0.AddArg2(p, mem)
3591 return true
3592 }
3593 return false
3594 }
3595 func rewriteValueAMD64_OpAMD64BSWAPQ(v *Value) bool {
3596 v_0 := v.Args[0]
3597 b := v.Block
3598 typ := &b.Func.Config.Types
3599
3600
3601 for {
3602 if v_0.Op != OpAMD64BSWAPQ {
3603 break
3604 }
3605 p := v_0.Args[0]
3606 v.copyOf(p)
3607 return true
3608 }
3609
3610
3611
3612 for {
3613 x := v_0
3614 if x.Op != OpAMD64MOVQload {
3615 break
3616 }
3617 i := auxIntToInt32(x.AuxInt)
3618 s := auxToSym(x.Aux)
3619 mem := x.Args[1]
3620 p := x.Args[0]
3621 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
3622 break
3623 }
3624 b = x.Block
3625 v0 := b.NewValue0(x.Pos, OpAMD64MOVBEQload, typ.UInt64)
3626 v.copyOf(v0)
3627 v0.AuxInt = int32ToAuxInt(i)
3628 v0.Aux = symToAux(s)
3629 v0.AddArg2(p, mem)
3630 return true
3631 }
3632
3633
3634
3635 for {
3636 x := v_0
3637 if x.Op != OpAMD64MOVBEQload {
3638 break
3639 }
3640 i := auxIntToInt32(x.AuxInt)
3641 s := auxToSym(x.Aux)
3642 mem := x.Args[1]
3643 p := x.Args[0]
3644 if !(x.Uses == 1) {
3645 break
3646 }
3647 b = x.Block
3648 v0 := b.NewValue0(x.Pos, OpAMD64MOVQload, typ.UInt64)
3649 v.copyOf(v0)
3650 v0.AuxInt = int32ToAuxInt(i)
3651 v0.Aux = symToAux(s)
3652 v0.AddArg2(p, mem)
3653 return true
3654 }
3655 return false
3656 }
3657 func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool {
3658 v_0 := v.Args[0]
3659
3660
3661 for {
3662 c := auxIntToInt8(v.AuxInt)
3663 if v_0.Op != OpAMD64MOVQconst {
3664 break
3665 }
3666 d := auxIntToInt64(v_0.AuxInt)
3667 v.reset(OpAMD64MOVQconst)
3668 v.AuxInt = int64ToAuxInt(d ^ (1 << uint32(c)))
3669 return true
3670 }
3671 return false
3672 }
3673 func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
3674 v_0 := v.Args[0]
3675
3676
3677
3678 for {
3679 c := auxIntToInt8(v.AuxInt)
3680 if v_0.Op != OpAMD64SHRQconst {
3681 break
3682 }
3683 d := auxIntToInt8(v_0.AuxInt)
3684 x := v_0.Args[0]
3685 if !((c + d) < 64) {
3686 break
3687 }
3688 v.reset(OpAMD64BTQconst)
3689 v.AuxInt = int8ToAuxInt(c + d)
3690 v.AddArg(x)
3691 return true
3692 }
3693
3694
3695
3696 for {
3697 c := auxIntToInt8(v.AuxInt)
3698 if v_0.Op != OpAMD64ADDQ {
3699 break
3700 }
3701 x := v_0.Args[1]
3702 if x != v_0.Args[0] || !(c > 1) {
3703 break
3704 }
3705 v.reset(OpAMD64BTLconst)
3706 v.AuxInt = int8ToAuxInt(c - 1)
3707 v.AddArg(x)
3708 return true
3709 }
3710
3711
3712
3713 for {
3714 c := auxIntToInt8(v.AuxInt)
3715 if v_0.Op != OpAMD64SHLQconst {
3716 break
3717 }
3718 d := auxIntToInt8(v_0.AuxInt)
3719 x := v_0.Args[0]
3720 if !(c > d) {
3721 break
3722 }
3723 v.reset(OpAMD64BTLconst)
3724 v.AuxInt = int8ToAuxInt(c - d)
3725 v.AddArg(x)
3726 return true
3727 }
3728
3729
3730 for {
3731 if auxIntToInt8(v.AuxInt) != 0 {
3732 break
3733 }
3734 s := v_0
3735 if s.Op != OpAMD64SHRQ {
3736 break
3737 }
3738 y := s.Args[1]
3739 x := s.Args[0]
3740 v.reset(OpAMD64BTQ)
3741 v.AddArg2(y, x)
3742 return true
3743 }
3744
3745
3746
3747 for {
3748 c := auxIntToInt8(v.AuxInt)
3749 if v_0.Op != OpAMD64SHRLconst {
3750 break
3751 }
3752 d := auxIntToInt8(v_0.AuxInt)
3753 x := v_0.Args[0]
3754 if !((c + d) < 32) {
3755 break
3756 }
3757 v.reset(OpAMD64BTLconst)
3758 v.AuxInt = int8ToAuxInt(c + d)
3759 v.AddArg(x)
3760 return true
3761 }
3762
3763
3764
3765 for {
3766 c := auxIntToInt8(v.AuxInt)
3767 if v_0.Op != OpAMD64ADDL {
3768 break
3769 }
3770 x := v_0.Args[1]
3771 if x != v_0.Args[0] || !(c > 1) {
3772 break
3773 }
3774 v.reset(OpAMD64BTLconst)
3775 v.AuxInt = int8ToAuxInt(c - 1)
3776 v.AddArg(x)
3777 return true
3778 }
3779
3780
3781
3782 for {
3783 c := auxIntToInt8(v.AuxInt)
3784 if v_0.Op != OpAMD64SHLLconst {
3785 break
3786 }
3787 d := auxIntToInt8(v_0.AuxInt)
3788 x := v_0.Args[0]
3789 if !(c > d) {
3790 break
3791 }
3792 v.reset(OpAMD64BTLconst)
3793 v.AuxInt = int8ToAuxInt(c - d)
3794 v.AddArg(x)
3795 return true
3796 }
3797
3798
3799 for {
3800 if auxIntToInt8(v.AuxInt) != 0 {
3801 break
3802 }
3803 s := v_0
3804 if s.Op != OpAMD64SHRL {
3805 break
3806 }
3807 y := s.Args[1]
3808 x := s.Args[0]
3809 v.reset(OpAMD64BTL)
3810 v.AddArg2(y, x)
3811 return true
3812 }
3813
3814
3815 for {
3816 if auxIntToInt8(v.AuxInt) != 0 {
3817 break
3818 }
3819 s := v_0
3820 if s.Op != OpAMD64SHRXL {
3821 break
3822 }
3823 y := s.Args[1]
3824 x := s.Args[0]
3825 v.reset(OpAMD64BTL)
3826 v.AddArg2(y, x)
3827 return true
3828 }
3829 return false
3830 }
3831 func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
3832 v_0 := v.Args[0]
3833
3834
3835
3836 for {
3837 c := auxIntToInt8(v.AuxInt)
3838 if v_0.Op != OpAMD64SHRQconst {
3839 break
3840 }
3841 d := auxIntToInt8(v_0.AuxInt)
3842 x := v_0.Args[0]
3843 if !((c + d) < 64) {
3844 break
3845 }
3846 v.reset(OpAMD64BTQconst)
3847 v.AuxInt = int8ToAuxInt(c + d)
3848 v.AddArg(x)
3849 return true
3850 }
3851
3852
3853
3854 for {
3855 c := auxIntToInt8(v.AuxInt)
3856 if v_0.Op != OpAMD64ADDQ {
3857 break
3858 }
3859 x := v_0.Args[1]
3860 if x != v_0.Args[0] || !(c > 1) {
3861 break
3862 }
3863 v.reset(OpAMD64BTQconst)
3864 v.AuxInt = int8ToAuxInt(c - 1)
3865 v.AddArg(x)
3866 return true
3867 }
3868
3869
3870
3871 for {
3872 c := auxIntToInt8(v.AuxInt)
3873 if v_0.Op != OpAMD64SHLQconst {
3874 break
3875 }
3876 d := auxIntToInt8(v_0.AuxInt)
3877 x := v_0.Args[0]
3878 if !(c > d) {
3879 break
3880 }
3881 v.reset(OpAMD64BTQconst)
3882 v.AuxInt = int8ToAuxInt(c - d)
3883 v.AddArg(x)
3884 return true
3885 }
3886
3887
3888 for {
3889 if auxIntToInt8(v.AuxInt) != 0 {
3890 break
3891 }
3892 s := v_0
3893 if s.Op != OpAMD64SHRQ {
3894 break
3895 }
3896 y := s.Args[1]
3897 x := s.Args[0]
3898 v.reset(OpAMD64BTQ)
3899 v.AddArg2(y, x)
3900 return true
3901 }
3902 return false
3903 }
3904 func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
3905 v_0 := v.Args[0]
3906
3907
3908 for {
3909 c := auxIntToInt8(v.AuxInt)
3910 if v_0.Op != OpAMD64BTSQconst || auxIntToInt8(v_0.AuxInt) != c {
3911 break
3912 }
3913 x := v_0.Args[0]
3914 v.reset(OpAMD64BTRQconst)
3915 v.AuxInt = int8ToAuxInt(c)
3916 v.AddArg(x)
3917 return true
3918 }
3919
3920
3921 for {
3922 c := auxIntToInt8(v.AuxInt)
3923 if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
3924 break
3925 }
3926 x := v_0.Args[0]
3927 v.reset(OpAMD64BTRQconst)
3928 v.AuxInt = int8ToAuxInt(c)
3929 v.AddArg(x)
3930 return true
3931 }
3932
3933
3934 for {
3935 c := auxIntToInt8(v.AuxInt)
3936 if v_0.Op != OpAMD64MOVQconst {
3937 break
3938 }
3939 d := auxIntToInt64(v_0.AuxInt)
3940 v.reset(OpAMD64MOVQconst)
3941 v.AuxInt = int64ToAuxInt(d &^ (1 << uint32(c)))
3942 return true
3943 }
3944 return false
3945 }
3946 func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
3947 v_0 := v.Args[0]
3948
3949
3950 for {
3951 c := auxIntToInt8(v.AuxInt)
3952 if v_0.Op != OpAMD64BTRQconst || auxIntToInt8(v_0.AuxInt) != c {
3953 break
3954 }
3955 x := v_0.Args[0]
3956 v.reset(OpAMD64BTSQconst)
3957 v.AuxInt = int8ToAuxInt(c)
3958 v.AddArg(x)
3959 return true
3960 }
3961
3962
3963 for {
3964 c := auxIntToInt8(v.AuxInt)
3965 if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
3966 break
3967 }
3968 x := v_0.Args[0]
3969 v.reset(OpAMD64BTSQconst)
3970 v.AuxInt = int8ToAuxInt(c)
3971 v.AddArg(x)
3972 return true
3973 }
3974
3975
3976 for {
3977 c := auxIntToInt8(v.AuxInt)
3978 if v_0.Op != OpAMD64MOVQconst {
3979 break
3980 }
3981 d := auxIntToInt64(v_0.AuxInt)
3982 v.reset(OpAMD64MOVQconst)
3983 v.AuxInt = int64ToAuxInt(d | (1 << uint32(c)))
3984 return true
3985 }
3986 return false
3987 }
3988 func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
3989 v_2 := v.Args[2]
3990 v_1 := v.Args[1]
3991 v_0 := v.Args[0]
3992
3993
3994 for {
3995 x := v_0
3996 y := v_1
3997 if v_2.Op != OpAMD64InvertFlags {
3998 break
3999 }
4000 cond := v_2.Args[0]
4001 v.reset(OpAMD64CMOVLLS)
4002 v.AddArg3(x, y, cond)
4003 return true
4004 }
4005
4006
4007 for {
4008 x := v_1
4009 if v_2.Op != OpAMD64FlagEQ {
4010 break
4011 }
4012 v.copyOf(x)
4013 return true
4014 }
4015
4016
4017 for {
4018 x := v_1
4019 if v_2.Op != OpAMD64FlagGT_UGT {
4020 break
4021 }
4022 v.copyOf(x)
4023 return true
4024 }
4025
4026
4027 for {
4028 y := v_0
4029 if v_2.Op != OpAMD64FlagGT_ULT {
4030 break
4031 }
4032 v.copyOf(y)
4033 return true
4034 }
4035
4036
4037 for {
4038 y := v_0
4039 if v_2.Op != OpAMD64FlagLT_ULT {
4040 break
4041 }
4042 v.copyOf(y)
4043 return true
4044 }
4045
4046
4047 for {
4048 x := v_1
4049 if v_2.Op != OpAMD64FlagLT_UGT {
4050 break
4051 }
4052 v.copyOf(x)
4053 return true
4054 }
4055 return false
4056 }
4057 func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
4058 v_2 := v.Args[2]
4059 v_1 := v.Args[1]
4060 v_0 := v.Args[0]
4061
4062
4063 for {
4064 x := v_0
4065 y := v_1
4066 if v_2.Op != OpAMD64InvertFlags {
4067 break
4068 }
4069 cond := v_2.Args[0]
4070 v.reset(OpAMD64CMOVLHI)
4071 v.AddArg3(x, y, cond)
4072 return true
4073 }
4074
4075
4076 for {
4077 y := v_0
4078 if v_2.Op != OpAMD64FlagEQ {
4079 break
4080 }
4081 v.copyOf(y)
4082 return true
4083 }
4084
4085
4086 for {
4087 y := v_0
4088 if v_2.Op != OpAMD64FlagGT_UGT {
4089 break
4090 }
4091 v.copyOf(y)
4092 return true
4093 }
4094
4095
4096 for {
4097 x := v_1
4098 if v_2.Op != OpAMD64FlagGT_ULT {
4099 break
4100 }
4101 v.copyOf(x)
4102 return true
4103 }
4104
4105
4106 for {
4107 x := v_1
4108 if v_2.Op != OpAMD64FlagLT_ULT {
4109 break
4110 }
4111 v.copyOf(x)
4112 return true
4113 }
4114
4115
4116 for {
4117 y := v_0
4118 if v_2.Op != OpAMD64FlagLT_UGT {
4119 break
4120 }
4121 v.copyOf(y)
4122 return true
4123 }
4124 return false
4125 }
4126 func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
4127 v_2 := v.Args[2]
4128 v_1 := v.Args[1]
4129 v_0 := v.Args[0]
4130 b := v.Block
4131
4132
4133 for {
4134 x := v_0
4135 y := v_1
4136 if v_2.Op != OpAMD64InvertFlags {
4137 break
4138 }
4139 cond := v_2.Args[0]
4140 v.reset(OpAMD64CMOVLEQ)
4141 v.AddArg3(x, y, cond)
4142 return true
4143 }
4144
4145
4146 for {
4147 x := v_1
4148 if v_2.Op != OpAMD64FlagEQ {
4149 break
4150 }
4151 v.copyOf(x)
4152 return true
4153 }
4154
4155
4156 for {
4157 y := v_0
4158 if v_2.Op != OpAMD64FlagGT_UGT {
4159 break
4160 }
4161 v.copyOf(y)
4162 return true
4163 }
4164
4165
4166 for {
4167 y := v_0
4168 if v_2.Op != OpAMD64FlagGT_ULT {
4169 break
4170 }
4171 v.copyOf(y)
4172 return true
4173 }
4174
4175
4176 for {
4177 y := v_0
4178 if v_2.Op != OpAMD64FlagLT_ULT {
4179 break
4180 }
4181 v.copyOf(y)
4182 return true
4183 }
4184
4185
4186 for {
4187 y := v_0
4188 if v_2.Op != OpAMD64FlagLT_UGT {
4189 break
4190 }
4191 v.copyOf(y)
4192 return true
4193 }
4194
4195
4196 for {
4197 x := v_0
4198 y := v_1
4199 if v_2.Op != OpAMD64TESTQ {
4200 break
4201 }
4202 _ = v_2.Args[1]
4203 v_2_0 := v_2.Args[0]
4204 v_2_1 := v_2.Args[1]
4205 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
4206 s := v_2_0
4207 if s.Op != OpSelect0 {
4208 continue
4209 }
4210 blsr := s.Args[0]
4211 if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
4212 continue
4213 }
4214 v.reset(OpAMD64CMOVLEQ)
4215 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
4216 v0.AddArg(blsr)
4217 v.AddArg3(x, y, v0)
4218 return true
4219 }
4220 break
4221 }
4222
4223
4224 for {
4225 x := v_0
4226 y := v_1
4227 if v_2.Op != OpAMD64TESTL {
4228 break
4229 }
4230 _ = v_2.Args[1]
4231 v_2_0 := v_2.Args[0]
4232 v_2_1 := v_2.Args[1]
4233 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
4234 s := v_2_0
4235 if s.Op != OpSelect0 {
4236 continue
4237 }
4238 blsr := s.Args[0]
4239 if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
4240 continue
4241 }
4242 v.reset(OpAMD64CMOVLEQ)
4243 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
4244 v0.AddArg(blsr)
4245 v.AddArg3(x, y, v0)
4246 return true
4247 }
4248 break
4249 }
4250 return false
4251 }
4252 func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
4253 v_2 := v.Args[2]
4254 v_1 := v.Args[1]
4255 v_0 := v.Args[0]
4256 b := v.Block
4257
4258
4259 for {
4260 x := v_0
4261 y := v_1
4262 if v_2.Op != OpAMD64InvertFlags {
4263 break
4264 }
4265 cond := v_2.Args[0]
4266 v.reset(OpAMD64CMOVLLE)
4267 v.AddArg3(x, y, cond)
4268 return true
4269 }
4270
4271
4272 for {
4273 x := v_1
4274 if v_2.Op != OpAMD64FlagEQ {
4275 break
4276 }
4277 v.copyOf(x)
4278 return true
4279 }
4280
4281
4282 for {
4283 x := v_1
4284 if v_2.Op != OpAMD64FlagGT_UGT {
4285 break
4286 }
4287 v.copyOf(x)
4288 return true
4289 }
4290
4291
4292 for {
4293 x := v_1
4294 if v_2.Op != OpAMD64FlagGT_ULT {
4295 break
4296 }
4297 v.copyOf(x)
4298 return true
4299 }
4300
4301
4302 for {
4303 y := v_0
4304 if v_2.Op != OpAMD64FlagLT_ULT {
4305 break
4306 }
4307 v.copyOf(y)
4308 return true
4309 }
4310
4311
4312 for {
4313 y := v_0
4314 if v_2.Op != OpAMD64FlagLT_UGT {
4315 break
4316 }
4317 v.copyOf(y)
4318 return true
4319 }
4320
4321
4322
4323 for {
4324 x := v_0
4325 y := v_1
4326 c := v_2
4327 if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
4328 break
4329 }
4330 z := c.Args[0]
4331 if !(c.Uses == 1) {
4332 break
4333 }
4334 v.reset(OpAMD64CMOVLGT)
4335 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
4336 v0.AuxInt = int32ToAuxInt(127)
4337 v0.AddArg(z)
4338 v.AddArg3(x, y, v0)
4339 return true
4340 }
4341
4342
4343
4344 for {
4345 x := v_0
4346 y := v_1
4347 c := v_2
4348 if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
4349 break
4350 }
4351 z := c.Args[0]
4352 if !(c.Uses == 1) {
4353 break
4354 }
4355 v.reset(OpAMD64CMOVLGT)
4356 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
4357 v0.AuxInt = int32ToAuxInt(127)
4358 v0.AddArg(z)
4359 v.AddArg3(x, y, v0)
4360 return true
4361 }
4362 return false
4363 }
4364 func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
4365 v_2 := v.Args[2]
4366 v_1 := v.Args[1]
4367 v_0 := v.Args[0]
4368
4369
4370 for {
4371 x := v_0
4372 y := v_1
4373 if v_2.Op != OpAMD64InvertFlags {
4374 break
4375 }
4376 cond := v_2.Args[0]
4377 v.reset(OpAMD64CMOVLLT)
4378 v.AddArg3(x, y, cond)
4379 return true
4380 }
4381
4382
4383 for {
4384 y := v_0
4385 if v_2.Op != OpAMD64FlagEQ {
4386 break
4387 }
4388 v.copyOf(y)
4389 return true
4390 }
4391
4392
4393 for {
4394 x := v_1
4395 if v_2.Op != OpAMD64FlagGT_UGT {
4396 break
4397 }
4398 v.copyOf(x)
4399 return true
4400 }
4401
4402
4403 for {
4404 x := v_1
4405 if v_2.Op != OpAMD64FlagGT_ULT {
4406 break
4407 }
4408 v.copyOf(x)
4409 return true
4410 }
4411
4412
4413 for {
4414 y := v_0
4415 if v_2.Op != OpAMD64FlagLT_ULT {
4416 break
4417 }
4418 v.copyOf(y)
4419 return true
4420 }
4421
4422
4423 for {
4424 y := v_0
4425 if v_2.Op != OpAMD64FlagLT_UGT {
4426 break
4427 }
4428 v.copyOf(y)
4429 return true
4430 }
4431 return false
4432 }
4433 func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
4434 v_2 := v.Args[2]
4435 v_1 := v.Args[1]
4436 v_0 := v.Args[0]
4437
4438
4439 for {
4440 x := v_0
4441 y := v_1
4442 if v_2.Op != OpAMD64InvertFlags {
4443 break
4444 }
4445 cond := v_2.Args[0]
4446 v.reset(OpAMD64CMOVLCS)
4447 v.AddArg3(x, y, cond)
4448 return true
4449 }
4450
4451
4452 for {
4453 y := v_0
4454 if v_2.Op != OpAMD64FlagEQ {
4455 break
4456 }
4457 v.copyOf(y)
4458 return true
4459 }
4460
4461
4462 for {
4463 x := v_1
4464 if v_2.Op != OpAMD64FlagGT_UGT {
4465 break
4466 }
4467 v.copyOf(x)
4468 return true
4469 }
4470
4471
4472 for {
4473 y := v_0
4474 if v_2.Op != OpAMD64FlagGT_ULT {
4475 break
4476 }
4477 v.copyOf(y)
4478 return true
4479 }
4480
4481
4482 for {
4483 y := v_0
4484 if v_2.Op != OpAMD64FlagLT_ULT {
4485 break
4486 }
4487 v.copyOf(y)
4488 return true
4489 }
4490
4491
4492 for {
4493 x := v_1
4494 if v_2.Op != OpAMD64FlagLT_UGT {
4495 break
4496 }
4497 v.copyOf(x)
4498 return true
4499 }
4500 return false
4501 }
4502 func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
4503 v_2 := v.Args[2]
4504 v_1 := v.Args[1]
4505 v_0 := v.Args[0]
4506
4507
4508 for {
4509 x := v_0
4510 y := v_1
4511 if v_2.Op != OpAMD64InvertFlags {
4512 break
4513 }
4514 cond := v_2.Args[0]
4515 v.reset(OpAMD64CMOVLGE)
4516 v.AddArg3(x, y, cond)
4517 return true
4518 }
4519
4520
4521 for {
4522 x := v_1
4523 if v_2.Op != OpAMD64FlagEQ {
4524 break
4525 }
4526 v.copyOf(x)
4527 return true
4528 }
4529
4530
4531 for {
4532 y := v_0
4533 if v_2.Op != OpAMD64FlagGT_UGT {
4534 break
4535 }
4536 v.copyOf(y)
4537 return true
4538 }
4539
4540
4541 for {
4542 y := v_0
4543 if v_2.Op != OpAMD64FlagGT_ULT {
4544 break
4545 }
4546 v.copyOf(y)
4547 return true
4548 }
4549
4550
4551 for {
4552 x := v_1
4553 if v_2.Op != OpAMD64FlagLT_ULT {
4554 break
4555 }
4556 v.copyOf(x)
4557 return true
4558 }
4559
4560
4561 for {
4562 x := v_1
4563 if v_2.Op != OpAMD64FlagLT_UGT {
4564 break
4565 }
4566 v.copyOf(x)
4567 return true
4568 }
4569 return false
4570 }
4571 func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
4572 v_2 := v.Args[2]
4573 v_1 := v.Args[1]
4574 v_0 := v.Args[0]
4575
4576
4577 for {
4578 x := v_0
4579 y := v_1
4580 if v_2.Op != OpAMD64InvertFlags {
4581 break
4582 }
4583 cond := v_2.Args[0]
4584 v.reset(OpAMD64CMOVLCC)
4585 v.AddArg3(x, y, cond)
4586 return true
4587 }
4588
4589
4590 for {
4591 x := v_1
4592 if v_2.Op != OpAMD64FlagEQ {
4593 break
4594 }
4595 v.copyOf(x)
4596 return true
4597 }
4598
4599
4600 for {
4601 y := v_0
4602 if v_2.Op != OpAMD64FlagGT_UGT {
4603 break
4604 }
4605 v.copyOf(y)
4606 return true
4607 }
4608
4609
4610 for {
4611 x := v_1
4612 if v_2.Op != OpAMD64FlagGT_ULT {
4613 break
4614 }
4615 v.copyOf(x)
4616 return true
4617 }
4618
4619
4620 for {
4621 x := v_1
4622 if v_2.Op != OpAMD64FlagLT_ULT {
4623 break
4624 }
4625 v.copyOf(x)
4626 return true
4627 }
4628
4629
4630 for {
4631 y := v_0
4632 if v_2.Op != OpAMD64FlagLT_UGT {
4633 break
4634 }
4635 v.copyOf(y)
4636 return true
4637 }
4638 return false
4639 }
4640 func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
4641 v_2 := v.Args[2]
4642 v_1 := v.Args[1]
4643 v_0 := v.Args[0]
4644 b := v.Block
4645
4646
4647 for {
4648 x := v_0
4649 y := v_1
4650 if v_2.Op != OpAMD64InvertFlags {
4651 break
4652 }
4653 cond := v_2.Args[0]
4654 v.reset(OpAMD64CMOVLGT)
4655 v.AddArg3(x, y, cond)
4656 return true
4657 }
4658
4659
4660 for {
4661 y := v_0
4662 if v_2.Op != OpAMD64FlagEQ {
4663 break
4664 }
4665 v.copyOf(y)
4666 return true
4667 }
4668
4669
4670 for {
4671 y := v_0
4672 if v_2.Op != OpAMD64FlagGT_UGT {
4673 break
4674 }
4675 v.copyOf(y)
4676 return true
4677 }
4678
4679
4680 for {
4681 y := v_0
4682 if v_2.Op != OpAMD64FlagGT_ULT {
4683 break
4684 }
4685 v.copyOf(y)
4686 return true
4687 }
4688
4689
4690 for {
4691 x := v_1
4692 if v_2.Op != OpAMD64FlagLT_ULT {
4693 break
4694 }
4695 v.copyOf(x)
4696 return true
4697 }
4698
4699
4700 for {
4701 x := v_1
4702 if v_2.Op != OpAMD64FlagLT_UGT {
4703 break
4704 }
4705 v.copyOf(x)
4706 return true
4707 }
4708
4709
4710
4711 for {
4712 x := v_0
4713 y := v_1
4714 c := v_2
4715 if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
4716 break
4717 }
4718 z := c.Args[0]
4719 if !(c.Uses == 1) {
4720 break
4721 }
4722 v.reset(OpAMD64CMOVLLE)
4723 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
4724 v0.AuxInt = int32ToAuxInt(127)
4725 v0.AddArg(z)
4726 v.AddArg3(x, y, v0)
4727 return true
4728 }
4729
4730
4731
4732 for {
4733 x := v_0
4734 y := v_1
4735 c := v_2
4736 if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
4737 break
4738 }
4739 z := c.Args[0]
4740 if !(c.Uses == 1) {
4741 break
4742 }
4743 v.reset(OpAMD64CMOVLLE)
4744 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
4745 v0.AuxInt = int32ToAuxInt(127)
4746 v0.AddArg(z)
4747 v.AddArg3(x, y, v0)
4748 return true
4749 }
4750 return false
4751 }
4752 func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
4753 v_2 := v.Args[2]
4754 v_1 := v.Args[1]
4755 v_0 := v.Args[0]
4756 b := v.Block
4757
4758
4759 for {
4760 x := v_0
4761 y := v_1
4762 if v_2.Op != OpAMD64InvertFlags {
4763 break
4764 }
4765 cond := v_2.Args[0]
4766 v.reset(OpAMD64CMOVLNE)
4767 v.AddArg3(x, y, cond)
4768 return true
4769 }
4770
4771
4772 for {
4773 y := v_0
4774 if v_2.Op != OpAMD64FlagEQ {
4775 break
4776 }
4777 v.copyOf(y)
4778 return true
4779 }
4780
4781
4782 for {
4783 x := v_1
4784 if v_2.Op != OpAMD64FlagGT_UGT {
4785 break
4786 }
4787 v.copyOf(x)
4788 return true
4789 }
4790
4791
4792 for {
4793 x := v_1
4794 if v_2.Op != OpAMD64FlagGT_ULT {
4795 break
4796 }
4797 v.copyOf(x)
4798 return true
4799 }
4800
4801
4802 for {
4803 x := v_1
4804 if v_2.Op != OpAMD64FlagLT_ULT {
4805 break
4806 }
4807 v.copyOf(x)
4808 return true
4809 }
4810
4811
4812 for {
4813 x := v_1
4814 if v_2.Op != OpAMD64FlagLT_UGT {
4815 break
4816 }
4817 v.copyOf(x)
4818 return true
4819 }
4820
4821
4822 for {
4823 x := v_0
4824 y := v_1
4825 if v_2.Op != OpAMD64TESTQ {
4826 break
4827 }
4828 _ = v_2.Args[1]
4829 v_2_0 := v_2.Args[0]
4830 v_2_1 := v_2.Args[1]
4831 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
4832 s := v_2_0
4833 if s.Op != OpSelect0 {
4834 continue
4835 }
4836 blsr := s.Args[0]
4837 if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
4838 continue
4839 }
4840 v.reset(OpAMD64CMOVLNE)
4841 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
4842 v0.AddArg(blsr)
4843 v.AddArg3(x, y, v0)
4844 return true
4845 }
4846 break
4847 }
4848
4849
4850 for {
4851 x := v_0
4852 y := v_1
4853 if v_2.Op != OpAMD64TESTL {
4854 break
4855 }
4856 _ = v_2.Args[1]
4857 v_2_0 := v_2.Args[0]
4858 v_2_1 := v_2.Args[1]
4859 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
4860 s := v_2_0
4861 if s.Op != OpSelect0 {
4862 continue
4863 }
4864 blsr := s.Args[0]
4865 if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
4866 continue
4867 }
4868 v.reset(OpAMD64CMOVLNE)
4869 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
4870 v0.AddArg(blsr)
4871 v.AddArg3(x, y, v0)
4872 return true
4873 }
4874 break
4875 }
4876 return false
4877 }
4878 func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
4879 v_2 := v.Args[2]
4880 v_1 := v.Args[1]
4881 v_0 := v.Args[0]
4882
4883
4884 for {
4885 x := v_0
4886 y := v_1
4887 if v_2.Op != OpAMD64InvertFlags {
4888 break
4889 }
4890 cond := v_2.Args[0]
4891 v.reset(OpAMD64CMOVQLS)
4892 v.AddArg3(x, y, cond)
4893 return true
4894 }
4895
4896
4897 for {
4898 x := v_1
4899 if v_2.Op != OpAMD64FlagEQ {
4900 break
4901 }
4902 v.copyOf(x)
4903 return true
4904 }
4905
4906
4907 for {
4908 x := v_1
4909 if v_2.Op != OpAMD64FlagGT_UGT {
4910 break
4911 }
4912 v.copyOf(x)
4913 return true
4914 }
4915
4916
4917 for {
4918 y := v_0
4919 if v_2.Op != OpAMD64FlagGT_ULT {
4920 break
4921 }
4922 v.copyOf(y)
4923 return true
4924 }
4925
4926
4927 for {
4928 y := v_0
4929 if v_2.Op != OpAMD64FlagLT_ULT {
4930 break
4931 }
4932 v.copyOf(y)
4933 return true
4934 }
4935
4936
4937 for {
4938 x := v_1
4939 if v_2.Op != OpAMD64FlagLT_UGT {
4940 break
4941 }
4942 v.copyOf(x)
4943 return true
4944 }
4945 return false
4946 }
4947 func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
4948 v_2 := v.Args[2]
4949 v_1 := v.Args[1]
4950 v_0 := v.Args[0]
4951
4952
4953 for {
4954 x := v_0
4955 y := v_1
4956 if v_2.Op != OpAMD64InvertFlags {
4957 break
4958 }
4959 cond := v_2.Args[0]
4960 v.reset(OpAMD64CMOVQHI)
4961 v.AddArg3(x, y, cond)
4962 return true
4963 }
4964
4965
4966 for {
4967 y := v_0
4968 if v_2.Op != OpAMD64FlagEQ {
4969 break
4970 }
4971 v.copyOf(y)
4972 return true
4973 }
4974
4975
4976 for {
4977 y := v_0
4978 if v_2.Op != OpAMD64FlagGT_UGT {
4979 break
4980 }
4981 v.copyOf(y)
4982 return true
4983 }
4984
4985
4986 for {
4987 x := v_1
4988 if v_2.Op != OpAMD64FlagGT_ULT {
4989 break
4990 }
4991 v.copyOf(x)
4992 return true
4993 }
4994
4995
4996 for {
4997 x := v_1
4998 if v_2.Op != OpAMD64FlagLT_ULT {
4999 break
5000 }
5001 v.copyOf(x)
5002 return true
5003 }
5004
5005
5006 for {
5007 y := v_0
5008 if v_2.Op != OpAMD64FlagLT_UGT {
5009 break
5010 }
5011 v.copyOf(y)
5012 return true
5013 }
5014 return false
5015 }
5016 func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
5017 v_2 := v.Args[2]
5018 v_1 := v.Args[1]
5019 v_0 := v.Args[0]
5020 b := v.Block
5021
5022
5023 for {
5024 x := v_0
5025 y := v_1
5026 if v_2.Op != OpAMD64InvertFlags {
5027 break
5028 }
5029 cond := v_2.Args[0]
5030 v.reset(OpAMD64CMOVQEQ)
5031 v.AddArg3(x, y, cond)
5032 return true
5033 }
5034
5035
5036 for {
5037 x := v_1
5038 if v_2.Op != OpAMD64FlagEQ {
5039 break
5040 }
5041 v.copyOf(x)
5042 return true
5043 }
5044
5045
5046 for {
5047 y := v_0
5048 if v_2.Op != OpAMD64FlagGT_UGT {
5049 break
5050 }
5051 v.copyOf(y)
5052 return true
5053 }
5054
5055
5056 for {
5057 y := v_0
5058 if v_2.Op != OpAMD64FlagGT_ULT {
5059 break
5060 }
5061 v.copyOf(y)
5062 return true
5063 }
5064
5065
5066 for {
5067 y := v_0
5068 if v_2.Op != OpAMD64FlagLT_ULT {
5069 break
5070 }
5071 v.copyOf(y)
5072 return true
5073 }
5074
5075
5076 for {
5077 y := v_0
5078 if v_2.Op != OpAMD64FlagLT_UGT {
5079 break
5080 }
5081 v.copyOf(y)
5082 return true
5083 }
5084
5085
5086
5087 for {
5088 x := v_0
5089 if v_2.Op != OpSelect1 {
5090 break
5091 }
5092 v_2_0 := v_2.Args[0]
5093 if v_2_0.Op != OpAMD64BSFQ {
5094 break
5095 }
5096 v_2_0_0 := v_2_0.Args[0]
5097 if v_2_0_0.Op != OpAMD64ORQconst {
5098 break
5099 }
5100 c := auxIntToInt32(v_2_0_0.AuxInt)
5101 if !(c != 0) {
5102 break
5103 }
5104 v.copyOf(x)
5105 return true
5106 }
5107
5108
5109
5110 for {
5111 x := v_0
5112 if v_2.Op != OpSelect1 {
5113 break
5114 }
5115 v_2_0 := v_2.Args[0]
5116 if v_2_0.Op != OpAMD64BSRQ {
5117 break
5118 }
5119 v_2_0_0 := v_2_0.Args[0]
5120 if v_2_0_0.Op != OpAMD64ORQconst {
5121 break
5122 }
5123 c := auxIntToInt32(v_2_0_0.AuxInt)
5124 if !(c != 0) {
5125 break
5126 }
5127 v.copyOf(x)
5128 return true
5129 }
5130
5131
5132 for {
5133 x := v_0
5134 y := v_1
5135 if v_2.Op != OpAMD64TESTQ {
5136 break
5137 }
5138 _ = v_2.Args[1]
5139 v_2_0 := v_2.Args[0]
5140 v_2_1 := v_2.Args[1]
5141 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
5142 s := v_2_0
5143 if s.Op != OpSelect0 {
5144 continue
5145 }
5146 blsr := s.Args[0]
5147 if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
5148 continue
5149 }
5150 v.reset(OpAMD64CMOVQEQ)
5151 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
5152 v0.AddArg(blsr)
5153 v.AddArg3(x, y, v0)
5154 return true
5155 }
5156 break
5157 }
5158
5159
5160 for {
5161 x := v_0
5162 y := v_1
5163 if v_2.Op != OpAMD64TESTL {
5164 break
5165 }
5166 _ = v_2.Args[1]
5167 v_2_0 := v_2.Args[0]
5168 v_2_1 := v_2.Args[1]
5169 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
5170 s := v_2_0
5171 if s.Op != OpSelect0 {
5172 continue
5173 }
5174 blsr := s.Args[0]
5175 if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
5176 continue
5177 }
5178 v.reset(OpAMD64CMOVQEQ)
5179 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
5180 v0.AddArg(blsr)
5181 v.AddArg3(x, y, v0)
5182 return true
5183 }
5184 break
5185 }
5186 return false
5187 }
5188 func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
5189 v_2 := v.Args[2]
5190 v_1 := v.Args[1]
5191 v_0 := v.Args[0]
5192 b := v.Block
5193
5194
5195 for {
5196 x := v_0
5197 y := v_1
5198 if v_2.Op != OpAMD64InvertFlags {
5199 break
5200 }
5201 cond := v_2.Args[0]
5202 v.reset(OpAMD64CMOVQLE)
5203 v.AddArg3(x, y, cond)
5204 return true
5205 }
5206
5207
5208 for {
5209 x := v_1
5210 if v_2.Op != OpAMD64FlagEQ {
5211 break
5212 }
5213 v.copyOf(x)
5214 return true
5215 }
5216
5217
5218 for {
5219 x := v_1
5220 if v_2.Op != OpAMD64FlagGT_UGT {
5221 break
5222 }
5223 v.copyOf(x)
5224 return true
5225 }
5226
5227
5228 for {
5229 x := v_1
5230 if v_2.Op != OpAMD64FlagGT_ULT {
5231 break
5232 }
5233 v.copyOf(x)
5234 return true
5235 }
5236
5237
5238 for {
5239 y := v_0
5240 if v_2.Op != OpAMD64FlagLT_ULT {
5241 break
5242 }
5243 v.copyOf(y)
5244 return true
5245 }
5246
5247
5248 for {
5249 y := v_0
5250 if v_2.Op != OpAMD64FlagLT_UGT {
5251 break
5252 }
5253 v.copyOf(y)
5254 return true
5255 }
5256
5257
5258
5259 for {
5260 x := v_0
5261 y := v_1
5262 c := v_2
5263 if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
5264 break
5265 }
5266 z := c.Args[0]
5267 if !(c.Uses == 1) {
5268 break
5269 }
5270 v.reset(OpAMD64CMOVQGT)
5271 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
5272 v0.AuxInt = int32ToAuxInt(127)
5273 v0.AddArg(z)
5274 v.AddArg3(x, y, v0)
5275 return true
5276 }
5277
5278
5279
5280 for {
5281 x := v_0
5282 y := v_1
5283 c := v_2
5284 if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
5285 break
5286 }
5287 z := c.Args[0]
5288 if !(c.Uses == 1) {
5289 break
5290 }
5291 v.reset(OpAMD64CMOVQGT)
5292 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
5293 v0.AuxInt = int32ToAuxInt(127)
5294 v0.AddArg(z)
5295 v.AddArg3(x, y, v0)
5296 return true
5297 }
5298 return false
5299 }
5300 func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
5301 v_2 := v.Args[2]
5302 v_1 := v.Args[1]
5303 v_0 := v.Args[0]
5304
5305
5306 for {
5307 x := v_0
5308 y := v_1
5309 if v_2.Op != OpAMD64InvertFlags {
5310 break
5311 }
5312 cond := v_2.Args[0]
5313 v.reset(OpAMD64CMOVQLT)
5314 v.AddArg3(x, y, cond)
5315 return true
5316 }
5317
5318
5319 for {
5320 y := v_0
5321 if v_2.Op != OpAMD64FlagEQ {
5322 break
5323 }
5324 v.copyOf(y)
5325 return true
5326 }
5327
5328
5329 for {
5330 x := v_1
5331 if v_2.Op != OpAMD64FlagGT_UGT {
5332 break
5333 }
5334 v.copyOf(x)
5335 return true
5336 }
5337
5338
5339 for {
5340 x := v_1
5341 if v_2.Op != OpAMD64FlagGT_ULT {
5342 break
5343 }
5344 v.copyOf(x)
5345 return true
5346 }
5347
5348
5349 for {
5350 y := v_0
5351 if v_2.Op != OpAMD64FlagLT_ULT {
5352 break
5353 }
5354 v.copyOf(y)
5355 return true
5356 }
5357
5358
5359 for {
5360 y := v_0
5361 if v_2.Op != OpAMD64FlagLT_UGT {
5362 break
5363 }
5364 v.copyOf(y)
5365 return true
5366 }
5367 return false
5368 }
5369 func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
5370 v_2 := v.Args[2]
5371 v_1 := v.Args[1]
5372 v_0 := v.Args[0]
5373
5374
5375 for {
5376 x := v_0
5377 y := v_1
5378 if v_2.Op != OpAMD64InvertFlags {
5379 break
5380 }
5381 cond := v_2.Args[0]
5382 v.reset(OpAMD64CMOVQCS)
5383 v.AddArg3(x, y, cond)
5384 return true
5385 }
5386
5387
5388 for {
5389 y := v_0
5390 if v_2.Op != OpAMD64FlagEQ {
5391 break
5392 }
5393 v.copyOf(y)
5394 return true
5395 }
5396
5397
5398 for {
5399 x := v_1
5400 if v_2.Op != OpAMD64FlagGT_UGT {
5401 break
5402 }
5403 v.copyOf(x)
5404 return true
5405 }
5406
5407
5408 for {
5409 y := v_0
5410 if v_2.Op != OpAMD64FlagGT_ULT {
5411 break
5412 }
5413 v.copyOf(y)
5414 return true
5415 }
5416
5417
5418 for {
5419 y := v_0
5420 if v_2.Op != OpAMD64FlagLT_ULT {
5421 break
5422 }
5423 v.copyOf(y)
5424 return true
5425 }
5426
5427
5428 for {
5429 x := v_1
5430 if v_2.Op != OpAMD64FlagLT_UGT {
5431 break
5432 }
5433 v.copyOf(x)
5434 return true
5435 }
5436 return false
5437 }
5438 func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
5439 v_2 := v.Args[2]
5440 v_1 := v.Args[1]
5441 v_0 := v.Args[0]
5442
5443
5444 for {
5445 x := v_0
5446 y := v_1
5447 if v_2.Op != OpAMD64InvertFlags {
5448 break
5449 }
5450 cond := v_2.Args[0]
5451 v.reset(OpAMD64CMOVQGE)
5452 v.AddArg3(x, y, cond)
5453 return true
5454 }
5455
5456
5457 for {
5458 x := v_1
5459 if v_2.Op != OpAMD64FlagEQ {
5460 break
5461 }
5462 v.copyOf(x)
5463 return true
5464 }
5465
5466
5467 for {
5468 y := v_0
5469 if v_2.Op != OpAMD64FlagGT_UGT {
5470 break
5471 }
5472 v.copyOf(y)
5473 return true
5474 }
5475
5476
5477 for {
5478 y := v_0
5479 if v_2.Op != OpAMD64FlagGT_ULT {
5480 break
5481 }
5482 v.copyOf(y)
5483 return true
5484 }
5485
5486
5487 for {
5488 x := v_1
5489 if v_2.Op != OpAMD64FlagLT_ULT {
5490 break
5491 }
5492 v.copyOf(x)
5493 return true
5494 }
5495
5496
5497 for {
5498 x := v_1
5499 if v_2.Op != OpAMD64FlagLT_UGT {
5500 break
5501 }
5502 v.copyOf(x)
5503 return true
5504 }
5505 return false
5506 }
5507 func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
5508 v_2 := v.Args[2]
5509 v_1 := v.Args[1]
5510 v_0 := v.Args[0]
5511
5512
5513 for {
5514 x := v_0
5515 y := v_1
5516 if v_2.Op != OpAMD64InvertFlags {
5517 break
5518 }
5519 cond := v_2.Args[0]
5520 v.reset(OpAMD64CMOVQCC)
5521 v.AddArg3(x, y, cond)
5522 return true
5523 }
5524
5525
5526 for {
5527 x := v_1
5528 if v_2.Op != OpAMD64FlagEQ {
5529 break
5530 }
5531 v.copyOf(x)
5532 return true
5533 }
5534
5535
5536 for {
5537 y := v_0
5538 if v_2.Op != OpAMD64FlagGT_UGT {
5539 break
5540 }
5541 v.copyOf(y)
5542 return true
5543 }
5544
5545
5546 for {
5547 x := v_1
5548 if v_2.Op != OpAMD64FlagGT_ULT {
5549 break
5550 }
5551 v.copyOf(x)
5552 return true
5553 }
5554
5555
5556 for {
5557 x := v_1
5558 if v_2.Op != OpAMD64FlagLT_ULT {
5559 break
5560 }
5561 v.copyOf(x)
5562 return true
5563 }
5564
5565
5566 for {
5567 y := v_0
5568 if v_2.Op != OpAMD64FlagLT_UGT {
5569 break
5570 }
5571 v.copyOf(y)
5572 return true
5573 }
5574 return false
5575 }
5576 func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
5577 v_2 := v.Args[2]
5578 v_1 := v.Args[1]
5579 v_0 := v.Args[0]
5580 b := v.Block
5581
5582
5583 for {
5584 x := v_0
5585 y := v_1
5586 if v_2.Op != OpAMD64InvertFlags {
5587 break
5588 }
5589 cond := v_2.Args[0]
5590 v.reset(OpAMD64CMOVQGT)
5591 v.AddArg3(x, y, cond)
5592 return true
5593 }
5594
5595
5596 for {
5597 y := v_0
5598 if v_2.Op != OpAMD64FlagEQ {
5599 break
5600 }
5601 v.copyOf(y)
5602 return true
5603 }
5604
5605
5606 for {
5607 y := v_0
5608 if v_2.Op != OpAMD64FlagGT_UGT {
5609 break
5610 }
5611 v.copyOf(y)
5612 return true
5613 }
5614
5615
5616 for {
5617 y := v_0
5618 if v_2.Op != OpAMD64FlagGT_ULT {
5619 break
5620 }
5621 v.copyOf(y)
5622 return true
5623 }
5624
5625
5626 for {
5627 x := v_1
5628 if v_2.Op != OpAMD64FlagLT_ULT {
5629 break
5630 }
5631 v.copyOf(x)
5632 return true
5633 }
5634
5635
5636 for {
5637 x := v_1
5638 if v_2.Op != OpAMD64FlagLT_UGT {
5639 break
5640 }
5641 v.copyOf(x)
5642 return true
5643 }
5644
5645
5646
5647 for {
5648 x := v_0
5649 y := v_1
5650 c := v_2
5651 if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
5652 break
5653 }
5654 z := c.Args[0]
5655 if !(c.Uses == 1) {
5656 break
5657 }
5658 v.reset(OpAMD64CMOVQLE)
5659 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
5660 v0.AuxInt = int32ToAuxInt(127)
5661 v0.AddArg(z)
5662 v.AddArg3(x, y, v0)
5663 return true
5664 }
5665
5666
5667
5668 for {
5669 x := v_0
5670 y := v_1
5671 c := v_2
5672 if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
5673 break
5674 }
5675 z := c.Args[0]
5676 if !(c.Uses == 1) {
5677 break
5678 }
5679 v.reset(OpAMD64CMOVQLE)
5680 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
5681 v0.AuxInt = int32ToAuxInt(127)
5682 v0.AddArg(z)
5683 v.AddArg3(x, y, v0)
5684 return true
5685 }
5686 return false
5687 }
5688 func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
5689 v_2 := v.Args[2]
5690 v_1 := v.Args[1]
5691 v_0 := v.Args[0]
5692 b := v.Block
5693
5694
5695 for {
5696 x := v_0
5697 y := v_1
5698 if v_2.Op != OpAMD64InvertFlags {
5699 break
5700 }
5701 cond := v_2.Args[0]
5702 v.reset(OpAMD64CMOVQNE)
5703 v.AddArg3(x, y, cond)
5704 return true
5705 }
5706
5707
5708 for {
5709 y := v_0
5710 if v_2.Op != OpAMD64FlagEQ {
5711 break
5712 }
5713 v.copyOf(y)
5714 return true
5715 }
5716
5717
5718 for {
5719 x := v_1
5720 if v_2.Op != OpAMD64FlagGT_UGT {
5721 break
5722 }
5723 v.copyOf(x)
5724 return true
5725 }
5726
5727
5728 for {
5729 x := v_1
5730 if v_2.Op != OpAMD64FlagGT_ULT {
5731 break
5732 }
5733 v.copyOf(x)
5734 return true
5735 }
5736
5737
5738 for {
5739 x := v_1
5740 if v_2.Op != OpAMD64FlagLT_ULT {
5741 break
5742 }
5743 v.copyOf(x)
5744 return true
5745 }
5746
5747
5748 for {
5749 x := v_1
5750 if v_2.Op != OpAMD64FlagLT_UGT {
5751 break
5752 }
5753 v.copyOf(x)
5754 return true
5755 }
5756
5757
5758 for {
5759 x := v_0
5760 y := v_1
5761 if v_2.Op != OpAMD64TESTQ {
5762 break
5763 }
5764 _ = v_2.Args[1]
5765 v_2_0 := v_2.Args[0]
5766 v_2_1 := v_2.Args[1]
5767 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
5768 s := v_2_0
5769 if s.Op != OpSelect0 {
5770 continue
5771 }
5772 blsr := s.Args[0]
5773 if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
5774 continue
5775 }
5776 v.reset(OpAMD64CMOVQNE)
5777 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
5778 v0.AddArg(blsr)
5779 v.AddArg3(x, y, v0)
5780 return true
5781 }
5782 break
5783 }
5784
5785
5786 for {
5787 x := v_0
5788 y := v_1
5789 if v_2.Op != OpAMD64TESTL {
5790 break
5791 }
5792 _ = v_2.Args[1]
5793 v_2_0 := v_2.Args[0]
5794 v_2_1 := v_2.Args[1]
5795 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
5796 s := v_2_0
5797 if s.Op != OpSelect0 {
5798 continue
5799 }
5800 blsr := s.Args[0]
5801 if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
5802 continue
5803 }
5804 v.reset(OpAMD64CMOVQNE)
5805 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
5806 v0.AddArg(blsr)
5807 v.AddArg3(x, y, v0)
5808 return true
5809 }
5810 break
5811 }
5812 return false
5813 }
5814 func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
5815 v_2 := v.Args[2]
5816 v_1 := v.Args[1]
5817 v_0 := v.Args[0]
5818
5819
5820 for {
5821 x := v_0
5822 y := v_1
5823 if v_2.Op != OpAMD64InvertFlags {
5824 break
5825 }
5826 cond := v_2.Args[0]
5827 v.reset(OpAMD64CMOVWLS)
5828 v.AddArg3(x, y, cond)
5829 return true
5830 }
5831
5832
5833 for {
5834 x := v_1
5835 if v_2.Op != OpAMD64FlagEQ {
5836 break
5837 }
5838 v.copyOf(x)
5839 return true
5840 }
5841
5842
5843 for {
5844 x := v_1
5845 if v_2.Op != OpAMD64FlagGT_UGT {
5846 break
5847 }
5848 v.copyOf(x)
5849 return true
5850 }
5851
5852
5853 for {
5854 y := v_0
5855 if v_2.Op != OpAMD64FlagGT_ULT {
5856 break
5857 }
5858 v.copyOf(y)
5859 return true
5860 }
5861
5862
5863 for {
5864 y := v_0
5865 if v_2.Op != OpAMD64FlagLT_ULT {
5866 break
5867 }
5868 v.copyOf(y)
5869 return true
5870 }
5871
5872
5873 for {
5874 x := v_1
5875 if v_2.Op != OpAMD64FlagLT_UGT {
5876 break
5877 }
5878 v.copyOf(x)
5879 return true
5880 }
5881 return false
5882 }
5883 func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
5884 v_2 := v.Args[2]
5885 v_1 := v.Args[1]
5886 v_0 := v.Args[0]
5887
5888
5889 for {
5890 x := v_0
5891 y := v_1
5892 if v_2.Op != OpAMD64InvertFlags {
5893 break
5894 }
5895 cond := v_2.Args[0]
5896 v.reset(OpAMD64CMOVWHI)
5897 v.AddArg3(x, y, cond)
5898 return true
5899 }
5900
5901
5902 for {
5903 y := v_0
5904 if v_2.Op != OpAMD64FlagEQ {
5905 break
5906 }
5907 v.copyOf(y)
5908 return true
5909 }
5910
5911
5912 for {
5913 y := v_0
5914 if v_2.Op != OpAMD64FlagGT_UGT {
5915 break
5916 }
5917 v.copyOf(y)
5918 return true
5919 }
5920
5921
5922 for {
5923 x := v_1
5924 if v_2.Op != OpAMD64FlagGT_ULT {
5925 break
5926 }
5927 v.copyOf(x)
5928 return true
5929 }
5930
5931
5932 for {
5933 x := v_1
5934 if v_2.Op != OpAMD64FlagLT_ULT {
5935 break
5936 }
5937 v.copyOf(x)
5938 return true
5939 }
5940
5941
5942 for {
5943 y := v_0
5944 if v_2.Op != OpAMD64FlagLT_UGT {
5945 break
5946 }
5947 v.copyOf(y)
5948 return true
5949 }
5950 return false
5951 }
5952 func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
5953 v_2 := v.Args[2]
5954 v_1 := v.Args[1]
5955 v_0 := v.Args[0]
5956
5957
5958 for {
5959 x := v_0
5960 y := v_1
5961 if v_2.Op != OpAMD64InvertFlags {
5962 break
5963 }
5964 cond := v_2.Args[0]
5965 v.reset(OpAMD64CMOVWEQ)
5966 v.AddArg3(x, y, cond)
5967 return true
5968 }
5969
5970
5971 for {
5972 x := v_1
5973 if v_2.Op != OpAMD64FlagEQ {
5974 break
5975 }
5976 v.copyOf(x)
5977 return true
5978 }
5979
5980
5981 for {
5982 y := v_0
5983 if v_2.Op != OpAMD64FlagGT_UGT {
5984 break
5985 }
5986 v.copyOf(y)
5987 return true
5988 }
5989
5990
5991 for {
5992 y := v_0
5993 if v_2.Op != OpAMD64FlagGT_ULT {
5994 break
5995 }
5996 v.copyOf(y)
5997 return true
5998 }
5999
6000
6001 for {
6002 y := v_0
6003 if v_2.Op != OpAMD64FlagLT_ULT {
6004 break
6005 }
6006 v.copyOf(y)
6007 return true
6008 }
6009
6010
6011 for {
6012 y := v_0
6013 if v_2.Op != OpAMD64FlagLT_UGT {
6014 break
6015 }
6016 v.copyOf(y)
6017 return true
6018 }
6019 return false
6020 }
6021 func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
6022 v_2 := v.Args[2]
6023 v_1 := v.Args[1]
6024 v_0 := v.Args[0]
6025
6026
6027 for {
6028 x := v_0
6029 y := v_1
6030 if v_2.Op != OpAMD64InvertFlags {
6031 break
6032 }
6033 cond := v_2.Args[0]
6034 v.reset(OpAMD64CMOVWLE)
6035 v.AddArg3(x, y, cond)
6036 return true
6037 }
6038
6039
6040 for {
6041 x := v_1
6042 if v_2.Op != OpAMD64FlagEQ {
6043 break
6044 }
6045 v.copyOf(x)
6046 return true
6047 }
6048
6049
6050 for {
6051 x := v_1
6052 if v_2.Op != OpAMD64FlagGT_UGT {
6053 break
6054 }
6055 v.copyOf(x)
6056 return true
6057 }
6058
6059
6060 for {
6061 x := v_1
6062 if v_2.Op != OpAMD64FlagGT_ULT {
6063 break
6064 }
6065 v.copyOf(x)
6066 return true
6067 }
6068
6069
6070 for {
6071 y := v_0
6072 if v_2.Op != OpAMD64FlagLT_ULT {
6073 break
6074 }
6075 v.copyOf(y)
6076 return true
6077 }
6078
6079
6080 for {
6081 y := v_0
6082 if v_2.Op != OpAMD64FlagLT_UGT {
6083 break
6084 }
6085 v.copyOf(y)
6086 return true
6087 }
6088 return false
6089 }
6090 func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
6091 v_2 := v.Args[2]
6092 v_1 := v.Args[1]
6093 v_0 := v.Args[0]
6094
6095
6096 for {
6097 x := v_0
6098 y := v_1
6099 if v_2.Op != OpAMD64InvertFlags {
6100 break
6101 }
6102 cond := v_2.Args[0]
6103 v.reset(OpAMD64CMOVWLT)
6104 v.AddArg3(x, y, cond)
6105 return true
6106 }
6107
6108
6109 for {
6110 y := v_0
6111 if v_2.Op != OpAMD64FlagEQ {
6112 break
6113 }
6114 v.copyOf(y)
6115 return true
6116 }
6117
6118
6119 for {
6120 x := v_1
6121 if v_2.Op != OpAMD64FlagGT_UGT {
6122 break
6123 }
6124 v.copyOf(x)
6125 return true
6126 }
6127
6128
6129 for {
6130 x := v_1
6131 if v_2.Op != OpAMD64FlagGT_ULT {
6132 break
6133 }
6134 v.copyOf(x)
6135 return true
6136 }
6137
6138
6139 for {
6140 y := v_0
6141 if v_2.Op != OpAMD64FlagLT_ULT {
6142 break
6143 }
6144 v.copyOf(y)
6145 return true
6146 }
6147
6148
6149 for {
6150 y := v_0
6151 if v_2.Op != OpAMD64FlagLT_UGT {
6152 break
6153 }
6154 v.copyOf(y)
6155 return true
6156 }
6157 return false
6158 }
6159 func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
6160 v_2 := v.Args[2]
6161 v_1 := v.Args[1]
6162 v_0 := v.Args[0]
6163
6164
6165 for {
6166 x := v_0
6167 y := v_1
6168 if v_2.Op != OpAMD64InvertFlags {
6169 break
6170 }
6171 cond := v_2.Args[0]
6172 v.reset(OpAMD64CMOVWCS)
6173 v.AddArg3(x, y, cond)
6174 return true
6175 }
6176
6177
6178 for {
6179 y := v_0
6180 if v_2.Op != OpAMD64FlagEQ {
6181 break
6182 }
6183 v.copyOf(y)
6184 return true
6185 }
6186
6187
6188 for {
6189 x := v_1
6190 if v_2.Op != OpAMD64FlagGT_UGT {
6191 break
6192 }
6193 v.copyOf(x)
6194 return true
6195 }
6196
6197
6198 for {
6199 y := v_0
6200 if v_2.Op != OpAMD64FlagGT_ULT {
6201 break
6202 }
6203 v.copyOf(y)
6204 return true
6205 }
6206
6207
6208 for {
6209 y := v_0
6210 if v_2.Op != OpAMD64FlagLT_ULT {
6211 break
6212 }
6213 v.copyOf(y)
6214 return true
6215 }
6216
6217
6218 for {
6219 x := v_1
6220 if v_2.Op != OpAMD64FlagLT_UGT {
6221 break
6222 }
6223 v.copyOf(x)
6224 return true
6225 }
6226 return false
6227 }
6228 func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
6229 v_2 := v.Args[2]
6230 v_1 := v.Args[1]
6231 v_0 := v.Args[0]
6232
6233
6234 for {
6235 x := v_0
6236 y := v_1
6237 if v_2.Op != OpAMD64InvertFlags {
6238 break
6239 }
6240 cond := v_2.Args[0]
6241 v.reset(OpAMD64CMOVWGE)
6242 v.AddArg3(x, y, cond)
6243 return true
6244 }
6245
6246
6247 for {
6248 x := v_1
6249 if v_2.Op != OpAMD64FlagEQ {
6250 break
6251 }
6252 v.copyOf(x)
6253 return true
6254 }
6255
6256
6257 for {
6258 y := v_0
6259 if v_2.Op != OpAMD64FlagGT_UGT {
6260 break
6261 }
6262 v.copyOf(y)
6263 return true
6264 }
6265
6266
6267 for {
6268 y := v_0
6269 if v_2.Op != OpAMD64FlagGT_ULT {
6270 break
6271 }
6272 v.copyOf(y)
6273 return true
6274 }
6275
6276
6277 for {
6278 x := v_1
6279 if v_2.Op != OpAMD64FlagLT_ULT {
6280 break
6281 }
6282 v.copyOf(x)
6283 return true
6284 }
6285
6286
6287 for {
6288 x := v_1
6289 if v_2.Op != OpAMD64FlagLT_UGT {
6290 break
6291 }
6292 v.copyOf(x)
6293 return true
6294 }
6295 return false
6296 }
6297 func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
6298 v_2 := v.Args[2]
6299 v_1 := v.Args[1]
6300 v_0 := v.Args[0]
6301
6302
6303 for {
6304 x := v_0
6305 y := v_1
6306 if v_2.Op != OpAMD64InvertFlags {
6307 break
6308 }
6309 cond := v_2.Args[0]
6310 v.reset(OpAMD64CMOVWCC)
6311 v.AddArg3(x, y, cond)
6312 return true
6313 }
6314
6315
6316 for {
6317 x := v_1
6318 if v_2.Op != OpAMD64FlagEQ {
6319 break
6320 }
6321 v.copyOf(x)
6322 return true
6323 }
6324
6325
6326 for {
6327 y := v_0
6328 if v_2.Op != OpAMD64FlagGT_UGT {
6329 break
6330 }
6331 v.copyOf(y)
6332 return true
6333 }
6334
6335
6336 for {
6337 x := v_1
6338 if v_2.Op != OpAMD64FlagGT_ULT {
6339 break
6340 }
6341 v.copyOf(x)
6342 return true
6343 }
6344
6345
6346 for {
6347 x := v_1
6348 if v_2.Op != OpAMD64FlagLT_ULT {
6349 break
6350 }
6351 v.copyOf(x)
6352 return true
6353 }
6354
6355
6356 for {
6357 y := v_0
6358 if v_2.Op != OpAMD64FlagLT_UGT {
6359 break
6360 }
6361 v.copyOf(y)
6362 return true
6363 }
6364 return false
6365 }
6366 func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
6367 v_2 := v.Args[2]
6368 v_1 := v.Args[1]
6369 v_0 := v.Args[0]
6370
6371
6372 for {
6373 x := v_0
6374 y := v_1
6375 if v_2.Op != OpAMD64InvertFlags {
6376 break
6377 }
6378 cond := v_2.Args[0]
6379 v.reset(OpAMD64CMOVWGT)
6380 v.AddArg3(x, y, cond)
6381 return true
6382 }
6383
6384
6385 for {
6386 y := v_0
6387 if v_2.Op != OpAMD64FlagEQ {
6388 break
6389 }
6390 v.copyOf(y)
6391 return true
6392 }
6393
6394
6395 for {
6396 y := v_0
6397 if v_2.Op != OpAMD64FlagGT_UGT {
6398 break
6399 }
6400 v.copyOf(y)
6401 return true
6402 }
6403
6404
6405 for {
6406 y := v_0
6407 if v_2.Op != OpAMD64FlagGT_ULT {
6408 break
6409 }
6410 v.copyOf(y)
6411 return true
6412 }
6413
6414
6415 for {
6416 x := v_1
6417 if v_2.Op != OpAMD64FlagLT_ULT {
6418 break
6419 }
6420 v.copyOf(x)
6421 return true
6422 }
6423
6424
6425 for {
6426 x := v_1
6427 if v_2.Op != OpAMD64FlagLT_UGT {
6428 break
6429 }
6430 v.copyOf(x)
6431 return true
6432 }
6433 return false
6434 }
6435 func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
6436 v_2 := v.Args[2]
6437 v_1 := v.Args[1]
6438 v_0 := v.Args[0]
6439
6440
6441 for {
6442 x := v_0
6443 y := v_1
6444 if v_2.Op != OpAMD64InvertFlags {
6445 break
6446 }
6447 cond := v_2.Args[0]
6448 v.reset(OpAMD64CMOVWNE)
6449 v.AddArg3(x, y, cond)
6450 return true
6451 }
6452
6453
6454 for {
6455 y := v_0
6456 if v_2.Op != OpAMD64FlagEQ {
6457 break
6458 }
6459 v.copyOf(y)
6460 return true
6461 }
6462
6463
6464 for {
6465 x := v_1
6466 if v_2.Op != OpAMD64FlagGT_UGT {
6467 break
6468 }
6469 v.copyOf(x)
6470 return true
6471 }
6472
6473
6474 for {
6475 x := v_1
6476 if v_2.Op != OpAMD64FlagGT_ULT {
6477 break
6478 }
6479 v.copyOf(x)
6480 return true
6481 }
6482
6483
6484 for {
6485 x := v_1
6486 if v_2.Op != OpAMD64FlagLT_ULT {
6487 break
6488 }
6489 v.copyOf(x)
6490 return true
6491 }
6492
6493
6494 for {
6495 x := v_1
6496 if v_2.Op != OpAMD64FlagLT_UGT {
6497 break
6498 }
6499 v.copyOf(x)
6500 return true
6501 }
6502 return false
6503 }
6504 func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
6505 v_1 := v.Args[1]
6506 v_0 := v.Args[0]
6507 b := v.Block
6508
6509
6510 for {
6511 x := v_0
6512 if v_1.Op != OpAMD64MOVLconst {
6513 break
6514 }
6515 c := auxIntToInt32(v_1.AuxInt)
6516 v.reset(OpAMD64CMPBconst)
6517 v.AuxInt = int8ToAuxInt(int8(c))
6518 v.AddArg(x)
6519 return true
6520 }
6521
6522
6523 for {
6524 if v_0.Op != OpAMD64MOVLconst {
6525 break
6526 }
6527 c := auxIntToInt32(v_0.AuxInt)
6528 x := v_1
6529 v.reset(OpAMD64InvertFlags)
6530 v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
6531 v0.AuxInt = int8ToAuxInt(int8(c))
6532 v0.AddArg(x)
6533 v.AddArg(v0)
6534 return true
6535 }
6536
6537
6538
6539 for {
6540 x := v_0
6541 y := v_1
6542 if !(canonLessThan(x, y)) {
6543 break
6544 }
6545 v.reset(OpAMD64InvertFlags)
6546 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
6547 v0.AddArg2(y, x)
6548 v.AddArg(v0)
6549 return true
6550 }
6551
6552
6553
6554 for {
6555 l := v_0
6556 if l.Op != OpAMD64MOVBload {
6557 break
6558 }
6559 off := auxIntToInt32(l.AuxInt)
6560 sym := auxToSym(l.Aux)
6561 mem := l.Args[1]
6562 ptr := l.Args[0]
6563 x := v_1
6564 if !(canMergeLoad(v, l) && clobber(l)) {
6565 break
6566 }
6567 v.reset(OpAMD64CMPBload)
6568 v.AuxInt = int32ToAuxInt(off)
6569 v.Aux = symToAux(sym)
6570 v.AddArg3(ptr, x, mem)
6571 return true
6572 }
6573
6574
6575
6576 for {
6577 x := v_0
6578 l := v_1
6579 if l.Op != OpAMD64MOVBload {
6580 break
6581 }
6582 off := auxIntToInt32(l.AuxInt)
6583 sym := auxToSym(l.Aux)
6584 mem := l.Args[1]
6585 ptr := l.Args[0]
6586 if !(canMergeLoad(v, l) && clobber(l)) {
6587 break
6588 }
6589 v.reset(OpAMD64InvertFlags)
6590 v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
6591 v0.AuxInt = int32ToAuxInt(off)
6592 v0.Aux = symToAux(sym)
6593 v0.AddArg3(ptr, x, mem)
6594 v.AddArg(v0)
6595 return true
6596 }
6597 return false
6598 }
6599 func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
6600 v_0 := v.Args[0]
6601 b := v.Block
6602
6603
6604
6605 for {
6606 y := auxIntToInt8(v.AuxInt)
6607 if v_0.Op != OpAMD64MOVLconst {
6608 break
6609 }
6610 x := auxIntToInt32(v_0.AuxInt)
6611 if !(int8(x) == y) {
6612 break
6613 }
6614 v.reset(OpAMD64FlagEQ)
6615 return true
6616 }
6617
6618
6619
6620 for {
6621 y := auxIntToInt8(v.AuxInt)
6622 if v_0.Op != OpAMD64MOVLconst {
6623 break
6624 }
6625 x := auxIntToInt32(v_0.AuxInt)
6626 if !(int8(x) < y && uint8(x) < uint8(y)) {
6627 break
6628 }
6629 v.reset(OpAMD64FlagLT_ULT)
6630 return true
6631 }
6632
6633
6634
6635 for {
6636 y := auxIntToInt8(v.AuxInt)
6637 if v_0.Op != OpAMD64MOVLconst {
6638 break
6639 }
6640 x := auxIntToInt32(v_0.AuxInt)
6641 if !(int8(x) < y && uint8(x) > uint8(y)) {
6642 break
6643 }
6644 v.reset(OpAMD64FlagLT_UGT)
6645 return true
6646 }
6647
6648
6649
6650 for {
6651 y := auxIntToInt8(v.AuxInt)
6652 if v_0.Op != OpAMD64MOVLconst {
6653 break
6654 }
6655 x := auxIntToInt32(v_0.AuxInt)
6656 if !(int8(x) > y && uint8(x) < uint8(y)) {
6657 break
6658 }
6659 v.reset(OpAMD64FlagGT_ULT)
6660 return true
6661 }
6662
6663
6664
6665 for {
6666 y := auxIntToInt8(v.AuxInt)
6667 if v_0.Op != OpAMD64MOVLconst {
6668 break
6669 }
6670 x := auxIntToInt32(v_0.AuxInt)
6671 if !(int8(x) > y && uint8(x) > uint8(y)) {
6672 break
6673 }
6674 v.reset(OpAMD64FlagGT_UGT)
6675 return true
6676 }
6677
6678
6679
6680 for {
6681 n := auxIntToInt8(v.AuxInt)
6682 if v_0.Op != OpAMD64ANDLconst {
6683 break
6684 }
6685 m := auxIntToInt32(v_0.AuxInt)
6686 if !(0 <= int8(m) && int8(m) < n) {
6687 break
6688 }
6689 v.reset(OpAMD64FlagLT_ULT)
6690 return true
6691 }
6692
6693
6694
6695 for {
6696 if auxIntToInt8(v.AuxInt) != 0 {
6697 break
6698 }
6699 a := v_0
6700 if a.Op != OpAMD64ANDL {
6701 break
6702 }
6703 y := a.Args[1]
6704 x := a.Args[0]
6705 if !(a.Uses == 1) {
6706 break
6707 }
6708 v.reset(OpAMD64TESTB)
6709 v.AddArg2(x, y)
6710 return true
6711 }
6712
6713
6714
6715 for {
6716 if auxIntToInt8(v.AuxInt) != 0 {
6717 break
6718 }
6719 a := v_0
6720 if a.Op != OpAMD64ANDLconst {
6721 break
6722 }
6723 c := auxIntToInt32(a.AuxInt)
6724 x := a.Args[0]
6725 if !(a.Uses == 1) {
6726 break
6727 }
6728 v.reset(OpAMD64TESTBconst)
6729 v.AuxInt = int8ToAuxInt(int8(c))
6730 v.AddArg(x)
6731 return true
6732 }
6733
6734
6735 for {
6736 if auxIntToInt8(v.AuxInt) != 0 {
6737 break
6738 }
6739 x := v_0
6740 v.reset(OpAMD64TESTB)
6741 v.AddArg2(x, x)
6742 return true
6743 }
6744
6745
6746
6747 for {
6748 c := auxIntToInt8(v.AuxInt)
6749 l := v_0
6750 if l.Op != OpAMD64MOVBload {
6751 break
6752 }
6753 off := auxIntToInt32(l.AuxInt)
6754 sym := auxToSym(l.Aux)
6755 mem := l.Args[1]
6756 ptr := l.Args[0]
6757 if !(l.Uses == 1 && clobber(l)) {
6758 break
6759 }
6760 b = l.Block
6761 v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
6762 v.copyOf(v0)
6763 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
6764 v0.Aux = symToAux(sym)
6765 v0.AddArg2(ptr, mem)
6766 return true
6767 }
6768 return false
6769 }
6770 func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool {
6771 v_1 := v.Args[1]
6772 v_0 := v.Args[0]
6773
6774
6775
6776 for {
6777 valoff1 := auxIntToValAndOff(v.AuxInt)
6778 sym := auxToSym(v.Aux)
6779 if v_0.Op != OpAMD64ADDQconst {
6780 break
6781 }
6782 off2 := auxIntToInt32(v_0.AuxInt)
6783 base := v_0.Args[0]
6784 mem := v_1
6785 if !(ValAndOff(valoff1).canAdd32(off2)) {
6786 break
6787 }
6788 v.reset(OpAMD64CMPBconstload)
6789 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
6790 v.Aux = symToAux(sym)
6791 v.AddArg2(base, mem)
6792 return true
6793 }
6794
6795
6796
6797 for {
6798 valoff1 := auxIntToValAndOff(v.AuxInt)
6799 sym1 := auxToSym(v.Aux)
6800 if v_0.Op != OpAMD64LEAQ {
6801 break
6802 }
6803 off2 := auxIntToInt32(v_0.AuxInt)
6804 sym2 := auxToSym(v_0.Aux)
6805 base := v_0.Args[0]
6806 mem := v_1
6807 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
6808 break
6809 }
6810 v.reset(OpAMD64CMPBconstload)
6811 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
6812 v.Aux = symToAux(mergeSym(sym1, sym2))
6813 v.AddArg2(base, mem)
6814 return true
6815 }
6816 return false
6817 }
6818 func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool {
6819 v_2 := v.Args[2]
6820 v_1 := v.Args[1]
6821 v_0 := v.Args[0]
6822
6823
6824
6825 for {
6826 off1 := auxIntToInt32(v.AuxInt)
6827 sym := auxToSym(v.Aux)
6828 if v_0.Op != OpAMD64ADDQconst {
6829 break
6830 }
6831 off2 := auxIntToInt32(v_0.AuxInt)
6832 base := v_0.Args[0]
6833 val := v_1
6834 mem := v_2
6835 if !(is32Bit(int64(off1) + int64(off2))) {
6836 break
6837 }
6838 v.reset(OpAMD64CMPBload)
6839 v.AuxInt = int32ToAuxInt(off1 + off2)
6840 v.Aux = symToAux(sym)
6841 v.AddArg3(base, val, mem)
6842 return true
6843 }
6844
6845
6846
6847 for {
6848 off1 := auxIntToInt32(v.AuxInt)
6849 sym1 := auxToSym(v.Aux)
6850 if v_0.Op != OpAMD64LEAQ {
6851 break
6852 }
6853 off2 := auxIntToInt32(v_0.AuxInt)
6854 sym2 := auxToSym(v_0.Aux)
6855 base := v_0.Args[0]
6856 val := v_1
6857 mem := v_2
6858 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
6859 break
6860 }
6861 v.reset(OpAMD64CMPBload)
6862 v.AuxInt = int32ToAuxInt(off1 + off2)
6863 v.Aux = symToAux(mergeSym(sym1, sym2))
6864 v.AddArg3(base, val, mem)
6865 return true
6866 }
6867
6868
6869 for {
6870 off := auxIntToInt32(v.AuxInt)
6871 sym := auxToSym(v.Aux)
6872 ptr := v_0
6873 if v_1.Op != OpAMD64MOVLconst {
6874 break
6875 }
6876 c := auxIntToInt32(v_1.AuxInt)
6877 mem := v_2
6878 v.reset(OpAMD64CMPBconstload)
6879 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
6880 v.Aux = symToAux(sym)
6881 v.AddArg2(ptr, mem)
6882 return true
6883 }
6884 return false
6885 }
6886 func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
6887 v_1 := v.Args[1]
6888 v_0 := v.Args[0]
6889 b := v.Block
6890
6891
6892 for {
6893 x := v_0
6894 if v_1.Op != OpAMD64MOVLconst {
6895 break
6896 }
6897 c := auxIntToInt32(v_1.AuxInt)
6898 v.reset(OpAMD64CMPLconst)
6899 v.AuxInt = int32ToAuxInt(c)
6900 v.AddArg(x)
6901 return true
6902 }
6903
6904
6905 for {
6906 if v_0.Op != OpAMD64MOVLconst {
6907 break
6908 }
6909 c := auxIntToInt32(v_0.AuxInt)
6910 x := v_1
6911 v.reset(OpAMD64InvertFlags)
6912 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
6913 v0.AuxInt = int32ToAuxInt(c)
6914 v0.AddArg(x)
6915 v.AddArg(v0)
6916 return true
6917 }
6918
6919
6920
6921 for {
6922 x := v_0
6923 y := v_1
6924 if !(canonLessThan(x, y)) {
6925 break
6926 }
6927 v.reset(OpAMD64InvertFlags)
6928 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
6929 v0.AddArg2(y, x)
6930 v.AddArg(v0)
6931 return true
6932 }
6933
6934
6935
6936 for {
6937 l := v_0
6938 if l.Op != OpAMD64MOVLload {
6939 break
6940 }
6941 off := auxIntToInt32(l.AuxInt)
6942 sym := auxToSym(l.Aux)
6943 mem := l.Args[1]
6944 ptr := l.Args[0]
6945 x := v_1
6946 if !(canMergeLoad(v, l) && clobber(l)) {
6947 break
6948 }
6949 v.reset(OpAMD64CMPLload)
6950 v.AuxInt = int32ToAuxInt(off)
6951 v.Aux = symToAux(sym)
6952 v.AddArg3(ptr, x, mem)
6953 return true
6954 }
6955
6956
6957
6958 for {
6959 x := v_0
6960 l := v_1
6961 if l.Op != OpAMD64MOVLload {
6962 break
6963 }
6964 off := auxIntToInt32(l.AuxInt)
6965 sym := auxToSym(l.Aux)
6966 mem := l.Args[1]
6967 ptr := l.Args[0]
6968 if !(canMergeLoad(v, l) && clobber(l)) {
6969 break
6970 }
6971 v.reset(OpAMD64InvertFlags)
6972 v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
6973 v0.AuxInt = int32ToAuxInt(off)
6974 v0.Aux = symToAux(sym)
6975 v0.AddArg3(ptr, x, mem)
6976 v.AddArg(v0)
6977 return true
6978 }
6979 return false
6980 }
6981 func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
6982 v_0 := v.Args[0]
6983 b := v.Block
6984
6985
6986
6987 for {
6988 y := auxIntToInt32(v.AuxInt)
6989 if v_0.Op != OpAMD64MOVLconst {
6990 break
6991 }
6992 x := auxIntToInt32(v_0.AuxInt)
6993 if !(x == y) {
6994 break
6995 }
6996 v.reset(OpAMD64FlagEQ)
6997 return true
6998 }
6999
7000
7001
7002 for {
7003 y := auxIntToInt32(v.AuxInt)
7004 if v_0.Op != OpAMD64MOVLconst {
7005 break
7006 }
7007 x := auxIntToInt32(v_0.AuxInt)
7008 if !(x < y && uint32(x) < uint32(y)) {
7009 break
7010 }
7011 v.reset(OpAMD64FlagLT_ULT)
7012 return true
7013 }
7014
7015
7016
7017 for {
7018 y := auxIntToInt32(v.AuxInt)
7019 if v_0.Op != OpAMD64MOVLconst {
7020 break
7021 }
7022 x := auxIntToInt32(v_0.AuxInt)
7023 if !(x < y && uint32(x) > uint32(y)) {
7024 break
7025 }
7026 v.reset(OpAMD64FlagLT_UGT)
7027 return true
7028 }
7029
7030
7031
7032 for {
7033 y := auxIntToInt32(v.AuxInt)
7034 if v_0.Op != OpAMD64MOVLconst {
7035 break
7036 }
7037 x := auxIntToInt32(v_0.AuxInt)
7038 if !(x > y && uint32(x) < uint32(y)) {
7039 break
7040 }
7041 v.reset(OpAMD64FlagGT_ULT)
7042 return true
7043 }
7044
7045
7046
7047 for {
7048 y := auxIntToInt32(v.AuxInt)
7049 if v_0.Op != OpAMD64MOVLconst {
7050 break
7051 }
7052 x := auxIntToInt32(v_0.AuxInt)
7053 if !(x > y && uint32(x) > uint32(y)) {
7054 break
7055 }
7056 v.reset(OpAMD64FlagGT_UGT)
7057 return true
7058 }
7059
7060
7061
7062 for {
7063 n := auxIntToInt32(v.AuxInt)
7064 if v_0.Op != OpAMD64SHRLconst {
7065 break
7066 }
7067 c := auxIntToInt8(v_0.AuxInt)
7068 if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
7069 break
7070 }
7071 v.reset(OpAMD64FlagLT_ULT)
7072 return true
7073 }
7074
7075
7076
7077 for {
7078 n := auxIntToInt32(v.AuxInt)
7079 if v_0.Op != OpAMD64ANDLconst {
7080 break
7081 }
7082 m := auxIntToInt32(v_0.AuxInt)
7083 if !(0 <= m && m < n) {
7084 break
7085 }
7086 v.reset(OpAMD64FlagLT_ULT)
7087 return true
7088 }
7089
7090
7091
7092 for {
7093 if auxIntToInt32(v.AuxInt) != 0 {
7094 break
7095 }
7096 a := v_0
7097 if a.Op != OpAMD64ANDL {
7098 break
7099 }
7100 y := a.Args[1]
7101 x := a.Args[0]
7102 if !(a.Uses == 1) {
7103 break
7104 }
7105 v.reset(OpAMD64TESTL)
7106 v.AddArg2(x, y)
7107 return true
7108 }
7109
7110
7111
7112 for {
7113 if auxIntToInt32(v.AuxInt) != 0 {
7114 break
7115 }
7116 a := v_0
7117 if a.Op != OpAMD64ANDLconst {
7118 break
7119 }
7120 c := auxIntToInt32(a.AuxInt)
7121 x := a.Args[0]
7122 if !(a.Uses == 1) {
7123 break
7124 }
7125 v.reset(OpAMD64TESTLconst)
7126 v.AuxInt = int32ToAuxInt(c)
7127 v.AddArg(x)
7128 return true
7129 }
7130
7131
7132 for {
7133 if auxIntToInt32(v.AuxInt) != 0 {
7134 break
7135 }
7136 x := v_0
7137 v.reset(OpAMD64TESTL)
7138 v.AddArg2(x, x)
7139 return true
7140 }
7141
7142
7143
7144 for {
7145 c := auxIntToInt32(v.AuxInt)
7146 l := v_0
7147 if l.Op != OpAMD64MOVLload {
7148 break
7149 }
7150 off := auxIntToInt32(l.AuxInt)
7151 sym := auxToSym(l.Aux)
7152 mem := l.Args[1]
7153 ptr := l.Args[0]
7154 if !(l.Uses == 1 && clobber(l)) {
7155 break
7156 }
7157 b = l.Block
7158 v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
7159 v.copyOf(v0)
7160 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
7161 v0.Aux = symToAux(sym)
7162 v0.AddArg2(ptr, mem)
7163 return true
7164 }
7165 return false
7166 }
7167 func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool {
7168 v_1 := v.Args[1]
7169 v_0 := v.Args[0]
7170
7171
7172
7173 for {
7174 valoff1 := auxIntToValAndOff(v.AuxInt)
7175 sym := auxToSym(v.Aux)
7176 if v_0.Op != OpAMD64ADDQconst {
7177 break
7178 }
7179 off2 := auxIntToInt32(v_0.AuxInt)
7180 base := v_0.Args[0]
7181 mem := v_1
7182 if !(ValAndOff(valoff1).canAdd32(off2)) {
7183 break
7184 }
7185 v.reset(OpAMD64CMPLconstload)
7186 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7187 v.Aux = symToAux(sym)
7188 v.AddArg2(base, mem)
7189 return true
7190 }
7191
7192
7193
7194 for {
7195 valoff1 := auxIntToValAndOff(v.AuxInt)
7196 sym1 := auxToSym(v.Aux)
7197 if v_0.Op != OpAMD64LEAQ {
7198 break
7199 }
7200 off2 := auxIntToInt32(v_0.AuxInt)
7201 sym2 := auxToSym(v_0.Aux)
7202 base := v_0.Args[0]
7203 mem := v_1
7204 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
7205 break
7206 }
7207 v.reset(OpAMD64CMPLconstload)
7208 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7209 v.Aux = symToAux(mergeSym(sym1, sym2))
7210 v.AddArg2(base, mem)
7211 return true
7212 }
7213 return false
7214 }
7215 func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool {
7216 v_2 := v.Args[2]
7217 v_1 := v.Args[1]
7218 v_0 := v.Args[0]
7219
7220
7221
7222 for {
7223 off1 := auxIntToInt32(v.AuxInt)
7224 sym := auxToSym(v.Aux)
7225 if v_0.Op != OpAMD64ADDQconst {
7226 break
7227 }
7228 off2 := auxIntToInt32(v_0.AuxInt)
7229 base := v_0.Args[0]
7230 val := v_1
7231 mem := v_2
7232 if !(is32Bit(int64(off1) + int64(off2))) {
7233 break
7234 }
7235 v.reset(OpAMD64CMPLload)
7236 v.AuxInt = int32ToAuxInt(off1 + off2)
7237 v.Aux = symToAux(sym)
7238 v.AddArg3(base, val, mem)
7239 return true
7240 }
7241
7242
7243
7244 for {
7245 off1 := auxIntToInt32(v.AuxInt)
7246 sym1 := auxToSym(v.Aux)
7247 if v_0.Op != OpAMD64LEAQ {
7248 break
7249 }
7250 off2 := auxIntToInt32(v_0.AuxInt)
7251 sym2 := auxToSym(v_0.Aux)
7252 base := v_0.Args[0]
7253 val := v_1
7254 mem := v_2
7255 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
7256 break
7257 }
7258 v.reset(OpAMD64CMPLload)
7259 v.AuxInt = int32ToAuxInt(off1 + off2)
7260 v.Aux = symToAux(mergeSym(sym1, sym2))
7261 v.AddArg3(base, val, mem)
7262 return true
7263 }
7264
7265
7266 for {
7267 off := auxIntToInt32(v.AuxInt)
7268 sym := auxToSym(v.Aux)
7269 ptr := v_0
7270 if v_1.Op != OpAMD64MOVLconst {
7271 break
7272 }
7273 c := auxIntToInt32(v_1.AuxInt)
7274 mem := v_2
7275 v.reset(OpAMD64CMPLconstload)
7276 v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
7277 v.Aux = symToAux(sym)
7278 v.AddArg2(ptr, mem)
7279 return true
7280 }
7281 return false
7282 }
7283 func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
7284 v_1 := v.Args[1]
7285 v_0 := v.Args[0]
7286 b := v.Block
7287
7288
7289
7290 for {
7291 x := v_0
7292 if v_1.Op != OpAMD64MOVQconst {
7293 break
7294 }
7295 c := auxIntToInt64(v_1.AuxInt)
7296 if !(is32Bit(c)) {
7297 break
7298 }
7299 v.reset(OpAMD64CMPQconst)
7300 v.AuxInt = int32ToAuxInt(int32(c))
7301 v.AddArg(x)
7302 return true
7303 }
7304
7305
7306
7307 for {
7308 if v_0.Op != OpAMD64MOVQconst {
7309 break
7310 }
7311 c := auxIntToInt64(v_0.AuxInt)
7312 x := v_1
7313 if !(is32Bit(c)) {
7314 break
7315 }
7316 v.reset(OpAMD64InvertFlags)
7317 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
7318 v0.AuxInt = int32ToAuxInt(int32(c))
7319 v0.AddArg(x)
7320 v.AddArg(v0)
7321 return true
7322 }
7323
7324
7325
7326 for {
7327 x := v_0
7328 y := v_1
7329 if !(canonLessThan(x, y)) {
7330 break
7331 }
7332 v.reset(OpAMD64InvertFlags)
7333 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
7334 v0.AddArg2(y, x)
7335 v.AddArg(v0)
7336 return true
7337 }
7338
7339
7340
7341 for {
7342 if v_0.Op != OpAMD64MOVQconst {
7343 break
7344 }
7345 x := auxIntToInt64(v_0.AuxInt)
7346 if v_1.Op != OpAMD64MOVQconst {
7347 break
7348 }
7349 y := auxIntToInt64(v_1.AuxInt)
7350 if !(x == y) {
7351 break
7352 }
7353 v.reset(OpAMD64FlagEQ)
7354 return true
7355 }
7356
7357
7358
7359 for {
7360 if v_0.Op != OpAMD64MOVQconst {
7361 break
7362 }
7363 x := auxIntToInt64(v_0.AuxInt)
7364 if v_1.Op != OpAMD64MOVQconst {
7365 break
7366 }
7367 y := auxIntToInt64(v_1.AuxInt)
7368 if !(x < y && uint64(x) < uint64(y)) {
7369 break
7370 }
7371 v.reset(OpAMD64FlagLT_ULT)
7372 return true
7373 }
7374
7375
7376
7377 for {
7378 if v_0.Op != OpAMD64MOVQconst {
7379 break
7380 }
7381 x := auxIntToInt64(v_0.AuxInt)
7382 if v_1.Op != OpAMD64MOVQconst {
7383 break
7384 }
7385 y := auxIntToInt64(v_1.AuxInt)
7386 if !(x < y && uint64(x) > uint64(y)) {
7387 break
7388 }
7389 v.reset(OpAMD64FlagLT_UGT)
7390 return true
7391 }
7392
7393
7394
7395 for {
7396 if v_0.Op != OpAMD64MOVQconst {
7397 break
7398 }
7399 x := auxIntToInt64(v_0.AuxInt)
7400 if v_1.Op != OpAMD64MOVQconst {
7401 break
7402 }
7403 y := auxIntToInt64(v_1.AuxInt)
7404 if !(x > y && uint64(x) < uint64(y)) {
7405 break
7406 }
7407 v.reset(OpAMD64FlagGT_ULT)
7408 return true
7409 }
7410
7411
7412
7413 for {
7414 if v_0.Op != OpAMD64MOVQconst {
7415 break
7416 }
7417 x := auxIntToInt64(v_0.AuxInt)
7418 if v_1.Op != OpAMD64MOVQconst {
7419 break
7420 }
7421 y := auxIntToInt64(v_1.AuxInt)
7422 if !(x > y && uint64(x) > uint64(y)) {
7423 break
7424 }
7425 v.reset(OpAMD64FlagGT_UGT)
7426 return true
7427 }
7428
7429
7430
7431 for {
7432 l := v_0
7433 if l.Op != OpAMD64MOVQload {
7434 break
7435 }
7436 off := auxIntToInt32(l.AuxInt)
7437 sym := auxToSym(l.Aux)
7438 mem := l.Args[1]
7439 ptr := l.Args[0]
7440 x := v_1
7441 if !(canMergeLoad(v, l) && clobber(l)) {
7442 break
7443 }
7444 v.reset(OpAMD64CMPQload)
7445 v.AuxInt = int32ToAuxInt(off)
7446 v.Aux = symToAux(sym)
7447 v.AddArg3(ptr, x, mem)
7448 return true
7449 }
7450
7451
7452
7453 for {
7454 x := v_0
7455 l := v_1
7456 if l.Op != OpAMD64MOVQload {
7457 break
7458 }
7459 off := auxIntToInt32(l.AuxInt)
7460 sym := auxToSym(l.Aux)
7461 mem := l.Args[1]
7462 ptr := l.Args[0]
7463 if !(canMergeLoad(v, l) && clobber(l)) {
7464 break
7465 }
7466 v.reset(OpAMD64InvertFlags)
7467 v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
7468 v0.AuxInt = int32ToAuxInt(off)
7469 v0.Aux = symToAux(sym)
7470 v0.AddArg3(ptr, x, mem)
7471 v.AddArg(v0)
7472 return true
7473 }
7474 return false
7475 }
7476 func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
7477 v_0 := v.Args[0]
7478 b := v.Block
7479
7480
7481
7482 for {
7483 y := auxIntToInt32(v.AuxInt)
7484 if v_0.Op != OpAMD64MOVQconst {
7485 break
7486 }
7487 x := auxIntToInt64(v_0.AuxInt)
7488 if !(x == int64(y)) {
7489 break
7490 }
7491 v.reset(OpAMD64FlagEQ)
7492 return true
7493 }
7494
7495
7496
7497 for {
7498 y := auxIntToInt32(v.AuxInt)
7499 if v_0.Op != OpAMD64MOVQconst {
7500 break
7501 }
7502 x := auxIntToInt64(v_0.AuxInt)
7503 if !(x < int64(y) && uint64(x) < uint64(int64(y))) {
7504 break
7505 }
7506 v.reset(OpAMD64FlagLT_ULT)
7507 return true
7508 }
7509
7510
7511
7512 for {
7513 y := auxIntToInt32(v.AuxInt)
7514 if v_0.Op != OpAMD64MOVQconst {
7515 break
7516 }
7517 x := auxIntToInt64(v_0.AuxInt)
7518 if !(x < int64(y) && uint64(x) > uint64(int64(y))) {
7519 break
7520 }
7521 v.reset(OpAMD64FlagLT_UGT)
7522 return true
7523 }
7524
7525
7526
7527 for {
7528 y := auxIntToInt32(v.AuxInt)
7529 if v_0.Op != OpAMD64MOVQconst {
7530 break
7531 }
7532 x := auxIntToInt64(v_0.AuxInt)
7533 if !(x > int64(y) && uint64(x) < uint64(int64(y))) {
7534 break
7535 }
7536 v.reset(OpAMD64FlagGT_ULT)
7537 return true
7538 }
7539
7540
7541
7542 for {
7543 y := auxIntToInt32(v.AuxInt)
7544 if v_0.Op != OpAMD64MOVQconst {
7545 break
7546 }
7547 x := auxIntToInt64(v_0.AuxInt)
7548 if !(x > int64(y) && uint64(x) > uint64(int64(y))) {
7549 break
7550 }
7551 v.reset(OpAMD64FlagGT_UGT)
7552 return true
7553 }
7554
7555
7556
7557 for {
7558 c := auxIntToInt32(v.AuxInt)
7559 if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) {
7560 break
7561 }
7562 v.reset(OpAMD64FlagLT_ULT)
7563 return true
7564 }
7565
7566
7567
7568 for {
7569 c := auxIntToInt32(v.AuxInt)
7570 if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) {
7571 break
7572 }
7573 v.reset(OpAMD64FlagLT_ULT)
7574 return true
7575 }
7576
7577
7578
7579 for {
7580 n := auxIntToInt32(v.AuxInt)
7581 if v_0.Op != OpAMD64SHRQconst {
7582 break
7583 }
7584 c := auxIntToInt8(v_0.AuxInt)
7585 if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
7586 break
7587 }
7588 v.reset(OpAMD64FlagLT_ULT)
7589 return true
7590 }
7591
7592
7593
7594 for {
7595 n := auxIntToInt32(v.AuxInt)
7596 if v_0.Op != OpAMD64ANDQconst {
7597 break
7598 }
7599 m := auxIntToInt32(v_0.AuxInt)
7600 if !(0 <= m && m < n) {
7601 break
7602 }
7603 v.reset(OpAMD64FlagLT_ULT)
7604 return true
7605 }
7606
7607
7608
7609 for {
7610 n := auxIntToInt32(v.AuxInt)
7611 if v_0.Op != OpAMD64ANDLconst {
7612 break
7613 }
7614 m := auxIntToInt32(v_0.AuxInt)
7615 if !(0 <= m && m < n) {
7616 break
7617 }
7618 v.reset(OpAMD64FlagLT_ULT)
7619 return true
7620 }
7621
7622
7623
7624 for {
7625 if auxIntToInt32(v.AuxInt) != 0 {
7626 break
7627 }
7628 a := v_0
7629 if a.Op != OpAMD64ANDQ {
7630 break
7631 }
7632 y := a.Args[1]
7633 x := a.Args[0]
7634 if !(a.Uses == 1) {
7635 break
7636 }
7637 v.reset(OpAMD64TESTQ)
7638 v.AddArg2(x, y)
7639 return true
7640 }
7641
7642
7643
7644 for {
7645 if auxIntToInt32(v.AuxInt) != 0 {
7646 break
7647 }
7648 a := v_0
7649 if a.Op != OpAMD64ANDQconst {
7650 break
7651 }
7652 c := auxIntToInt32(a.AuxInt)
7653 x := a.Args[0]
7654 if !(a.Uses == 1) {
7655 break
7656 }
7657 v.reset(OpAMD64TESTQconst)
7658 v.AuxInt = int32ToAuxInt(c)
7659 v.AddArg(x)
7660 return true
7661 }
7662
7663
7664 for {
7665 if auxIntToInt32(v.AuxInt) != 0 {
7666 break
7667 }
7668 x := v_0
7669 v.reset(OpAMD64TESTQ)
7670 v.AddArg2(x, x)
7671 return true
7672 }
7673
7674
7675
7676 for {
7677 c := auxIntToInt32(v.AuxInt)
7678 l := v_0
7679 if l.Op != OpAMD64MOVQload {
7680 break
7681 }
7682 off := auxIntToInt32(l.AuxInt)
7683 sym := auxToSym(l.Aux)
7684 mem := l.Args[1]
7685 ptr := l.Args[0]
7686 if !(l.Uses == 1 && clobber(l)) {
7687 break
7688 }
7689 b = l.Block
7690 v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
7691 v.copyOf(v0)
7692 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
7693 v0.Aux = symToAux(sym)
7694 v0.AddArg2(ptr, mem)
7695 return true
7696 }
7697 return false
7698 }
7699 func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool {
7700 v_1 := v.Args[1]
7701 v_0 := v.Args[0]
7702
7703
7704
7705 for {
7706 valoff1 := auxIntToValAndOff(v.AuxInt)
7707 sym := auxToSym(v.Aux)
7708 if v_0.Op != OpAMD64ADDQconst {
7709 break
7710 }
7711 off2 := auxIntToInt32(v_0.AuxInt)
7712 base := v_0.Args[0]
7713 mem := v_1
7714 if !(ValAndOff(valoff1).canAdd32(off2)) {
7715 break
7716 }
7717 v.reset(OpAMD64CMPQconstload)
7718 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7719 v.Aux = symToAux(sym)
7720 v.AddArg2(base, mem)
7721 return true
7722 }
7723
7724
7725
7726 for {
7727 valoff1 := auxIntToValAndOff(v.AuxInt)
7728 sym1 := auxToSym(v.Aux)
7729 if v_0.Op != OpAMD64LEAQ {
7730 break
7731 }
7732 off2 := auxIntToInt32(v_0.AuxInt)
7733 sym2 := auxToSym(v_0.Aux)
7734 base := v_0.Args[0]
7735 mem := v_1
7736 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
7737 break
7738 }
7739 v.reset(OpAMD64CMPQconstload)
7740 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7741 v.Aux = symToAux(mergeSym(sym1, sym2))
7742 v.AddArg2(base, mem)
7743 return true
7744 }
7745 return false
7746 }
7747 func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
7748 v_2 := v.Args[2]
7749 v_1 := v.Args[1]
7750 v_0 := v.Args[0]
7751
7752
7753
7754 for {
7755 off1 := auxIntToInt32(v.AuxInt)
7756 sym := auxToSym(v.Aux)
7757 if v_0.Op != OpAMD64ADDQconst {
7758 break
7759 }
7760 off2 := auxIntToInt32(v_0.AuxInt)
7761 base := v_0.Args[0]
7762 val := v_1
7763 mem := v_2
7764 if !(is32Bit(int64(off1) + int64(off2))) {
7765 break
7766 }
7767 v.reset(OpAMD64CMPQload)
7768 v.AuxInt = int32ToAuxInt(off1 + off2)
7769 v.Aux = symToAux(sym)
7770 v.AddArg3(base, val, mem)
7771 return true
7772 }
7773
7774
7775
7776 for {
7777 off1 := auxIntToInt32(v.AuxInt)
7778 sym1 := auxToSym(v.Aux)
7779 if v_0.Op != OpAMD64LEAQ {
7780 break
7781 }
7782 off2 := auxIntToInt32(v_0.AuxInt)
7783 sym2 := auxToSym(v_0.Aux)
7784 base := v_0.Args[0]
7785 val := v_1
7786 mem := v_2
7787 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
7788 break
7789 }
7790 v.reset(OpAMD64CMPQload)
7791 v.AuxInt = int32ToAuxInt(off1 + off2)
7792 v.Aux = symToAux(mergeSym(sym1, sym2))
7793 v.AddArg3(base, val, mem)
7794 return true
7795 }
7796
7797
7798
7799 for {
7800 off := auxIntToInt32(v.AuxInt)
7801 sym := auxToSym(v.Aux)
7802 ptr := v_0
7803 if v_1.Op != OpAMD64MOVQconst {
7804 break
7805 }
7806 c := auxIntToInt64(v_1.AuxInt)
7807 mem := v_2
7808 if !(validVal(c)) {
7809 break
7810 }
7811 v.reset(OpAMD64CMPQconstload)
7812 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
7813 v.Aux = symToAux(sym)
7814 v.AddArg2(ptr, mem)
7815 return true
7816 }
7817 return false
7818 }
7819 func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
7820 v_1 := v.Args[1]
7821 v_0 := v.Args[0]
7822 b := v.Block
7823
7824
7825 for {
7826 x := v_0
7827 if v_1.Op != OpAMD64MOVLconst {
7828 break
7829 }
7830 c := auxIntToInt32(v_1.AuxInt)
7831 v.reset(OpAMD64CMPWconst)
7832 v.AuxInt = int16ToAuxInt(int16(c))
7833 v.AddArg(x)
7834 return true
7835 }
7836
7837
7838 for {
7839 if v_0.Op != OpAMD64MOVLconst {
7840 break
7841 }
7842 c := auxIntToInt32(v_0.AuxInt)
7843 x := v_1
7844 v.reset(OpAMD64InvertFlags)
7845 v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
7846 v0.AuxInt = int16ToAuxInt(int16(c))
7847 v0.AddArg(x)
7848 v.AddArg(v0)
7849 return true
7850 }
7851
7852
7853
7854 for {
7855 x := v_0
7856 y := v_1
7857 if !(canonLessThan(x, y)) {
7858 break
7859 }
7860 v.reset(OpAMD64InvertFlags)
7861 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
7862 v0.AddArg2(y, x)
7863 v.AddArg(v0)
7864 return true
7865 }
7866
7867
7868
7869 for {
7870 l := v_0
7871 if l.Op != OpAMD64MOVWload {
7872 break
7873 }
7874 off := auxIntToInt32(l.AuxInt)
7875 sym := auxToSym(l.Aux)
7876 mem := l.Args[1]
7877 ptr := l.Args[0]
7878 x := v_1
7879 if !(canMergeLoad(v, l) && clobber(l)) {
7880 break
7881 }
7882 v.reset(OpAMD64CMPWload)
7883 v.AuxInt = int32ToAuxInt(off)
7884 v.Aux = symToAux(sym)
7885 v.AddArg3(ptr, x, mem)
7886 return true
7887 }
7888
7889
7890
7891 for {
7892 x := v_0
7893 l := v_1
7894 if l.Op != OpAMD64MOVWload {
7895 break
7896 }
7897 off := auxIntToInt32(l.AuxInt)
7898 sym := auxToSym(l.Aux)
7899 mem := l.Args[1]
7900 ptr := l.Args[0]
7901 if !(canMergeLoad(v, l) && clobber(l)) {
7902 break
7903 }
7904 v.reset(OpAMD64InvertFlags)
7905 v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
7906 v0.AuxInt = int32ToAuxInt(off)
7907 v0.Aux = symToAux(sym)
7908 v0.AddArg3(ptr, x, mem)
7909 v.AddArg(v0)
7910 return true
7911 }
7912 return false
7913 }
7914 func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
7915 v_0 := v.Args[0]
7916 b := v.Block
7917
7918
7919
7920 for {
7921 y := auxIntToInt16(v.AuxInt)
7922 if v_0.Op != OpAMD64MOVLconst {
7923 break
7924 }
7925 x := auxIntToInt32(v_0.AuxInt)
7926 if !(int16(x) == y) {
7927 break
7928 }
7929 v.reset(OpAMD64FlagEQ)
7930 return true
7931 }
7932
7933
7934
7935 for {
7936 y := auxIntToInt16(v.AuxInt)
7937 if v_0.Op != OpAMD64MOVLconst {
7938 break
7939 }
7940 x := auxIntToInt32(v_0.AuxInt)
7941 if !(int16(x) < y && uint16(x) < uint16(y)) {
7942 break
7943 }
7944 v.reset(OpAMD64FlagLT_ULT)
7945 return true
7946 }
7947
7948
7949
7950 for {
7951 y := auxIntToInt16(v.AuxInt)
7952 if v_0.Op != OpAMD64MOVLconst {
7953 break
7954 }
7955 x := auxIntToInt32(v_0.AuxInt)
7956 if !(int16(x) < y && uint16(x) > uint16(y)) {
7957 break
7958 }
7959 v.reset(OpAMD64FlagLT_UGT)
7960 return true
7961 }
7962
7963
7964
7965 for {
7966 y := auxIntToInt16(v.AuxInt)
7967 if v_0.Op != OpAMD64MOVLconst {
7968 break
7969 }
7970 x := auxIntToInt32(v_0.AuxInt)
7971 if !(int16(x) > y && uint16(x) < uint16(y)) {
7972 break
7973 }
7974 v.reset(OpAMD64FlagGT_ULT)
7975 return true
7976 }
7977
7978
7979
7980 for {
7981 y := auxIntToInt16(v.AuxInt)
7982 if v_0.Op != OpAMD64MOVLconst {
7983 break
7984 }
7985 x := auxIntToInt32(v_0.AuxInt)
7986 if !(int16(x) > y && uint16(x) > uint16(y)) {
7987 break
7988 }
7989 v.reset(OpAMD64FlagGT_UGT)
7990 return true
7991 }
7992
7993
7994
7995 for {
7996 n := auxIntToInt16(v.AuxInt)
7997 if v_0.Op != OpAMD64ANDLconst {
7998 break
7999 }
8000 m := auxIntToInt32(v_0.AuxInt)
8001 if !(0 <= int16(m) && int16(m) < n) {
8002 break
8003 }
8004 v.reset(OpAMD64FlagLT_ULT)
8005 return true
8006 }
8007
8008
8009
8010 for {
8011 if auxIntToInt16(v.AuxInt) != 0 {
8012 break
8013 }
8014 a := v_0
8015 if a.Op != OpAMD64ANDL {
8016 break
8017 }
8018 y := a.Args[1]
8019 x := a.Args[0]
8020 if !(a.Uses == 1) {
8021 break
8022 }
8023 v.reset(OpAMD64TESTW)
8024 v.AddArg2(x, y)
8025 return true
8026 }
8027
8028
8029
8030 for {
8031 if auxIntToInt16(v.AuxInt) != 0 {
8032 break
8033 }
8034 a := v_0
8035 if a.Op != OpAMD64ANDLconst {
8036 break
8037 }
8038 c := auxIntToInt32(a.AuxInt)
8039 x := a.Args[0]
8040 if !(a.Uses == 1) {
8041 break
8042 }
8043 v.reset(OpAMD64TESTWconst)
8044 v.AuxInt = int16ToAuxInt(int16(c))
8045 v.AddArg(x)
8046 return true
8047 }
8048
8049
8050 for {
8051 if auxIntToInt16(v.AuxInt) != 0 {
8052 break
8053 }
8054 x := v_0
8055 v.reset(OpAMD64TESTW)
8056 v.AddArg2(x, x)
8057 return true
8058 }
8059
8060
8061
8062 for {
8063 c := auxIntToInt16(v.AuxInt)
8064 l := v_0
8065 if l.Op != OpAMD64MOVWload {
8066 break
8067 }
8068 off := auxIntToInt32(l.AuxInt)
8069 sym := auxToSym(l.Aux)
8070 mem := l.Args[1]
8071 ptr := l.Args[0]
8072 if !(l.Uses == 1 && clobber(l)) {
8073 break
8074 }
8075 b = l.Block
8076 v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
8077 v.copyOf(v0)
8078 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
8079 v0.Aux = symToAux(sym)
8080 v0.AddArg2(ptr, mem)
8081 return true
8082 }
8083 return false
8084 }
8085 func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool {
8086 v_1 := v.Args[1]
8087 v_0 := v.Args[0]
8088
8089
8090
8091 for {
8092 valoff1 := auxIntToValAndOff(v.AuxInt)
8093 sym := auxToSym(v.Aux)
8094 if v_0.Op != OpAMD64ADDQconst {
8095 break
8096 }
8097 off2 := auxIntToInt32(v_0.AuxInt)
8098 base := v_0.Args[0]
8099 mem := v_1
8100 if !(ValAndOff(valoff1).canAdd32(off2)) {
8101 break
8102 }
8103 v.reset(OpAMD64CMPWconstload)
8104 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
8105 v.Aux = symToAux(sym)
8106 v.AddArg2(base, mem)
8107 return true
8108 }
8109
8110
8111
8112 for {
8113 valoff1 := auxIntToValAndOff(v.AuxInt)
8114 sym1 := auxToSym(v.Aux)
8115 if v_0.Op != OpAMD64LEAQ {
8116 break
8117 }
8118 off2 := auxIntToInt32(v_0.AuxInt)
8119 sym2 := auxToSym(v_0.Aux)
8120 base := v_0.Args[0]
8121 mem := v_1
8122 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
8123 break
8124 }
8125 v.reset(OpAMD64CMPWconstload)
8126 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
8127 v.Aux = symToAux(mergeSym(sym1, sym2))
8128 v.AddArg2(base, mem)
8129 return true
8130 }
8131 return false
8132 }
8133 func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
8134 v_2 := v.Args[2]
8135 v_1 := v.Args[1]
8136 v_0 := v.Args[0]
8137
8138
8139
8140 for {
8141 off1 := auxIntToInt32(v.AuxInt)
8142 sym := auxToSym(v.Aux)
8143 if v_0.Op != OpAMD64ADDQconst {
8144 break
8145 }
8146 off2 := auxIntToInt32(v_0.AuxInt)
8147 base := v_0.Args[0]
8148 val := v_1
8149 mem := v_2
8150 if !(is32Bit(int64(off1) + int64(off2))) {
8151 break
8152 }
8153 v.reset(OpAMD64CMPWload)
8154 v.AuxInt = int32ToAuxInt(off1 + off2)
8155 v.Aux = symToAux(sym)
8156 v.AddArg3(base, val, mem)
8157 return true
8158 }
8159
8160
8161
8162 for {
8163 off1 := auxIntToInt32(v.AuxInt)
8164 sym1 := auxToSym(v.Aux)
8165 if v_0.Op != OpAMD64LEAQ {
8166 break
8167 }
8168 off2 := auxIntToInt32(v_0.AuxInt)
8169 sym2 := auxToSym(v_0.Aux)
8170 base := v_0.Args[0]
8171 val := v_1
8172 mem := v_2
8173 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8174 break
8175 }
8176 v.reset(OpAMD64CMPWload)
8177 v.AuxInt = int32ToAuxInt(off1 + off2)
8178 v.Aux = symToAux(mergeSym(sym1, sym2))
8179 v.AddArg3(base, val, mem)
8180 return true
8181 }
8182
8183
8184 for {
8185 off := auxIntToInt32(v.AuxInt)
8186 sym := auxToSym(v.Aux)
8187 ptr := v_0
8188 if v_1.Op != OpAMD64MOVLconst {
8189 break
8190 }
8191 c := auxIntToInt32(v_1.AuxInt)
8192 mem := v_2
8193 v.reset(OpAMD64CMPWconstload)
8194 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
8195 v.Aux = symToAux(sym)
8196 v.AddArg2(ptr, mem)
8197 return true
8198 }
8199 return false
8200 }
8201 func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool {
8202 v_3 := v.Args[3]
8203 v_2 := v.Args[2]
8204 v_1 := v.Args[1]
8205 v_0 := v.Args[0]
8206
8207
8208
8209 for {
8210 off1 := auxIntToInt32(v.AuxInt)
8211 sym := auxToSym(v.Aux)
8212 if v_0.Op != OpAMD64ADDQconst {
8213 break
8214 }
8215 off2 := auxIntToInt32(v_0.AuxInt)
8216 ptr := v_0.Args[0]
8217 old := v_1
8218 new_ := v_2
8219 mem := v_3
8220 if !(is32Bit(int64(off1) + int64(off2))) {
8221 break
8222 }
8223 v.reset(OpAMD64CMPXCHGLlock)
8224 v.AuxInt = int32ToAuxInt(off1 + off2)
8225 v.Aux = symToAux(sym)
8226 v.AddArg4(ptr, old, new_, mem)
8227 return true
8228 }
8229 return false
8230 }
8231 func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool {
8232 v_3 := v.Args[3]
8233 v_2 := v.Args[2]
8234 v_1 := v.Args[1]
8235 v_0 := v.Args[0]
8236
8237
8238
8239 for {
8240 off1 := auxIntToInt32(v.AuxInt)
8241 sym := auxToSym(v.Aux)
8242 if v_0.Op != OpAMD64ADDQconst {
8243 break
8244 }
8245 off2 := auxIntToInt32(v_0.AuxInt)
8246 ptr := v_0.Args[0]
8247 old := v_1
8248 new_ := v_2
8249 mem := v_3
8250 if !(is32Bit(int64(off1) + int64(off2))) {
8251 break
8252 }
8253 v.reset(OpAMD64CMPXCHGQlock)
8254 v.AuxInt = int32ToAuxInt(off1 + off2)
8255 v.Aux = symToAux(sym)
8256 v.AddArg4(ptr, old, new_, mem)
8257 return true
8258 }
8259 return false
8260 }
8261 func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool {
8262 v_1 := v.Args[1]
8263 v_0 := v.Args[0]
8264
8265
8266
8267 for {
8268 x := v_0
8269 l := v_1
8270 if l.Op != OpAMD64MOVSDload {
8271 break
8272 }
8273 off := auxIntToInt32(l.AuxInt)
8274 sym := auxToSym(l.Aux)
8275 mem := l.Args[1]
8276 ptr := l.Args[0]
8277 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
8278 break
8279 }
8280 v.reset(OpAMD64DIVSDload)
8281 v.AuxInt = int32ToAuxInt(off)
8282 v.Aux = symToAux(sym)
8283 v.AddArg3(x, ptr, mem)
8284 return true
8285 }
8286 return false
8287 }
8288 func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool {
8289 v_2 := v.Args[2]
8290 v_1 := v.Args[1]
8291 v_0 := v.Args[0]
8292
8293
8294
8295 for {
8296 off1 := auxIntToInt32(v.AuxInt)
8297 sym := auxToSym(v.Aux)
8298 val := v_0
8299 if v_1.Op != OpAMD64ADDQconst {
8300 break
8301 }
8302 off2 := auxIntToInt32(v_1.AuxInt)
8303 base := v_1.Args[0]
8304 mem := v_2
8305 if !(is32Bit(int64(off1) + int64(off2))) {
8306 break
8307 }
8308 v.reset(OpAMD64DIVSDload)
8309 v.AuxInt = int32ToAuxInt(off1 + off2)
8310 v.Aux = symToAux(sym)
8311 v.AddArg3(val, base, mem)
8312 return true
8313 }
8314
8315
8316
8317 for {
8318 off1 := auxIntToInt32(v.AuxInt)
8319 sym1 := auxToSym(v.Aux)
8320 val := v_0
8321 if v_1.Op != OpAMD64LEAQ {
8322 break
8323 }
8324 off2 := auxIntToInt32(v_1.AuxInt)
8325 sym2 := auxToSym(v_1.Aux)
8326 base := v_1.Args[0]
8327 mem := v_2
8328 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8329 break
8330 }
8331 v.reset(OpAMD64DIVSDload)
8332 v.AuxInt = int32ToAuxInt(off1 + off2)
8333 v.Aux = symToAux(mergeSym(sym1, sym2))
8334 v.AddArg3(val, base, mem)
8335 return true
8336 }
8337 return false
8338 }
8339 func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool {
8340 v_1 := v.Args[1]
8341 v_0 := v.Args[0]
8342
8343
8344
8345 for {
8346 x := v_0
8347 l := v_1
8348 if l.Op != OpAMD64MOVSSload {
8349 break
8350 }
8351 off := auxIntToInt32(l.AuxInt)
8352 sym := auxToSym(l.Aux)
8353 mem := l.Args[1]
8354 ptr := l.Args[0]
8355 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
8356 break
8357 }
8358 v.reset(OpAMD64DIVSSload)
8359 v.AuxInt = int32ToAuxInt(off)
8360 v.Aux = symToAux(sym)
8361 v.AddArg3(x, ptr, mem)
8362 return true
8363 }
8364 return false
8365 }
8366 func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool {
8367 v_2 := v.Args[2]
8368 v_1 := v.Args[1]
8369 v_0 := v.Args[0]
8370
8371
8372
8373 for {
8374 off1 := auxIntToInt32(v.AuxInt)
8375 sym := auxToSym(v.Aux)
8376 val := v_0
8377 if v_1.Op != OpAMD64ADDQconst {
8378 break
8379 }
8380 off2 := auxIntToInt32(v_1.AuxInt)
8381 base := v_1.Args[0]
8382 mem := v_2
8383 if !(is32Bit(int64(off1) + int64(off2))) {
8384 break
8385 }
8386 v.reset(OpAMD64DIVSSload)
8387 v.AuxInt = int32ToAuxInt(off1 + off2)
8388 v.Aux = symToAux(sym)
8389 v.AddArg3(val, base, mem)
8390 return true
8391 }
8392
8393
8394
8395 for {
8396 off1 := auxIntToInt32(v.AuxInt)
8397 sym1 := auxToSym(v.Aux)
8398 val := v_0
8399 if v_1.Op != OpAMD64LEAQ {
8400 break
8401 }
8402 off2 := auxIntToInt32(v_1.AuxInt)
8403 sym2 := auxToSym(v_1.Aux)
8404 base := v_1.Args[0]
8405 mem := v_2
8406 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8407 break
8408 }
8409 v.reset(OpAMD64DIVSSload)
8410 v.AuxInt = int32ToAuxInt(off1 + off2)
8411 v.Aux = symToAux(mergeSym(sym1, sym2))
8412 v.AddArg3(val, base, mem)
8413 return true
8414 }
8415 return false
8416 }
8417 func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool {
8418 v_1 := v.Args[1]
8419 v_0 := v.Args[0]
8420
8421
8422
8423 for {
8424 x := v_0
8425 y := v_1
8426 if !(!x.rematerializeable() && y.rematerializeable()) {
8427 break
8428 }
8429 v.reset(OpAMD64HMULL)
8430 v.AddArg2(y, x)
8431 return true
8432 }
8433 return false
8434 }
8435 func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool {
8436 v_1 := v.Args[1]
8437 v_0 := v.Args[0]
8438
8439
8440
8441 for {
8442 x := v_0
8443 y := v_1
8444 if !(!x.rematerializeable() && y.rematerializeable()) {
8445 break
8446 }
8447 v.reset(OpAMD64HMULLU)
8448 v.AddArg2(y, x)
8449 return true
8450 }
8451 return false
8452 }
8453 func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool {
8454 v_1 := v.Args[1]
8455 v_0 := v.Args[0]
8456
8457
8458
8459 for {
8460 x := v_0
8461 y := v_1
8462 if !(!x.rematerializeable() && y.rematerializeable()) {
8463 break
8464 }
8465 v.reset(OpAMD64HMULQ)
8466 v.AddArg2(y, x)
8467 return true
8468 }
8469 return false
8470 }
8471 func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool {
8472 v_1 := v.Args[1]
8473 v_0 := v.Args[0]
8474
8475
8476
8477 for {
8478 x := v_0
8479 y := v_1
8480 if !(!x.rematerializeable() && y.rematerializeable()) {
8481 break
8482 }
8483 v.reset(OpAMD64HMULQU)
8484 v.AddArg2(y, x)
8485 return true
8486 }
8487 return false
8488 }
8489 func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool {
8490 v_0 := v.Args[0]
8491
8492
8493
8494 for {
8495 c := auxIntToInt32(v.AuxInt)
8496 s := auxToSym(v.Aux)
8497 if v_0.Op != OpAMD64ADDLconst {
8498 break
8499 }
8500 d := auxIntToInt32(v_0.AuxInt)
8501 x := v_0.Args[0]
8502 if !(is32Bit(int64(c) + int64(d))) {
8503 break
8504 }
8505 v.reset(OpAMD64LEAL)
8506 v.AuxInt = int32ToAuxInt(c + d)
8507 v.Aux = symToAux(s)
8508 v.AddArg(x)
8509 return true
8510 }
8511
8512
8513
8514 for {
8515 c := auxIntToInt32(v.AuxInt)
8516 s := auxToSym(v.Aux)
8517 if v_0.Op != OpAMD64ADDL {
8518 break
8519 }
8520 _ = v_0.Args[1]
8521 v_0_0 := v_0.Args[0]
8522 v_0_1 := v_0.Args[1]
8523 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
8524 x := v_0_0
8525 y := v_0_1
8526 if !(x.Op != OpSB && y.Op != OpSB) {
8527 continue
8528 }
8529 v.reset(OpAMD64LEAL1)
8530 v.AuxInt = int32ToAuxInt(c)
8531 v.Aux = symToAux(s)
8532 v.AddArg2(x, y)
8533 return true
8534 }
8535 break
8536 }
8537 return false
8538 }
8539 func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool {
8540 v_1 := v.Args[1]
8541 v_0 := v.Args[0]
8542
8543
8544
8545 for {
8546 c := auxIntToInt32(v.AuxInt)
8547 s := auxToSym(v.Aux)
8548 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8549 if v_0.Op != OpAMD64ADDLconst {
8550 continue
8551 }
8552 d := auxIntToInt32(v_0.AuxInt)
8553 x := v_0.Args[0]
8554 y := v_1
8555 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8556 continue
8557 }
8558 v.reset(OpAMD64LEAL1)
8559 v.AuxInt = int32ToAuxInt(c + d)
8560 v.Aux = symToAux(s)
8561 v.AddArg2(x, y)
8562 return true
8563 }
8564 break
8565 }
8566
8567
8568
8569 for {
8570 c := auxIntToInt32(v.AuxInt)
8571 s := auxToSym(v.Aux)
8572 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8573 x := v_0
8574 z := v_1
8575 if z.Op != OpAMD64ADDL {
8576 continue
8577 }
8578 y := z.Args[1]
8579 if y != z.Args[0] || !(x != z) {
8580 continue
8581 }
8582 v.reset(OpAMD64LEAL2)
8583 v.AuxInt = int32ToAuxInt(c)
8584 v.Aux = symToAux(s)
8585 v.AddArg2(x, y)
8586 return true
8587 }
8588 break
8589 }
8590
8591
8592 for {
8593 c := auxIntToInt32(v.AuxInt)
8594 s := auxToSym(v.Aux)
8595 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8596 x := v_0
8597 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
8598 continue
8599 }
8600 y := v_1.Args[0]
8601 v.reset(OpAMD64LEAL4)
8602 v.AuxInt = int32ToAuxInt(c)
8603 v.Aux = symToAux(s)
8604 v.AddArg2(x, y)
8605 return true
8606 }
8607 break
8608 }
8609
8610
8611 for {
8612 c := auxIntToInt32(v.AuxInt)
8613 s := auxToSym(v.Aux)
8614 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8615 x := v_0
8616 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
8617 continue
8618 }
8619 y := v_1.Args[0]
8620 v.reset(OpAMD64LEAL8)
8621 v.AuxInt = int32ToAuxInt(c)
8622 v.Aux = symToAux(s)
8623 v.AddArg2(x, y)
8624 return true
8625 }
8626 break
8627 }
8628 return false
8629 }
8630 func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool {
8631 v_1 := v.Args[1]
8632 v_0 := v.Args[0]
8633
8634
8635
8636 for {
8637 c := auxIntToInt32(v.AuxInt)
8638 s := auxToSym(v.Aux)
8639 if v_0.Op != OpAMD64ADDLconst {
8640 break
8641 }
8642 d := auxIntToInt32(v_0.AuxInt)
8643 x := v_0.Args[0]
8644 y := v_1
8645 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8646 break
8647 }
8648 v.reset(OpAMD64LEAL2)
8649 v.AuxInt = int32ToAuxInt(c + d)
8650 v.Aux = symToAux(s)
8651 v.AddArg2(x, y)
8652 return true
8653 }
8654
8655
8656
8657 for {
8658 c := auxIntToInt32(v.AuxInt)
8659 s := auxToSym(v.Aux)
8660 x := v_0
8661 if v_1.Op != OpAMD64ADDLconst {
8662 break
8663 }
8664 d := auxIntToInt32(v_1.AuxInt)
8665 y := v_1.Args[0]
8666 if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
8667 break
8668 }
8669 v.reset(OpAMD64LEAL2)
8670 v.AuxInt = int32ToAuxInt(c + 2*d)
8671 v.Aux = symToAux(s)
8672 v.AddArg2(x, y)
8673 return true
8674 }
8675
8676
8677
8678 for {
8679 c := auxIntToInt32(v.AuxInt)
8680 s := auxToSym(v.Aux)
8681 x := v_0
8682 z := v_1
8683 if z.Op != OpAMD64ADDL {
8684 break
8685 }
8686 y := z.Args[1]
8687 if y != z.Args[0] || !(x != z) {
8688 break
8689 }
8690 v.reset(OpAMD64LEAL4)
8691 v.AuxInt = int32ToAuxInt(c)
8692 v.Aux = symToAux(s)
8693 v.AddArg2(x, y)
8694 return true
8695 }
8696
8697
8698 for {
8699 c := auxIntToInt32(v.AuxInt)
8700 s := auxToSym(v.Aux)
8701 x := v_0
8702 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
8703 break
8704 }
8705 y := v_1.Args[0]
8706 v.reset(OpAMD64LEAL8)
8707 v.AuxInt = int32ToAuxInt(c)
8708 v.Aux = symToAux(s)
8709 v.AddArg2(x, y)
8710 return true
8711 }
8712
8713
8714
8715 for {
8716 if auxIntToInt32(v.AuxInt) != 0 {
8717 break
8718 }
8719 s := auxToSym(v.Aux)
8720 if v_0.Op != OpAMD64ADDL {
8721 break
8722 }
8723 x := v_0.Args[1]
8724 if x != v_0.Args[0] || x != v_1 || !(s == nil) {
8725 break
8726 }
8727 v.reset(OpAMD64SHLLconst)
8728 v.AuxInt = int8ToAuxInt(2)
8729 v.AddArg(x)
8730 return true
8731 }
8732 return false
8733 }
8734 func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool {
8735 v_1 := v.Args[1]
8736 v_0 := v.Args[0]
8737
8738
8739
8740 for {
8741 c := auxIntToInt32(v.AuxInt)
8742 s := auxToSym(v.Aux)
8743 if v_0.Op != OpAMD64ADDLconst {
8744 break
8745 }
8746 d := auxIntToInt32(v_0.AuxInt)
8747 x := v_0.Args[0]
8748 y := v_1
8749 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8750 break
8751 }
8752 v.reset(OpAMD64LEAL4)
8753 v.AuxInt = int32ToAuxInt(c + d)
8754 v.Aux = symToAux(s)
8755 v.AddArg2(x, y)
8756 return true
8757 }
8758
8759
8760
8761 for {
8762 c := auxIntToInt32(v.AuxInt)
8763 s := auxToSym(v.Aux)
8764 x := v_0
8765 if v_1.Op != OpAMD64ADDLconst {
8766 break
8767 }
8768 d := auxIntToInt32(v_1.AuxInt)
8769 y := v_1.Args[0]
8770 if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
8771 break
8772 }
8773 v.reset(OpAMD64LEAL4)
8774 v.AuxInt = int32ToAuxInt(c + 4*d)
8775 v.Aux = symToAux(s)
8776 v.AddArg2(x, y)
8777 return true
8778 }
8779
8780
8781
8782 for {
8783 c := auxIntToInt32(v.AuxInt)
8784 s := auxToSym(v.Aux)
8785 x := v_0
8786 z := v_1
8787 if z.Op != OpAMD64ADDL {
8788 break
8789 }
8790 y := z.Args[1]
8791 if y != z.Args[0] || !(x != z) {
8792 break
8793 }
8794 v.reset(OpAMD64LEAL8)
8795 v.AuxInt = int32ToAuxInt(c)
8796 v.Aux = symToAux(s)
8797 v.AddArg2(x, y)
8798 return true
8799 }
8800 return false
8801 }
8802 func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool {
8803 v_1 := v.Args[1]
8804 v_0 := v.Args[0]
8805
8806
8807
8808 for {
8809 c := auxIntToInt32(v.AuxInt)
8810 s := auxToSym(v.Aux)
8811 if v_0.Op != OpAMD64ADDLconst {
8812 break
8813 }
8814 d := auxIntToInt32(v_0.AuxInt)
8815 x := v_0.Args[0]
8816 y := v_1
8817 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8818 break
8819 }
8820 v.reset(OpAMD64LEAL8)
8821 v.AuxInt = int32ToAuxInt(c + d)
8822 v.Aux = symToAux(s)
8823 v.AddArg2(x, y)
8824 return true
8825 }
8826
8827
8828
8829 for {
8830 c := auxIntToInt32(v.AuxInt)
8831 s := auxToSym(v.Aux)
8832 x := v_0
8833 if v_1.Op != OpAMD64ADDLconst {
8834 break
8835 }
8836 d := auxIntToInt32(v_1.AuxInt)
8837 y := v_1.Args[0]
8838 if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
8839 break
8840 }
8841 v.reset(OpAMD64LEAL8)
8842 v.AuxInt = int32ToAuxInt(c + 8*d)
8843 v.Aux = symToAux(s)
8844 v.AddArg2(x, y)
8845 return true
8846 }
8847 return false
8848 }
8849 func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
8850 v_0 := v.Args[0]
8851
8852
8853
8854 for {
8855 c := auxIntToInt32(v.AuxInt)
8856 s := auxToSym(v.Aux)
8857 if v_0.Op != OpAMD64ADDQconst {
8858 break
8859 }
8860 d := auxIntToInt32(v_0.AuxInt)
8861 x := v_0.Args[0]
8862 if !(is32Bit(int64(c) + int64(d))) {
8863 break
8864 }
8865 v.reset(OpAMD64LEAQ)
8866 v.AuxInt = int32ToAuxInt(c + d)
8867 v.Aux = symToAux(s)
8868 v.AddArg(x)
8869 return true
8870 }
8871
8872
8873
8874 for {
8875 c := auxIntToInt32(v.AuxInt)
8876 s := auxToSym(v.Aux)
8877 if v_0.Op != OpAMD64ADDQ {
8878 break
8879 }
8880 _ = v_0.Args[1]
8881 v_0_0 := v_0.Args[0]
8882 v_0_1 := v_0.Args[1]
8883 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
8884 x := v_0_0
8885 y := v_0_1
8886 if !(x.Op != OpSB && y.Op != OpSB) {
8887 continue
8888 }
8889 v.reset(OpAMD64LEAQ1)
8890 v.AuxInt = int32ToAuxInt(c)
8891 v.Aux = symToAux(s)
8892 v.AddArg2(x, y)
8893 return true
8894 }
8895 break
8896 }
8897
8898
8899
8900 for {
8901 off1 := auxIntToInt32(v.AuxInt)
8902 sym1 := auxToSym(v.Aux)
8903 if v_0.Op != OpAMD64LEAQ {
8904 break
8905 }
8906 off2 := auxIntToInt32(v_0.AuxInt)
8907 sym2 := auxToSym(v_0.Aux)
8908 x := v_0.Args[0]
8909 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8910 break
8911 }
8912 v.reset(OpAMD64LEAQ)
8913 v.AuxInt = int32ToAuxInt(off1 + off2)
8914 v.Aux = symToAux(mergeSym(sym1, sym2))
8915 v.AddArg(x)
8916 return true
8917 }
8918
8919
8920
8921 for {
8922 off1 := auxIntToInt32(v.AuxInt)
8923 sym1 := auxToSym(v.Aux)
8924 if v_0.Op != OpAMD64LEAQ1 {
8925 break
8926 }
8927 off2 := auxIntToInt32(v_0.AuxInt)
8928 sym2 := auxToSym(v_0.Aux)
8929 y := v_0.Args[1]
8930 x := v_0.Args[0]
8931 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8932 break
8933 }
8934 v.reset(OpAMD64LEAQ1)
8935 v.AuxInt = int32ToAuxInt(off1 + off2)
8936 v.Aux = symToAux(mergeSym(sym1, sym2))
8937 v.AddArg2(x, y)
8938 return true
8939 }
8940
8941
8942
8943 for {
8944 off1 := auxIntToInt32(v.AuxInt)
8945 sym1 := auxToSym(v.Aux)
8946 if v_0.Op != OpAMD64LEAQ2 {
8947 break
8948 }
8949 off2 := auxIntToInt32(v_0.AuxInt)
8950 sym2 := auxToSym(v_0.Aux)
8951 y := v_0.Args[1]
8952 x := v_0.Args[0]
8953 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8954 break
8955 }
8956 v.reset(OpAMD64LEAQ2)
8957 v.AuxInt = int32ToAuxInt(off1 + off2)
8958 v.Aux = symToAux(mergeSym(sym1, sym2))
8959 v.AddArg2(x, y)
8960 return true
8961 }
8962
8963
8964
8965 for {
8966 off1 := auxIntToInt32(v.AuxInt)
8967 sym1 := auxToSym(v.Aux)
8968 if v_0.Op != OpAMD64LEAQ4 {
8969 break
8970 }
8971 off2 := auxIntToInt32(v_0.AuxInt)
8972 sym2 := auxToSym(v_0.Aux)
8973 y := v_0.Args[1]
8974 x := v_0.Args[0]
8975 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8976 break
8977 }
8978 v.reset(OpAMD64LEAQ4)
8979 v.AuxInt = int32ToAuxInt(off1 + off2)
8980 v.Aux = symToAux(mergeSym(sym1, sym2))
8981 v.AddArg2(x, y)
8982 return true
8983 }
8984
8985
8986
8987 for {
8988 off1 := auxIntToInt32(v.AuxInt)
8989 sym1 := auxToSym(v.Aux)
8990 if v_0.Op != OpAMD64LEAQ8 {
8991 break
8992 }
8993 off2 := auxIntToInt32(v_0.AuxInt)
8994 sym2 := auxToSym(v_0.Aux)
8995 y := v_0.Args[1]
8996 x := v_0.Args[0]
8997 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8998 break
8999 }
9000 v.reset(OpAMD64LEAQ8)
9001 v.AuxInt = int32ToAuxInt(off1 + off2)
9002 v.Aux = symToAux(mergeSym(sym1, sym2))
9003 v.AddArg2(x, y)
9004 return true
9005 }
9006 return false
9007 }
9008 func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
9009 v_1 := v.Args[1]
9010 v_0 := v.Args[0]
9011
9012
9013
9014 for {
9015 c := auxIntToInt32(v.AuxInt)
9016 s := auxToSym(v.Aux)
9017 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9018 if v_0.Op != OpAMD64ADDQconst {
9019 continue
9020 }
9021 d := auxIntToInt32(v_0.AuxInt)
9022 x := v_0.Args[0]
9023 y := v_1
9024 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9025 continue
9026 }
9027 v.reset(OpAMD64LEAQ1)
9028 v.AuxInt = int32ToAuxInt(c + d)
9029 v.Aux = symToAux(s)
9030 v.AddArg2(x, y)
9031 return true
9032 }
9033 break
9034 }
9035
9036
9037
9038 for {
9039 c := auxIntToInt32(v.AuxInt)
9040 s := auxToSym(v.Aux)
9041 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9042 x := v_0
9043 z := v_1
9044 if z.Op != OpAMD64ADDQ {
9045 continue
9046 }
9047 y := z.Args[1]
9048 if y != z.Args[0] || !(x != z) {
9049 continue
9050 }
9051 v.reset(OpAMD64LEAQ2)
9052 v.AuxInt = int32ToAuxInt(c)
9053 v.Aux = symToAux(s)
9054 v.AddArg2(x, y)
9055 return true
9056 }
9057 break
9058 }
9059
9060
9061 for {
9062 c := auxIntToInt32(v.AuxInt)
9063 s := auxToSym(v.Aux)
9064 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9065 x := v_0
9066 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
9067 continue
9068 }
9069 y := v_1.Args[0]
9070 v.reset(OpAMD64LEAQ4)
9071 v.AuxInt = int32ToAuxInt(c)
9072 v.Aux = symToAux(s)
9073 v.AddArg2(x, y)
9074 return true
9075 }
9076 break
9077 }
9078
9079
9080 for {
9081 c := auxIntToInt32(v.AuxInt)
9082 s := auxToSym(v.Aux)
9083 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9084 x := v_0
9085 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
9086 continue
9087 }
9088 y := v_1.Args[0]
9089 v.reset(OpAMD64LEAQ8)
9090 v.AuxInt = int32ToAuxInt(c)
9091 v.Aux = symToAux(s)
9092 v.AddArg2(x, y)
9093 return true
9094 }
9095 break
9096 }
9097
9098
9099
9100 for {
9101 off1 := auxIntToInt32(v.AuxInt)
9102 sym1 := auxToSym(v.Aux)
9103 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9104 if v_0.Op != OpAMD64LEAQ {
9105 continue
9106 }
9107 off2 := auxIntToInt32(v_0.AuxInt)
9108 sym2 := auxToSym(v_0.Aux)
9109 x := v_0.Args[0]
9110 y := v_1
9111 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9112 continue
9113 }
9114 v.reset(OpAMD64LEAQ1)
9115 v.AuxInt = int32ToAuxInt(off1 + off2)
9116 v.Aux = symToAux(mergeSym(sym1, sym2))
9117 v.AddArg2(x, y)
9118 return true
9119 }
9120 break
9121 }
9122
9123
9124
9125 for {
9126 off1 := auxIntToInt32(v.AuxInt)
9127 sym1 := auxToSym(v.Aux)
9128 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9129 x := v_0
9130 if v_1.Op != OpAMD64LEAQ1 {
9131 continue
9132 }
9133 off2 := auxIntToInt32(v_1.AuxInt)
9134 sym2 := auxToSym(v_1.Aux)
9135 y := v_1.Args[1]
9136 if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9137 continue
9138 }
9139 v.reset(OpAMD64LEAQ2)
9140 v.AuxInt = int32ToAuxInt(off1 + off2)
9141 v.Aux = symToAux(mergeSym(sym1, sym2))
9142 v.AddArg2(x, y)
9143 return true
9144 }
9145 break
9146 }
9147
9148
9149
9150 for {
9151 off1 := auxIntToInt32(v.AuxInt)
9152 sym1 := auxToSym(v.Aux)
9153 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9154 x := v_0
9155 if v_1.Op != OpAMD64LEAQ1 {
9156 continue
9157 }
9158 off2 := auxIntToInt32(v_1.AuxInt)
9159 sym2 := auxToSym(v_1.Aux)
9160 _ = v_1.Args[1]
9161 v_1_0 := v_1.Args[0]
9162 v_1_1 := v_1.Args[1]
9163 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
9164 if x != v_1_0 {
9165 continue
9166 }
9167 y := v_1_1
9168 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9169 continue
9170 }
9171 v.reset(OpAMD64LEAQ2)
9172 v.AuxInt = int32ToAuxInt(off1 + off2)
9173 v.Aux = symToAux(mergeSym(sym1, sym2))
9174 v.AddArg2(y, x)
9175 return true
9176 }
9177 }
9178 break
9179 }
9180
9181
9182
9183 for {
9184 if auxIntToInt32(v.AuxInt) != 0 {
9185 break
9186 }
9187 x := v_0
9188 y := v_1
9189 if !(v.Aux == nil) {
9190 break
9191 }
9192 v.reset(OpAMD64ADDQ)
9193 v.AddArg2(x, y)
9194 return true
9195 }
9196 return false
9197 }
9198 func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
9199 v_1 := v.Args[1]
9200 v_0 := v.Args[0]
9201
9202
9203
9204 for {
9205 c := auxIntToInt32(v.AuxInt)
9206 s := auxToSym(v.Aux)
9207 if v_0.Op != OpAMD64ADDQconst {
9208 break
9209 }
9210 d := auxIntToInt32(v_0.AuxInt)
9211 x := v_0.Args[0]
9212 y := v_1
9213 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9214 break
9215 }
9216 v.reset(OpAMD64LEAQ2)
9217 v.AuxInt = int32ToAuxInt(c + d)
9218 v.Aux = symToAux(s)
9219 v.AddArg2(x, y)
9220 return true
9221 }
9222
9223
9224
9225 for {
9226 c := auxIntToInt32(v.AuxInt)
9227 s := auxToSym(v.Aux)
9228 x := v_0
9229 if v_1.Op != OpAMD64ADDQconst {
9230 break
9231 }
9232 d := auxIntToInt32(v_1.AuxInt)
9233 y := v_1.Args[0]
9234 if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
9235 break
9236 }
9237 v.reset(OpAMD64LEAQ2)
9238 v.AuxInt = int32ToAuxInt(c + 2*d)
9239 v.Aux = symToAux(s)
9240 v.AddArg2(x, y)
9241 return true
9242 }
9243
9244
9245
9246 for {
9247 c := auxIntToInt32(v.AuxInt)
9248 s := auxToSym(v.Aux)
9249 x := v_0
9250 z := v_1
9251 if z.Op != OpAMD64ADDQ {
9252 break
9253 }
9254 y := z.Args[1]
9255 if y != z.Args[0] || !(x != z) {
9256 break
9257 }
9258 v.reset(OpAMD64LEAQ4)
9259 v.AuxInt = int32ToAuxInt(c)
9260 v.Aux = symToAux(s)
9261 v.AddArg2(x, y)
9262 return true
9263 }
9264
9265
9266 for {
9267 c := auxIntToInt32(v.AuxInt)
9268 s := auxToSym(v.Aux)
9269 x := v_0
9270 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
9271 break
9272 }
9273 y := v_1.Args[0]
9274 v.reset(OpAMD64LEAQ8)
9275 v.AuxInt = int32ToAuxInt(c)
9276 v.Aux = symToAux(s)
9277 v.AddArg2(x, y)
9278 return true
9279 }
9280
9281
9282
9283 for {
9284 if auxIntToInt32(v.AuxInt) != 0 {
9285 break
9286 }
9287 s := auxToSym(v.Aux)
9288 if v_0.Op != OpAMD64ADDQ {
9289 break
9290 }
9291 x := v_0.Args[1]
9292 if x != v_0.Args[0] || x != v_1 || !(s == nil) {
9293 break
9294 }
9295 v.reset(OpAMD64SHLQconst)
9296 v.AuxInt = int8ToAuxInt(2)
9297 v.AddArg(x)
9298 return true
9299 }
9300
9301
9302
9303 for {
9304 off1 := auxIntToInt32(v.AuxInt)
9305 sym1 := auxToSym(v.Aux)
9306 if v_0.Op != OpAMD64LEAQ {
9307 break
9308 }
9309 off2 := auxIntToInt32(v_0.AuxInt)
9310 sym2 := auxToSym(v_0.Aux)
9311 x := v_0.Args[0]
9312 y := v_1
9313 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9314 break
9315 }
9316 v.reset(OpAMD64LEAQ2)
9317 v.AuxInt = int32ToAuxInt(off1 + off2)
9318 v.Aux = symToAux(mergeSym(sym1, sym2))
9319 v.AddArg2(x, y)
9320 return true
9321 }
9322
9323
9324
9325 for {
9326 off1 := auxIntToInt32(v.AuxInt)
9327 sym1 := auxToSym(v.Aux)
9328 x := v_0
9329 if v_1.Op != OpAMD64LEAQ1 {
9330 break
9331 }
9332 off2 := auxIntToInt32(v_1.AuxInt)
9333 sym2 := auxToSym(v_1.Aux)
9334 y := v_1.Args[1]
9335 if y != v_1.Args[0] || !(is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil) {
9336 break
9337 }
9338 v.reset(OpAMD64LEAQ4)
9339 v.AuxInt = int32ToAuxInt(off1 + 2*off2)
9340 v.Aux = symToAux(sym1)
9341 v.AddArg2(x, y)
9342 return true
9343 }
9344
9345
9346
9347 for {
9348 off := auxIntToInt32(v.AuxInt)
9349 sym := auxToSym(v.Aux)
9350 x := v_0
9351 if v_1.Op != OpAMD64MOVQconst {
9352 break
9353 }
9354 scale := auxIntToInt64(v_1.AuxInt)
9355 if !(is32Bit(int64(off) + int64(scale)*2)) {
9356 break
9357 }
9358 v.reset(OpAMD64LEAQ)
9359 v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
9360 v.Aux = symToAux(sym)
9361 v.AddArg(x)
9362 return true
9363 }
9364
9365
9366
9367 for {
9368 off := auxIntToInt32(v.AuxInt)
9369 sym := auxToSym(v.Aux)
9370 x := v_0
9371 if v_1.Op != OpAMD64MOVLconst {
9372 break
9373 }
9374 scale := auxIntToInt32(v_1.AuxInt)
9375 if !(is32Bit(int64(off) + int64(scale)*2)) {
9376 break
9377 }
9378 v.reset(OpAMD64LEAQ)
9379 v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
9380 v.Aux = symToAux(sym)
9381 v.AddArg(x)
9382 return true
9383 }
9384 return false
9385 }
9386 func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
9387 v_1 := v.Args[1]
9388 v_0 := v.Args[0]
9389
9390
9391
9392 for {
9393 c := auxIntToInt32(v.AuxInt)
9394 s := auxToSym(v.Aux)
9395 if v_0.Op != OpAMD64ADDQconst {
9396 break
9397 }
9398 d := auxIntToInt32(v_0.AuxInt)
9399 x := v_0.Args[0]
9400 y := v_1
9401 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9402 break
9403 }
9404 v.reset(OpAMD64LEAQ4)
9405 v.AuxInt = int32ToAuxInt(c + d)
9406 v.Aux = symToAux(s)
9407 v.AddArg2(x, y)
9408 return true
9409 }
9410
9411
9412
9413 for {
9414 c := auxIntToInt32(v.AuxInt)
9415 s := auxToSym(v.Aux)
9416 x := v_0
9417 if v_1.Op != OpAMD64ADDQconst {
9418 break
9419 }
9420 d := auxIntToInt32(v_1.AuxInt)
9421 y := v_1.Args[0]
9422 if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
9423 break
9424 }
9425 v.reset(OpAMD64LEAQ4)
9426 v.AuxInt = int32ToAuxInt(c + 4*d)
9427 v.Aux = symToAux(s)
9428 v.AddArg2(x, y)
9429 return true
9430 }
9431
9432
9433
9434 for {
9435 c := auxIntToInt32(v.AuxInt)
9436 s := auxToSym(v.Aux)
9437 x := v_0
9438 z := v_1
9439 if z.Op != OpAMD64ADDQ {
9440 break
9441 }
9442 y := z.Args[1]
9443 if y != z.Args[0] || !(x != z) {
9444 break
9445 }
9446 v.reset(OpAMD64LEAQ8)
9447 v.AuxInt = int32ToAuxInt(c)
9448 v.Aux = symToAux(s)
9449 v.AddArg2(x, y)
9450 return true
9451 }
9452
9453
9454
9455 for {
9456 off1 := auxIntToInt32(v.AuxInt)
9457 sym1 := auxToSym(v.Aux)
9458 if v_0.Op != OpAMD64LEAQ {
9459 break
9460 }
9461 off2 := auxIntToInt32(v_0.AuxInt)
9462 sym2 := auxToSym(v_0.Aux)
9463 x := v_0.Args[0]
9464 y := v_1
9465 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9466 break
9467 }
9468 v.reset(OpAMD64LEAQ4)
9469 v.AuxInt = int32ToAuxInt(off1 + off2)
9470 v.Aux = symToAux(mergeSym(sym1, sym2))
9471 v.AddArg2(x, y)
9472 return true
9473 }
9474
9475
9476
9477 for {
9478 off1 := auxIntToInt32(v.AuxInt)
9479 sym1 := auxToSym(v.Aux)
9480 x := v_0
9481 if v_1.Op != OpAMD64LEAQ1 {
9482 break
9483 }
9484 off2 := auxIntToInt32(v_1.AuxInt)
9485 sym2 := auxToSym(v_1.Aux)
9486 y := v_1.Args[1]
9487 if y != v_1.Args[0] || !(is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil) {
9488 break
9489 }
9490 v.reset(OpAMD64LEAQ8)
9491 v.AuxInt = int32ToAuxInt(off1 + 4*off2)
9492 v.Aux = symToAux(sym1)
9493 v.AddArg2(x, y)
9494 return true
9495 }
9496
9497
9498
9499 for {
9500 off := auxIntToInt32(v.AuxInt)
9501 sym := auxToSym(v.Aux)
9502 x := v_0
9503 if v_1.Op != OpAMD64MOVQconst {
9504 break
9505 }
9506 scale := auxIntToInt64(v_1.AuxInt)
9507 if !(is32Bit(int64(off) + int64(scale)*4)) {
9508 break
9509 }
9510 v.reset(OpAMD64LEAQ)
9511 v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
9512 v.Aux = symToAux(sym)
9513 v.AddArg(x)
9514 return true
9515 }
9516
9517
9518
9519 for {
9520 off := auxIntToInt32(v.AuxInt)
9521 sym := auxToSym(v.Aux)
9522 x := v_0
9523 if v_1.Op != OpAMD64MOVLconst {
9524 break
9525 }
9526 scale := auxIntToInt32(v_1.AuxInt)
9527 if !(is32Bit(int64(off) + int64(scale)*4)) {
9528 break
9529 }
9530 v.reset(OpAMD64LEAQ)
9531 v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
9532 v.Aux = symToAux(sym)
9533 v.AddArg(x)
9534 return true
9535 }
9536 return false
9537 }
9538 func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
9539 v_1 := v.Args[1]
9540 v_0 := v.Args[0]
9541
9542
9543
9544 for {
9545 c := auxIntToInt32(v.AuxInt)
9546 s := auxToSym(v.Aux)
9547 if v_0.Op != OpAMD64ADDQconst {
9548 break
9549 }
9550 d := auxIntToInt32(v_0.AuxInt)
9551 x := v_0.Args[0]
9552 y := v_1
9553 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9554 break
9555 }
9556 v.reset(OpAMD64LEAQ8)
9557 v.AuxInt = int32ToAuxInt(c + d)
9558 v.Aux = symToAux(s)
9559 v.AddArg2(x, y)
9560 return true
9561 }
9562
9563
9564
9565 for {
9566 c := auxIntToInt32(v.AuxInt)
9567 s := auxToSym(v.Aux)
9568 x := v_0
9569 if v_1.Op != OpAMD64ADDQconst {
9570 break
9571 }
9572 d := auxIntToInt32(v_1.AuxInt)
9573 y := v_1.Args[0]
9574 if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
9575 break
9576 }
9577 v.reset(OpAMD64LEAQ8)
9578 v.AuxInt = int32ToAuxInt(c + 8*d)
9579 v.Aux = symToAux(s)
9580 v.AddArg2(x, y)
9581 return true
9582 }
9583
9584
9585
9586 for {
9587 off1 := auxIntToInt32(v.AuxInt)
9588 sym1 := auxToSym(v.Aux)
9589 if v_0.Op != OpAMD64LEAQ {
9590 break
9591 }
9592 off2 := auxIntToInt32(v_0.AuxInt)
9593 sym2 := auxToSym(v_0.Aux)
9594 x := v_0.Args[0]
9595 y := v_1
9596 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9597 break
9598 }
9599 v.reset(OpAMD64LEAQ8)
9600 v.AuxInt = int32ToAuxInt(off1 + off2)
9601 v.Aux = symToAux(mergeSym(sym1, sym2))
9602 v.AddArg2(x, y)
9603 return true
9604 }
9605
9606
9607
9608 for {
9609 off := auxIntToInt32(v.AuxInt)
9610 sym := auxToSym(v.Aux)
9611 x := v_0
9612 if v_1.Op != OpAMD64MOVQconst {
9613 break
9614 }
9615 scale := auxIntToInt64(v_1.AuxInt)
9616 if !(is32Bit(int64(off) + int64(scale)*8)) {
9617 break
9618 }
9619 v.reset(OpAMD64LEAQ)
9620 v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
9621 v.Aux = symToAux(sym)
9622 v.AddArg(x)
9623 return true
9624 }
9625
9626
9627
9628 for {
9629 off := auxIntToInt32(v.AuxInt)
9630 sym := auxToSym(v.Aux)
9631 x := v_0
9632 if v_1.Op != OpAMD64MOVLconst {
9633 break
9634 }
9635 scale := auxIntToInt32(v_1.AuxInt)
9636 if !(is32Bit(int64(off) + int64(scale)*8)) {
9637 break
9638 }
9639 v.reset(OpAMD64LEAQ)
9640 v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
9641 v.Aux = symToAux(sym)
9642 v.AddArg(x)
9643 return true
9644 }
9645 return false
9646 }
9647 func rewriteValueAMD64_OpAMD64LoweredPanicBoundsCR(v *Value) bool {
9648 v_1 := v.Args[1]
9649 v_0 := v.Args[0]
9650
9651
9652 for {
9653 kind := auxIntToInt64(v.AuxInt)
9654 p := auxToPanicBoundsC(v.Aux)
9655 if v_0.Op != OpAMD64MOVQconst {
9656 break
9657 }
9658 c := auxIntToInt64(v_0.AuxInt)
9659 mem := v_1
9660 v.reset(OpAMD64LoweredPanicBoundsCC)
9661 v.AuxInt = int64ToAuxInt(kind)
9662 v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: p.C, Cy: c})
9663 v.AddArg(mem)
9664 return true
9665 }
9666 return false
9667 }
9668 func rewriteValueAMD64_OpAMD64LoweredPanicBoundsRC(v *Value) bool {
9669 v_1 := v.Args[1]
9670 v_0 := v.Args[0]
9671
9672
9673 for {
9674 kind := auxIntToInt64(v.AuxInt)
9675 p := auxToPanicBoundsC(v.Aux)
9676 if v_0.Op != OpAMD64MOVQconst {
9677 break
9678 }
9679 c := auxIntToInt64(v_0.AuxInt)
9680 mem := v_1
9681 v.reset(OpAMD64LoweredPanicBoundsCC)
9682 v.AuxInt = int64ToAuxInt(kind)
9683 v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: c, Cy: p.C})
9684 v.AddArg(mem)
9685 return true
9686 }
9687 return false
9688 }
9689 func rewriteValueAMD64_OpAMD64LoweredPanicBoundsRR(v *Value) bool {
9690 v_2 := v.Args[2]
9691 v_1 := v.Args[1]
9692 v_0 := v.Args[0]
9693
9694
9695 for {
9696 kind := auxIntToInt64(v.AuxInt)
9697 x := v_0
9698 if v_1.Op != OpAMD64MOVQconst {
9699 break
9700 }
9701 c := auxIntToInt64(v_1.AuxInt)
9702 mem := v_2
9703 v.reset(OpAMD64LoweredPanicBoundsRC)
9704 v.AuxInt = int64ToAuxInt(kind)
9705 v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
9706 v.AddArg2(x, mem)
9707 return true
9708 }
9709
9710
9711 for {
9712 kind := auxIntToInt64(v.AuxInt)
9713 if v_0.Op != OpAMD64MOVQconst {
9714 break
9715 }
9716 c := auxIntToInt64(v_0.AuxInt)
9717 y := v_1
9718 mem := v_2
9719 v.reset(OpAMD64LoweredPanicBoundsCR)
9720 v.AuxInt = int64ToAuxInt(kind)
9721 v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
9722 v.AddArg2(y, mem)
9723 return true
9724 }
9725 return false
9726 }
9727 func rewriteValueAMD64_OpAMD64MOVBELstore(v *Value) bool {
9728 v_2 := v.Args[2]
9729 v_1 := v.Args[1]
9730 v_0 := v.Args[0]
9731
9732
9733
9734 for {
9735 i := auxIntToInt32(v.AuxInt)
9736 s := auxToSym(v.Aux)
9737 p := v_0
9738 x := v_1
9739 if x.Op != OpAMD64BSWAPL {
9740 break
9741 }
9742 w := x.Args[0]
9743 mem := v_2
9744 if !(x.Uses == 1) {
9745 break
9746 }
9747 v.reset(OpAMD64MOVLstore)
9748 v.AuxInt = int32ToAuxInt(i)
9749 v.Aux = symToAux(s)
9750 v.AddArg3(p, w, mem)
9751 return true
9752 }
9753 return false
9754 }
9755 func rewriteValueAMD64_OpAMD64MOVBEQstore(v *Value) bool {
9756 v_2 := v.Args[2]
9757 v_1 := v.Args[1]
9758 v_0 := v.Args[0]
9759
9760
9761
9762 for {
9763 i := auxIntToInt32(v.AuxInt)
9764 s := auxToSym(v.Aux)
9765 p := v_0
9766 x := v_1
9767 if x.Op != OpAMD64BSWAPQ {
9768 break
9769 }
9770 w := x.Args[0]
9771 mem := v_2
9772 if !(x.Uses == 1) {
9773 break
9774 }
9775 v.reset(OpAMD64MOVQstore)
9776 v.AuxInt = int32ToAuxInt(i)
9777 v.Aux = symToAux(s)
9778 v.AddArg3(p, w, mem)
9779 return true
9780 }
9781 return false
9782 }
9783 func rewriteValueAMD64_OpAMD64MOVBEWstore(v *Value) bool {
9784 v_2 := v.Args[2]
9785 v_1 := v.Args[1]
9786 v_0 := v.Args[0]
9787
9788
9789
9790 for {
9791 i := auxIntToInt32(v.AuxInt)
9792 s := auxToSym(v.Aux)
9793 p := v_0
9794 x := v_1
9795 if x.Op != OpAMD64ROLWconst || auxIntToInt8(x.AuxInt) != 8 {
9796 break
9797 }
9798 w := x.Args[0]
9799 mem := v_2
9800 if !(x.Uses == 1) {
9801 break
9802 }
9803 v.reset(OpAMD64MOVWstore)
9804 v.AuxInt = int32ToAuxInt(i)
9805 v.Aux = symToAux(s)
9806 v.AddArg3(p, w, mem)
9807 return true
9808 }
9809 return false
9810 }
9811 func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
9812 v_0 := v.Args[0]
9813 b := v.Block
9814
9815
9816
9817 for {
9818 x := v_0
9819 if x.Op != OpAMD64MOVBload {
9820 break
9821 }
9822 off := auxIntToInt32(x.AuxInt)
9823 sym := auxToSym(x.Aux)
9824 mem := x.Args[1]
9825 ptr := x.Args[0]
9826 if !(x.Uses == 1 && clobber(x)) {
9827 break
9828 }
9829 b = x.Block
9830 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9831 v.copyOf(v0)
9832 v0.AuxInt = int32ToAuxInt(off)
9833 v0.Aux = symToAux(sym)
9834 v0.AddArg2(ptr, mem)
9835 return true
9836 }
9837
9838
9839
9840 for {
9841 x := v_0
9842 if x.Op != OpAMD64MOVWload {
9843 break
9844 }
9845 off := auxIntToInt32(x.AuxInt)
9846 sym := auxToSym(x.Aux)
9847 mem := x.Args[1]
9848 ptr := x.Args[0]
9849 if !(x.Uses == 1 && clobber(x)) {
9850 break
9851 }
9852 b = x.Block
9853 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9854 v.copyOf(v0)
9855 v0.AuxInt = int32ToAuxInt(off)
9856 v0.Aux = symToAux(sym)
9857 v0.AddArg2(ptr, mem)
9858 return true
9859 }
9860
9861
9862
9863 for {
9864 x := v_0
9865 if x.Op != OpAMD64MOVLload {
9866 break
9867 }
9868 off := auxIntToInt32(x.AuxInt)
9869 sym := auxToSym(x.Aux)
9870 mem := x.Args[1]
9871 ptr := x.Args[0]
9872 if !(x.Uses == 1 && clobber(x)) {
9873 break
9874 }
9875 b = x.Block
9876 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9877 v.copyOf(v0)
9878 v0.AuxInt = int32ToAuxInt(off)
9879 v0.Aux = symToAux(sym)
9880 v0.AddArg2(ptr, mem)
9881 return true
9882 }
9883
9884
9885
9886 for {
9887 x := v_0
9888 if x.Op != OpAMD64MOVQload {
9889 break
9890 }
9891 off := auxIntToInt32(x.AuxInt)
9892 sym := auxToSym(x.Aux)
9893 mem := x.Args[1]
9894 ptr := x.Args[0]
9895 if !(x.Uses == 1 && clobber(x)) {
9896 break
9897 }
9898 b = x.Block
9899 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9900 v.copyOf(v0)
9901 v0.AuxInt = int32ToAuxInt(off)
9902 v0.Aux = symToAux(sym)
9903 v0.AddArg2(ptr, mem)
9904 return true
9905 }
9906
9907
9908
9909 for {
9910 if v_0.Op != OpAMD64ANDLconst {
9911 break
9912 }
9913 c := auxIntToInt32(v_0.AuxInt)
9914 x := v_0.Args[0]
9915 if !(c&0x80 == 0) {
9916 break
9917 }
9918 v.reset(OpAMD64ANDLconst)
9919 v.AuxInt = int32ToAuxInt(c & 0x7f)
9920 v.AddArg(x)
9921 return true
9922 }
9923
9924
9925 for {
9926 if v_0.Op != OpAMD64MOVBQSX {
9927 break
9928 }
9929 x := v_0.Args[0]
9930 v.reset(OpAMD64MOVBQSX)
9931 v.AddArg(x)
9932 return true
9933 }
9934 return false
9935 }
9936 func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool {
9937 v_1 := v.Args[1]
9938 v_0 := v.Args[0]
9939
9940
9941
9942 for {
9943 off := auxIntToInt32(v.AuxInt)
9944 sym := auxToSym(v.Aux)
9945 ptr := v_0
9946 if v_1.Op != OpAMD64MOVBstore {
9947 break
9948 }
9949 off2 := auxIntToInt32(v_1.AuxInt)
9950 sym2 := auxToSym(v_1.Aux)
9951 x := v_1.Args[1]
9952 ptr2 := v_1.Args[0]
9953 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
9954 break
9955 }
9956 v.reset(OpAMD64MOVBQSX)
9957 v.AddArg(x)
9958 return true
9959 }
9960
9961
9962
9963 for {
9964 off1 := auxIntToInt32(v.AuxInt)
9965 sym1 := auxToSym(v.Aux)
9966 if v_0.Op != OpAMD64LEAQ {
9967 break
9968 }
9969 off2 := auxIntToInt32(v_0.AuxInt)
9970 sym2 := auxToSym(v_0.Aux)
9971 base := v_0.Args[0]
9972 mem := v_1
9973 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9974 break
9975 }
9976 v.reset(OpAMD64MOVBQSXload)
9977 v.AuxInt = int32ToAuxInt(off1 + off2)
9978 v.Aux = symToAux(mergeSym(sym1, sym2))
9979 v.AddArg2(base, mem)
9980 return true
9981 }
9982
9983
9984
9985 for {
9986 off := auxIntToInt32(v.AuxInt)
9987 sym := auxToSym(v.Aux)
9988 if v_0.Op != OpSB || !(symIsRO(sym)) {
9989 break
9990 }
9991 v.reset(OpAMD64MOVQconst)
9992 v.AuxInt = int64ToAuxInt(int64(int8(read8(sym, int64(off)))))
9993 return true
9994 }
9995 return false
9996 }
9997 func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
9998 v_0 := v.Args[0]
9999 b := v.Block
10000
10001
10002
10003 for {
10004 x := v_0
10005 if x.Op != OpAMD64MOVBload {
10006 break
10007 }
10008 off := auxIntToInt32(x.AuxInt)
10009 sym := auxToSym(x.Aux)
10010 mem := x.Args[1]
10011 ptr := x.Args[0]
10012 if !(x.Uses == 1 && clobber(x)) {
10013 break
10014 }
10015 b = x.Block
10016 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
10017 v.copyOf(v0)
10018 v0.AuxInt = int32ToAuxInt(off)
10019 v0.Aux = symToAux(sym)
10020 v0.AddArg2(ptr, mem)
10021 return true
10022 }
10023
10024
10025
10026 for {
10027 x := v_0
10028 if x.Op != OpAMD64MOVWload {
10029 break
10030 }
10031 off := auxIntToInt32(x.AuxInt)
10032 sym := auxToSym(x.Aux)
10033 mem := x.Args[1]
10034 ptr := x.Args[0]
10035 if !(x.Uses == 1 && clobber(x)) {
10036 break
10037 }
10038 b = x.Block
10039 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
10040 v.copyOf(v0)
10041 v0.AuxInt = int32ToAuxInt(off)
10042 v0.Aux = symToAux(sym)
10043 v0.AddArg2(ptr, mem)
10044 return true
10045 }
10046
10047
10048
10049 for {
10050 x := v_0
10051 if x.Op != OpAMD64MOVLload {
10052 break
10053 }
10054 off := auxIntToInt32(x.AuxInt)
10055 sym := auxToSym(x.Aux)
10056 mem := x.Args[1]
10057 ptr := x.Args[0]
10058 if !(x.Uses == 1 && clobber(x)) {
10059 break
10060 }
10061 b = x.Block
10062 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
10063 v.copyOf(v0)
10064 v0.AuxInt = int32ToAuxInt(off)
10065 v0.Aux = symToAux(sym)
10066 v0.AddArg2(ptr, mem)
10067 return true
10068 }
10069
10070
10071
10072 for {
10073 x := v_0
10074 if x.Op != OpAMD64MOVQload {
10075 break
10076 }
10077 off := auxIntToInt32(x.AuxInt)
10078 sym := auxToSym(x.Aux)
10079 mem := x.Args[1]
10080 ptr := x.Args[0]
10081 if !(x.Uses == 1 && clobber(x)) {
10082 break
10083 }
10084 b = x.Block
10085 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
10086 v.copyOf(v0)
10087 v0.AuxInt = int32ToAuxInt(off)
10088 v0.Aux = symToAux(sym)
10089 v0.AddArg2(ptr, mem)
10090 return true
10091 }
10092
10093
10094 for {
10095 if v_0.Op != OpAMD64ANDLconst {
10096 break
10097 }
10098 c := auxIntToInt32(v_0.AuxInt)
10099 x := v_0.Args[0]
10100 v.reset(OpAMD64ANDLconst)
10101 v.AuxInt = int32ToAuxInt(c & 0xff)
10102 v.AddArg(x)
10103 return true
10104 }
10105
10106
10107 for {
10108 if v_0.Op != OpAMD64MOVBQZX {
10109 break
10110 }
10111 x := v_0.Args[0]
10112 v.reset(OpAMD64MOVBQZX)
10113 v.AddArg(x)
10114 return true
10115 }
10116 return false
10117 }
10118 func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool {
10119 v_1 := v.Args[1]
10120 v_0 := v.Args[0]
10121
10122
10123
10124 for {
10125 off1 := auxIntToInt32(v.AuxInt)
10126 sym := auxToSym(v.Aux)
10127 if v_0.Op != OpAMD64ADDQconst {
10128 break
10129 }
10130 off2 := auxIntToInt32(v_0.AuxInt)
10131 ptr := v_0.Args[0]
10132 mem := v_1
10133 if !(is32Bit(int64(off1) + int64(off2))) {
10134 break
10135 }
10136 v.reset(OpAMD64MOVBatomicload)
10137 v.AuxInt = int32ToAuxInt(off1 + off2)
10138 v.Aux = symToAux(sym)
10139 v.AddArg2(ptr, mem)
10140 return true
10141 }
10142
10143
10144
10145 for {
10146 off1 := auxIntToInt32(v.AuxInt)
10147 sym1 := auxToSym(v.Aux)
10148 if v_0.Op != OpAMD64LEAQ {
10149 break
10150 }
10151 off2 := auxIntToInt32(v_0.AuxInt)
10152 sym2 := auxToSym(v_0.Aux)
10153 ptr := v_0.Args[0]
10154 mem := v_1
10155 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10156 break
10157 }
10158 v.reset(OpAMD64MOVBatomicload)
10159 v.AuxInt = int32ToAuxInt(off1 + off2)
10160 v.Aux = symToAux(mergeSym(sym1, sym2))
10161 v.AddArg2(ptr, mem)
10162 return true
10163 }
10164 return false
10165 }
10166 func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
10167 v_1 := v.Args[1]
10168 v_0 := v.Args[0]
10169
10170
10171
10172 for {
10173 off := auxIntToInt32(v.AuxInt)
10174 sym := auxToSym(v.Aux)
10175 ptr := v_0
10176 if v_1.Op != OpAMD64MOVBstore {
10177 break
10178 }
10179 off2 := auxIntToInt32(v_1.AuxInt)
10180 sym2 := auxToSym(v_1.Aux)
10181 x := v_1.Args[1]
10182 ptr2 := v_1.Args[0]
10183 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
10184 break
10185 }
10186 v.reset(OpAMD64MOVBQZX)
10187 v.AddArg(x)
10188 return true
10189 }
10190
10191
10192
10193 for {
10194 off1 := auxIntToInt32(v.AuxInt)
10195 sym := auxToSym(v.Aux)
10196 if v_0.Op != OpAMD64ADDQconst {
10197 break
10198 }
10199 off2 := auxIntToInt32(v_0.AuxInt)
10200 ptr := v_0.Args[0]
10201 mem := v_1
10202 if !(is32Bit(int64(off1) + int64(off2))) {
10203 break
10204 }
10205 v.reset(OpAMD64MOVBload)
10206 v.AuxInt = int32ToAuxInt(off1 + off2)
10207 v.Aux = symToAux(sym)
10208 v.AddArg2(ptr, mem)
10209 return true
10210 }
10211
10212
10213
10214 for {
10215 off1 := auxIntToInt32(v.AuxInt)
10216 sym1 := auxToSym(v.Aux)
10217 if v_0.Op != OpAMD64LEAQ {
10218 break
10219 }
10220 off2 := auxIntToInt32(v_0.AuxInt)
10221 sym2 := auxToSym(v_0.Aux)
10222 base := v_0.Args[0]
10223 mem := v_1
10224 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10225 break
10226 }
10227 v.reset(OpAMD64MOVBload)
10228 v.AuxInt = int32ToAuxInt(off1 + off2)
10229 v.Aux = symToAux(mergeSym(sym1, sym2))
10230 v.AddArg2(base, mem)
10231 return true
10232 }
10233
10234
10235
10236 for {
10237 off := auxIntToInt32(v.AuxInt)
10238 sym := auxToSym(v.Aux)
10239 if v_0.Op != OpSB || !(symIsRO(sym)) {
10240 break
10241 }
10242 v.reset(OpAMD64MOVLconst)
10243 v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
10244 return true
10245 }
10246 return false
10247 }
10248 func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
10249 v_2 := v.Args[2]
10250 v_1 := v.Args[1]
10251 v_0 := v.Args[0]
10252
10253
10254
10255 for {
10256 off := auxIntToInt32(v.AuxInt)
10257 sym := auxToSym(v.Aux)
10258 ptr := v_0
10259 y := v_1
10260 if y.Op != OpAMD64SETL {
10261 break
10262 }
10263 x := y.Args[0]
10264 mem := v_2
10265 if !(y.Uses == 1) {
10266 break
10267 }
10268 v.reset(OpAMD64SETLstore)
10269 v.AuxInt = int32ToAuxInt(off)
10270 v.Aux = symToAux(sym)
10271 v.AddArg3(ptr, x, mem)
10272 return true
10273 }
10274
10275
10276
10277 for {
10278 off := auxIntToInt32(v.AuxInt)
10279 sym := auxToSym(v.Aux)
10280 ptr := v_0
10281 y := v_1
10282 if y.Op != OpAMD64SETLE {
10283 break
10284 }
10285 x := y.Args[0]
10286 mem := v_2
10287 if !(y.Uses == 1) {
10288 break
10289 }
10290 v.reset(OpAMD64SETLEstore)
10291 v.AuxInt = int32ToAuxInt(off)
10292 v.Aux = symToAux(sym)
10293 v.AddArg3(ptr, x, mem)
10294 return true
10295 }
10296
10297
10298
10299 for {
10300 off := auxIntToInt32(v.AuxInt)
10301 sym := auxToSym(v.Aux)
10302 ptr := v_0
10303 y := v_1
10304 if y.Op != OpAMD64SETG {
10305 break
10306 }
10307 x := y.Args[0]
10308 mem := v_2
10309 if !(y.Uses == 1) {
10310 break
10311 }
10312 v.reset(OpAMD64SETGstore)
10313 v.AuxInt = int32ToAuxInt(off)
10314 v.Aux = symToAux(sym)
10315 v.AddArg3(ptr, x, mem)
10316 return true
10317 }
10318
10319
10320
10321 for {
10322 off := auxIntToInt32(v.AuxInt)
10323 sym := auxToSym(v.Aux)
10324 ptr := v_0
10325 y := v_1
10326 if y.Op != OpAMD64SETGE {
10327 break
10328 }
10329 x := y.Args[0]
10330 mem := v_2
10331 if !(y.Uses == 1) {
10332 break
10333 }
10334 v.reset(OpAMD64SETGEstore)
10335 v.AuxInt = int32ToAuxInt(off)
10336 v.Aux = symToAux(sym)
10337 v.AddArg3(ptr, x, mem)
10338 return true
10339 }
10340
10341
10342
10343 for {
10344 off := auxIntToInt32(v.AuxInt)
10345 sym := auxToSym(v.Aux)
10346 ptr := v_0
10347 y := v_1
10348 if y.Op != OpAMD64SETEQ {
10349 break
10350 }
10351 x := y.Args[0]
10352 mem := v_2
10353 if !(y.Uses == 1) {
10354 break
10355 }
10356 v.reset(OpAMD64SETEQstore)
10357 v.AuxInt = int32ToAuxInt(off)
10358 v.Aux = symToAux(sym)
10359 v.AddArg3(ptr, x, mem)
10360 return true
10361 }
10362
10363
10364
10365 for {
10366 off := auxIntToInt32(v.AuxInt)
10367 sym := auxToSym(v.Aux)
10368 ptr := v_0
10369 y := v_1
10370 if y.Op != OpAMD64SETNE {
10371 break
10372 }
10373 x := y.Args[0]
10374 mem := v_2
10375 if !(y.Uses == 1) {
10376 break
10377 }
10378 v.reset(OpAMD64SETNEstore)
10379 v.AuxInt = int32ToAuxInt(off)
10380 v.Aux = symToAux(sym)
10381 v.AddArg3(ptr, x, mem)
10382 return true
10383 }
10384
10385
10386
10387 for {
10388 off := auxIntToInt32(v.AuxInt)
10389 sym := auxToSym(v.Aux)
10390 ptr := v_0
10391 y := v_1
10392 if y.Op != OpAMD64SETB {
10393 break
10394 }
10395 x := y.Args[0]
10396 mem := v_2
10397 if !(y.Uses == 1) {
10398 break
10399 }
10400 v.reset(OpAMD64SETBstore)
10401 v.AuxInt = int32ToAuxInt(off)
10402 v.Aux = symToAux(sym)
10403 v.AddArg3(ptr, x, mem)
10404 return true
10405 }
10406
10407
10408
10409 for {
10410 off := auxIntToInt32(v.AuxInt)
10411 sym := auxToSym(v.Aux)
10412 ptr := v_0
10413 y := v_1
10414 if y.Op != OpAMD64SETBE {
10415 break
10416 }
10417 x := y.Args[0]
10418 mem := v_2
10419 if !(y.Uses == 1) {
10420 break
10421 }
10422 v.reset(OpAMD64SETBEstore)
10423 v.AuxInt = int32ToAuxInt(off)
10424 v.Aux = symToAux(sym)
10425 v.AddArg3(ptr, x, mem)
10426 return true
10427 }
10428
10429
10430
10431 for {
10432 off := auxIntToInt32(v.AuxInt)
10433 sym := auxToSym(v.Aux)
10434 ptr := v_0
10435 y := v_1
10436 if y.Op != OpAMD64SETA {
10437 break
10438 }
10439 x := y.Args[0]
10440 mem := v_2
10441 if !(y.Uses == 1) {
10442 break
10443 }
10444 v.reset(OpAMD64SETAstore)
10445 v.AuxInt = int32ToAuxInt(off)
10446 v.Aux = symToAux(sym)
10447 v.AddArg3(ptr, x, mem)
10448 return true
10449 }
10450
10451
10452
10453 for {
10454 off := auxIntToInt32(v.AuxInt)
10455 sym := auxToSym(v.Aux)
10456 ptr := v_0
10457 y := v_1
10458 if y.Op != OpAMD64SETAE {
10459 break
10460 }
10461 x := y.Args[0]
10462 mem := v_2
10463 if !(y.Uses == 1) {
10464 break
10465 }
10466 v.reset(OpAMD64SETAEstore)
10467 v.AuxInt = int32ToAuxInt(off)
10468 v.Aux = symToAux(sym)
10469 v.AddArg3(ptr, x, mem)
10470 return true
10471 }
10472
10473
10474 for {
10475 off := auxIntToInt32(v.AuxInt)
10476 sym := auxToSym(v.Aux)
10477 ptr := v_0
10478 if v_1.Op != OpAMD64MOVBQSX {
10479 break
10480 }
10481 x := v_1.Args[0]
10482 mem := v_2
10483 v.reset(OpAMD64MOVBstore)
10484 v.AuxInt = int32ToAuxInt(off)
10485 v.Aux = symToAux(sym)
10486 v.AddArg3(ptr, x, mem)
10487 return true
10488 }
10489
10490
10491 for {
10492 off := auxIntToInt32(v.AuxInt)
10493 sym := auxToSym(v.Aux)
10494 ptr := v_0
10495 if v_1.Op != OpAMD64MOVBQZX {
10496 break
10497 }
10498 x := v_1.Args[0]
10499 mem := v_2
10500 v.reset(OpAMD64MOVBstore)
10501 v.AuxInt = int32ToAuxInt(off)
10502 v.Aux = symToAux(sym)
10503 v.AddArg3(ptr, x, mem)
10504 return true
10505 }
10506
10507
10508
10509 for {
10510 off1 := auxIntToInt32(v.AuxInt)
10511 sym := auxToSym(v.Aux)
10512 if v_0.Op != OpAMD64ADDQconst {
10513 break
10514 }
10515 off2 := auxIntToInt32(v_0.AuxInt)
10516 ptr := v_0.Args[0]
10517 val := v_1
10518 mem := v_2
10519 if !(is32Bit(int64(off1) + int64(off2))) {
10520 break
10521 }
10522 v.reset(OpAMD64MOVBstore)
10523 v.AuxInt = int32ToAuxInt(off1 + off2)
10524 v.Aux = symToAux(sym)
10525 v.AddArg3(ptr, val, mem)
10526 return true
10527 }
10528
10529
10530 for {
10531 off := auxIntToInt32(v.AuxInt)
10532 sym := auxToSym(v.Aux)
10533 ptr := v_0
10534 if v_1.Op != OpAMD64MOVLconst {
10535 break
10536 }
10537 c := auxIntToInt32(v_1.AuxInt)
10538 mem := v_2
10539 v.reset(OpAMD64MOVBstoreconst)
10540 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
10541 v.Aux = symToAux(sym)
10542 v.AddArg2(ptr, mem)
10543 return true
10544 }
10545
10546
10547 for {
10548 off := auxIntToInt32(v.AuxInt)
10549 sym := auxToSym(v.Aux)
10550 ptr := v_0
10551 if v_1.Op != OpAMD64MOVQconst {
10552 break
10553 }
10554 c := auxIntToInt64(v_1.AuxInt)
10555 mem := v_2
10556 v.reset(OpAMD64MOVBstoreconst)
10557 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
10558 v.Aux = symToAux(sym)
10559 v.AddArg2(ptr, mem)
10560 return true
10561 }
10562
10563
10564
10565 for {
10566 off1 := auxIntToInt32(v.AuxInt)
10567 sym1 := auxToSym(v.Aux)
10568 if v_0.Op != OpAMD64LEAQ {
10569 break
10570 }
10571 off2 := auxIntToInt32(v_0.AuxInt)
10572 sym2 := auxToSym(v_0.Aux)
10573 base := v_0.Args[0]
10574 val := v_1
10575 mem := v_2
10576 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10577 break
10578 }
10579 v.reset(OpAMD64MOVBstore)
10580 v.AuxInt = int32ToAuxInt(off1 + off2)
10581 v.Aux = symToAux(mergeSym(sym1, sym2))
10582 v.AddArg3(base, val, mem)
10583 return true
10584 }
10585 return false
10586 }
10587 func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
10588 v_1 := v.Args[1]
10589 v_0 := v.Args[0]
10590
10591
10592
10593 for {
10594 sc := auxIntToValAndOff(v.AuxInt)
10595 s := auxToSym(v.Aux)
10596 if v_0.Op != OpAMD64ADDQconst {
10597 break
10598 }
10599 off := auxIntToInt32(v_0.AuxInt)
10600 ptr := v_0.Args[0]
10601 mem := v_1
10602 if !(ValAndOff(sc).canAdd32(off)) {
10603 break
10604 }
10605 v.reset(OpAMD64MOVBstoreconst)
10606 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
10607 v.Aux = symToAux(s)
10608 v.AddArg2(ptr, mem)
10609 return true
10610 }
10611
10612
10613
10614 for {
10615 sc := auxIntToValAndOff(v.AuxInt)
10616 sym1 := auxToSym(v.Aux)
10617 if v_0.Op != OpAMD64LEAQ {
10618 break
10619 }
10620 off := auxIntToInt32(v_0.AuxInt)
10621 sym2 := auxToSym(v_0.Aux)
10622 ptr := v_0.Args[0]
10623 mem := v_1
10624 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
10625 break
10626 }
10627 v.reset(OpAMD64MOVBstoreconst)
10628 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
10629 v.Aux = symToAux(mergeSym(sym1, sym2))
10630 v.AddArg2(ptr, mem)
10631 return true
10632 }
10633 return false
10634 }
10635 func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool {
10636 v_0 := v.Args[0]
10637 b := v.Block
10638
10639
10640
10641 for {
10642 x := v_0
10643 if x.Op != OpAMD64MOVLload {
10644 break
10645 }
10646 off := auxIntToInt32(x.AuxInt)
10647 sym := auxToSym(x.Aux)
10648 mem := x.Args[1]
10649 ptr := x.Args[0]
10650 if !(x.Uses == 1 && clobber(x)) {
10651 break
10652 }
10653 b = x.Block
10654 v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
10655 v.copyOf(v0)
10656 v0.AuxInt = int32ToAuxInt(off)
10657 v0.Aux = symToAux(sym)
10658 v0.AddArg2(ptr, mem)
10659 return true
10660 }
10661
10662
10663
10664 for {
10665 x := v_0
10666 if x.Op != OpAMD64MOVQload {
10667 break
10668 }
10669 off := auxIntToInt32(x.AuxInt)
10670 sym := auxToSym(x.Aux)
10671 mem := x.Args[1]
10672 ptr := x.Args[0]
10673 if !(x.Uses == 1 && clobber(x)) {
10674 break
10675 }
10676 b = x.Block
10677 v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
10678 v.copyOf(v0)
10679 v0.AuxInt = int32ToAuxInt(off)
10680 v0.Aux = symToAux(sym)
10681 v0.AddArg2(ptr, mem)
10682 return true
10683 }
10684
10685
10686
10687 for {
10688 if v_0.Op != OpAMD64ANDLconst {
10689 break
10690 }
10691 c := auxIntToInt32(v_0.AuxInt)
10692 x := v_0.Args[0]
10693 if !(uint32(c)&0x80000000 == 0) {
10694 break
10695 }
10696 v.reset(OpAMD64ANDLconst)
10697 v.AuxInt = int32ToAuxInt(c & 0x7fffffff)
10698 v.AddArg(x)
10699 return true
10700 }
10701
10702
10703 for {
10704 if v_0.Op != OpAMD64MOVLQSX {
10705 break
10706 }
10707 x := v_0.Args[0]
10708 v.reset(OpAMD64MOVLQSX)
10709 v.AddArg(x)
10710 return true
10711 }
10712
10713
10714 for {
10715 if v_0.Op != OpAMD64MOVWQSX {
10716 break
10717 }
10718 x := v_0.Args[0]
10719 v.reset(OpAMD64MOVWQSX)
10720 v.AddArg(x)
10721 return true
10722 }
10723
10724
10725 for {
10726 if v_0.Op != OpAMD64MOVBQSX {
10727 break
10728 }
10729 x := v_0.Args[0]
10730 v.reset(OpAMD64MOVBQSX)
10731 v.AddArg(x)
10732 return true
10733 }
10734 return false
10735 }
10736 func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool {
10737 v_1 := v.Args[1]
10738 v_0 := v.Args[0]
10739 b := v.Block
10740 config := b.Func.Config
10741
10742
10743
10744 for {
10745 off := auxIntToInt32(v.AuxInt)
10746 sym := auxToSym(v.Aux)
10747 ptr := v_0
10748 if v_1.Op != OpAMD64MOVLstore {
10749 break
10750 }
10751 off2 := auxIntToInt32(v_1.AuxInt)
10752 sym2 := auxToSym(v_1.Aux)
10753 x := v_1.Args[1]
10754 ptr2 := v_1.Args[0]
10755 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
10756 break
10757 }
10758 v.reset(OpAMD64MOVLQSX)
10759 v.AddArg(x)
10760 return true
10761 }
10762
10763
10764
10765 for {
10766 off1 := auxIntToInt32(v.AuxInt)
10767 sym1 := auxToSym(v.Aux)
10768 if v_0.Op != OpAMD64LEAQ {
10769 break
10770 }
10771 off2 := auxIntToInt32(v_0.AuxInt)
10772 sym2 := auxToSym(v_0.Aux)
10773 base := v_0.Args[0]
10774 mem := v_1
10775 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10776 break
10777 }
10778 v.reset(OpAMD64MOVLQSXload)
10779 v.AuxInt = int32ToAuxInt(off1 + off2)
10780 v.Aux = symToAux(mergeSym(sym1, sym2))
10781 v.AddArg2(base, mem)
10782 return true
10783 }
10784
10785
10786
10787 for {
10788 off := auxIntToInt32(v.AuxInt)
10789 sym := auxToSym(v.Aux)
10790 if v_0.Op != OpSB || !(symIsRO(sym)) {
10791 break
10792 }
10793 v.reset(OpAMD64MOVQconst)
10794 v.AuxInt = int64ToAuxInt(int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))))
10795 return true
10796 }
10797 return false
10798 }
10799 func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
10800 v_0 := v.Args[0]
10801 b := v.Block
10802
10803
10804
10805 for {
10806 x := v_0
10807 if x.Op != OpAMD64MOVLload {
10808 break
10809 }
10810 off := auxIntToInt32(x.AuxInt)
10811 sym := auxToSym(x.Aux)
10812 mem := x.Args[1]
10813 ptr := x.Args[0]
10814 if !(x.Uses == 1 && clobber(x)) {
10815 break
10816 }
10817 b = x.Block
10818 v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
10819 v.copyOf(v0)
10820 v0.AuxInt = int32ToAuxInt(off)
10821 v0.Aux = symToAux(sym)
10822 v0.AddArg2(ptr, mem)
10823 return true
10824 }
10825
10826
10827
10828 for {
10829 x := v_0
10830 if x.Op != OpAMD64MOVQload {
10831 break
10832 }
10833 off := auxIntToInt32(x.AuxInt)
10834 sym := auxToSym(x.Aux)
10835 mem := x.Args[1]
10836 ptr := x.Args[0]
10837 if !(x.Uses == 1 && clobber(x)) {
10838 break
10839 }
10840 b = x.Block
10841 v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
10842 v.copyOf(v0)
10843 v0.AuxInt = int32ToAuxInt(off)
10844 v0.Aux = symToAux(sym)
10845 v0.AddArg2(ptr, mem)
10846 return true
10847 }
10848
10849
10850 for {
10851 if v_0.Op != OpAMD64ANDLconst {
10852 break
10853 }
10854 c := auxIntToInt32(v_0.AuxInt)
10855 x := v_0.Args[0]
10856 v.reset(OpAMD64ANDLconst)
10857 v.AuxInt = int32ToAuxInt(c)
10858 v.AddArg(x)
10859 return true
10860 }
10861
10862
10863 for {
10864 if v_0.Op != OpAMD64MOVLQZX {
10865 break
10866 }
10867 x := v_0.Args[0]
10868 v.reset(OpAMD64MOVLQZX)
10869 v.AddArg(x)
10870 return true
10871 }
10872
10873
10874 for {
10875 if v_0.Op != OpAMD64MOVWQZX {
10876 break
10877 }
10878 x := v_0.Args[0]
10879 v.reset(OpAMD64MOVWQZX)
10880 v.AddArg(x)
10881 return true
10882 }
10883
10884
10885 for {
10886 if v_0.Op != OpAMD64MOVBQZX {
10887 break
10888 }
10889 x := v_0.Args[0]
10890 v.reset(OpAMD64MOVBQZX)
10891 v.AddArg(x)
10892 return true
10893 }
10894 return false
10895 }
10896 func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool {
10897 v_1 := v.Args[1]
10898 v_0 := v.Args[0]
10899
10900
10901
10902 for {
10903 off1 := auxIntToInt32(v.AuxInt)
10904 sym := auxToSym(v.Aux)
10905 if v_0.Op != OpAMD64ADDQconst {
10906 break
10907 }
10908 off2 := auxIntToInt32(v_0.AuxInt)
10909 ptr := v_0.Args[0]
10910 mem := v_1
10911 if !(is32Bit(int64(off1) + int64(off2))) {
10912 break
10913 }
10914 v.reset(OpAMD64MOVLatomicload)
10915 v.AuxInt = int32ToAuxInt(off1 + off2)
10916 v.Aux = symToAux(sym)
10917 v.AddArg2(ptr, mem)
10918 return true
10919 }
10920
10921
10922
10923 for {
10924 off1 := auxIntToInt32(v.AuxInt)
10925 sym1 := auxToSym(v.Aux)
10926 if v_0.Op != OpAMD64LEAQ {
10927 break
10928 }
10929 off2 := auxIntToInt32(v_0.AuxInt)
10930 sym2 := auxToSym(v_0.Aux)
10931 ptr := v_0.Args[0]
10932 mem := v_1
10933 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10934 break
10935 }
10936 v.reset(OpAMD64MOVLatomicload)
10937 v.AuxInt = int32ToAuxInt(off1 + off2)
10938 v.Aux = symToAux(mergeSym(sym1, sym2))
10939 v.AddArg2(ptr, mem)
10940 return true
10941 }
10942 return false
10943 }
10944 func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool {
10945 v_0 := v.Args[0]
10946 b := v.Block
10947
10948
10949
10950 for {
10951 t := v.Type
10952 if v_0.Op != OpArg {
10953 break
10954 }
10955 u := v_0.Type
10956 off := auxIntToInt32(v_0.AuxInt)
10957 sym := auxToSym(v_0.Aux)
10958 if !(t.Size() == u.Size()) {
10959 break
10960 }
10961 b = b.Func.Entry
10962 v0 := b.NewValue0(v.Pos, OpArg, t)
10963 v.copyOf(v0)
10964 v0.AuxInt = int32ToAuxInt(off)
10965 v0.Aux = symToAux(sym)
10966 return true
10967 }
10968 return false
10969 }
10970 func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool {
10971 v_0 := v.Args[0]
10972 b := v.Block
10973
10974
10975
10976 for {
10977 t := v.Type
10978 if v_0.Op != OpArg {
10979 break
10980 }
10981 u := v_0.Type
10982 off := auxIntToInt32(v_0.AuxInt)
10983 sym := auxToSym(v_0.Aux)
10984 if !(t.Size() == u.Size()) {
10985 break
10986 }
10987 b = b.Func.Entry
10988 v0 := b.NewValue0(v.Pos, OpArg, t)
10989 v.copyOf(v0)
10990 v0.AuxInt = int32ToAuxInt(off)
10991 v0.Aux = symToAux(sym)
10992 return true
10993 }
10994 return false
10995 }
10996 func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
10997 v_1 := v.Args[1]
10998 v_0 := v.Args[0]
10999 b := v.Block
11000 config := b.Func.Config
11001
11002
11003
11004 for {
11005 off := auxIntToInt32(v.AuxInt)
11006 sym := auxToSym(v.Aux)
11007 ptr := v_0
11008 if v_1.Op != OpAMD64MOVLstore {
11009 break
11010 }
11011 off2 := auxIntToInt32(v_1.AuxInt)
11012 sym2 := auxToSym(v_1.Aux)
11013 x := v_1.Args[1]
11014 ptr2 := v_1.Args[0]
11015 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
11016 break
11017 }
11018 v.reset(OpAMD64MOVLQZX)
11019 v.AddArg(x)
11020 return true
11021 }
11022
11023
11024
11025 for {
11026 off1 := auxIntToInt32(v.AuxInt)
11027 sym := auxToSym(v.Aux)
11028 if v_0.Op != OpAMD64ADDQconst {
11029 break
11030 }
11031 off2 := auxIntToInt32(v_0.AuxInt)
11032 ptr := v_0.Args[0]
11033 mem := v_1
11034 if !(is32Bit(int64(off1) + int64(off2))) {
11035 break
11036 }
11037 v.reset(OpAMD64MOVLload)
11038 v.AuxInt = int32ToAuxInt(off1 + off2)
11039 v.Aux = symToAux(sym)
11040 v.AddArg2(ptr, mem)
11041 return true
11042 }
11043
11044
11045
11046 for {
11047 off1 := auxIntToInt32(v.AuxInt)
11048 sym1 := auxToSym(v.Aux)
11049 if v_0.Op != OpAMD64LEAQ {
11050 break
11051 }
11052 off2 := auxIntToInt32(v_0.AuxInt)
11053 sym2 := auxToSym(v_0.Aux)
11054 base := v_0.Args[0]
11055 mem := v_1
11056 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11057 break
11058 }
11059 v.reset(OpAMD64MOVLload)
11060 v.AuxInt = int32ToAuxInt(off1 + off2)
11061 v.Aux = symToAux(mergeSym(sym1, sym2))
11062 v.AddArg2(base, mem)
11063 return true
11064 }
11065
11066
11067 for {
11068 off := auxIntToInt32(v.AuxInt)
11069 sym := auxToSym(v.Aux)
11070 ptr := v_0
11071 if v_1.Op != OpAMD64MOVSSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
11072 break
11073 }
11074 val := v_1.Args[1]
11075 if ptr != v_1.Args[0] {
11076 break
11077 }
11078 v.reset(OpAMD64MOVLf2i)
11079 v.AddArg(val)
11080 return true
11081 }
11082
11083
11084
11085 for {
11086 off := auxIntToInt32(v.AuxInt)
11087 sym := auxToSym(v.Aux)
11088 if v_0.Op != OpSB || !(symIsRO(sym)) {
11089 break
11090 }
11091 v.reset(OpAMD64MOVLconst)
11092 v.AuxInt = int32ToAuxInt(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
11093 return true
11094 }
11095 return false
11096 }
11097 func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
11098 v_2 := v.Args[2]
11099 v_1 := v.Args[1]
11100 v_0 := v.Args[0]
11101
11102
11103 for {
11104 off := auxIntToInt32(v.AuxInt)
11105 sym := auxToSym(v.Aux)
11106 ptr := v_0
11107 if v_1.Op != OpAMD64MOVLQSX {
11108 break
11109 }
11110 x := v_1.Args[0]
11111 mem := v_2
11112 v.reset(OpAMD64MOVLstore)
11113 v.AuxInt = int32ToAuxInt(off)
11114 v.Aux = symToAux(sym)
11115 v.AddArg3(ptr, x, mem)
11116 return true
11117 }
11118
11119
11120 for {
11121 off := auxIntToInt32(v.AuxInt)
11122 sym := auxToSym(v.Aux)
11123 ptr := v_0
11124 if v_1.Op != OpAMD64MOVLQZX {
11125 break
11126 }
11127 x := v_1.Args[0]
11128 mem := v_2
11129 v.reset(OpAMD64MOVLstore)
11130 v.AuxInt = int32ToAuxInt(off)
11131 v.Aux = symToAux(sym)
11132 v.AddArg3(ptr, x, mem)
11133 return true
11134 }
11135
11136
11137
11138 for {
11139 off1 := auxIntToInt32(v.AuxInt)
11140 sym := auxToSym(v.Aux)
11141 if v_0.Op != OpAMD64ADDQconst {
11142 break
11143 }
11144 off2 := auxIntToInt32(v_0.AuxInt)
11145 ptr := v_0.Args[0]
11146 val := v_1
11147 mem := v_2
11148 if !(is32Bit(int64(off1) + int64(off2))) {
11149 break
11150 }
11151 v.reset(OpAMD64MOVLstore)
11152 v.AuxInt = int32ToAuxInt(off1 + off2)
11153 v.Aux = symToAux(sym)
11154 v.AddArg3(ptr, val, mem)
11155 return true
11156 }
11157
11158
11159 for {
11160 off := auxIntToInt32(v.AuxInt)
11161 sym := auxToSym(v.Aux)
11162 ptr := v_0
11163 if v_1.Op != OpAMD64MOVLconst {
11164 break
11165 }
11166 c := auxIntToInt32(v_1.AuxInt)
11167 mem := v_2
11168 v.reset(OpAMD64MOVLstoreconst)
11169 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11170 v.Aux = symToAux(sym)
11171 v.AddArg2(ptr, mem)
11172 return true
11173 }
11174
11175
11176 for {
11177 off := auxIntToInt32(v.AuxInt)
11178 sym := auxToSym(v.Aux)
11179 ptr := v_0
11180 if v_1.Op != OpAMD64MOVQconst {
11181 break
11182 }
11183 c := auxIntToInt64(v_1.AuxInt)
11184 mem := v_2
11185 v.reset(OpAMD64MOVLstoreconst)
11186 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11187 v.Aux = symToAux(sym)
11188 v.AddArg2(ptr, mem)
11189 return true
11190 }
11191
11192
11193
11194 for {
11195 off1 := auxIntToInt32(v.AuxInt)
11196 sym1 := auxToSym(v.Aux)
11197 if v_0.Op != OpAMD64LEAQ {
11198 break
11199 }
11200 off2 := auxIntToInt32(v_0.AuxInt)
11201 sym2 := auxToSym(v_0.Aux)
11202 base := v_0.Args[0]
11203 val := v_1
11204 mem := v_2
11205 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11206 break
11207 }
11208 v.reset(OpAMD64MOVLstore)
11209 v.AuxInt = int32ToAuxInt(off1 + off2)
11210 v.Aux = symToAux(mergeSym(sym1, sym2))
11211 v.AddArg3(base, val, mem)
11212 return true
11213 }
11214
11215
11216
11217 for {
11218 off := auxIntToInt32(v.AuxInt)
11219 sym := auxToSym(v.Aux)
11220 ptr := v_0
11221 y := v_1
11222 if y.Op != OpAMD64ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
11223 break
11224 }
11225 mem := y.Args[2]
11226 x := y.Args[0]
11227 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
11228 break
11229 }
11230 v.reset(OpAMD64ADDLmodify)
11231 v.AuxInt = int32ToAuxInt(off)
11232 v.Aux = symToAux(sym)
11233 v.AddArg3(ptr, x, mem)
11234 return true
11235 }
11236
11237
11238
11239 for {
11240 off := auxIntToInt32(v.AuxInt)
11241 sym := auxToSym(v.Aux)
11242 ptr := v_0
11243 y := v_1
11244 if y.Op != OpAMD64ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
11245 break
11246 }
11247 mem := y.Args[2]
11248 x := y.Args[0]
11249 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
11250 break
11251 }
11252 v.reset(OpAMD64ANDLmodify)
11253 v.AuxInt = int32ToAuxInt(off)
11254 v.Aux = symToAux(sym)
11255 v.AddArg3(ptr, x, mem)
11256 return true
11257 }
11258
11259
11260
11261 for {
11262 off := auxIntToInt32(v.AuxInt)
11263 sym := auxToSym(v.Aux)
11264 ptr := v_0
11265 y := v_1
11266 if y.Op != OpAMD64ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
11267 break
11268 }
11269 mem := y.Args[2]
11270 x := y.Args[0]
11271 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
11272 break
11273 }
11274 v.reset(OpAMD64ORLmodify)
11275 v.AuxInt = int32ToAuxInt(off)
11276 v.Aux = symToAux(sym)
11277 v.AddArg3(ptr, x, mem)
11278 return true
11279 }
11280
11281
11282
11283 for {
11284 off := auxIntToInt32(v.AuxInt)
11285 sym := auxToSym(v.Aux)
11286 ptr := v_0
11287 y := v_1
11288 if y.Op != OpAMD64XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
11289 break
11290 }
11291 mem := y.Args[2]
11292 x := y.Args[0]
11293 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
11294 break
11295 }
11296 v.reset(OpAMD64XORLmodify)
11297 v.AuxInt = int32ToAuxInt(off)
11298 v.Aux = symToAux(sym)
11299 v.AddArg3(ptr, x, mem)
11300 return true
11301 }
11302
11303
11304
11305 for {
11306 off := auxIntToInt32(v.AuxInt)
11307 sym := auxToSym(v.Aux)
11308 ptr := v_0
11309 y := v_1
11310 if y.Op != OpAMD64ADDL {
11311 break
11312 }
11313 _ = y.Args[1]
11314 y_0 := y.Args[0]
11315 y_1 := y.Args[1]
11316 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
11317 l := y_0
11318 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11319 continue
11320 }
11321 mem := l.Args[1]
11322 if ptr != l.Args[0] {
11323 continue
11324 }
11325 x := y_1
11326 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11327 continue
11328 }
11329 v.reset(OpAMD64ADDLmodify)
11330 v.AuxInt = int32ToAuxInt(off)
11331 v.Aux = symToAux(sym)
11332 v.AddArg3(ptr, x, mem)
11333 return true
11334 }
11335 break
11336 }
11337
11338
11339
11340 for {
11341 off := auxIntToInt32(v.AuxInt)
11342 sym := auxToSym(v.Aux)
11343 ptr := v_0
11344 y := v_1
11345 if y.Op != OpAMD64SUBL {
11346 break
11347 }
11348 x := y.Args[1]
11349 l := y.Args[0]
11350 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11351 break
11352 }
11353 mem := l.Args[1]
11354 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11355 break
11356 }
11357 v.reset(OpAMD64SUBLmodify)
11358 v.AuxInt = int32ToAuxInt(off)
11359 v.Aux = symToAux(sym)
11360 v.AddArg3(ptr, x, mem)
11361 return true
11362 }
11363
11364
11365
11366 for {
11367 off := auxIntToInt32(v.AuxInt)
11368 sym := auxToSym(v.Aux)
11369 ptr := v_0
11370 y := v_1
11371 if y.Op != OpAMD64ANDL {
11372 break
11373 }
11374 _ = y.Args[1]
11375 y_0 := y.Args[0]
11376 y_1 := y.Args[1]
11377 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
11378 l := y_0
11379 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11380 continue
11381 }
11382 mem := l.Args[1]
11383 if ptr != l.Args[0] {
11384 continue
11385 }
11386 x := y_1
11387 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11388 continue
11389 }
11390 v.reset(OpAMD64ANDLmodify)
11391 v.AuxInt = int32ToAuxInt(off)
11392 v.Aux = symToAux(sym)
11393 v.AddArg3(ptr, x, mem)
11394 return true
11395 }
11396 break
11397 }
11398
11399
11400
11401 for {
11402 off := auxIntToInt32(v.AuxInt)
11403 sym := auxToSym(v.Aux)
11404 ptr := v_0
11405 y := v_1
11406 if y.Op != OpAMD64ORL {
11407 break
11408 }
11409 _ = y.Args[1]
11410 y_0 := y.Args[0]
11411 y_1 := y.Args[1]
11412 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
11413 l := y_0
11414 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11415 continue
11416 }
11417 mem := l.Args[1]
11418 if ptr != l.Args[0] {
11419 continue
11420 }
11421 x := y_1
11422 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11423 continue
11424 }
11425 v.reset(OpAMD64ORLmodify)
11426 v.AuxInt = int32ToAuxInt(off)
11427 v.Aux = symToAux(sym)
11428 v.AddArg3(ptr, x, mem)
11429 return true
11430 }
11431 break
11432 }
11433
11434
11435
11436 for {
11437 off := auxIntToInt32(v.AuxInt)
11438 sym := auxToSym(v.Aux)
11439 ptr := v_0
11440 y := v_1
11441 if y.Op != OpAMD64XORL {
11442 break
11443 }
11444 _ = y.Args[1]
11445 y_0 := y.Args[0]
11446 y_1 := y.Args[1]
11447 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
11448 l := y_0
11449 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11450 continue
11451 }
11452 mem := l.Args[1]
11453 if ptr != l.Args[0] {
11454 continue
11455 }
11456 x := y_1
11457 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11458 continue
11459 }
11460 v.reset(OpAMD64XORLmodify)
11461 v.AuxInt = int32ToAuxInt(off)
11462 v.Aux = symToAux(sym)
11463 v.AddArg3(ptr, x, mem)
11464 return true
11465 }
11466 break
11467 }
11468
11469
11470
11471 for {
11472 off := auxIntToInt32(v.AuxInt)
11473 sym := auxToSym(v.Aux)
11474 ptr := v_0
11475 a := v_1
11476 if a.Op != OpAMD64ADDLconst {
11477 break
11478 }
11479 c := auxIntToInt32(a.AuxInt)
11480 l := a.Args[0]
11481 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11482 break
11483 }
11484 mem := l.Args[1]
11485 ptr2 := l.Args[0]
11486 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
11487 break
11488 }
11489 v.reset(OpAMD64ADDLconstmodify)
11490 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11491 v.Aux = symToAux(sym)
11492 v.AddArg2(ptr, mem)
11493 return true
11494 }
11495
11496
11497
11498 for {
11499 off := auxIntToInt32(v.AuxInt)
11500 sym := auxToSym(v.Aux)
11501 ptr := v_0
11502 a := v_1
11503 if a.Op != OpAMD64ANDLconst {
11504 break
11505 }
11506 c := auxIntToInt32(a.AuxInt)
11507 l := a.Args[0]
11508 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11509 break
11510 }
11511 mem := l.Args[1]
11512 ptr2 := l.Args[0]
11513 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
11514 break
11515 }
11516 v.reset(OpAMD64ANDLconstmodify)
11517 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11518 v.Aux = symToAux(sym)
11519 v.AddArg2(ptr, mem)
11520 return true
11521 }
11522
11523
11524
11525 for {
11526 off := auxIntToInt32(v.AuxInt)
11527 sym := auxToSym(v.Aux)
11528 ptr := v_0
11529 a := v_1
11530 if a.Op != OpAMD64ORLconst {
11531 break
11532 }
11533 c := auxIntToInt32(a.AuxInt)
11534 l := a.Args[0]
11535 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11536 break
11537 }
11538 mem := l.Args[1]
11539 ptr2 := l.Args[0]
11540 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
11541 break
11542 }
11543 v.reset(OpAMD64ORLconstmodify)
11544 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11545 v.Aux = symToAux(sym)
11546 v.AddArg2(ptr, mem)
11547 return true
11548 }
11549
11550
11551
11552 for {
11553 off := auxIntToInt32(v.AuxInt)
11554 sym := auxToSym(v.Aux)
11555 ptr := v_0
11556 a := v_1
11557 if a.Op != OpAMD64XORLconst {
11558 break
11559 }
11560 c := auxIntToInt32(a.AuxInt)
11561 l := a.Args[0]
11562 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11563 break
11564 }
11565 mem := l.Args[1]
11566 ptr2 := l.Args[0]
11567 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
11568 break
11569 }
11570 v.reset(OpAMD64XORLconstmodify)
11571 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11572 v.Aux = symToAux(sym)
11573 v.AddArg2(ptr, mem)
11574 return true
11575 }
11576
11577
11578 for {
11579 off := auxIntToInt32(v.AuxInt)
11580 sym := auxToSym(v.Aux)
11581 ptr := v_0
11582 if v_1.Op != OpAMD64MOVLf2i {
11583 break
11584 }
11585 val := v_1.Args[0]
11586 mem := v_2
11587 v.reset(OpAMD64MOVSSstore)
11588 v.AuxInt = int32ToAuxInt(off)
11589 v.Aux = symToAux(sym)
11590 v.AddArg3(ptr, val, mem)
11591 return true
11592 }
11593
11594
11595
11596 for {
11597 i := auxIntToInt32(v.AuxInt)
11598 s := auxToSym(v.Aux)
11599 p := v_0
11600 x := v_1
11601 if x.Op != OpAMD64BSWAPL {
11602 break
11603 }
11604 w := x.Args[0]
11605 mem := v_2
11606 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
11607 break
11608 }
11609 v.reset(OpAMD64MOVBELstore)
11610 v.AuxInt = int32ToAuxInt(i)
11611 v.Aux = symToAux(s)
11612 v.AddArg3(p, w, mem)
11613 return true
11614 }
11615 return false
11616 }
11617 func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
11618 v_1 := v.Args[1]
11619 v_0 := v.Args[0]
11620
11621
11622
11623 for {
11624 sc := auxIntToValAndOff(v.AuxInt)
11625 s := auxToSym(v.Aux)
11626 if v_0.Op != OpAMD64ADDQconst {
11627 break
11628 }
11629 off := auxIntToInt32(v_0.AuxInt)
11630 ptr := v_0.Args[0]
11631 mem := v_1
11632 if !(ValAndOff(sc).canAdd32(off)) {
11633 break
11634 }
11635 v.reset(OpAMD64MOVLstoreconst)
11636 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11637 v.Aux = symToAux(s)
11638 v.AddArg2(ptr, mem)
11639 return true
11640 }
11641
11642
11643
11644 for {
11645 sc := auxIntToValAndOff(v.AuxInt)
11646 sym1 := auxToSym(v.Aux)
11647 if v_0.Op != OpAMD64LEAQ {
11648 break
11649 }
11650 off := auxIntToInt32(v_0.AuxInt)
11651 sym2 := auxToSym(v_0.Aux)
11652 ptr := v_0.Args[0]
11653 mem := v_1
11654 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
11655 break
11656 }
11657 v.reset(OpAMD64MOVLstoreconst)
11658 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11659 v.Aux = symToAux(mergeSym(sym1, sym2))
11660 v.AddArg2(ptr, mem)
11661 return true
11662 }
11663 return false
11664 }
11665 func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool {
11666 v_1 := v.Args[1]
11667 v_0 := v.Args[0]
11668
11669
11670
11671 for {
11672 off1 := auxIntToInt32(v.AuxInt)
11673 sym := auxToSym(v.Aux)
11674 if v_0.Op != OpAMD64ADDQconst {
11675 break
11676 }
11677 off2 := auxIntToInt32(v_0.AuxInt)
11678 ptr := v_0.Args[0]
11679 mem := v_1
11680 if !(is32Bit(int64(off1) + int64(off2))) {
11681 break
11682 }
11683 v.reset(OpAMD64MOVOload)
11684 v.AuxInt = int32ToAuxInt(off1 + off2)
11685 v.Aux = symToAux(sym)
11686 v.AddArg2(ptr, mem)
11687 return true
11688 }
11689
11690
11691
11692 for {
11693 off1 := auxIntToInt32(v.AuxInt)
11694 sym1 := auxToSym(v.Aux)
11695 if v_0.Op != OpAMD64LEAQ {
11696 break
11697 }
11698 off2 := auxIntToInt32(v_0.AuxInt)
11699 sym2 := auxToSym(v_0.Aux)
11700 base := v_0.Args[0]
11701 mem := v_1
11702 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11703 break
11704 }
11705 v.reset(OpAMD64MOVOload)
11706 v.AuxInt = int32ToAuxInt(off1 + off2)
11707 v.Aux = symToAux(mergeSym(sym1, sym2))
11708 v.AddArg2(base, mem)
11709 return true
11710 }
11711 return false
11712 }
11713 func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
11714 v_2 := v.Args[2]
11715 v_1 := v.Args[1]
11716 v_0 := v.Args[0]
11717 b := v.Block
11718 config := b.Func.Config
11719 typ := &b.Func.Config.Types
11720
11721
11722
11723 for {
11724 off1 := auxIntToInt32(v.AuxInt)
11725 sym := auxToSym(v.Aux)
11726 if v_0.Op != OpAMD64ADDQconst {
11727 break
11728 }
11729 off2 := auxIntToInt32(v_0.AuxInt)
11730 ptr := v_0.Args[0]
11731 val := v_1
11732 mem := v_2
11733 if !(is32Bit(int64(off1) + int64(off2))) {
11734 break
11735 }
11736 v.reset(OpAMD64MOVOstore)
11737 v.AuxInt = int32ToAuxInt(off1 + off2)
11738 v.Aux = symToAux(sym)
11739 v.AddArg3(ptr, val, mem)
11740 return true
11741 }
11742
11743
11744
11745 for {
11746 off1 := auxIntToInt32(v.AuxInt)
11747 sym1 := auxToSym(v.Aux)
11748 if v_0.Op != OpAMD64LEAQ {
11749 break
11750 }
11751 off2 := auxIntToInt32(v_0.AuxInt)
11752 sym2 := auxToSym(v_0.Aux)
11753 base := v_0.Args[0]
11754 val := v_1
11755 mem := v_2
11756 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11757 break
11758 }
11759 v.reset(OpAMD64MOVOstore)
11760 v.AuxInt = int32ToAuxInt(off1 + off2)
11761 v.Aux = symToAux(mergeSym(sym1, sym2))
11762 v.AddArg3(base, val, mem)
11763 return true
11764 }
11765
11766
11767
11768 for {
11769 dstOff := auxIntToInt32(v.AuxInt)
11770 dstSym := auxToSym(v.Aux)
11771 ptr := v_0
11772 if v_1.Op != OpAMD64MOVOload {
11773 break
11774 }
11775 srcOff := auxIntToInt32(v_1.AuxInt)
11776 srcSym := auxToSym(v_1.Aux)
11777 v_1_0 := v_1.Args[0]
11778 if v_1_0.Op != OpSB {
11779 break
11780 }
11781 mem := v_2
11782 if !(symIsRO(srcSym)) {
11783 break
11784 }
11785 v.reset(OpAMD64MOVQstore)
11786 v.AuxInt = int32ToAuxInt(dstOff + 8)
11787 v.Aux = symToAux(dstSym)
11788 v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
11789 v0.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder)))
11790 v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem)
11791 v1.AuxInt = int32ToAuxInt(dstOff)
11792 v1.Aux = symToAux(dstSym)
11793 v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
11794 v2.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder)))
11795 v1.AddArg3(ptr, v2, mem)
11796 v.AddArg3(ptr, v0, v1)
11797 return true
11798 }
11799 return false
11800 }
11801 func rewriteValueAMD64_OpAMD64MOVOstoreconst(v *Value) bool {
11802 v_1 := v.Args[1]
11803 v_0 := v.Args[0]
11804
11805
11806
11807 for {
11808 sc := auxIntToValAndOff(v.AuxInt)
11809 s := auxToSym(v.Aux)
11810 if v_0.Op != OpAMD64ADDQconst {
11811 break
11812 }
11813 off := auxIntToInt32(v_0.AuxInt)
11814 ptr := v_0.Args[0]
11815 mem := v_1
11816 if !(ValAndOff(sc).canAdd32(off)) {
11817 break
11818 }
11819 v.reset(OpAMD64MOVOstoreconst)
11820 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11821 v.Aux = symToAux(s)
11822 v.AddArg2(ptr, mem)
11823 return true
11824 }
11825
11826
11827
11828 for {
11829 sc := auxIntToValAndOff(v.AuxInt)
11830 sym1 := auxToSym(v.Aux)
11831 if v_0.Op != OpAMD64LEAQ {
11832 break
11833 }
11834 off := auxIntToInt32(v_0.AuxInt)
11835 sym2 := auxToSym(v_0.Aux)
11836 ptr := v_0.Args[0]
11837 mem := v_1
11838 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
11839 break
11840 }
11841 v.reset(OpAMD64MOVOstoreconst)
11842 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11843 v.Aux = symToAux(mergeSym(sym1, sym2))
11844 v.AddArg2(ptr, mem)
11845 return true
11846 }
11847 return false
11848 }
11849 func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool {
11850 v_1 := v.Args[1]
11851 v_0 := v.Args[0]
11852
11853
11854
11855 for {
11856 off1 := auxIntToInt32(v.AuxInt)
11857 sym := auxToSym(v.Aux)
11858 if v_0.Op != OpAMD64ADDQconst {
11859 break
11860 }
11861 off2 := auxIntToInt32(v_0.AuxInt)
11862 ptr := v_0.Args[0]
11863 mem := v_1
11864 if !(is32Bit(int64(off1) + int64(off2))) {
11865 break
11866 }
11867 v.reset(OpAMD64MOVQatomicload)
11868 v.AuxInt = int32ToAuxInt(off1 + off2)
11869 v.Aux = symToAux(sym)
11870 v.AddArg2(ptr, mem)
11871 return true
11872 }
11873
11874
11875
11876 for {
11877 off1 := auxIntToInt32(v.AuxInt)
11878 sym1 := auxToSym(v.Aux)
11879 if v_0.Op != OpAMD64LEAQ {
11880 break
11881 }
11882 off2 := auxIntToInt32(v_0.AuxInt)
11883 sym2 := auxToSym(v_0.Aux)
11884 ptr := v_0.Args[0]
11885 mem := v_1
11886 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11887 break
11888 }
11889 v.reset(OpAMD64MOVQatomicload)
11890 v.AuxInt = int32ToAuxInt(off1 + off2)
11891 v.Aux = symToAux(mergeSym(sym1, sym2))
11892 v.AddArg2(ptr, mem)
11893 return true
11894 }
11895 return false
11896 }
11897 func rewriteValueAMD64_OpAMD64MOVQf2i(v *Value) bool {
11898 v_0 := v.Args[0]
11899 b := v.Block
11900
11901
11902
11903 for {
11904 t := v.Type
11905 if v_0.Op != OpArg {
11906 break
11907 }
11908 u := v_0.Type
11909 off := auxIntToInt32(v_0.AuxInt)
11910 sym := auxToSym(v_0.Aux)
11911 if !(t.Size() == u.Size()) {
11912 break
11913 }
11914 b = b.Func.Entry
11915 v0 := b.NewValue0(v.Pos, OpArg, t)
11916 v.copyOf(v0)
11917 v0.AuxInt = int32ToAuxInt(off)
11918 v0.Aux = symToAux(sym)
11919 return true
11920 }
11921 return false
11922 }
11923 func rewriteValueAMD64_OpAMD64MOVQi2f(v *Value) bool {
11924 v_0 := v.Args[0]
11925 b := v.Block
11926
11927
11928
11929 for {
11930 t := v.Type
11931 if v_0.Op != OpArg {
11932 break
11933 }
11934 u := v_0.Type
11935 off := auxIntToInt32(v_0.AuxInt)
11936 sym := auxToSym(v_0.Aux)
11937 if !(t.Size() == u.Size()) {
11938 break
11939 }
11940 b = b.Func.Entry
11941 v0 := b.NewValue0(v.Pos, OpArg, t)
11942 v.copyOf(v0)
11943 v0.AuxInt = int32ToAuxInt(off)
11944 v0.Aux = symToAux(sym)
11945 return true
11946 }
11947 return false
11948 }
11949 func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
11950 v_1 := v.Args[1]
11951 v_0 := v.Args[0]
11952 b := v.Block
11953 config := b.Func.Config
11954
11955
11956
11957 for {
11958 off := auxIntToInt32(v.AuxInt)
11959 sym := auxToSym(v.Aux)
11960 ptr := v_0
11961 if v_1.Op != OpAMD64MOVQstore {
11962 break
11963 }
11964 off2 := auxIntToInt32(v_1.AuxInt)
11965 sym2 := auxToSym(v_1.Aux)
11966 x := v_1.Args[1]
11967 ptr2 := v_1.Args[0]
11968 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
11969 break
11970 }
11971 v.copyOf(x)
11972 return true
11973 }
11974
11975
11976
11977 for {
11978 off1 := auxIntToInt32(v.AuxInt)
11979 sym := auxToSym(v.Aux)
11980 if v_0.Op != OpAMD64ADDQconst {
11981 break
11982 }
11983 off2 := auxIntToInt32(v_0.AuxInt)
11984 ptr := v_0.Args[0]
11985 mem := v_1
11986 if !(is32Bit(int64(off1) + int64(off2))) {
11987 break
11988 }
11989 v.reset(OpAMD64MOVQload)
11990 v.AuxInt = int32ToAuxInt(off1 + off2)
11991 v.Aux = symToAux(sym)
11992 v.AddArg2(ptr, mem)
11993 return true
11994 }
11995
11996
11997
11998 for {
11999 off1 := auxIntToInt32(v.AuxInt)
12000 sym1 := auxToSym(v.Aux)
12001 if v_0.Op != OpAMD64LEAQ {
12002 break
12003 }
12004 off2 := auxIntToInt32(v_0.AuxInt)
12005 sym2 := auxToSym(v_0.Aux)
12006 base := v_0.Args[0]
12007 mem := v_1
12008 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12009 break
12010 }
12011 v.reset(OpAMD64MOVQload)
12012 v.AuxInt = int32ToAuxInt(off1 + off2)
12013 v.Aux = symToAux(mergeSym(sym1, sym2))
12014 v.AddArg2(base, mem)
12015 return true
12016 }
12017
12018
12019 for {
12020 off := auxIntToInt32(v.AuxInt)
12021 sym := auxToSym(v.Aux)
12022 ptr := v_0
12023 if v_1.Op != OpAMD64MOVSDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
12024 break
12025 }
12026 val := v_1.Args[1]
12027 if ptr != v_1.Args[0] {
12028 break
12029 }
12030 v.reset(OpAMD64MOVQf2i)
12031 v.AddArg(val)
12032 return true
12033 }
12034
12035
12036
12037 for {
12038 off := auxIntToInt32(v.AuxInt)
12039 sym := auxToSym(v.Aux)
12040 if v_0.Op != OpSB || !(symIsRO(sym)) {
12041 break
12042 }
12043 v.reset(OpAMD64MOVQconst)
12044 v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
12045 return true
12046 }
12047 return false
12048 }
12049 func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
12050 v_2 := v.Args[2]
12051 v_1 := v.Args[1]
12052 v_0 := v.Args[0]
12053
12054
12055
12056 for {
12057 off1 := auxIntToInt32(v.AuxInt)
12058 sym := auxToSym(v.Aux)
12059 if v_0.Op != OpAMD64ADDQconst {
12060 break
12061 }
12062 off2 := auxIntToInt32(v_0.AuxInt)
12063 ptr := v_0.Args[0]
12064 val := v_1
12065 mem := v_2
12066 if !(is32Bit(int64(off1) + int64(off2))) {
12067 break
12068 }
12069 v.reset(OpAMD64MOVQstore)
12070 v.AuxInt = int32ToAuxInt(off1 + off2)
12071 v.Aux = symToAux(sym)
12072 v.AddArg3(ptr, val, mem)
12073 return true
12074 }
12075
12076
12077
12078 for {
12079 off := auxIntToInt32(v.AuxInt)
12080 sym := auxToSym(v.Aux)
12081 ptr := v_0
12082 if v_1.Op != OpAMD64MOVQconst {
12083 break
12084 }
12085 c := auxIntToInt64(v_1.AuxInt)
12086 mem := v_2
12087 if !(validVal(c)) {
12088 break
12089 }
12090 v.reset(OpAMD64MOVQstoreconst)
12091 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12092 v.Aux = symToAux(sym)
12093 v.AddArg2(ptr, mem)
12094 return true
12095 }
12096
12097
12098
12099 for {
12100 off1 := auxIntToInt32(v.AuxInt)
12101 sym1 := auxToSym(v.Aux)
12102 if v_0.Op != OpAMD64LEAQ {
12103 break
12104 }
12105 off2 := auxIntToInt32(v_0.AuxInt)
12106 sym2 := auxToSym(v_0.Aux)
12107 base := v_0.Args[0]
12108 val := v_1
12109 mem := v_2
12110 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12111 break
12112 }
12113 v.reset(OpAMD64MOVQstore)
12114 v.AuxInt = int32ToAuxInt(off1 + off2)
12115 v.Aux = symToAux(mergeSym(sym1, sym2))
12116 v.AddArg3(base, val, mem)
12117 return true
12118 }
12119
12120
12121
12122 for {
12123 off := auxIntToInt32(v.AuxInt)
12124 sym := auxToSym(v.Aux)
12125 ptr := v_0
12126 y := v_1
12127 if y.Op != OpAMD64ADDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12128 break
12129 }
12130 mem := y.Args[2]
12131 x := y.Args[0]
12132 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12133 break
12134 }
12135 v.reset(OpAMD64ADDQmodify)
12136 v.AuxInt = int32ToAuxInt(off)
12137 v.Aux = symToAux(sym)
12138 v.AddArg3(ptr, x, mem)
12139 return true
12140 }
12141
12142
12143
12144 for {
12145 off := auxIntToInt32(v.AuxInt)
12146 sym := auxToSym(v.Aux)
12147 ptr := v_0
12148 y := v_1
12149 if y.Op != OpAMD64ANDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12150 break
12151 }
12152 mem := y.Args[2]
12153 x := y.Args[0]
12154 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12155 break
12156 }
12157 v.reset(OpAMD64ANDQmodify)
12158 v.AuxInt = int32ToAuxInt(off)
12159 v.Aux = symToAux(sym)
12160 v.AddArg3(ptr, x, mem)
12161 return true
12162 }
12163
12164
12165
12166 for {
12167 off := auxIntToInt32(v.AuxInt)
12168 sym := auxToSym(v.Aux)
12169 ptr := v_0
12170 y := v_1
12171 if y.Op != OpAMD64ORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12172 break
12173 }
12174 mem := y.Args[2]
12175 x := y.Args[0]
12176 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12177 break
12178 }
12179 v.reset(OpAMD64ORQmodify)
12180 v.AuxInt = int32ToAuxInt(off)
12181 v.Aux = symToAux(sym)
12182 v.AddArg3(ptr, x, mem)
12183 return true
12184 }
12185
12186
12187
12188 for {
12189 off := auxIntToInt32(v.AuxInt)
12190 sym := auxToSym(v.Aux)
12191 ptr := v_0
12192 y := v_1
12193 if y.Op != OpAMD64XORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12194 break
12195 }
12196 mem := y.Args[2]
12197 x := y.Args[0]
12198 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12199 break
12200 }
12201 v.reset(OpAMD64XORQmodify)
12202 v.AuxInt = int32ToAuxInt(off)
12203 v.Aux = symToAux(sym)
12204 v.AddArg3(ptr, x, mem)
12205 return true
12206 }
12207
12208
12209
12210 for {
12211 off := auxIntToInt32(v.AuxInt)
12212 sym := auxToSym(v.Aux)
12213 ptr := v_0
12214 y := v_1
12215 if y.Op != OpAMD64ADDQ {
12216 break
12217 }
12218 _ = y.Args[1]
12219 y_0 := y.Args[0]
12220 y_1 := y.Args[1]
12221 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12222 l := y_0
12223 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12224 continue
12225 }
12226 mem := l.Args[1]
12227 if ptr != l.Args[0] {
12228 continue
12229 }
12230 x := y_1
12231 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12232 continue
12233 }
12234 v.reset(OpAMD64ADDQmodify)
12235 v.AuxInt = int32ToAuxInt(off)
12236 v.Aux = symToAux(sym)
12237 v.AddArg3(ptr, x, mem)
12238 return true
12239 }
12240 break
12241 }
12242
12243
12244
12245 for {
12246 off := auxIntToInt32(v.AuxInt)
12247 sym := auxToSym(v.Aux)
12248 ptr := v_0
12249 y := v_1
12250 if y.Op != OpAMD64SUBQ {
12251 break
12252 }
12253 x := y.Args[1]
12254 l := y.Args[0]
12255 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12256 break
12257 }
12258 mem := l.Args[1]
12259 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12260 break
12261 }
12262 v.reset(OpAMD64SUBQmodify)
12263 v.AuxInt = int32ToAuxInt(off)
12264 v.Aux = symToAux(sym)
12265 v.AddArg3(ptr, x, mem)
12266 return true
12267 }
12268
12269
12270
12271 for {
12272 off := auxIntToInt32(v.AuxInt)
12273 sym := auxToSym(v.Aux)
12274 ptr := v_0
12275 y := v_1
12276 if y.Op != OpAMD64ANDQ {
12277 break
12278 }
12279 _ = y.Args[1]
12280 y_0 := y.Args[0]
12281 y_1 := y.Args[1]
12282 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12283 l := y_0
12284 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12285 continue
12286 }
12287 mem := l.Args[1]
12288 if ptr != l.Args[0] {
12289 continue
12290 }
12291 x := y_1
12292 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12293 continue
12294 }
12295 v.reset(OpAMD64ANDQmodify)
12296 v.AuxInt = int32ToAuxInt(off)
12297 v.Aux = symToAux(sym)
12298 v.AddArg3(ptr, x, mem)
12299 return true
12300 }
12301 break
12302 }
12303
12304
12305
12306 for {
12307 off := auxIntToInt32(v.AuxInt)
12308 sym := auxToSym(v.Aux)
12309 ptr := v_0
12310 y := v_1
12311 if y.Op != OpAMD64ORQ {
12312 break
12313 }
12314 _ = y.Args[1]
12315 y_0 := y.Args[0]
12316 y_1 := y.Args[1]
12317 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12318 l := y_0
12319 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12320 continue
12321 }
12322 mem := l.Args[1]
12323 if ptr != l.Args[0] {
12324 continue
12325 }
12326 x := y_1
12327 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12328 continue
12329 }
12330 v.reset(OpAMD64ORQmodify)
12331 v.AuxInt = int32ToAuxInt(off)
12332 v.Aux = symToAux(sym)
12333 v.AddArg3(ptr, x, mem)
12334 return true
12335 }
12336 break
12337 }
12338
12339
12340
12341 for {
12342 off := auxIntToInt32(v.AuxInt)
12343 sym := auxToSym(v.Aux)
12344 ptr := v_0
12345 y := v_1
12346 if y.Op != OpAMD64XORQ {
12347 break
12348 }
12349 _ = y.Args[1]
12350 y_0 := y.Args[0]
12351 y_1 := y.Args[1]
12352 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12353 l := y_0
12354 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12355 continue
12356 }
12357 mem := l.Args[1]
12358 if ptr != l.Args[0] {
12359 continue
12360 }
12361 x := y_1
12362 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12363 continue
12364 }
12365 v.reset(OpAMD64XORQmodify)
12366 v.AuxInt = int32ToAuxInt(off)
12367 v.Aux = symToAux(sym)
12368 v.AddArg3(ptr, x, mem)
12369 return true
12370 }
12371 break
12372 }
12373
12374
12375
12376 for {
12377 off := auxIntToInt32(v.AuxInt)
12378 sym := auxToSym(v.Aux)
12379 ptr := v_0
12380 x := v_1
12381 if x.Op != OpAMD64BTSQconst {
12382 break
12383 }
12384 c := auxIntToInt8(x.AuxInt)
12385 l := x.Args[0]
12386 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12387 break
12388 }
12389 mem := l.Args[1]
12390 if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
12391 break
12392 }
12393 v.reset(OpAMD64BTSQconstmodify)
12394 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12395 v.Aux = symToAux(sym)
12396 v.AddArg2(ptr, mem)
12397 return true
12398 }
12399
12400
12401
12402 for {
12403 off := auxIntToInt32(v.AuxInt)
12404 sym := auxToSym(v.Aux)
12405 ptr := v_0
12406 x := v_1
12407 if x.Op != OpAMD64BTRQconst {
12408 break
12409 }
12410 c := auxIntToInt8(x.AuxInt)
12411 l := x.Args[0]
12412 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12413 break
12414 }
12415 mem := l.Args[1]
12416 if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
12417 break
12418 }
12419 v.reset(OpAMD64BTRQconstmodify)
12420 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12421 v.Aux = symToAux(sym)
12422 v.AddArg2(ptr, mem)
12423 return true
12424 }
12425
12426
12427
12428 for {
12429 off := auxIntToInt32(v.AuxInt)
12430 sym := auxToSym(v.Aux)
12431 ptr := v_0
12432 x := v_1
12433 if x.Op != OpAMD64BTCQconst {
12434 break
12435 }
12436 c := auxIntToInt8(x.AuxInt)
12437 l := x.Args[0]
12438 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12439 break
12440 }
12441 mem := l.Args[1]
12442 if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
12443 break
12444 }
12445 v.reset(OpAMD64BTCQconstmodify)
12446 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12447 v.Aux = symToAux(sym)
12448 v.AddArg2(ptr, mem)
12449 return true
12450 }
12451
12452
12453
12454 for {
12455 off := auxIntToInt32(v.AuxInt)
12456 sym := auxToSym(v.Aux)
12457 ptr := v_0
12458 a := v_1
12459 if a.Op != OpAMD64ADDQconst {
12460 break
12461 }
12462 c := auxIntToInt32(a.AuxInt)
12463 l := a.Args[0]
12464 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12465 break
12466 }
12467 mem := l.Args[1]
12468 ptr2 := l.Args[0]
12469 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12470 break
12471 }
12472 v.reset(OpAMD64ADDQconstmodify)
12473 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12474 v.Aux = symToAux(sym)
12475 v.AddArg2(ptr, mem)
12476 return true
12477 }
12478
12479
12480
12481 for {
12482 off := auxIntToInt32(v.AuxInt)
12483 sym := auxToSym(v.Aux)
12484 ptr := v_0
12485 a := v_1
12486 if a.Op != OpAMD64ANDQconst {
12487 break
12488 }
12489 c := auxIntToInt32(a.AuxInt)
12490 l := a.Args[0]
12491 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12492 break
12493 }
12494 mem := l.Args[1]
12495 ptr2 := l.Args[0]
12496 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12497 break
12498 }
12499 v.reset(OpAMD64ANDQconstmodify)
12500 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12501 v.Aux = symToAux(sym)
12502 v.AddArg2(ptr, mem)
12503 return true
12504 }
12505
12506
12507
12508 for {
12509 off := auxIntToInt32(v.AuxInt)
12510 sym := auxToSym(v.Aux)
12511 ptr := v_0
12512 a := v_1
12513 if a.Op != OpAMD64ORQconst {
12514 break
12515 }
12516 c := auxIntToInt32(a.AuxInt)
12517 l := a.Args[0]
12518 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12519 break
12520 }
12521 mem := l.Args[1]
12522 ptr2 := l.Args[0]
12523 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12524 break
12525 }
12526 v.reset(OpAMD64ORQconstmodify)
12527 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12528 v.Aux = symToAux(sym)
12529 v.AddArg2(ptr, mem)
12530 return true
12531 }
12532
12533
12534
12535 for {
12536 off := auxIntToInt32(v.AuxInt)
12537 sym := auxToSym(v.Aux)
12538 ptr := v_0
12539 a := v_1
12540 if a.Op != OpAMD64XORQconst {
12541 break
12542 }
12543 c := auxIntToInt32(a.AuxInt)
12544 l := a.Args[0]
12545 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12546 break
12547 }
12548 mem := l.Args[1]
12549 ptr2 := l.Args[0]
12550 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12551 break
12552 }
12553 v.reset(OpAMD64XORQconstmodify)
12554 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12555 v.Aux = symToAux(sym)
12556 v.AddArg2(ptr, mem)
12557 return true
12558 }
12559
12560
12561 for {
12562 off := auxIntToInt32(v.AuxInt)
12563 sym := auxToSym(v.Aux)
12564 ptr := v_0
12565 if v_1.Op != OpAMD64MOVQf2i {
12566 break
12567 }
12568 val := v_1.Args[0]
12569 mem := v_2
12570 v.reset(OpAMD64MOVSDstore)
12571 v.AuxInt = int32ToAuxInt(off)
12572 v.Aux = symToAux(sym)
12573 v.AddArg3(ptr, val, mem)
12574 return true
12575 }
12576
12577
12578
12579 for {
12580 i := auxIntToInt32(v.AuxInt)
12581 s := auxToSym(v.Aux)
12582 p := v_0
12583 x := v_1
12584 if x.Op != OpAMD64BSWAPQ {
12585 break
12586 }
12587 w := x.Args[0]
12588 mem := v_2
12589 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
12590 break
12591 }
12592 v.reset(OpAMD64MOVBEQstore)
12593 v.AuxInt = int32ToAuxInt(i)
12594 v.Aux = symToAux(s)
12595 v.AddArg3(p, w, mem)
12596 return true
12597 }
12598 return false
12599 }
12600 func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
12601 v_1 := v.Args[1]
12602 v_0 := v.Args[0]
12603
12604
12605
12606 for {
12607 sc := auxIntToValAndOff(v.AuxInt)
12608 s := auxToSym(v.Aux)
12609 if v_0.Op != OpAMD64ADDQconst {
12610 break
12611 }
12612 off := auxIntToInt32(v_0.AuxInt)
12613 ptr := v_0.Args[0]
12614 mem := v_1
12615 if !(ValAndOff(sc).canAdd32(off)) {
12616 break
12617 }
12618 v.reset(OpAMD64MOVQstoreconst)
12619 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
12620 v.Aux = symToAux(s)
12621 v.AddArg2(ptr, mem)
12622 return true
12623 }
12624
12625
12626
12627 for {
12628 sc := auxIntToValAndOff(v.AuxInt)
12629 sym1 := auxToSym(v.Aux)
12630 if v_0.Op != OpAMD64LEAQ {
12631 break
12632 }
12633 off := auxIntToInt32(v_0.AuxInt)
12634 sym2 := auxToSym(v_0.Aux)
12635 ptr := v_0.Args[0]
12636 mem := v_1
12637 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
12638 break
12639 }
12640 v.reset(OpAMD64MOVQstoreconst)
12641 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
12642 v.Aux = symToAux(mergeSym(sym1, sym2))
12643 v.AddArg2(ptr, mem)
12644 return true
12645 }
12646
12647
12648
12649 for {
12650 c := auxIntToValAndOff(v.AuxInt)
12651 s := auxToSym(v.Aux)
12652 p1 := v_0
12653 x := v_1
12654 if x.Op != OpAMD64MOVQstoreconst {
12655 break
12656 }
12657 a := auxIntToValAndOff(x.AuxInt)
12658 if auxToSym(x.Aux) != s {
12659 break
12660 }
12661 mem := x.Args[1]
12662 p0 := x.Args[0]
12663 if !(x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) {
12664 break
12665 }
12666 v.reset(OpAMD64MOVOstoreconst)
12667 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off()))
12668 v.Aux = symToAux(s)
12669 v.AddArg2(p0, mem)
12670 return true
12671 }
12672
12673
12674
12675 for {
12676 a := auxIntToValAndOff(v.AuxInt)
12677 s := auxToSym(v.Aux)
12678 p0 := v_0
12679 x := v_1
12680 if x.Op != OpAMD64MOVQstoreconst {
12681 break
12682 }
12683 c := auxIntToValAndOff(x.AuxInt)
12684 if auxToSym(x.Aux) != s {
12685 break
12686 }
12687 mem := x.Args[1]
12688 p1 := x.Args[0]
12689 if !(x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) {
12690 break
12691 }
12692 v.reset(OpAMD64MOVOstoreconst)
12693 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off()))
12694 v.Aux = symToAux(s)
12695 v.AddArg2(p0, mem)
12696 return true
12697 }
12698 return false
12699 }
12700 func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool {
12701 v_1 := v.Args[1]
12702 v_0 := v.Args[0]
12703
12704
12705
12706 for {
12707 off1 := auxIntToInt32(v.AuxInt)
12708 sym := auxToSym(v.Aux)
12709 if v_0.Op != OpAMD64ADDQconst {
12710 break
12711 }
12712 off2 := auxIntToInt32(v_0.AuxInt)
12713 ptr := v_0.Args[0]
12714 mem := v_1
12715 if !(is32Bit(int64(off1) + int64(off2))) {
12716 break
12717 }
12718 v.reset(OpAMD64MOVSDload)
12719 v.AuxInt = int32ToAuxInt(off1 + off2)
12720 v.Aux = symToAux(sym)
12721 v.AddArg2(ptr, mem)
12722 return true
12723 }
12724
12725
12726
12727 for {
12728 off1 := auxIntToInt32(v.AuxInt)
12729 sym1 := auxToSym(v.Aux)
12730 if v_0.Op != OpAMD64LEAQ {
12731 break
12732 }
12733 off2 := auxIntToInt32(v_0.AuxInt)
12734 sym2 := auxToSym(v_0.Aux)
12735 base := v_0.Args[0]
12736 mem := v_1
12737 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12738 break
12739 }
12740 v.reset(OpAMD64MOVSDload)
12741 v.AuxInt = int32ToAuxInt(off1 + off2)
12742 v.Aux = symToAux(mergeSym(sym1, sym2))
12743 v.AddArg2(base, mem)
12744 return true
12745 }
12746
12747
12748 for {
12749 off := auxIntToInt32(v.AuxInt)
12750 sym := auxToSym(v.Aux)
12751 ptr := v_0
12752 if v_1.Op != OpAMD64MOVQstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
12753 break
12754 }
12755 val := v_1.Args[1]
12756 if ptr != v_1.Args[0] {
12757 break
12758 }
12759 v.reset(OpAMD64MOVQi2f)
12760 v.AddArg(val)
12761 return true
12762 }
12763 return false
12764 }
12765 func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool {
12766 v_2 := v.Args[2]
12767 v_1 := v.Args[1]
12768 v_0 := v.Args[0]
12769 b := v.Block
12770 typ := &b.Func.Config.Types
12771
12772
12773
12774 for {
12775 off1 := auxIntToInt32(v.AuxInt)
12776 sym := auxToSym(v.Aux)
12777 if v_0.Op != OpAMD64ADDQconst {
12778 break
12779 }
12780 off2 := auxIntToInt32(v_0.AuxInt)
12781 ptr := v_0.Args[0]
12782 val := v_1
12783 mem := v_2
12784 if !(is32Bit(int64(off1) + int64(off2))) {
12785 break
12786 }
12787 v.reset(OpAMD64MOVSDstore)
12788 v.AuxInt = int32ToAuxInt(off1 + off2)
12789 v.Aux = symToAux(sym)
12790 v.AddArg3(ptr, val, mem)
12791 return true
12792 }
12793
12794
12795
12796 for {
12797 off1 := auxIntToInt32(v.AuxInt)
12798 sym1 := auxToSym(v.Aux)
12799 if v_0.Op != OpAMD64LEAQ {
12800 break
12801 }
12802 off2 := auxIntToInt32(v_0.AuxInt)
12803 sym2 := auxToSym(v_0.Aux)
12804 base := v_0.Args[0]
12805 val := v_1
12806 mem := v_2
12807 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12808 break
12809 }
12810 v.reset(OpAMD64MOVSDstore)
12811 v.AuxInt = int32ToAuxInt(off1 + off2)
12812 v.Aux = symToAux(mergeSym(sym1, sym2))
12813 v.AddArg3(base, val, mem)
12814 return true
12815 }
12816
12817
12818 for {
12819 off := auxIntToInt32(v.AuxInt)
12820 sym := auxToSym(v.Aux)
12821 ptr := v_0
12822 if v_1.Op != OpAMD64MOVQi2f {
12823 break
12824 }
12825 val := v_1.Args[0]
12826 mem := v_2
12827 v.reset(OpAMD64MOVQstore)
12828 v.AuxInt = int32ToAuxInt(off)
12829 v.Aux = symToAux(sym)
12830 v.AddArg3(ptr, val, mem)
12831 return true
12832 }
12833
12834
12835
12836 for {
12837 off := auxIntToInt32(v.AuxInt)
12838 sym := auxToSym(v.Aux)
12839 ptr := v_0
12840 if v_1.Op != OpAMD64MOVSDconst {
12841 break
12842 }
12843 f := auxIntToFloat64(v_1.AuxInt)
12844 mem := v_2
12845 if !(f == f) {
12846 break
12847 }
12848 v.reset(OpAMD64MOVQstore)
12849 v.AuxInt = int32ToAuxInt(off)
12850 v.Aux = symToAux(sym)
12851 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
12852 v0.AuxInt = int64ToAuxInt(int64(math.Float64bits(f)))
12853 v.AddArg3(ptr, v0, mem)
12854 return true
12855 }
12856 return false
12857 }
12858 func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool {
12859 v_1 := v.Args[1]
12860 v_0 := v.Args[0]
12861
12862
12863
12864 for {
12865 off1 := auxIntToInt32(v.AuxInt)
12866 sym := auxToSym(v.Aux)
12867 if v_0.Op != OpAMD64ADDQconst {
12868 break
12869 }
12870 off2 := auxIntToInt32(v_0.AuxInt)
12871 ptr := v_0.Args[0]
12872 mem := v_1
12873 if !(is32Bit(int64(off1) + int64(off2))) {
12874 break
12875 }
12876 v.reset(OpAMD64MOVSSload)
12877 v.AuxInt = int32ToAuxInt(off1 + off2)
12878 v.Aux = symToAux(sym)
12879 v.AddArg2(ptr, mem)
12880 return true
12881 }
12882
12883
12884
12885 for {
12886 off1 := auxIntToInt32(v.AuxInt)
12887 sym1 := auxToSym(v.Aux)
12888 if v_0.Op != OpAMD64LEAQ {
12889 break
12890 }
12891 off2 := auxIntToInt32(v_0.AuxInt)
12892 sym2 := auxToSym(v_0.Aux)
12893 base := v_0.Args[0]
12894 mem := v_1
12895 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12896 break
12897 }
12898 v.reset(OpAMD64MOVSSload)
12899 v.AuxInt = int32ToAuxInt(off1 + off2)
12900 v.Aux = symToAux(mergeSym(sym1, sym2))
12901 v.AddArg2(base, mem)
12902 return true
12903 }
12904
12905
12906 for {
12907 off := auxIntToInt32(v.AuxInt)
12908 sym := auxToSym(v.Aux)
12909 ptr := v_0
12910 if v_1.Op != OpAMD64MOVLstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
12911 break
12912 }
12913 val := v_1.Args[1]
12914 if ptr != v_1.Args[0] {
12915 break
12916 }
12917 v.reset(OpAMD64MOVLi2f)
12918 v.AddArg(val)
12919 return true
12920 }
12921 return false
12922 }
12923 func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool {
12924 v_2 := v.Args[2]
12925 v_1 := v.Args[1]
12926 v_0 := v.Args[0]
12927 b := v.Block
12928 typ := &b.Func.Config.Types
12929
12930
12931
12932 for {
12933 off1 := auxIntToInt32(v.AuxInt)
12934 sym := auxToSym(v.Aux)
12935 if v_0.Op != OpAMD64ADDQconst {
12936 break
12937 }
12938 off2 := auxIntToInt32(v_0.AuxInt)
12939 ptr := v_0.Args[0]
12940 val := v_1
12941 mem := v_2
12942 if !(is32Bit(int64(off1) + int64(off2))) {
12943 break
12944 }
12945 v.reset(OpAMD64MOVSSstore)
12946 v.AuxInt = int32ToAuxInt(off1 + off2)
12947 v.Aux = symToAux(sym)
12948 v.AddArg3(ptr, val, mem)
12949 return true
12950 }
12951
12952
12953
12954 for {
12955 off1 := auxIntToInt32(v.AuxInt)
12956 sym1 := auxToSym(v.Aux)
12957 if v_0.Op != OpAMD64LEAQ {
12958 break
12959 }
12960 off2 := auxIntToInt32(v_0.AuxInt)
12961 sym2 := auxToSym(v_0.Aux)
12962 base := v_0.Args[0]
12963 val := v_1
12964 mem := v_2
12965 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12966 break
12967 }
12968 v.reset(OpAMD64MOVSSstore)
12969 v.AuxInt = int32ToAuxInt(off1 + off2)
12970 v.Aux = symToAux(mergeSym(sym1, sym2))
12971 v.AddArg3(base, val, mem)
12972 return true
12973 }
12974
12975
12976 for {
12977 off := auxIntToInt32(v.AuxInt)
12978 sym := auxToSym(v.Aux)
12979 ptr := v_0
12980 if v_1.Op != OpAMD64MOVLi2f {
12981 break
12982 }
12983 val := v_1.Args[0]
12984 mem := v_2
12985 v.reset(OpAMD64MOVLstore)
12986 v.AuxInt = int32ToAuxInt(off)
12987 v.Aux = symToAux(sym)
12988 v.AddArg3(ptr, val, mem)
12989 return true
12990 }
12991
12992
12993
12994 for {
12995 off := auxIntToInt32(v.AuxInt)
12996 sym := auxToSym(v.Aux)
12997 ptr := v_0
12998 if v_1.Op != OpAMD64MOVSSconst {
12999 break
13000 }
13001 f := auxIntToFloat32(v_1.AuxInt)
13002 mem := v_2
13003 if !(f == f) {
13004 break
13005 }
13006 v.reset(OpAMD64MOVLstore)
13007 v.AuxInt = int32ToAuxInt(off)
13008 v.Aux = symToAux(sym)
13009 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt32)
13010 v0.AuxInt = int32ToAuxInt(int32(math.Float32bits(f)))
13011 v.AddArg3(ptr, v0, mem)
13012 return true
13013 }
13014 return false
13015 }
13016 func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool {
13017 v_0 := v.Args[0]
13018 b := v.Block
13019
13020
13021
13022 for {
13023 x := v_0
13024 if x.Op != OpAMD64MOVWload {
13025 break
13026 }
13027 off := auxIntToInt32(x.AuxInt)
13028 sym := auxToSym(x.Aux)
13029 mem := x.Args[1]
13030 ptr := x.Args[0]
13031 if !(x.Uses == 1 && clobber(x)) {
13032 break
13033 }
13034 b = x.Block
13035 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
13036 v.copyOf(v0)
13037 v0.AuxInt = int32ToAuxInt(off)
13038 v0.Aux = symToAux(sym)
13039 v0.AddArg2(ptr, mem)
13040 return true
13041 }
13042
13043
13044
13045 for {
13046 x := v_0
13047 if x.Op != OpAMD64MOVLload {
13048 break
13049 }
13050 off := auxIntToInt32(x.AuxInt)
13051 sym := auxToSym(x.Aux)
13052 mem := x.Args[1]
13053 ptr := x.Args[0]
13054 if !(x.Uses == 1 && clobber(x)) {
13055 break
13056 }
13057 b = x.Block
13058 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
13059 v.copyOf(v0)
13060 v0.AuxInt = int32ToAuxInt(off)
13061 v0.Aux = symToAux(sym)
13062 v0.AddArg2(ptr, mem)
13063 return true
13064 }
13065
13066
13067
13068 for {
13069 x := v_0
13070 if x.Op != OpAMD64MOVQload {
13071 break
13072 }
13073 off := auxIntToInt32(x.AuxInt)
13074 sym := auxToSym(x.Aux)
13075 mem := x.Args[1]
13076 ptr := x.Args[0]
13077 if !(x.Uses == 1 && clobber(x)) {
13078 break
13079 }
13080 b = x.Block
13081 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
13082 v.copyOf(v0)
13083 v0.AuxInt = int32ToAuxInt(off)
13084 v0.Aux = symToAux(sym)
13085 v0.AddArg2(ptr, mem)
13086 return true
13087 }
13088
13089
13090
13091 for {
13092 if v_0.Op != OpAMD64ANDLconst {
13093 break
13094 }
13095 c := auxIntToInt32(v_0.AuxInt)
13096 x := v_0.Args[0]
13097 if !(c&0x8000 == 0) {
13098 break
13099 }
13100 v.reset(OpAMD64ANDLconst)
13101 v.AuxInt = int32ToAuxInt(c & 0x7fff)
13102 v.AddArg(x)
13103 return true
13104 }
13105
13106
13107 for {
13108 if v_0.Op != OpAMD64MOVWQSX {
13109 break
13110 }
13111 x := v_0.Args[0]
13112 v.reset(OpAMD64MOVWQSX)
13113 v.AddArg(x)
13114 return true
13115 }
13116
13117
13118 for {
13119 if v_0.Op != OpAMD64MOVBQSX {
13120 break
13121 }
13122 x := v_0.Args[0]
13123 v.reset(OpAMD64MOVBQSX)
13124 v.AddArg(x)
13125 return true
13126 }
13127 return false
13128 }
13129 func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool {
13130 v_1 := v.Args[1]
13131 v_0 := v.Args[0]
13132 b := v.Block
13133 config := b.Func.Config
13134
13135
13136
13137 for {
13138 off := auxIntToInt32(v.AuxInt)
13139 sym := auxToSym(v.Aux)
13140 ptr := v_0
13141 if v_1.Op != OpAMD64MOVWstore {
13142 break
13143 }
13144 off2 := auxIntToInt32(v_1.AuxInt)
13145 sym2 := auxToSym(v_1.Aux)
13146 x := v_1.Args[1]
13147 ptr2 := v_1.Args[0]
13148 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
13149 break
13150 }
13151 v.reset(OpAMD64MOVWQSX)
13152 v.AddArg(x)
13153 return true
13154 }
13155
13156
13157
13158 for {
13159 off1 := auxIntToInt32(v.AuxInt)
13160 sym1 := auxToSym(v.Aux)
13161 if v_0.Op != OpAMD64LEAQ {
13162 break
13163 }
13164 off2 := auxIntToInt32(v_0.AuxInt)
13165 sym2 := auxToSym(v_0.Aux)
13166 base := v_0.Args[0]
13167 mem := v_1
13168 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
13169 break
13170 }
13171 v.reset(OpAMD64MOVWQSXload)
13172 v.AuxInt = int32ToAuxInt(off1 + off2)
13173 v.Aux = symToAux(mergeSym(sym1, sym2))
13174 v.AddArg2(base, mem)
13175 return true
13176 }
13177
13178
13179
13180 for {
13181 off := auxIntToInt32(v.AuxInt)
13182 sym := auxToSym(v.Aux)
13183 if v_0.Op != OpSB || !(symIsRO(sym)) {
13184 break
13185 }
13186 v.reset(OpAMD64MOVQconst)
13187 v.AuxInt = int64ToAuxInt(int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))))
13188 return true
13189 }
13190 return false
13191 }
13192 func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
13193 v_0 := v.Args[0]
13194 b := v.Block
13195
13196
13197
13198 for {
13199 x := v_0
13200 if x.Op != OpAMD64MOVWload {
13201 break
13202 }
13203 off := auxIntToInt32(x.AuxInt)
13204 sym := auxToSym(x.Aux)
13205 mem := x.Args[1]
13206 ptr := x.Args[0]
13207 if !(x.Uses == 1 && clobber(x)) {
13208 break
13209 }
13210 b = x.Block
13211 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
13212 v.copyOf(v0)
13213 v0.AuxInt = int32ToAuxInt(off)
13214 v0.Aux = symToAux(sym)
13215 v0.AddArg2(ptr, mem)
13216 return true
13217 }
13218
13219
13220
13221 for {
13222 x := v_0
13223 if x.Op != OpAMD64MOVLload {
13224 break
13225 }
13226 off := auxIntToInt32(x.AuxInt)
13227 sym := auxToSym(x.Aux)
13228 mem := x.Args[1]
13229 ptr := x.Args[0]
13230 if !(x.Uses == 1 && clobber(x)) {
13231 break
13232 }
13233 b = x.Block
13234 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
13235 v.copyOf(v0)
13236 v0.AuxInt = int32ToAuxInt(off)
13237 v0.Aux = symToAux(sym)
13238 v0.AddArg2(ptr, mem)
13239 return true
13240 }
13241
13242
13243
13244 for {
13245 x := v_0
13246 if x.Op != OpAMD64MOVQload {
13247 break
13248 }
13249 off := auxIntToInt32(x.AuxInt)
13250 sym := auxToSym(x.Aux)
13251 mem := x.Args[1]
13252 ptr := x.Args[0]
13253 if !(x.Uses == 1 && clobber(x)) {
13254 break
13255 }
13256 b = x.Block
13257 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
13258 v.copyOf(v0)
13259 v0.AuxInt = int32ToAuxInt(off)
13260 v0.Aux = symToAux(sym)
13261 v0.AddArg2(ptr, mem)
13262 return true
13263 }
13264
13265
13266 for {
13267 if v_0.Op != OpAMD64ANDLconst {
13268 break
13269 }
13270 c := auxIntToInt32(v_0.AuxInt)
13271 x := v_0.Args[0]
13272 v.reset(OpAMD64ANDLconst)
13273 v.AuxInt = int32ToAuxInt(c & 0xffff)
13274 v.AddArg(x)
13275 return true
13276 }
13277
13278
13279 for {
13280 if v_0.Op != OpAMD64MOVWQZX {
13281 break
13282 }
13283 x := v_0.Args[0]
13284 v.reset(OpAMD64MOVWQZX)
13285 v.AddArg(x)
13286 return true
13287 }
13288
13289
13290 for {
13291 if v_0.Op != OpAMD64MOVBQZX {
13292 break
13293 }
13294 x := v_0.Args[0]
13295 v.reset(OpAMD64MOVBQZX)
13296 v.AddArg(x)
13297 return true
13298 }
13299 return false
13300 }
13301 func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
13302 v_1 := v.Args[1]
13303 v_0 := v.Args[0]
13304 b := v.Block
13305 config := b.Func.Config
13306
13307
13308
13309 for {
13310 off := auxIntToInt32(v.AuxInt)
13311 sym := auxToSym(v.Aux)
13312 ptr := v_0
13313 if v_1.Op != OpAMD64MOVWstore {
13314 break
13315 }
13316 off2 := auxIntToInt32(v_1.AuxInt)
13317 sym2 := auxToSym(v_1.Aux)
13318 x := v_1.Args[1]
13319 ptr2 := v_1.Args[0]
13320 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
13321 break
13322 }
13323 v.reset(OpAMD64MOVWQZX)
13324 v.AddArg(x)
13325 return true
13326 }
13327
13328
13329
13330 for {
13331 off1 := auxIntToInt32(v.AuxInt)
13332 sym := auxToSym(v.Aux)
13333 if v_0.Op != OpAMD64ADDQconst {
13334 break
13335 }
13336 off2 := auxIntToInt32(v_0.AuxInt)
13337 ptr := v_0.Args[0]
13338 mem := v_1
13339 if !(is32Bit(int64(off1) + int64(off2))) {
13340 break
13341 }
13342 v.reset(OpAMD64MOVWload)
13343 v.AuxInt = int32ToAuxInt(off1 + off2)
13344 v.Aux = symToAux(sym)
13345 v.AddArg2(ptr, mem)
13346 return true
13347 }
13348
13349
13350
13351 for {
13352 off1 := auxIntToInt32(v.AuxInt)
13353 sym1 := auxToSym(v.Aux)
13354 if v_0.Op != OpAMD64LEAQ {
13355 break
13356 }
13357 off2 := auxIntToInt32(v_0.AuxInt)
13358 sym2 := auxToSym(v_0.Aux)
13359 base := v_0.Args[0]
13360 mem := v_1
13361 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
13362 break
13363 }
13364 v.reset(OpAMD64MOVWload)
13365 v.AuxInt = int32ToAuxInt(off1 + off2)
13366 v.Aux = symToAux(mergeSym(sym1, sym2))
13367 v.AddArg2(base, mem)
13368 return true
13369 }
13370
13371
13372
13373 for {
13374 off := auxIntToInt32(v.AuxInt)
13375 sym := auxToSym(v.Aux)
13376 if v_0.Op != OpSB || !(symIsRO(sym)) {
13377 break
13378 }
13379 v.reset(OpAMD64MOVLconst)
13380 v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
13381 return true
13382 }
13383 return false
13384 }
13385 func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
13386 v_2 := v.Args[2]
13387 v_1 := v.Args[1]
13388 v_0 := v.Args[0]
13389
13390
13391 for {
13392 off := auxIntToInt32(v.AuxInt)
13393 sym := auxToSym(v.Aux)
13394 ptr := v_0
13395 if v_1.Op != OpAMD64MOVWQSX {
13396 break
13397 }
13398 x := v_1.Args[0]
13399 mem := v_2
13400 v.reset(OpAMD64MOVWstore)
13401 v.AuxInt = int32ToAuxInt(off)
13402 v.Aux = symToAux(sym)
13403 v.AddArg3(ptr, x, mem)
13404 return true
13405 }
13406
13407
13408 for {
13409 off := auxIntToInt32(v.AuxInt)
13410 sym := auxToSym(v.Aux)
13411 ptr := v_0
13412 if v_1.Op != OpAMD64MOVWQZX {
13413 break
13414 }
13415 x := v_1.Args[0]
13416 mem := v_2
13417 v.reset(OpAMD64MOVWstore)
13418 v.AuxInt = int32ToAuxInt(off)
13419 v.Aux = symToAux(sym)
13420 v.AddArg3(ptr, x, mem)
13421 return true
13422 }
13423
13424
13425
13426 for {
13427 off1 := auxIntToInt32(v.AuxInt)
13428 sym := auxToSym(v.Aux)
13429 if v_0.Op != OpAMD64ADDQconst {
13430 break
13431 }
13432 off2 := auxIntToInt32(v_0.AuxInt)
13433 ptr := v_0.Args[0]
13434 val := v_1
13435 mem := v_2
13436 if !(is32Bit(int64(off1) + int64(off2))) {
13437 break
13438 }
13439 v.reset(OpAMD64MOVWstore)
13440 v.AuxInt = int32ToAuxInt(off1 + off2)
13441 v.Aux = symToAux(sym)
13442 v.AddArg3(ptr, val, mem)
13443 return true
13444 }
13445
13446
13447 for {
13448 off := auxIntToInt32(v.AuxInt)
13449 sym := auxToSym(v.Aux)
13450 ptr := v_0
13451 if v_1.Op != OpAMD64MOVLconst {
13452 break
13453 }
13454 c := auxIntToInt32(v_1.AuxInt)
13455 mem := v_2
13456 v.reset(OpAMD64MOVWstoreconst)
13457 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
13458 v.Aux = symToAux(sym)
13459 v.AddArg2(ptr, mem)
13460 return true
13461 }
13462
13463
13464 for {
13465 off := auxIntToInt32(v.AuxInt)
13466 sym := auxToSym(v.Aux)
13467 ptr := v_0
13468 if v_1.Op != OpAMD64MOVQconst {
13469 break
13470 }
13471 c := auxIntToInt64(v_1.AuxInt)
13472 mem := v_2
13473 v.reset(OpAMD64MOVWstoreconst)
13474 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
13475 v.Aux = symToAux(sym)
13476 v.AddArg2(ptr, mem)
13477 return true
13478 }
13479
13480
13481
13482 for {
13483 off1 := auxIntToInt32(v.AuxInt)
13484 sym1 := auxToSym(v.Aux)
13485 if v_0.Op != OpAMD64LEAQ {
13486 break
13487 }
13488 off2 := auxIntToInt32(v_0.AuxInt)
13489 sym2 := auxToSym(v_0.Aux)
13490 base := v_0.Args[0]
13491 val := v_1
13492 mem := v_2
13493 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
13494 break
13495 }
13496 v.reset(OpAMD64MOVWstore)
13497 v.AuxInt = int32ToAuxInt(off1 + off2)
13498 v.Aux = symToAux(mergeSym(sym1, sym2))
13499 v.AddArg3(base, val, mem)
13500 return true
13501 }
13502
13503
13504
13505 for {
13506 i := auxIntToInt32(v.AuxInt)
13507 s := auxToSym(v.Aux)
13508 p := v_0
13509 x := v_1
13510 if x.Op != OpAMD64ROLWconst || auxIntToInt8(x.AuxInt) != 8 {
13511 break
13512 }
13513 w := x.Args[0]
13514 mem := v_2
13515 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
13516 break
13517 }
13518 v.reset(OpAMD64MOVBEWstore)
13519 v.AuxInt = int32ToAuxInt(i)
13520 v.Aux = symToAux(s)
13521 v.AddArg3(p, w, mem)
13522 return true
13523 }
13524 return false
13525 }
13526 func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
13527 v_1 := v.Args[1]
13528 v_0 := v.Args[0]
13529
13530
13531
13532 for {
13533 sc := auxIntToValAndOff(v.AuxInt)
13534 s := auxToSym(v.Aux)
13535 if v_0.Op != OpAMD64ADDQconst {
13536 break
13537 }
13538 off := auxIntToInt32(v_0.AuxInt)
13539 ptr := v_0.Args[0]
13540 mem := v_1
13541 if !(ValAndOff(sc).canAdd32(off)) {
13542 break
13543 }
13544 v.reset(OpAMD64MOVWstoreconst)
13545 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
13546 v.Aux = symToAux(s)
13547 v.AddArg2(ptr, mem)
13548 return true
13549 }
13550
13551
13552
13553 for {
13554 sc := auxIntToValAndOff(v.AuxInt)
13555 sym1 := auxToSym(v.Aux)
13556 if v_0.Op != OpAMD64LEAQ {
13557 break
13558 }
13559 off := auxIntToInt32(v_0.AuxInt)
13560 sym2 := auxToSym(v_0.Aux)
13561 ptr := v_0.Args[0]
13562 mem := v_1
13563 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
13564 break
13565 }
13566 v.reset(OpAMD64MOVWstoreconst)
13567 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
13568 v.Aux = symToAux(mergeSym(sym1, sym2))
13569 v.AddArg2(ptr, mem)
13570 return true
13571 }
13572 return false
13573 }
13574 func rewriteValueAMD64_OpAMD64MULL(v *Value) bool {
13575 v_1 := v.Args[1]
13576 v_0 := v.Args[0]
13577
13578
13579 for {
13580 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
13581 x := v_0
13582 if v_1.Op != OpAMD64MOVLconst {
13583 continue
13584 }
13585 c := auxIntToInt32(v_1.AuxInt)
13586 v.reset(OpAMD64MULLconst)
13587 v.AuxInt = int32ToAuxInt(c)
13588 v.AddArg(x)
13589 return true
13590 }
13591 break
13592 }
13593 return false
13594 }
13595 func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
13596 v_0 := v.Args[0]
13597 b := v.Block
13598 config := b.Func.Config
13599
13600
13601 for {
13602 c := auxIntToInt32(v.AuxInt)
13603 if v_0.Op != OpAMD64MULLconst {
13604 break
13605 }
13606 d := auxIntToInt32(v_0.AuxInt)
13607 x := v_0.Args[0]
13608 v.reset(OpAMD64MULLconst)
13609 v.AuxInt = int32ToAuxInt(c * d)
13610 v.AddArg(x)
13611 return true
13612 }
13613
13614
13615 for {
13616 if auxIntToInt32(v.AuxInt) != 0 {
13617 break
13618 }
13619 v.reset(OpAMD64MOVLconst)
13620 v.AuxInt = int32ToAuxInt(0)
13621 return true
13622 }
13623
13624
13625 for {
13626 if auxIntToInt32(v.AuxInt) != 1 {
13627 break
13628 }
13629 x := v_0
13630 v.copyOf(x)
13631 return true
13632 }
13633
13634
13635
13636 for {
13637 c := auxIntToInt32(v.AuxInt)
13638 x := v_0
13639 if !(v.Type.Size() <= 4 && canMulStrengthReduce32(config, c)) {
13640 break
13641 }
13642 v.copyOf(mulStrengthReduce32(v, x, c))
13643 return true
13644 }
13645
13646
13647 for {
13648 c := auxIntToInt32(v.AuxInt)
13649 if v_0.Op != OpAMD64MOVLconst {
13650 break
13651 }
13652 d := auxIntToInt32(v_0.AuxInt)
13653 v.reset(OpAMD64MOVLconst)
13654 v.AuxInt = int32ToAuxInt(c * d)
13655 return true
13656 }
13657 return false
13658 }
13659 func rewriteValueAMD64_OpAMD64MULQ(v *Value) bool {
13660 v_1 := v.Args[1]
13661 v_0 := v.Args[0]
13662
13663
13664
13665 for {
13666 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
13667 x := v_0
13668 if v_1.Op != OpAMD64MOVQconst {
13669 continue
13670 }
13671 c := auxIntToInt64(v_1.AuxInt)
13672 if !(is32Bit(c)) {
13673 continue
13674 }
13675 v.reset(OpAMD64MULQconst)
13676 v.AuxInt = int32ToAuxInt(int32(c))
13677 v.AddArg(x)
13678 return true
13679 }
13680 break
13681 }
13682 return false
13683 }
13684 func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
13685 v_0 := v.Args[0]
13686 b := v.Block
13687 config := b.Func.Config
13688
13689
13690
13691 for {
13692 c := auxIntToInt32(v.AuxInt)
13693 if v_0.Op != OpAMD64MULQconst {
13694 break
13695 }
13696 d := auxIntToInt32(v_0.AuxInt)
13697 x := v_0.Args[0]
13698 if !(is32Bit(int64(c) * int64(d))) {
13699 break
13700 }
13701 v.reset(OpAMD64MULQconst)
13702 v.AuxInt = int32ToAuxInt(c * d)
13703 v.AddArg(x)
13704 return true
13705 }
13706
13707
13708 for {
13709 if auxIntToInt32(v.AuxInt) != 0 {
13710 break
13711 }
13712 v.reset(OpAMD64MOVQconst)
13713 v.AuxInt = int64ToAuxInt(0)
13714 return true
13715 }
13716
13717
13718 for {
13719 if auxIntToInt32(v.AuxInt) != 1 {
13720 break
13721 }
13722 x := v_0
13723 v.copyOf(x)
13724 return true
13725 }
13726
13727
13728
13729 for {
13730 c := auxIntToInt32(v.AuxInt)
13731 x := v_0
13732 if !(canMulStrengthReduce(config, int64(c))) {
13733 break
13734 }
13735 v.copyOf(mulStrengthReduce(v, x, int64(c)))
13736 return true
13737 }
13738
13739
13740 for {
13741 c := auxIntToInt32(v.AuxInt)
13742 if v_0.Op != OpAMD64MOVQconst {
13743 break
13744 }
13745 d := auxIntToInt64(v_0.AuxInt)
13746 v.reset(OpAMD64MOVQconst)
13747 v.AuxInt = int64ToAuxInt(int64(c) * d)
13748 return true
13749 }
13750
13751
13752
13753 for {
13754 c := auxIntToInt32(v.AuxInt)
13755 if v_0.Op != OpAMD64NEGQ {
13756 break
13757 }
13758 x := v_0.Args[0]
13759 if !(c != -(1 << 31)) {
13760 break
13761 }
13762 v.reset(OpAMD64MULQconst)
13763 v.AuxInt = int32ToAuxInt(-c)
13764 v.AddArg(x)
13765 return true
13766 }
13767 return false
13768 }
13769 func rewriteValueAMD64_OpAMD64MULSD(v *Value) bool {
13770 v_1 := v.Args[1]
13771 v_0 := v.Args[0]
13772
13773
13774
13775 for {
13776 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
13777 x := v_0
13778 l := v_1
13779 if l.Op != OpAMD64MOVSDload {
13780 continue
13781 }
13782 off := auxIntToInt32(l.AuxInt)
13783 sym := auxToSym(l.Aux)
13784 mem := l.Args[1]
13785 ptr := l.Args[0]
13786 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
13787 continue
13788 }
13789 v.reset(OpAMD64MULSDload)
13790 v.AuxInt = int32ToAuxInt(off)
13791 v.Aux = symToAux(sym)
13792 v.AddArg3(x, ptr, mem)
13793 return true
13794 }
13795 break
13796 }
13797 return false
13798 }
13799 func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool {
13800 v_2 := v.Args[2]
13801 v_1 := v.Args[1]
13802 v_0 := v.Args[0]
13803 b := v.Block
13804 typ := &b.Func.Config.Types
13805
13806
13807
13808 for {
13809 off1 := auxIntToInt32(v.AuxInt)
13810 sym := auxToSym(v.Aux)
13811 val := v_0
13812 if v_1.Op != OpAMD64ADDQconst {
13813 break
13814 }
13815 off2 := auxIntToInt32(v_1.AuxInt)
13816 base := v_1.Args[0]
13817 mem := v_2
13818 if !(is32Bit(int64(off1) + int64(off2))) {
13819 break
13820 }
13821 v.reset(OpAMD64MULSDload)
13822 v.AuxInt = int32ToAuxInt(off1 + off2)
13823 v.Aux = symToAux(sym)
13824 v.AddArg3(val, base, mem)
13825 return true
13826 }
13827
13828
13829
13830 for {
13831 off1 := auxIntToInt32(v.AuxInt)
13832 sym1 := auxToSym(v.Aux)
13833 val := v_0
13834 if v_1.Op != OpAMD64LEAQ {
13835 break
13836 }
13837 off2 := auxIntToInt32(v_1.AuxInt)
13838 sym2 := auxToSym(v_1.Aux)
13839 base := v_1.Args[0]
13840 mem := v_2
13841 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
13842 break
13843 }
13844 v.reset(OpAMD64MULSDload)
13845 v.AuxInt = int32ToAuxInt(off1 + off2)
13846 v.Aux = symToAux(mergeSym(sym1, sym2))
13847 v.AddArg3(val, base, mem)
13848 return true
13849 }
13850
13851
13852 for {
13853 off := auxIntToInt32(v.AuxInt)
13854 sym := auxToSym(v.Aux)
13855 x := v_0
13856 ptr := v_1
13857 if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
13858 break
13859 }
13860 y := v_2.Args[1]
13861 if ptr != v_2.Args[0] {
13862 break
13863 }
13864 v.reset(OpAMD64MULSD)
13865 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
13866 v0.AddArg(y)
13867 v.AddArg2(x, v0)
13868 return true
13869 }
13870 return false
13871 }
13872 func rewriteValueAMD64_OpAMD64MULSS(v *Value) bool {
13873 v_1 := v.Args[1]
13874 v_0 := v.Args[0]
13875
13876
13877
13878 for {
13879 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
13880 x := v_0
13881 l := v_1
13882 if l.Op != OpAMD64MOVSSload {
13883 continue
13884 }
13885 off := auxIntToInt32(l.AuxInt)
13886 sym := auxToSym(l.Aux)
13887 mem := l.Args[1]
13888 ptr := l.Args[0]
13889 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
13890 continue
13891 }
13892 v.reset(OpAMD64MULSSload)
13893 v.AuxInt = int32ToAuxInt(off)
13894 v.Aux = symToAux(sym)
13895 v.AddArg3(x, ptr, mem)
13896 return true
13897 }
13898 break
13899 }
13900 return false
13901 }
13902 func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool {
13903 v_2 := v.Args[2]
13904 v_1 := v.Args[1]
13905 v_0 := v.Args[0]
13906 b := v.Block
13907 typ := &b.Func.Config.Types
13908
13909
13910
13911 for {
13912 off1 := auxIntToInt32(v.AuxInt)
13913 sym := auxToSym(v.Aux)
13914 val := v_0
13915 if v_1.Op != OpAMD64ADDQconst {
13916 break
13917 }
13918 off2 := auxIntToInt32(v_1.AuxInt)
13919 base := v_1.Args[0]
13920 mem := v_2
13921 if !(is32Bit(int64(off1) + int64(off2))) {
13922 break
13923 }
13924 v.reset(OpAMD64MULSSload)
13925 v.AuxInt = int32ToAuxInt(off1 + off2)
13926 v.Aux = symToAux(sym)
13927 v.AddArg3(val, base, mem)
13928 return true
13929 }
13930
13931
13932
13933 for {
13934 off1 := auxIntToInt32(v.AuxInt)
13935 sym1 := auxToSym(v.Aux)
13936 val := v_0
13937 if v_1.Op != OpAMD64LEAQ {
13938 break
13939 }
13940 off2 := auxIntToInt32(v_1.AuxInt)
13941 sym2 := auxToSym(v_1.Aux)
13942 base := v_1.Args[0]
13943 mem := v_2
13944 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
13945 break
13946 }
13947 v.reset(OpAMD64MULSSload)
13948 v.AuxInt = int32ToAuxInt(off1 + off2)
13949 v.Aux = symToAux(mergeSym(sym1, sym2))
13950 v.AddArg3(val, base, mem)
13951 return true
13952 }
13953
13954
13955 for {
13956 off := auxIntToInt32(v.AuxInt)
13957 sym := auxToSym(v.Aux)
13958 x := v_0
13959 ptr := v_1
13960 if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
13961 break
13962 }
13963 y := v_2.Args[1]
13964 if ptr != v_2.Args[0] {
13965 break
13966 }
13967 v.reset(OpAMD64MULSS)
13968 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
13969 v0.AddArg(y)
13970 v.AddArg2(x, v0)
13971 return true
13972 }
13973 return false
13974 }
13975 func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool {
13976 v_0 := v.Args[0]
13977
13978
13979 for {
13980 if v_0.Op != OpAMD64NEGL {
13981 break
13982 }
13983 x := v_0.Args[0]
13984 v.copyOf(x)
13985 return true
13986 }
13987
13988
13989
13990 for {
13991 s := v_0
13992 if s.Op != OpAMD64SUBL {
13993 break
13994 }
13995 y := s.Args[1]
13996 x := s.Args[0]
13997 if !(s.Uses == 1) {
13998 break
13999 }
14000 v.reset(OpAMD64SUBL)
14001 v.AddArg2(y, x)
14002 return true
14003 }
14004
14005
14006 for {
14007 if v_0.Op != OpAMD64MOVLconst {
14008 break
14009 }
14010 c := auxIntToInt32(v_0.AuxInt)
14011 v.reset(OpAMD64MOVLconst)
14012 v.AuxInt = int32ToAuxInt(-c)
14013 return true
14014 }
14015 return false
14016 }
14017 func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool {
14018 v_0 := v.Args[0]
14019
14020
14021 for {
14022 if v_0.Op != OpAMD64NEGQ {
14023 break
14024 }
14025 x := v_0.Args[0]
14026 v.copyOf(x)
14027 return true
14028 }
14029
14030
14031
14032 for {
14033 s := v_0
14034 if s.Op != OpAMD64SUBQ {
14035 break
14036 }
14037 y := s.Args[1]
14038 x := s.Args[0]
14039 if !(s.Uses == 1) {
14040 break
14041 }
14042 v.reset(OpAMD64SUBQ)
14043 v.AddArg2(y, x)
14044 return true
14045 }
14046
14047
14048 for {
14049 if v_0.Op != OpAMD64MOVQconst {
14050 break
14051 }
14052 c := auxIntToInt64(v_0.AuxInt)
14053 v.reset(OpAMD64MOVQconst)
14054 v.AuxInt = int64ToAuxInt(-c)
14055 return true
14056 }
14057
14058
14059
14060 for {
14061 if v_0.Op != OpAMD64ADDQconst {
14062 break
14063 }
14064 c := auxIntToInt32(v_0.AuxInt)
14065 v_0_0 := v_0.Args[0]
14066 if v_0_0.Op != OpAMD64NEGQ {
14067 break
14068 }
14069 x := v_0_0.Args[0]
14070 if !(c != -(1 << 31)) {
14071 break
14072 }
14073 v.reset(OpAMD64ADDQconst)
14074 v.AuxInt = int32ToAuxInt(-c)
14075 v.AddArg(x)
14076 return true
14077 }
14078 return false
14079 }
14080 func rewriteValueAMD64_OpAMD64NOTL(v *Value) bool {
14081 v_0 := v.Args[0]
14082
14083
14084 for {
14085 if v_0.Op != OpAMD64MOVLconst {
14086 break
14087 }
14088 c := auxIntToInt32(v_0.AuxInt)
14089 v.reset(OpAMD64MOVLconst)
14090 v.AuxInt = int32ToAuxInt(^c)
14091 return true
14092 }
14093 return false
14094 }
14095 func rewriteValueAMD64_OpAMD64NOTQ(v *Value) bool {
14096 v_0 := v.Args[0]
14097
14098
14099 for {
14100 if v_0.Op != OpAMD64MOVQconst {
14101 break
14102 }
14103 c := auxIntToInt64(v_0.AuxInt)
14104 v.reset(OpAMD64MOVQconst)
14105 v.AuxInt = int64ToAuxInt(^c)
14106 return true
14107 }
14108 return false
14109 }
14110 func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
14111 v_1 := v.Args[1]
14112 v_0 := v.Args[0]
14113
14114
14115 for {
14116 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14117 if v_0.Op != OpAMD64SHLL {
14118 continue
14119 }
14120 y := v_0.Args[1]
14121 v_0_0 := v_0.Args[0]
14122 if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
14123 continue
14124 }
14125 x := v_1
14126 v.reset(OpAMD64BTSL)
14127 v.AddArg2(x, y)
14128 return true
14129 }
14130 break
14131 }
14132
14133
14134 for {
14135 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14136 x := v_0
14137 if v_1.Op != OpAMD64MOVLconst {
14138 continue
14139 }
14140 c := auxIntToInt32(v_1.AuxInt)
14141 v.reset(OpAMD64ORLconst)
14142 v.AuxInt = int32ToAuxInt(c)
14143 v.AddArg(x)
14144 return true
14145 }
14146 break
14147 }
14148
14149
14150 for {
14151 x := v_0
14152 if x != v_1 {
14153 break
14154 }
14155 v.copyOf(x)
14156 return true
14157 }
14158
14159
14160
14161 for {
14162 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14163 x := v_0
14164 l := v_1
14165 if l.Op != OpAMD64MOVLload {
14166 continue
14167 }
14168 off := auxIntToInt32(l.AuxInt)
14169 sym := auxToSym(l.Aux)
14170 mem := l.Args[1]
14171 ptr := l.Args[0]
14172 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
14173 continue
14174 }
14175 v.reset(OpAMD64ORLload)
14176 v.AuxInt = int32ToAuxInt(off)
14177 v.Aux = symToAux(sym)
14178 v.AddArg3(x, ptr, mem)
14179 return true
14180 }
14181 break
14182 }
14183 return false
14184 }
14185 func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool {
14186 v_0 := v.Args[0]
14187
14188
14189 for {
14190 c := auxIntToInt32(v.AuxInt)
14191 if v_0.Op != OpAMD64ORLconst {
14192 break
14193 }
14194 d := auxIntToInt32(v_0.AuxInt)
14195 x := v_0.Args[0]
14196 v.reset(OpAMD64ORLconst)
14197 v.AuxInt = int32ToAuxInt(c | d)
14198 v.AddArg(x)
14199 return true
14200 }
14201
14202
14203
14204 for {
14205 c := auxIntToInt32(v.AuxInt)
14206 x := v_0
14207 if !(c == 0) {
14208 break
14209 }
14210 v.copyOf(x)
14211 return true
14212 }
14213
14214
14215
14216 for {
14217 c := auxIntToInt32(v.AuxInt)
14218 if !(c == -1) {
14219 break
14220 }
14221 v.reset(OpAMD64MOVLconst)
14222 v.AuxInt = int32ToAuxInt(-1)
14223 return true
14224 }
14225
14226
14227 for {
14228 c := auxIntToInt32(v.AuxInt)
14229 if v_0.Op != OpAMD64MOVLconst {
14230 break
14231 }
14232 d := auxIntToInt32(v_0.AuxInt)
14233 v.reset(OpAMD64MOVLconst)
14234 v.AuxInt = int32ToAuxInt(c | d)
14235 return true
14236 }
14237 return false
14238 }
14239 func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool {
14240 v_1 := v.Args[1]
14241 v_0 := v.Args[0]
14242
14243
14244
14245 for {
14246 valoff1 := auxIntToValAndOff(v.AuxInt)
14247 sym := auxToSym(v.Aux)
14248 if v_0.Op != OpAMD64ADDQconst {
14249 break
14250 }
14251 off2 := auxIntToInt32(v_0.AuxInt)
14252 base := v_0.Args[0]
14253 mem := v_1
14254 if !(ValAndOff(valoff1).canAdd32(off2)) {
14255 break
14256 }
14257 v.reset(OpAMD64ORLconstmodify)
14258 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
14259 v.Aux = symToAux(sym)
14260 v.AddArg2(base, mem)
14261 return true
14262 }
14263
14264
14265
14266 for {
14267 valoff1 := auxIntToValAndOff(v.AuxInt)
14268 sym1 := auxToSym(v.Aux)
14269 if v_0.Op != OpAMD64LEAQ {
14270 break
14271 }
14272 off2 := auxIntToInt32(v_0.AuxInt)
14273 sym2 := auxToSym(v_0.Aux)
14274 base := v_0.Args[0]
14275 mem := v_1
14276 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
14277 break
14278 }
14279 v.reset(OpAMD64ORLconstmodify)
14280 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
14281 v.Aux = symToAux(mergeSym(sym1, sym2))
14282 v.AddArg2(base, mem)
14283 return true
14284 }
14285 return false
14286 }
14287 func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool {
14288 v_2 := v.Args[2]
14289 v_1 := v.Args[1]
14290 v_0 := v.Args[0]
14291 b := v.Block
14292 typ := &b.Func.Config.Types
14293
14294
14295
14296 for {
14297 off1 := auxIntToInt32(v.AuxInt)
14298 sym := auxToSym(v.Aux)
14299 val := v_0
14300 if v_1.Op != OpAMD64ADDQconst {
14301 break
14302 }
14303 off2 := auxIntToInt32(v_1.AuxInt)
14304 base := v_1.Args[0]
14305 mem := v_2
14306 if !(is32Bit(int64(off1) + int64(off2))) {
14307 break
14308 }
14309 v.reset(OpAMD64ORLload)
14310 v.AuxInt = int32ToAuxInt(off1 + off2)
14311 v.Aux = symToAux(sym)
14312 v.AddArg3(val, base, mem)
14313 return true
14314 }
14315
14316
14317
14318 for {
14319 off1 := auxIntToInt32(v.AuxInt)
14320 sym1 := auxToSym(v.Aux)
14321 val := v_0
14322 if v_1.Op != OpAMD64LEAQ {
14323 break
14324 }
14325 off2 := auxIntToInt32(v_1.AuxInt)
14326 sym2 := auxToSym(v_1.Aux)
14327 base := v_1.Args[0]
14328 mem := v_2
14329 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
14330 break
14331 }
14332 v.reset(OpAMD64ORLload)
14333 v.AuxInt = int32ToAuxInt(off1 + off2)
14334 v.Aux = symToAux(mergeSym(sym1, sym2))
14335 v.AddArg3(val, base, mem)
14336 return true
14337 }
14338
14339
14340 for {
14341 off := auxIntToInt32(v.AuxInt)
14342 sym := auxToSym(v.Aux)
14343 x := v_0
14344 ptr := v_1
14345 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
14346 break
14347 }
14348 y := v_2.Args[1]
14349 if ptr != v_2.Args[0] {
14350 break
14351 }
14352 v.reset(OpAMD64ORL)
14353 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
14354 v0.AddArg(y)
14355 v.AddArg2(x, v0)
14356 return true
14357 }
14358 return false
14359 }
14360 func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool {
14361 v_2 := v.Args[2]
14362 v_1 := v.Args[1]
14363 v_0 := v.Args[0]
14364
14365
14366
14367 for {
14368 off1 := auxIntToInt32(v.AuxInt)
14369 sym := auxToSym(v.Aux)
14370 if v_0.Op != OpAMD64ADDQconst {
14371 break
14372 }
14373 off2 := auxIntToInt32(v_0.AuxInt)
14374 base := v_0.Args[0]
14375 val := v_1
14376 mem := v_2
14377 if !(is32Bit(int64(off1) + int64(off2))) {
14378 break
14379 }
14380 v.reset(OpAMD64ORLmodify)
14381 v.AuxInt = int32ToAuxInt(off1 + off2)
14382 v.Aux = symToAux(sym)
14383 v.AddArg3(base, val, mem)
14384 return true
14385 }
14386
14387
14388
14389 for {
14390 off1 := auxIntToInt32(v.AuxInt)
14391 sym1 := auxToSym(v.Aux)
14392 if v_0.Op != OpAMD64LEAQ {
14393 break
14394 }
14395 off2 := auxIntToInt32(v_0.AuxInt)
14396 sym2 := auxToSym(v_0.Aux)
14397 base := v_0.Args[0]
14398 val := v_1
14399 mem := v_2
14400 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
14401 break
14402 }
14403 v.reset(OpAMD64ORLmodify)
14404 v.AuxInt = int32ToAuxInt(off1 + off2)
14405 v.Aux = symToAux(mergeSym(sym1, sym2))
14406 v.AddArg3(base, val, mem)
14407 return true
14408 }
14409 return false
14410 }
14411 func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
14412 v_1 := v.Args[1]
14413 v_0 := v.Args[0]
14414
14415
14416 for {
14417 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14418 if v_0.Op != OpAMD64SHLQ {
14419 continue
14420 }
14421 y := v_0.Args[1]
14422 v_0_0 := v_0.Args[0]
14423 if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
14424 continue
14425 }
14426 x := v_1
14427 v.reset(OpAMD64BTSQ)
14428 v.AddArg2(x, y)
14429 return true
14430 }
14431 break
14432 }
14433
14434
14435
14436 for {
14437 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14438 if v_0.Op != OpAMD64MOVQconst {
14439 continue
14440 }
14441 c := auxIntToInt64(v_0.AuxInt)
14442 x := v_1
14443 if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
14444 continue
14445 }
14446 v.reset(OpAMD64BTSQconst)
14447 v.AuxInt = int8ToAuxInt(int8(log64(c)))
14448 v.AddArg(x)
14449 return true
14450 }
14451 break
14452 }
14453
14454
14455
14456 for {
14457 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14458 x := v_0
14459 if v_1.Op != OpAMD64MOVQconst {
14460 continue
14461 }
14462 c := auxIntToInt64(v_1.AuxInt)
14463 if !(is32Bit(c)) {
14464 continue
14465 }
14466 v.reset(OpAMD64ORQconst)
14467 v.AuxInt = int32ToAuxInt(int32(c))
14468 v.AddArg(x)
14469 return true
14470 }
14471 break
14472 }
14473
14474
14475 for {
14476 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14477 x := v_0
14478 if v_1.Op != OpAMD64MOVLconst {
14479 continue
14480 }
14481 c := auxIntToInt32(v_1.AuxInt)
14482 v.reset(OpAMD64ORQconst)
14483 v.AuxInt = int32ToAuxInt(c)
14484 v.AddArg(x)
14485 return true
14486 }
14487 break
14488 }
14489
14490
14491 for {
14492 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14493 if v_0.Op != OpAMD64SHRQ {
14494 continue
14495 }
14496 bits := v_0.Args[1]
14497 lo := v_0.Args[0]
14498 if v_1.Op != OpAMD64SHLQ {
14499 continue
14500 }
14501 _ = v_1.Args[1]
14502 hi := v_1.Args[0]
14503 v_1_1 := v_1.Args[1]
14504 if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
14505 continue
14506 }
14507 v.reset(OpAMD64SHRDQ)
14508 v.AddArg3(lo, hi, bits)
14509 return true
14510 }
14511 break
14512 }
14513
14514
14515 for {
14516 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14517 if v_0.Op != OpAMD64SHLQ {
14518 continue
14519 }
14520 bits := v_0.Args[1]
14521 lo := v_0.Args[0]
14522 if v_1.Op != OpAMD64SHRQ {
14523 continue
14524 }
14525 _ = v_1.Args[1]
14526 hi := v_1.Args[0]
14527 v_1_1 := v_1.Args[1]
14528 if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
14529 continue
14530 }
14531 v.reset(OpAMD64SHLDQ)
14532 v.AddArg3(lo, hi, bits)
14533 return true
14534 }
14535 break
14536 }
14537
14538
14539 for {
14540 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14541 if v_0.Op != OpAMD64SHRXQ {
14542 continue
14543 }
14544 bits := v_0.Args[1]
14545 lo := v_0.Args[0]
14546 if v_1.Op != OpAMD64SHLXQ {
14547 continue
14548 }
14549 _ = v_1.Args[1]
14550 hi := v_1.Args[0]
14551 v_1_1 := v_1.Args[1]
14552 if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
14553 continue
14554 }
14555 v.reset(OpAMD64SHRDQ)
14556 v.AddArg3(lo, hi, bits)
14557 return true
14558 }
14559 break
14560 }
14561
14562
14563 for {
14564 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14565 if v_0.Op != OpAMD64SHLXQ {
14566 continue
14567 }
14568 bits := v_0.Args[1]
14569 lo := v_0.Args[0]
14570 if v_1.Op != OpAMD64SHRXQ {
14571 continue
14572 }
14573 _ = v_1.Args[1]
14574 hi := v_1.Args[0]
14575 v_1_1 := v_1.Args[1]
14576 if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
14577 continue
14578 }
14579 v.reset(OpAMD64SHLDQ)
14580 v.AddArg3(lo, hi, bits)
14581 return true
14582 }
14583 break
14584 }
14585
14586
14587 for {
14588 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14589 if v_0.Op != OpAMD64MOVQconst {
14590 continue
14591 }
14592 c := auxIntToInt64(v_0.AuxInt)
14593 if v_1.Op != OpAMD64MOVQconst {
14594 continue
14595 }
14596 d := auxIntToInt64(v_1.AuxInt)
14597 v.reset(OpAMD64MOVQconst)
14598 v.AuxInt = int64ToAuxInt(c | d)
14599 return true
14600 }
14601 break
14602 }
14603
14604
14605 for {
14606 x := v_0
14607 if x != v_1 {
14608 break
14609 }
14610 v.copyOf(x)
14611 return true
14612 }
14613
14614
14615
14616 for {
14617 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14618 x := v_0
14619 l := v_1
14620 if l.Op != OpAMD64MOVQload {
14621 continue
14622 }
14623 off := auxIntToInt32(l.AuxInt)
14624 sym := auxToSym(l.Aux)
14625 mem := l.Args[1]
14626 ptr := l.Args[0]
14627 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
14628 continue
14629 }
14630 v.reset(OpAMD64ORQload)
14631 v.AuxInt = int32ToAuxInt(off)
14632 v.Aux = symToAux(sym)
14633 v.AddArg3(x, ptr, mem)
14634 return true
14635 }
14636 break
14637 }
14638 return false
14639 }
14640 func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool {
14641 v_0 := v.Args[0]
14642
14643
14644 for {
14645 c := auxIntToInt32(v.AuxInt)
14646 if v_0.Op != OpAMD64ORQconst {
14647 break
14648 }
14649 d := auxIntToInt32(v_0.AuxInt)
14650 x := v_0.Args[0]
14651 v.reset(OpAMD64ORQconst)
14652 v.AuxInt = int32ToAuxInt(c | d)
14653 v.AddArg(x)
14654 return true
14655 }
14656
14657
14658 for {
14659 if auxIntToInt32(v.AuxInt) != 0 {
14660 break
14661 }
14662 x := v_0
14663 v.copyOf(x)
14664 return true
14665 }
14666
14667
14668 for {
14669 if auxIntToInt32(v.AuxInt) != -1 {
14670 break
14671 }
14672 v.reset(OpAMD64MOVQconst)
14673 v.AuxInt = int64ToAuxInt(-1)
14674 return true
14675 }
14676
14677
14678 for {
14679 c := auxIntToInt32(v.AuxInt)
14680 if v_0.Op != OpAMD64MOVQconst {
14681 break
14682 }
14683 d := auxIntToInt64(v_0.AuxInt)
14684 v.reset(OpAMD64MOVQconst)
14685 v.AuxInt = int64ToAuxInt(int64(c) | d)
14686 return true
14687 }
14688 return false
14689 }
14690 func rewriteValueAMD64_OpAMD64ORQconstmodify(v *Value) bool {
14691 v_1 := v.Args[1]
14692 v_0 := v.Args[0]
14693
14694
14695
14696 for {
14697 valoff1 := auxIntToValAndOff(v.AuxInt)
14698 sym := auxToSym(v.Aux)
14699 if v_0.Op != OpAMD64ADDQconst {
14700 break
14701 }
14702 off2 := auxIntToInt32(v_0.AuxInt)
14703 base := v_0.Args[0]
14704 mem := v_1
14705 if !(ValAndOff(valoff1).canAdd32(off2)) {
14706 break
14707 }
14708 v.reset(OpAMD64ORQconstmodify)
14709 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
14710 v.Aux = symToAux(sym)
14711 v.AddArg2(base, mem)
14712 return true
14713 }
14714
14715
14716
14717 for {
14718 valoff1 := auxIntToValAndOff(v.AuxInt)
14719 sym1 := auxToSym(v.Aux)
14720 if v_0.Op != OpAMD64LEAQ {
14721 break
14722 }
14723 off2 := auxIntToInt32(v_0.AuxInt)
14724 sym2 := auxToSym(v_0.Aux)
14725 base := v_0.Args[0]
14726 mem := v_1
14727 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
14728 break
14729 }
14730 v.reset(OpAMD64ORQconstmodify)
14731 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
14732 v.Aux = symToAux(mergeSym(sym1, sym2))
14733 v.AddArg2(base, mem)
14734 return true
14735 }
14736 return false
14737 }
14738 func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool {
14739 v_2 := v.Args[2]
14740 v_1 := v.Args[1]
14741 v_0 := v.Args[0]
14742 b := v.Block
14743 typ := &b.Func.Config.Types
14744
14745
14746
14747 for {
14748 off1 := auxIntToInt32(v.AuxInt)
14749 sym := auxToSym(v.Aux)
14750 val := v_0
14751 if v_1.Op != OpAMD64ADDQconst {
14752 break
14753 }
14754 off2 := auxIntToInt32(v_1.AuxInt)
14755 base := v_1.Args[0]
14756 mem := v_2
14757 if !(is32Bit(int64(off1) + int64(off2))) {
14758 break
14759 }
14760 v.reset(OpAMD64ORQload)
14761 v.AuxInt = int32ToAuxInt(off1 + off2)
14762 v.Aux = symToAux(sym)
14763 v.AddArg3(val, base, mem)
14764 return true
14765 }
14766
14767
14768
14769 for {
14770 off1 := auxIntToInt32(v.AuxInt)
14771 sym1 := auxToSym(v.Aux)
14772 val := v_0
14773 if v_1.Op != OpAMD64LEAQ {
14774 break
14775 }
14776 off2 := auxIntToInt32(v_1.AuxInt)
14777 sym2 := auxToSym(v_1.Aux)
14778 base := v_1.Args[0]
14779 mem := v_2
14780 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
14781 break
14782 }
14783 v.reset(OpAMD64ORQload)
14784 v.AuxInt = int32ToAuxInt(off1 + off2)
14785 v.Aux = symToAux(mergeSym(sym1, sym2))
14786 v.AddArg3(val, base, mem)
14787 return true
14788 }
14789
14790
14791 for {
14792 off := auxIntToInt32(v.AuxInt)
14793 sym := auxToSym(v.Aux)
14794 x := v_0
14795 ptr := v_1
14796 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
14797 break
14798 }
14799 y := v_2.Args[1]
14800 if ptr != v_2.Args[0] {
14801 break
14802 }
14803 v.reset(OpAMD64ORQ)
14804 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
14805 v0.AddArg(y)
14806 v.AddArg2(x, v0)
14807 return true
14808 }
14809 return false
14810 }
14811 func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool {
14812 v_2 := v.Args[2]
14813 v_1 := v.Args[1]
14814 v_0 := v.Args[0]
14815
14816
14817
14818 for {
14819 off1 := auxIntToInt32(v.AuxInt)
14820 sym := auxToSym(v.Aux)
14821 if v_0.Op != OpAMD64ADDQconst {
14822 break
14823 }
14824 off2 := auxIntToInt32(v_0.AuxInt)
14825 base := v_0.Args[0]
14826 val := v_1
14827 mem := v_2
14828 if !(is32Bit(int64(off1) + int64(off2))) {
14829 break
14830 }
14831 v.reset(OpAMD64ORQmodify)
14832 v.AuxInt = int32ToAuxInt(off1 + off2)
14833 v.Aux = symToAux(sym)
14834 v.AddArg3(base, val, mem)
14835 return true
14836 }
14837
14838
14839
14840 for {
14841 off1 := auxIntToInt32(v.AuxInt)
14842 sym1 := auxToSym(v.Aux)
14843 if v_0.Op != OpAMD64LEAQ {
14844 break
14845 }
14846 off2 := auxIntToInt32(v_0.AuxInt)
14847 sym2 := auxToSym(v_0.Aux)
14848 base := v_0.Args[0]
14849 val := v_1
14850 mem := v_2
14851 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
14852 break
14853 }
14854 v.reset(OpAMD64ORQmodify)
14855 v.AuxInt = int32ToAuxInt(off1 + off2)
14856 v.Aux = symToAux(mergeSym(sym1, sym2))
14857 v.AddArg3(base, val, mem)
14858 return true
14859 }
14860 return false
14861 }
14862 func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool {
14863 v_1 := v.Args[1]
14864 v_0 := v.Args[0]
14865
14866
14867 for {
14868 x := v_0
14869 if v_1.Op != OpAMD64NEGQ {
14870 break
14871 }
14872 y := v_1.Args[0]
14873 v.reset(OpAMD64RORB)
14874 v.AddArg2(x, y)
14875 return true
14876 }
14877
14878
14879 for {
14880 x := v_0
14881 if v_1.Op != OpAMD64NEGL {
14882 break
14883 }
14884 y := v_1.Args[0]
14885 v.reset(OpAMD64RORB)
14886 v.AddArg2(x, y)
14887 return true
14888 }
14889
14890
14891 for {
14892 x := v_0
14893 if v_1.Op != OpAMD64MOVQconst {
14894 break
14895 }
14896 c := auxIntToInt64(v_1.AuxInt)
14897 v.reset(OpAMD64ROLBconst)
14898 v.AuxInt = int8ToAuxInt(int8(c & 7))
14899 v.AddArg(x)
14900 return true
14901 }
14902
14903
14904 for {
14905 x := v_0
14906 if v_1.Op != OpAMD64MOVLconst {
14907 break
14908 }
14909 c := auxIntToInt32(v_1.AuxInt)
14910 v.reset(OpAMD64ROLBconst)
14911 v.AuxInt = int8ToAuxInt(int8(c & 7))
14912 v.AddArg(x)
14913 return true
14914 }
14915 return false
14916 }
14917 func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool {
14918 v_0 := v.Args[0]
14919
14920
14921 for {
14922 if auxIntToInt8(v.AuxInt) != 0 {
14923 break
14924 }
14925 x := v_0
14926 v.copyOf(x)
14927 return true
14928 }
14929 return false
14930 }
14931 func rewriteValueAMD64_OpAMD64ROLL(v *Value) bool {
14932 v_1 := v.Args[1]
14933 v_0 := v.Args[0]
14934
14935
14936 for {
14937 x := v_0
14938 if v_1.Op != OpAMD64NEGQ {
14939 break
14940 }
14941 y := v_1.Args[0]
14942 v.reset(OpAMD64RORL)
14943 v.AddArg2(x, y)
14944 return true
14945 }
14946
14947
14948 for {
14949 x := v_0
14950 if v_1.Op != OpAMD64NEGL {
14951 break
14952 }
14953 y := v_1.Args[0]
14954 v.reset(OpAMD64RORL)
14955 v.AddArg2(x, y)
14956 return true
14957 }
14958
14959
14960 for {
14961 x := v_0
14962 if v_1.Op != OpAMD64MOVQconst {
14963 break
14964 }
14965 c := auxIntToInt64(v_1.AuxInt)
14966 v.reset(OpAMD64ROLLconst)
14967 v.AuxInt = int8ToAuxInt(int8(c & 31))
14968 v.AddArg(x)
14969 return true
14970 }
14971
14972
14973 for {
14974 x := v_0
14975 if v_1.Op != OpAMD64MOVLconst {
14976 break
14977 }
14978 c := auxIntToInt32(v_1.AuxInt)
14979 v.reset(OpAMD64ROLLconst)
14980 v.AuxInt = int8ToAuxInt(int8(c & 31))
14981 v.AddArg(x)
14982 return true
14983 }
14984 return false
14985 }
14986 func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool {
14987 v_0 := v.Args[0]
14988
14989
14990 for {
14991 if auxIntToInt8(v.AuxInt) != 0 {
14992 break
14993 }
14994 x := v_0
14995 v.copyOf(x)
14996 return true
14997 }
14998 return false
14999 }
15000 func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool {
15001 v_1 := v.Args[1]
15002 v_0 := v.Args[0]
15003
15004
15005 for {
15006 x := v_0
15007 if v_1.Op != OpAMD64NEGQ {
15008 break
15009 }
15010 y := v_1.Args[0]
15011 v.reset(OpAMD64RORQ)
15012 v.AddArg2(x, y)
15013 return true
15014 }
15015
15016
15017 for {
15018 x := v_0
15019 if v_1.Op != OpAMD64NEGL {
15020 break
15021 }
15022 y := v_1.Args[0]
15023 v.reset(OpAMD64RORQ)
15024 v.AddArg2(x, y)
15025 return true
15026 }
15027
15028
15029 for {
15030 x := v_0
15031 if v_1.Op != OpAMD64MOVQconst {
15032 break
15033 }
15034 c := auxIntToInt64(v_1.AuxInt)
15035 v.reset(OpAMD64ROLQconst)
15036 v.AuxInt = int8ToAuxInt(int8(c & 63))
15037 v.AddArg(x)
15038 return true
15039 }
15040
15041
15042 for {
15043 x := v_0
15044 if v_1.Op != OpAMD64MOVLconst {
15045 break
15046 }
15047 c := auxIntToInt32(v_1.AuxInt)
15048 v.reset(OpAMD64ROLQconst)
15049 v.AuxInt = int8ToAuxInt(int8(c & 63))
15050 v.AddArg(x)
15051 return true
15052 }
15053 return false
15054 }
15055 func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool {
15056 v_0 := v.Args[0]
15057
15058
15059 for {
15060 if auxIntToInt8(v.AuxInt) != 0 {
15061 break
15062 }
15063 x := v_0
15064 v.copyOf(x)
15065 return true
15066 }
15067 return false
15068 }
15069 func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool {
15070 v_1 := v.Args[1]
15071 v_0 := v.Args[0]
15072
15073
15074 for {
15075 x := v_0
15076 if v_1.Op != OpAMD64NEGQ {
15077 break
15078 }
15079 y := v_1.Args[0]
15080 v.reset(OpAMD64RORW)
15081 v.AddArg2(x, y)
15082 return true
15083 }
15084
15085
15086 for {
15087 x := v_0
15088 if v_1.Op != OpAMD64NEGL {
15089 break
15090 }
15091 y := v_1.Args[0]
15092 v.reset(OpAMD64RORW)
15093 v.AddArg2(x, y)
15094 return true
15095 }
15096
15097
15098 for {
15099 x := v_0
15100 if v_1.Op != OpAMD64MOVQconst {
15101 break
15102 }
15103 c := auxIntToInt64(v_1.AuxInt)
15104 v.reset(OpAMD64ROLWconst)
15105 v.AuxInt = int8ToAuxInt(int8(c & 15))
15106 v.AddArg(x)
15107 return true
15108 }
15109
15110
15111 for {
15112 x := v_0
15113 if v_1.Op != OpAMD64MOVLconst {
15114 break
15115 }
15116 c := auxIntToInt32(v_1.AuxInt)
15117 v.reset(OpAMD64ROLWconst)
15118 v.AuxInt = int8ToAuxInt(int8(c & 15))
15119 v.AddArg(x)
15120 return true
15121 }
15122 return false
15123 }
15124 func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool {
15125 v_0 := v.Args[0]
15126
15127
15128 for {
15129 if auxIntToInt8(v.AuxInt) != 0 {
15130 break
15131 }
15132 x := v_0
15133 v.copyOf(x)
15134 return true
15135 }
15136 return false
15137 }
15138 func rewriteValueAMD64_OpAMD64RORB(v *Value) bool {
15139 v_1 := v.Args[1]
15140 v_0 := v.Args[0]
15141
15142
15143 for {
15144 x := v_0
15145 if v_1.Op != OpAMD64NEGQ {
15146 break
15147 }
15148 y := v_1.Args[0]
15149 v.reset(OpAMD64ROLB)
15150 v.AddArg2(x, y)
15151 return true
15152 }
15153
15154
15155 for {
15156 x := v_0
15157 if v_1.Op != OpAMD64NEGL {
15158 break
15159 }
15160 y := v_1.Args[0]
15161 v.reset(OpAMD64ROLB)
15162 v.AddArg2(x, y)
15163 return true
15164 }
15165
15166
15167 for {
15168 x := v_0
15169 if v_1.Op != OpAMD64MOVQconst {
15170 break
15171 }
15172 c := auxIntToInt64(v_1.AuxInt)
15173 v.reset(OpAMD64ROLBconst)
15174 v.AuxInt = int8ToAuxInt(int8((-c) & 7))
15175 v.AddArg(x)
15176 return true
15177 }
15178
15179
15180 for {
15181 x := v_0
15182 if v_1.Op != OpAMD64MOVLconst {
15183 break
15184 }
15185 c := auxIntToInt32(v_1.AuxInt)
15186 v.reset(OpAMD64ROLBconst)
15187 v.AuxInt = int8ToAuxInt(int8((-c) & 7))
15188 v.AddArg(x)
15189 return true
15190 }
15191 return false
15192 }
15193 func rewriteValueAMD64_OpAMD64RORL(v *Value) bool {
15194 v_1 := v.Args[1]
15195 v_0 := v.Args[0]
15196
15197
15198 for {
15199 x := v_0
15200 if v_1.Op != OpAMD64NEGQ {
15201 break
15202 }
15203 y := v_1.Args[0]
15204 v.reset(OpAMD64ROLL)
15205 v.AddArg2(x, y)
15206 return true
15207 }
15208
15209
15210 for {
15211 x := v_0
15212 if v_1.Op != OpAMD64NEGL {
15213 break
15214 }
15215 y := v_1.Args[0]
15216 v.reset(OpAMD64ROLL)
15217 v.AddArg2(x, y)
15218 return true
15219 }
15220
15221
15222 for {
15223 x := v_0
15224 if v_1.Op != OpAMD64MOVQconst {
15225 break
15226 }
15227 c := auxIntToInt64(v_1.AuxInt)
15228 v.reset(OpAMD64ROLLconst)
15229 v.AuxInt = int8ToAuxInt(int8((-c) & 31))
15230 v.AddArg(x)
15231 return true
15232 }
15233
15234
15235 for {
15236 x := v_0
15237 if v_1.Op != OpAMD64MOVLconst {
15238 break
15239 }
15240 c := auxIntToInt32(v_1.AuxInt)
15241 v.reset(OpAMD64ROLLconst)
15242 v.AuxInt = int8ToAuxInt(int8((-c) & 31))
15243 v.AddArg(x)
15244 return true
15245 }
15246 return false
15247 }
15248 func rewriteValueAMD64_OpAMD64RORQ(v *Value) bool {
15249 v_1 := v.Args[1]
15250 v_0 := v.Args[0]
15251
15252
15253 for {
15254 x := v_0
15255 if v_1.Op != OpAMD64NEGQ {
15256 break
15257 }
15258 y := v_1.Args[0]
15259 v.reset(OpAMD64ROLQ)
15260 v.AddArg2(x, y)
15261 return true
15262 }
15263
15264
15265 for {
15266 x := v_0
15267 if v_1.Op != OpAMD64NEGL {
15268 break
15269 }
15270 y := v_1.Args[0]
15271 v.reset(OpAMD64ROLQ)
15272 v.AddArg2(x, y)
15273 return true
15274 }
15275
15276
15277 for {
15278 x := v_0
15279 if v_1.Op != OpAMD64MOVQconst {
15280 break
15281 }
15282 c := auxIntToInt64(v_1.AuxInt)
15283 v.reset(OpAMD64ROLQconst)
15284 v.AuxInt = int8ToAuxInt(int8((-c) & 63))
15285 v.AddArg(x)
15286 return true
15287 }
15288
15289
15290 for {
15291 x := v_0
15292 if v_1.Op != OpAMD64MOVLconst {
15293 break
15294 }
15295 c := auxIntToInt32(v_1.AuxInt)
15296 v.reset(OpAMD64ROLQconst)
15297 v.AuxInt = int8ToAuxInt(int8((-c) & 63))
15298 v.AddArg(x)
15299 return true
15300 }
15301 return false
15302 }
15303 func rewriteValueAMD64_OpAMD64RORW(v *Value) bool {
15304 v_1 := v.Args[1]
15305 v_0 := v.Args[0]
15306
15307
15308 for {
15309 x := v_0
15310 if v_1.Op != OpAMD64NEGQ {
15311 break
15312 }
15313 y := v_1.Args[0]
15314 v.reset(OpAMD64ROLW)
15315 v.AddArg2(x, y)
15316 return true
15317 }
15318
15319
15320 for {
15321 x := v_0
15322 if v_1.Op != OpAMD64NEGL {
15323 break
15324 }
15325 y := v_1.Args[0]
15326 v.reset(OpAMD64ROLW)
15327 v.AddArg2(x, y)
15328 return true
15329 }
15330
15331
15332 for {
15333 x := v_0
15334 if v_1.Op != OpAMD64MOVQconst {
15335 break
15336 }
15337 c := auxIntToInt64(v_1.AuxInt)
15338 v.reset(OpAMD64ROLWconst)
15339 v.AuxInt = int8ToAuxInt(int8((-c) & 15))
15340 v.AddArg(x)
15341 return true
15342 }
15343
15344
15345 for {
15346 x := v_0
15347 if v_1.Op != OpAMD64MOVLconst {
15348 break
15349 }
15350 c := auxIntToInt32(v_1.AuxInt)
15351 v.reset(OpAMD64ROLWconst)
15352 v.AuxInt = int8ToAuxInt(int8((-c) & 15))
15353 v.AddArg(x)
15354 return true
15355 }
15356 return false
15357 }
15358 func rewriteValueAMD64_OpAMD64SARB(v *Value) bool {
15359 v_1 := v.Args[1]
15360 v_0 := v.Args[0]
15361
15362
15363 for {
15364 x := v_0
15365 if v_1.Op != OpAMD64MOVQconst {
15366 break
15367 }
15368 c := auxIntToInt64(v_1.AuxInt)
15369 v.reset(OpAMD64SARBconst)
15370 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
15371 v.AddArg(x)
15372 return true
15373 }
15374
15375
15376 for {
15377 x := v_0
15378 if v_1.Op != OpAMD64MOVLconst {
15379 break
15380 }
15381 c := auxIntToInt32(v_1.AuxInt)
15382 v.reset(OpAMD64SARBconst)
15383 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
15384 v.AddArg(x)
15385 return true
15386 }
15387 return false
15388 }
15389 func rewriteValueAMD64_OpAMD64SARBconst(v *Value) bool {
15390 v_0 := v.Args[0]
15391
15392
15393 for {
15394 if auxIntToInt8(v.AuxInt) != 0 {
15395 break
15396 }
15397 x := v_0
15398 v.copyOf(x)
15399 return true
15400 }
15401
15402
15403 for {
15404 c := auxIntToInt8(v.AuxInt)
15405 if v_0.Op != OpAMD64MOVQconst {
15406 break
15407 }
15408 d := auxIntToInt64(v_0.AuxInt)
15409 v.reset(OpAMD64MOVQconst)
15410 v.AuxInt = int64ToAuxInt(int64(int8(d)) >> uint64(c))
15411 return true
15412 }
15413 return false
15414 }
15415 func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
15416 v_1 := v.Args[1]
15417 v_0 := v.Args[0]
15418 b := v.Block
15419
15420
15421 for {
15422 x := v_0
15423 if v_1.Op != OpAMD64MOVQconst {
15424 break
15425 }
15426 c := auxIntToInt64(v_1.AuxInt)
15427 v.reset(OpAMD64SARLconst)
15428 v.AuxInt = int8ToAuxInt(int8(c & 31))
15429 v.AddArg(x)
15430 return true
15431 }
15432
15433
15434 for {
15435 x := v_0
15436 if v_1.Op != OpAMD64MOVLconst {
15437 break
15438 }
15439 c := auxIntToInt32(v_1.AuxInt)
15440 v.reset(OpAMD64SARLconst)
15441 v.AuxInt = int8ToAuxInt(int8(c & 31))
15442 v.AddArg(x)
15443 return true
15444 }
15445
15446
15447
15448 for {
15449 x := v_0
15450 if v_1.Op != OpAMD64ADDQconst {
15451 break
15452 }
15453 c := auxIntToInt32(v_1.AuxInt)
15454 y := v_1.Args[0]
15455 if !(c&31 == 0) {
15456 break
15457 }
15458 v.reset(OpAMD64SARL)
15459 v.AddArg2(x, y)
15460 return true
15461 }
15462
15463
15464
15465 for {
15466 x := v_0
15467 if v_1.Op != OpAMD64NEGQ {
15468 break
15469 }
15470 t := v_1.Type
15471 v_1_0 := v_1.Args[0]
15472 if v_1_0.Op != OpAMD64ADDQconst {
15473 break
15474 }
15475 c := auxIntToInt32(v_1_0.AuxInt)
15476 y := v_1_0.Args[0]
15477 if !(c&31 == 0) {
15478 break
15479 }
15480 v.reset(OpAMD64SARL)
15481 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
15482 v0.AddArg(y)
15483 v.AddArg2(x, v0)
15484 return true
15485 }
15486
15487
15488
15489 for {
15490 x := v_0
15491 if v_1.Op != OpAMD64ANDQconst {
15492 break
15493 }
15494 c := auxIntToInt32(v_1.AuxInt)
15495 y := v_1.Args[0]
15496 if !(c&31 == 31) {
15497 break
15498 }
15499 v.reset(OpAMD64SARL)
15500 v.AddArg2(x, y)
15501 return true
15502 }
15503
15504
15505
15506 for {
15507 x := v_0
15508 if v_1.Op != OpAMD64NEGQ {
15509 break
15510 }
15511 t := v_1.Type
15512 v_1_0 := v_1.Args[0]
15513 if v_1_0.Op != OpAMD64ANDQconst {
15514 break
15515 }
15516 c := auxIntToInt32(v_1_0.AuxInt)
15517 y := v_1_0.Args[0]
15518 if !(c&31 == 31) {
15519 break
15520 }
15521 v.reset(OpAMD64SARL)
15522 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
15523 v0.AddArg(y)
15524 v.AddArg2(x, v0)
15525 return true
15526 }
15527
15528
15529
15530 for {
15531 x := v_0
15532 if v_1.Op != OpAMD64ADDLconst {
15533 break
15534 }
15535 c := auxIntToInt32(v_1.AuxInt)
15536 y := v_1.Args[0]
15537 if !(c&31 == 0) {
15538 break
15539 }
15540 v.reset(OpAMD64SARL)
15541 v.AddArg2(x, y)
15542 return true
15543 }
15544
15545
15546
15547 for {
15548 x := v_0
15549 if v_1.Op != OpAMD64NEGL {
15550 break
15551 }
15552 t := v_1.Type
15553 v_1_0 := v_1.Args[0]
15554 if v_1_0.Op != OpAMD64ADDLconst {
15555 break
15556 }
15557 c := auxIntToInt32(v_1_0.AuxInt)
15558 y := v_1_0.Args[0]
15559 if !(c&31 == 0) {
15560 break
15561 }
15562 v.reset(OpAMD64SARL)
15563 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
15564 v0.AddArg(y)
15565 v.AddArg2(x, v0)
15566 return true
15567 }
15568
15569
15570
15571 for {
15572 x := v_0
15573 if v_1.Op != OpAMD64ANDLconst {
15574 break
15575 }
15576 c := auxIntToInt32(v_1.AuxInt)
15577 y := v_1.Args[0]
15578 if !(c&31 == 31) {
15579 break
15580 }
15581 v.reset(OpAMD64SARL)
15582 v.AddArg2(x, y)
15583 return true
15584 }
15585
15586
15587
15588 for {
15589 x := v_0
15590 if v_1.Op != OpAMD64NEGL {
15591 break
15592 }
15593 t := v_1.Type
15594 v_1_0 := v_1.Args[0]
15595 if v_1_0.Op != OpAMD64ANDLconst {
15596 break
15597 }
15598 c := auxIntToInt32(v_1_0.AuxInt)
15599 y := v_1_0.Args[0]
15600 if !(c&31 == 31) {
15601 break
15602 }
15603 v.reset(OpAMD64SARL)
15604 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
15605 v0.AddArg(y)
15606 v.AddArg2(x, v0)
15607 return true
15608 }
15609
15610
15611
15612 for {
15613 l := v_0
15614 if l.Op != OpAMD64MOVLload {
15615 break
15616 }
15617 off := auxIntToInt32(l.AuxInt)
15618 sym := auxToSym(l.Aux)
15619 mem := l.Args[1]
15620 ptr := l.Args[0]
15621 x := v_1
15622 if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
15623 break
15624 }
15625 v.reset(OpAMD64SARXLload)
15626 v.AuxInt = int32ToAuxInt(off)
15627 v.Aux = symToAux(sym)
15628 v.AddArg3(ptr, x, mem)
15629 return true
15630 }
15631 return false
15632 }
15633 func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool {
15634 v_0 := v.Args[0]
15635
15636
15637 for {
15638 if auxIntToInt8(v.AuxInt) != 0 {
15639 break
15640 }
15641 x := v_0
15642 v.copyOf(x)
15643 return true
15644 }
15645
15646
15647 for {
15648 c := auxIntToInt8(v.AuxInt)
15649 if v_0.Op != OpAMD64MOVQconst {
15650 break
15651 }
15652 d := auxIntToInt64(v_0.AuxInt)
15653 v.reset(OpAMD64MOVQconst)
15654 v.AuxInt = int64ToAuxInt(int64(int32(d)) >> uint64(c))
15655 return true
15656 }
15657 return false
15658 }
15659 func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
15660 v_1 := v.Args[1]
15661 v_0 := v.Args[0]
15662 b := v.Block
15663
15664
15665 for {
15666 x := v_0
15667 if v_1.Op != OpAMD64MOVQconst {
15668 break
15669 }
15670 c := auxIntToInt64(v_1.AuxInt)
15671 v.reset(OpAMD64SARQconst)
15672 v.AuxInt = int8ToAuxInt(int8(c & 63))
15673 v.AddArg(x)
15674 return true
15675 }
15676
15677
15678 for {
15679 x := v_0
15680 if v_1.Op != OpAMD64MOVLconst {
15681 break
15682 }
15683 c := auxIntToInt32(v_1.AuxInt)
15684 v.reset(OpAMD64SARQconst)
15685 v.AuxInt = int8ToAuxInt(int8(c & 63))
15686 v.AddArg(x)
15687 return true
15688 }
15689
15690
15691
15692 for {
15693 x := v_0
15694 if v_1.Op != OpAMD64ADDQconst {
15695 break
15696 }
15697 c := auxIntToInt32(v_1.AuxInt)
15698 y := v_1.Args[0]
15699 if !(c&63 == 0) {
15700 break
15701 }
15702 v.reset(OpAMD64SARQ)
15703 v.AddArg2(x, y)
15704 return true
15705 }
15706
15707
15708
15709 for {
15710 x := v_0
15711 if v_1.Op != OpAMD64NEGQ {
15712 break
15713 }
15714 t := v_1.Type
15715 v_1_0 := v_1.Args[0]
15716 if v_1_0.Op != OpAMD64ADDQconst {
15717 break
15718 }
15719 c := auxIntToInt32(v_1_0.AuxInt)
15720 y := v_1_0.Args[0]
15721 if !(c&63 == 0) {
15722 break
15723 }
15724 v.reset(OpAMD64SARQ)
15725 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
15726 v0.AddArg(y)
15727 v.AddArg2(x, v0)
15728 return true
15729 }
15730
15731
15732
15733 for {
15734 x := v_0
15735 if v_1.Op != OpAMD64ANDQconst {
15736 break
15737 }
15738 c := auxIntToInt32(v_1.AuxInt)
15739 y := v_1.Args[0]
15740 if !(c&63 == 63) {
15741 break
15742 }
15743 v.reset(OpAMD64SARQ)
15744 v.AddArg2(x, y)
15745 return true
15746 }
15747
15748
15749
15750 for {
15751 x := v_0
15752 if v_1.Op != OpAMD64NEGQ {
15753 break
15754 }
15755 t := v_1.Type
15756 v_1_0 := v_1.Args[0]
15757 if v_1_0.Op != OpAMD64ANDQconst {
15758 break
15759 }
15760 c := auxIntToInt32(v_1_0.AuxInt)
15761 y := v_1_0.Args[0]
15762 if !(c&63 == 63) {
15763 break
15764 }
15765 v.reset(OpAMD64SARQ)
15766 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
15767 v0.AddArg(y)
15768 v.AddArg2(x, v0)
15769 return true
15770 }
15771
15772
15773
15774 for {
15775 x := v_0
15776 if v_1.Op != OpAMD64ADDLconst {
15777 break
15778 }
15779 c := auxIntToInt32(v_1.AuxInt)
15780 y := v_1.Args[0]
15781 if !(c&63 == 0) {
15782 break
15783 }
15784 v.reset(OpAMD64SARQ)
15785 v.AddArg2(x, y)
15786 return true
15787 }
15788
15789
15790
15791 for {
15792 x := v_0
15793 if v_1.Op != OpAMD64NEGL {
15794 break
15795 }
15796 t := v_1.Type
15797 v_1_0 := v_1.Args[0]
15798 if v_1_0.Op != OpAMD64ADDLconst {
15799 break
15800 }
15801 c := auxIntToInt32(v_1_0.AuxInt)
15802 y := v_1_0.Args[0]
15803 if !(c&63 == 0) {
15804 break
15805 }
15806 v.reset(OpAMD64SARQ)
15807 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
15808 v0.AddArg(y)
15809 v.AddArg2(x, v0)
15810 return true
15811 }
15812
15813
15814
15815 for {
15816 x := v_0
15817 if v_1.Op != OpAMD64ANDLconst {
15818 break
15819 }
15820 c := auxIntToInt32(v_1.AuxInt)
15821 y := v_1.Args[0]
15822 if !(c&63 == 63) {
15823 break
15824 }
15825 v.reset(OpAMD64SARQ)
15826 v.AddArg2(x, y)
15827 return true
15828 }
15829
15830
15831
15832 for {
15833 x := v_0
15834 if v_1.Op != OpAMD64NEGL {
15835 break
15836 }
15837 t := v_1.Type
15838 v_1_0 := v_1.Args[0]
15839 if v_1_0.Op != OpAMD64ANDLconst {
15840 break
15841 }
15842 c := auxIntToInt32(v_1_0.AuxInt)
15843 y := v_1_0.Args[0]
15844 if !(c&63 == 63) {
15845 break
15846 }
15847 v.reset(OpAMD64SARQ)
15848 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
15849 v0.AddArg(y)
15850 v.AddArg2(x, v0)
15851 return true
15852 }
15853
15854
15855
15856 for {
15857 l := v_0
15858 if l.Op != OpAMD64MOVQload {
15859 break
15860 }
15861 off := auxIntToInt32(l.AuxInt)
15862 sym := auxToSym(l.Aux)
15863 mem := l.Args[1]
15864 ptr := l.Args[0]
15865 x := v_1
15866 if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
15867 break
15868 }
15869 v.reset(OpAMD64SARXQload)
15870 v.AuxInt = int32ToAuxInt(off)
15871 v.Aux = symToAux(sym)
15872 v.AddArg3(ptr, x, mem)
15873 return true
15874 }
15875 return false
15876 }
15877 func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool {
15878 v_0 := v.Args[0]
15879
15880
15881 for {
15882 if auxIntToInt8(v.AuxInt) != 0 {
15883 break
15884 }
15885 x := v_0
15886 v.copyOf(x)
15887 return true
15888 }
15889
15890
15891 for {
15892 c := auxIntToInt8(v.AuxInt)
15893 if v_0.Op != OpAMD64MOVQconst {
15894 break
15895 }
15896 d := auxIntToInt64(v_0.AuxInt)
15897 v.reset(OpAMD64MOVQconst)
15898 v.AuxInt = int64ToAuxInt(d >> uint64(c))
15899 return true
15900 }
15901 return false
15902 }
15903 func rewriteValueAMD64_OpAMD64SARW(v *Value) bool {
15904 v_1 := v.Args[1]
15905 v_0 := v.Args[0]
15906
15907
15908 for {
15909 x := v_0
15910 if v_1.Op != OpAMD64MOVQconst {
15911 break
15912 }
15913 c := auxIntToInt64(v_1.AuxInt)
15914 v.reset(OpAMD64SARWconst)
15915 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
15916 v.AddArg(x)
15917 return true
15918 }
15919
15920
15921 for {
15922 x := v_0
15923 if v_1.Op != OpAMD64MOVLconst {
15924 break
15925 }
15926 c := auxIntToInt32(v_1.AuxInt)
15927 v.reset(OpAMD64SARWconst)
15928 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
15929 v.AddArg(x)
15930 return true
15931 }
15932 return false
15933 }
15934 func rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool {
15935 v_0 := v.Args[0]
15936
15937
15938 for {
15939 if auxIntToInt8(v.AuxInt) != 0 {
15940 break
15941 }
15942 x := v_0
15943 v.copyOf(x)
15944 return true
15945 }
15946
15947
15948 for {
15949 c := auxIntToInt8(v.AuxInt)
15950 if v_0.Op != OpAMD64MOVQconst {
15951 break
15952 }
15953 d := auxIntToInt64(v_0.AuxInt)
15954 v.reset(OpAMD64MOVQconst)
15955 v.AuxInt = int64ToAuxInt(int64(int16(d)) >> uint64(c))
15956 return true
15957 }
15958 return false
15959 }
15960 func rewriteValueAMD64_OpAMD64SARXLload(v *Value) bool {
15961 v_2 := v.Args[2]
15962 v_1 := v.Args[1]
15963 v_0 := v.Args[0]
15964 b := v.Block
15965 typ := &b.Func.Config.Types
15966
15967
15968 for {
15969 off := auxIntToInt32(v.AuxInt)
15970 sym := auxToSym(v.Aux)
15971 ptr := v_0
15972 if v_1.Op != OpAMD64MOVLconst {
15973 break
15974 }
15975 c := auxIntToInt32(v_1.AuxInt)
15976 mem := v_2
15977 v.reset(OpAMD64SARLconst)
15978 v.AuxInt = int8ToAuxInt(int8(c & 31))
15979 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
15980 v0.AuxInt = int32ToAuxInt(off)
15981 v0.Aux = symToAux(sym)
15982 v0.AddArg2(ptr, mem)
15983 v.AddArg(v0)
15984 return true
15985 }
15986 return false
15987 }
15988 func rewriteValueAMD64_OpAMD64SARXQload(v *Value) bool {
15989 v_2 := v.Args[2]
15990 v_1 := v.Args[1]
15991 v_0 := v.Args[0]
15992 b := v.Block
15993 typ := &b.Func.Config.Types
15994
15995
15996 for {
15997 off := auxIntToInt32(v.AuxInt)
15998 sym := auxToSym(v.Aux)
15999 ptr := v_0
16000 if v_1.Op != OpAMD64MOVQconst {
16001 break
16002 }
16003 c := auxIntToInt64(v_1.AuxInt)
16004 mem := v_2
16005 v.reset(OpAMD64SARQconst)
16006 v.AuxInt = int8ToAuxInt(int8(c & 63))
16007 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
16008 v0.AuxInt = int32ToAuxInt(off)
16009 v0.Aux = symToAux(sym)
16010 v0.AddArg2(ptr, mem)
16011 v.AddArg(v0)
16012 return true
16013 }
16014
16015
16016 for {
16017 off := auxIntToInt32(v.AuxInt)
16018 sym := auxToSym(v.Aux)
16019 ptr := v_0
16020 if v_1.Op != OpAMD64MOVLconst {
16021 break
16022 }
16023 c := auxIntToInt32(v_1.AuxInt)
16024 mem := v_2
16025 v.reset(OpAMD64SARQconst)
16026 v.AuxInt = int8ToAuxInt(int8(c & 63))
16027 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
16028 v0.AuxInt = int32ToAuxInt(off)
16029 v0.Aux = symToAux(sym)
16030 v0.AddArg2(ptr, mem)
16031 v.AddArg(v0)
16032 return true
16033 }
16034 return false
16035 }
16036 func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool {
16037 v_0 := v.Args[0]
16038
16039
16040 for {
16041 if v_0.Op != OpAMD64FlagEQ {
16042 break
16043 }
16044 v.reset(OpAMD64MOVLconst)
16045 v.AuxInt = int32ToAuxInt(0)
16046 return true
16047 }
16048
16049
16050 for {
16051 if v_0.Op != OpAMD64FlagLT_ULT {
16052 break
16053 }
16054 v.reset(OpAMD64MOVLconst)
16055 v.AuxInt = int32ToAuxInt(-1)
16056 return true
16057 }
16058
16059
16060 for {
16061 if v_0.Op != OpAMD64FlagLT_UGT {
16062 break
16063 }
16064 v.reset(OpAMD64MOVLconst)
16065 v.AuxInt = int32ToAuxInt(0)
16066 return true
16067 }
16068
16069
16070 for {
16071 if v_0.Op != OpAMD64FlagGT_ULT {
16072 break
16073 }
16074 v.reset(OpAMD64MOVLconst)
16075 v.AuxInt = int32ToAuxInt(-1)
16076 return true
16077 }
16078
16079
16080 for {
16081 if v_0.Op != OpAMD64FlagGT_UGT {
16082 break
16083 }
16084 v.reset(OpAMD64MOVLconst)
16085 v.AuxInt = int32ToAuxInt(0)
16086 return true
16087 }
16088 return false
16089 }
16090 func rewriteValueAMD64_OpAMD64SBBQ(v *Value) bool {
16091 v_2 := v.Args[2]
16092 v_1 := v.Args[1]
16093 v_0 := v.Args[0]
16094
16095
16096
16097 for {
16098 x := v_0
16099 if v_1.Op != OpAMD64MOVQconst {
16100 break
16101 }
16102 c := auxIntToInt64(v_1.AuxInt)
16103 borrow := v_2
16104 if !(is32Bit(c)) {
16105 break
16106 }
16107 v.reset(OpAMD64SBBQconst)
16108 v.AuxInt = int32ToAuxInt(int32(c))
16109 v.AddArg2(x, borrow)
16110 return true
16111 }
16112
16113
16114 for {
16115 x := v_0
16116 y := v_1
16117 if v_2.Op != OpAMD64FlagEQ {
16118 break
16119 }
16120 v.reset(OpAMD64SUBQborrow)
16121 v.AddArg2(x, y)
16122 return true
16123 }
16124 return false
16125 }
16126 func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value) bool {
16127 v_0 := v.Args[0]
16128
16129
16130 for {
16131 if v_0.Op != OpAMD64FlagEQ {
16132 break
16133 }
16134 v.reset(OpAMD64MOVQconst)
16135 v.AuxInt = int64ToAuxInt(0)
16136 return true
16137 }
16138
16139
16140 for {
16141 if v_0.Op != OpAMD64FlagLT_ULT {
16142 break
16143 }
16144 v.reset(OpAMD64MOVQconst)
16145 v.AuxInt = int64ToAuxInt(-1)
16146 return true
16147 }
16148
16149
16150 for {
16151 if v_0.Op != OpAMD64FlagLT_UGT {
16152 break
16153 }
16154 v.reset(OpAMD64MOVQconst)
16155 v.AuxInt = int64ToAuxInt(0)
16156 return true
16157 }
16158
16159
16160 for {
16161 if v_0.Op != OpAMD64FlagGT_ULT {
16162 break
16163 }
16164 v.reset(OpAMD64MOVQconst)
16165 v.AuxInt = int64ToAuxInt(-1)
16166 return true
16167 }
16168
16169
16170 for {
16171 if v_0.Op != OpAMD64FlagGT_UGT {
16172 break
16173 }
16174 v.reset(OpAMD64MOVQconst)
16175 v.AuxInt = int64ToAuxInt(0)
16176 return true
16177 }
16178 return false
16179 }
16180 func rewriteValueAMD64_OpAMD64SBBQconst(v *Value) bool {
16181 v_1 := v.Args[1]
16182 v_0 := v.Args[0]
16183
16184
16185 for {
16186 c := auxIntToInt32(v.AuxInt)
16187 x := v_0
16188 if v_1.Op != OpAMD64FlagEQ {
16189 break
16190 }
16191 v.reset(OpAMD64SUBQconstborrow)
16192 v.AuxInt = int32ToAuxInt(c)
16193 v.AddArg(x)
16194 return true
16195 }
16196 return false
16197 }
16198 func rewriteValueAMD64_OpAMD64SETA(v *Value) bool {
16199 v_0 := v.Args[0]
16200
16201
16202 for {
16203 if v_0.Op != OpAMD64InvertFlags {
16204 break
16205 }
16206 x := v_0.Args[0]
16207 v.reset(OpAMD64SETB)
16208 v.AddArg(x)
16209 return true
16210 }
16211
16212
16213 for {
16214 if v_0.Op != OpAMD64FlagEQ {
16215 break
16216 }
16217 v.reset(OpAMD64MOVLconst)
16218 v.AuxInt = int32ToAuxInt(0)
16219 return true
16220 }
16221
16222
16223 for {
16224 if v_0.Op != OpAMD64FlagLT_ULT {
16225 break
16226 }
16227 v.reset(OpAMD64MOVLconst)
16228 v.AuxInt = int32ToAuxInt(0)
16229 return true
16230 }
16231
16232
16233 for {
16234 if v_0.Op != OpAMD64FlagLT_UGT {
16235 break
16236 }
16237 v.reset(OpAMD64MOVLconst)
16238 v.AuxInt = int32ToAuxInt(1)
16239 return true
16240 }
16241
16242
16243 for {
16244 if v_0.Op != OpAMD64FlagGT_ULT {
16245 break
16246 }
16247 v.reset(OpAMD64MOVLconst)
16248 v.AuxInt = int32ToAuxInt(0)
16249 return true
16250 }
16251
16252
16253 for {
16254 if v_0.Op != OpAMD64FlagGT_UGT {
16255 break
16256 }
16257 v.reset(OpAMD64MOVLconst)
16258 v.AuxInt = int32ToAuxInt(1)
16259 return true
16260 }
16261 return false
16262 }
16263 func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool {
16264 v_0 := v.Args[0]
16265 b := v.Block
16266 typ := &b.Func.Config.Types
16267
16268
16269 for {
16270 if v_0.Op != OpAMD64TESTQ {
16271 break
16272 }
16273 x := v_0.Args[1]
16274 if x != v_0.Args[0] {
16275 break
16276 }
16277 v.reset(OpConstBool)
16278 v.AuxInt = boolToAuxInt(true)
16279 return true
16280 }
16281
16282
16283 for {
16284 if v_0.Op != OpAMD64TESTL {
16285 break
16286 }
16287 x := v_0.Args[1]
16288 if x != v_0.Args[0] {
16289 break
16290 }
16291 v.reset(OpConstBool)
16292 v.AuxInt = boolToAuxInt(true)
16293 return true
16294 }
16295
16296
16297 for {
16298 if v_0.Op != OpAMD64TESTW {
16299 break
16300 }
16301 x := v_0.Args[1]
16302 if x != v_0.Args[0] {
16303 break
16304 }
16305 v.reset(OpConstBool)
16306 v.AuxInt = boolToAuxInt(true)
16307 return true
16308 }
16309
16310
16311 for {
16312 if v_0.Op != OpAMD64TESTB {
16313 break
16314 }
16315 x := v_0.Args[1]
16316 if x != v_0.Args[0] {
16317 break
16318 }
16319 v.reset(OpConstBool)
16320 v.AuxInt = boolToAuxInt(true)
16321 return true
16322 }
16323
16324
16325 for {
16326 if v_0.Op != OpAMD64BTLconst || auxIntToInt8(v_0.AuxInt) != 0 {
16327 break
16328 }
16329 x := v_0.Args[0]
16330 v.reset(OpAMD64XORLconst)
16331 v.AuxInt = int32ToAuxInt(1)
16332 v0 := b.NewValue0(v.Pos, OpAMD64ANDLconst, typ.Bool)
16333 v0.AuxInt = int32ToAuxInt(1)
16334 v0.AddArg(x)
16335 v.AddArg(v0)
16336 return true
16337 }
16338
16339
16340 for {
16341 if v_0.Op != OpAMD64BTQconst || auxIntToInt8(v_0.AuxInt) != 0 {
16342 break
16343 }
16344 x := v_0.Args[0]
16345 v.reset(OpAMD64XORLconst)
16346 v.AuxInt = int32ToAuxInt(1)
16347 v0 := b.NewValue0(v.Pos, OpAMD64ANDLconst, typ.Bool)
16348 v0.AuxInt = int32ToAuxInt(1)
16349 v0.AddArg(x)
16350 v.AddArg(v0)
16351 return true
16352 }
16353
16354
16355
16356 for {
16357 c := v_0
16358 if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
16359 break
16360 }
16361 x := c.Args[0]
16362 if !(c.Uses == 1) {
16363 break
16364 }
16365 v.reset(OpAMD64SETA)
16366 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
16367 v0.AuxInt = int32ToAuxInt(127)
16368 v0.AddArg(x)
16369 v.AddArg(v0)
16370 return true
16371 }
16372
16373
16374
16375 for {
16376 c := v_0
16377 if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
16378 break
16379 }
16380 x := c.Args[0]
16381 if !(c.Uses == 1) {
16382 break
16383 }
16384 v.reset(OpAMD64SETA)
16385 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
16386 v0.AuxInt = int32ToAuxInt(127)
16387 v0.AddArg(x)
16388 v.AddArg(v0)
16389 return true
16390 }
16391
16392
16393 for {
16394 if v_0.Op != OpAMD64InvertFlags {
16395 break
16396 }
16397 x := v_0.Args[0]
16398 v.reset(OpAMD64SETBE)
16399 v.AddArg(x)
16400 return true
16401 }
16402
16403
16404 for {
16405 if v_0.Op != OpAMD64FlagEQ {
16406 break
16407 }
16408 v.reset(OpAMD64MOVLconst)
16409 v.AuxInt = int32ToAuxInt(1)
16410 return true
16411 }
16412
16413
16414 for {
16415 if v_0.Op != OpAMD64FlagLT_ULT {
16416 break
16417 }
16418 v.reset(OpAMD64MOVLconst)
16419 v.AuxInt = int32ToAuxInt(0)
16420 return true
16421 }
16422
16423
16424 for {
16425 if v_0.Op != OpAMD64FlagLT_UGT {
16426 break
16427 }
16428 v.reset(OpAMD64MOVLconst)
16429 v.AuxInt = int32ToAuxInt(1)
16430 return true
16431 }
16432
16433
16434 for {
16435 if v_0.Op != OpAMD64FlagGT_ULT {
16436 break
16437 }
16438 v.reset(OpAMD64MOVLconst)
16439 v.AuxInt = int32ToAuxInt(0)
16440 return true
16441 }
16442
16443
16444 for {
16445 if v_0.Op != OpAMD64FlagGT_UGT {
16446 break
16447 }
16448 v.reset(OpAMD64MOVLconst)
16449 v.AuxInt = int32ToAuxInt(1)
16450 return true
16451 }
16452 return false
16453 }
16454 func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool {
16455 v_2 := v.Args[2]
16456 v_1 := v.Args[1]
16457 v_0 := v.Args[0]
16458 b := v.Block
16459 typ := &b.Func.Config.Types
16460
16461
16462 for {
16463 off := auxIntToInt32(v.AuxInt)
16464 sym := auxToSym(v.Aux)
16465 ptr := v_0
16466 if v_1.Op != OpAMD64InvertFlags {
16467 break
16468 }
16469 x := v_1.Args[0]
16470 mem := v_2
16471 v.reset(OpAMD64SETBEstore)
16472 v.AuxInt = int32ToAuxInt(off)
16473 v.Aux = symToAux(sym)
16474 v.AddArg3(ptr, x, mem)
16475 return true
16476 }
16477
16478
16479
16480 for {
16481 off1 := auxIntToInt32(v.AuxInt)
16482 sym := auxToSym(v.Aux)
16483 if v_0.Op != OpAMD64ADDQconst {
16484 break
16485 }
16486 off2 := auxIntToInt32(v_0.AuxInt)
16487 base := v_0.Args[0]
16488 val := v_1
16489 mem := v_2
16490 if !(is32Bit(int64(off1) + int64(off2))) {
16491 break
16492 }
16493 v.reset(OpAMD64SETAEstore)
16494 v.AuxInt = int32ToAuxInt(off1 + off2)
16495 v.Aux = symToAux(sym)
16496 v.AddArg3(base, val, mem)
16497 return true
16498 }
16499
16500
16501
16502 for {
16503 off1 := auxIntToInt32(v.AuxInt)
16504 sym1 := auxToSym(v.Aux)
16505 if v_0.Op != OpAMD64LEAQ {
16506 break
16507 }
16508 off2 := auxIntToInt32(v_0.AuxInt)
16509 sym2 := auxToSym(v_0.Aux)
16510 base := v_0.Args[0]
16511 val := v_1
16512 mem := v_2
16513 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
16514 break
16515 }
16516 v.reset(OpAMD64SETAEstore)
16517 v.AuxInt = int32ToAuxInt(off1 + off2)
16518 v.Aux = symToAux(mergeSym(sym1, sym2))
16519 v.AddArg3(base, val, mem)
16520 return true
16521 }
16522
16523
16524 for {
16525 off := auxIntToInt32(v.AuxInt)
16526 sym := auxToSym(v.Aux)
16527 ptr := v_0
16528 if v_1.Op != OpAMD64FlagEQ {
16529 break
16530 }
16531 mem := v_2
16532 v.reset(OpAMD64MOVBstore)
16533 v.AuxInt = int32ToAuxInt(off)
16534 v.Aux = symToAux(sym)
16535 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16536 v0.AuxInt = int32ToAuxInt(1)
16537 v.AddArg3(ptr, v0, mem)
16538 return true
16539 }
16540
16541
16542 for {
16543 off := auxIntToInt32(v.AuxInt)
16544 sym := auxToSym(v.Aux)
16545 ptr := v_0
16546 if v_1.Op != OpAMD64FlagLT_ULT {
16547 break
16548 }
16549 mem := v_2
16550 v.reset(OpAMD64MOVBstore)
16551 v.AuxInt = int32ToAuxInt(off)
16552 v.Aux = symToAux(sym)
16553 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16554 v0.AuxInt = int32ToAuxInt(0)
16555 v.AddArg3(ptr, v0, mem)
16556 return true
16557 }
16558
16559
16560 for {
16561 off := auxIntToInt32(v.AuxInt)
16562 sym := auxToSym(v.Aux)
16563 ptr := v_0
16564 if v_1.Op != OpAMD64FlagLT_UGT {
16565 break
16566 }
16567 mem := v_2
16568 v.reset(OpAMD64MOVBstore)
16569 v.AuxInt = int32ToAuxInt(off)
16570 v.Aux = symToAux(sym)
16571 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16572 v0.AuxInt = int32ToAuxInt(1)
16573 v.AddArg3(ptr, v0, mem)
16574 return true
16575 }
16576
16577
16578 for {
16579 off := auxIntToInt32(v.AuxInt)
16580 sym := auxToSym(v.Aux)
16581 ptr := v_0
16582 if v_1.Op != OpAMD64FlagGT_ULT {
16583 break
16584 }
16585 mem := v_2
16586 v.reset(OpAMD64MOVBstore)
16587 v.AuxInt = int32ToAuxInt(off)
16588 v.Aux = symToAux(sym)
16589 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16590 v0.AuxInt = int32ToAuxInt(0)
16591 v.AddArg3(ptr, v0, mem)
16592 return true
16593 }
16594
16595
16596 for {
16597 off := auxIntToInt32(v.AuxInt)
16598 sym := auxToSym(v.Aux)
16599 ptr := v_0
16600 if v_1.Op != OpAMD64FlagGT_UGT {
16601 break
16602 }
16603 mem := v_2
16604 v.reset(OpAMD64MOVBstore)
16605 v.AuxInt = int32ToAuxInt(off)
16606 v.Aux = symToAux(sym)
16607 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16608 v0.AuxInt = int32ToAuxInt(1)
16609 v.AddArg3(ptr, v0, mem)
16610 return true
16611 }
16612 return false
16613 }
16614 func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool {
16615 v_2 := v.Args[2]
16616 v_1 := v.Args[1]
16617 v_0 := v.Args[0]
16618 b := v.Block
16619 typ := &b.Func.Config.Types
16620
16621
16622 for {
16623 off := auxIntToInt32(v.AuxInt)
16624 sym := auxToSym(v.Aux)
16625 ptr := v_0
16626 if v_1.Op != OpAMD64InvertFlags {
16627 break
16628 }
16629 x := v_1.Args[0]
16630 mem := v_2
16631 v.reset(OpAMD64SETBstore)
16632 v.AuxInt = int32ToAuxInt(off)
16633 v.Aux = symToAux(sym)
16634 v.AddArg3(ptr, x, mem)
16635 return true
16636 }
16637
16638
16639
16640 for {
16641 off1 := auxIntToInt32(v.AuxInt)
16642 sym := auxToSym(v.Aux)
16643 if v_0.Op != OpAMD64ADDQconst {
16644 break
16645 }
16646 off2 := auxIntToInt32(v_0.AuxInt)
16647 base := v_0.Args[0]
16648 val := v_1
16649 mem := v_2
16650 if !(is32Bit(int64(off1) + int64(off2))) {
16651 break
16652 }
16653 v.reset(OpAMD64SETAstore)
16654 v.AuxInt = int32ToAuxInt(off1 + off2)
16655 v.Aux = symToAux(sym)
16656 v.AddArg3(base, val, mem)
16657 return true
16658 }
16659
16660
16661
16662 for {
16663 off1 := auxIntToInt32(v.AuxInt)
16664 sym1 := auxToSym(v.Aux)
16665 if v_0.Op != OpAMD64LEAQ {
16666 break
16667 }
16668 off2 := auxIntToInt32(v_0.AuxInt)
16669 sym2 := auxToSym(v_0.Aux)
16670 base := v_0.Args[0]
16671 val := v_1
16672 mem := v_2
16673 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
16674 break
16675 }
16676 v.reset(OpAMD64SETAstore)
16677 v.AuxInt = int32ToAuxInt(off1 + off2)
16678 v.Aux = symToAux(mergeSym(sym1, sym2))
16679 v.AddArg3(base, val, mem)
16680 return true
16681 }
16682
16683
16684 for {
16685 off := auxIntToInt32(v.AuxInt)
16686 sym := auxToSym(v.Aux)
16687 ptr := v_0
16688 if v_1.Op != OpAMD64FlagEQ {
16689 break
16690 }
16691 mem := v_2
16692 v.reset(OpAMD64MOVBstore)
16693 v.AuxInt = int32ToAuxInt(off)
16694 v.Aux = symToAux(sym)
16695 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16696 v0.AuxInt = int32ToAuxInt(0)
16697 v.AddArg3(ptr, v0, mem)
16698 return true
16699 }
16700
16701
16702 for {
16703 off := auxIntToInt32(v.AuxInt)
16704 sym := auxToSym(v.Aux)
16705 ptr := v_0
16706 if v_1.Op != OpAMD64FlagLT_ULT {
16707 break
16708 }
16709 mem := v_2
16710 v.reset(OpAMD64MOVBstore)
16711 v.AuxInt = int32ToAuxInt(off)
16712 v.Aux = symToAux(sym)
16713 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16714 v0.AuxInt = int32ToAuxInt(0)
16715 v.AddArg3(ptr, v0, mem)
16716 return true
16717 }
16718
16719
16720 for {
16721 off := auxIntToInt32(v.AuxInt)
16722 sym := auxToSym(v.Aux)
16723 ptr := v_0
16724 if v_1.Op != OpAMD64FlagLT_UGT {
16725 break
16726 }
16727 mem := v_2
16728 v.reset(OpAMD64MOVBstore)
16729 v.AuxInt = int32ToAuxInt(off)
16730 v.Aux = symToAux(sym)
16731 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16732 v0.AuxInt = int32ToAuxInt(1)
16733 v.AddArg3(ptr, v0, mem)
16734 return true
16735 }
16736
16737
16738 for {
16739 off := auxIntToInt32(v.AuxInt)
16740 sym := auxToSym(v.Aux)
16741 ptr := v_0
16742 if v_1.Op != OpAMD64FlagGT_ULT {
16743 break
16744 }
16745 mem := v_2
16746 v.reset(OpAMD64MOVBstore)
16747 v.AuxInt = int32ToAuxInt(off)
16748 v.Aux = symToAux(sym)
16749 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16750 v0.AuxInt = int32ToAuxInt(0)
16751 v.AddArg3(ptr, v0, mem)
16752 return true
16753 }
16754
16755
16756 for {
16757 off := auxIntToInt32(v.AuxInt)
16758 sym := auxToSym(v.Aux)
16759 ptr := v_0
16760 if v_1.Op != OpAMD64FlagGT_UGT {
16761 break
16762 }
16763 mem := v_2
16764 v.reset(OpAMD64MOVBstore)
16765 v.AuxInt = int32ToAuxInt(off)
16766 v.Aux = symToAux(sym)
16767 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
16768 v0.AuxInt = int32ToAuxInt(1)
16769 v.AddArg3(ptr, v0, mem)
16770 return true
16771 }
16772 return false
16773 }
16774 func rewriteValueAMD64_OpAMD64SETB(v *Value) bool {
16775 v_0 := v.Args[0]
16776 b := v.Block
16777
16778
16779 for {
16780 if v_0.Op != OpAMD64TESTQ {
16781 break
16782 }
16783 x := v_0.Args[1]
16784 if x != v_0.Args[0] {
16785 break
16786 }
16787 v.reset(OpConstBool)
16788 v.AuxInt = boolToAuxInt(false)
16789 return true
16790 }
16791
16792
16793 for {
16794 if v_0.Op != OpAMD64TESTL {
16795 break
16796 }
16797 x := v_0.Args[1]
16798 if x != v_0.Args[0] {
16799 break
16800 }
16801 v.reset(OpConstBool)
16802 v.AuxInt = boolToAuxInt(false)
16803 return true
16804 }
16805
16806
16807 for {
16808 if v_0.Op != OpAMD64TESTW {
16809 break
16810 }
16811 x := v_0.Args[1]
16812 if x != v_0.Args[0] {
16813 break
16814 }
16815 v.reset(OpConstBool)
16816 v.AuxInt = boolToAuxInt(false)
16817 return true
16818 }
16819
16820
16821 for {
16822 if v_0.Op != OpAMD64TESTB {
16823 break
16824 }
16825 x := v_0.Args[1]
16826 if x != v_0.Args[0] {
16827 break
16828 }
16829 v.reset(OpConstBool)
16830 v.AuxInt = boolToAuxInt(false)
16831 return true
16832 }
16833
16834
16835 for {
16836 if v_0.Op != OpAMD64BTLconst || auxIntToInt8(v_0.AuxInt) != 0 {
16837 break
16838 }
16839 x := v_0.Args[0]
16840 v.reset(OpAMD64ANDLconst)
16841 v.AuxInt = int32ToAuxInt(1)
16842 v.AddArg(x)
16843 return true
16844 }
16845
16846
16847 for {
16848 if v_0.Op != OpAMD64BTQconst || auxIntToInt8(v_0.AuxInt) != 0 {
16849 break
16850 }
16851 x := v_0.Args[0]
16852 v.reset(OpAMD64ANDQconst)
16853 v.AuxInt = int32ToAuxInt(1)
16854 v.AddArg(x)
16855 return true
16856 }
16857
16858
16859
16860 for {
16861 c := v_0
16862 if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
16863 break
16864 }
16865 x := c.Args[0]
16866 if !(c.Uses == 1) {
16867 break
16868 }
16869 v.reset(OpAMD64SETBE)
16870 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
16871 v0.AuxInt = int32ToAuxInt(127)
16872 v0.AddArg(x)
16873 v.AddArg(v0)
16874 return true
16875 }
16876
16877
16878
16879 for {
16880 c := v_0
16881 if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
16882 break
16883 }
16884 x := c.Args[0]
16885 if !(c.Uses == 1) {
16886 break
16887 }
16888 v.reset(OpAMD64SETBE)
16889 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
16890 v0.AuxInt = int32ToAuxInt(127)
16891 v0.AddArg(x)
16892 v.AddArg(v0)
16893 return true
16894 }
16895
16896
16897 for {
16898 if v_0.Op != OpAMD64InvertFlags {
16899 break
16900 }
16901 x := v_0.Args[0]
16902 v.reset(OpAMD64SETA)
16903 v.AddArg(x)
16904 return true
16905 }
16906
16907
16908 for {
16909 if v_0.Op != OpAMD64FlagEQ {
16910 break
16911 }
16912 v.reset(OpAMD64MOVLconst)
16913 v.AuxInt = int32ToAuxInt(0)
16914 return true
16915 }
16916
16917
16918 for {
16919 if v_0.Op != OpAMD64FlagLT_ULT {
16920 break
16921 }
16922 v.reset(OpAMD64MOVLconst)
16923 v.AuxInt = int32ToAuxInt(1)
16924 return true
16925 }
16926
16927
16928 for {
16929 if v_0.Op != OpAMD64FlagLT_UGT {
16930 break
16931 }
16932 v.reset(OpAMD64MOVLconst)
16933 v.AuxInt = int32ToAuxInt(0)
16934 return true
16935 }
16936
16937
16938 for {
16939 if v_0.Op != OpAMD64FlagGT_ULT {
16940 break
16941 }
16942 v.reset(OpAMD64MOVLconst)
16943 v.AuxInt = int32ToAuxInt(1)
16944 return true
16945 }
16946
16947
16948 for {
16949 if v_0.Op != OpAMD64FlagGT_UGT {
16950 break
16951 }
16952 v.reset(OpAMD64MOVLconst)
16953 v.AuxInt = int32ToAuxInt(0)
16954 return true
16955 }
16956 return false
16957 }
16958 func rewriteValueAMD64_OpAMD64SETBE(v *Value) bool {
16959 v_0 := v.Args[0]
16960
16961
16962 for {
16963 if v_0.Op != OpAMD64InvertFlags {
16964 break
16965 }
16966 x := v_0.Args[0]
16967 v.reset(OpAMD64SETAE)
16968 v.AddArg(x)
16969 return true
16970 }
16971
16972
16973 for {
16974 if v_0.Op != OpAMD64FlagEQ {
16975 break
16976 }
16977 v.reset(OpAMD64MOVLconst)
16978 v.AuxInt = int32ToAuxInt(1)
16979 return true
16980 }
16981
16982
16983 for {
16984 if v_0.Op != OpAMD64FlagLT_ULT {
16985 break
16986 }
16987 v.reset(OpAMD64MOVLconst)
16988 v.AuxInt = int32ToAuxInt(1)
16989 return true
16990 }
16991
16992
16993 for {
16994 if v_0.Op != OpAMD64FlagLT_UGT {
16995 break
16996 }
16997 v.reset(OpAMD64MOVLconst)
16998 v.AuxInt = int32ToAuxInt(0)
16999 return true
17000 }
17001
17002
17003 for {
17004 if v_0.Op != OpAMD64FlagGT_ULT {
17005 break
17006 }
17007 v.reset(OpAMD64MOVLconst)
17008 v.AuxInt = int32ToAuxInt(1)
17009 return true
17010 }
17011
17012
17013 for {
17014 if v_0.Op != OpAMD64FlagGT_UGT {
17015 break
17016 }
17017 v.reset(OpAMD64MOVLconst)
17018 v.AuxInt = int32ToAuxInt(0)
17019 return true
17020 }
17021 return false
17022 }
17023 func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool {
17024 v_2 := v.Args[2]
17025 v_1 := v.Args[1]
17026 v_0 := v.Args[0]
17027 b := v.Block
17028 typ := &b.Func.Config.Types
17029
17030
17031 for {
17032 off := auxIntToInt32(v.AuxInt)
17033 sym := auxToSym(v.Aux)
17034 ptr := v_0
17035 if v_1.Op != OpAMD64InvertFlags {
17036 break
17037 }
17038 x := v_1.Args[0]
17039 mem := v_2
17040 v.reset(OpAMD64SETAEstore)
17041 v.AuxInt = int32ToAuxInt(off)
17042 v.Aux = symToAux(sym)
17043 v.AddArg3(ptr, x, mem)
17044 return true
17045 }
17046
17047
17048
17049 for {
17050 off1 := auxIntToInt32(v.AuxInt)
17051 sym := auxToSym(v.Aux)
17052 if v_0.Op != OpAMD64ADDQconst {
17053 break
17054 }
17055 off2 := auxIntToInt32(v_0.AuxInt)
17056 base := v_0.Args[0]
17057 val := v_1
17058 mem := v_2
17059 if !(is32Bit(int64(off1) + int64(off2))) {
17060 break
17061 }
17062 v.reset(OpAMD64SETBEstore)
17063 v.AuxInt = int32ToAuxInt(off1 + off2)
17064 v.Aux = symToAux(sym)
17065 v.AddArg3(base, val, mem)
17066 return true
17067 }
17068
17069
17070
17071 for {
17072 off1 := auxIntToInt32(v.AuxInt)
17073 sym1 := auxToSym(v.Aux)
17074 if v_0.Op != OpAMD64LEAQ {
17075 break
17076 }
17077 off2 := auxIntToInt32(v_0.AuxInt)
17078 sym2 := auxToSym(v_0.Aux)
17079 base := v_0.Args[0]
17080 val := v_1
17081 mem := v_2
17082 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
17083 break
17084 }
17085 v.reset(OpAMD64SETBEstore)
17086 v.AuxInt = int32ToAuxInt(off1 + off2)
17087 v.Aux = symToAux(mergeSym(sym1, sym2))
17088 v.AddArg3(base, val, mem)
17089 return true
17090 }
17091
17092
17093 for {
17094 off := auxIntToInt32(v.AuxInt)
17095 sym := auxToSym(v.Aux)
17096 ptr := v_0
17097 if v_1.Op != OpAMD64FlagEQ {
17098 break
17099 }
17100 mem := v_2
17101 v.reset(OpAMD64MOVBstore)
17102 v.AuxInt = int32ToAuxInt(off)
17103 v.Aux = symToAux(sym)
17104 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17105 v0.AuxInt = int32ToAuxInt(1)
17106 v.AddArg3(ptr, v0, mem)
17107 return true
17108 }
17109
17110
17111 for {
17112 off := auxIntToInt32(v.AuxInt)
17113 sym := auxToSym(v.Aux)
17114 ptr := v_0
17115 if v_1.Op != OpAMD64FlagLT_ULT {
17116 break
17117 }
17118 mem := v_2
17119 v.reset(OpAMD64MOVBstore)
17120 v.AuxInt = int32ToAuxInt(off)
17121 v.Aux = symToAux(sym)
17122 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17123 v0.AuxInt = int32ToAuxInt(1)
17124 v.AddArg3(ptr, v0, mem)
17125 return true
17126 }
17127
17128
17129 for {
17130 off := auxIntToInt32(v.AuxInt)
17131 sym := auxToSym(v.Aux)
17132 ptr := v_0
17133 if v_1.Op != OpAMD64FlagLT_UGT {
17134 break
17135 }
17136 mem := v_2
17137 v.reset(OpAMD64MOVBstore)
17138 v.AuxInt = int32ToAuxInt(off)
17139 v.Aux = symToAux(sym)
17140 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17141 v0.AuxInt = int32ToAuxInt(0)
17142 v.AddArg3(ptr, v0, mem)
17143 return true
17144 }
17145
17146
17147 for {
17148 off := auxIntToInt32(v.AuxInt)
17149 sym := auxToSym(v.Aux)
17150 ptr := v_0
17151 if v_1.Op != OpAMD64FlagGT_ULT {
17152 break
17153 }
17154 mem := v_2
17155 v.reset(OpAMD64MOVBstore)
17156 v.AuxInt = int32ToAuxInt(off)
17157 v.Aux = symToAux(sym)
17158 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17159 v0.AuxInt = int32ToAuxInt(1)
17160 v.AddArg3(ptr, v0, mem)
17161 return true
17162 }
17163
17164
17165 for {
17166 off := auxIntToInt32(v.AuxInt)
17167 sym := auxToSym(v.Aux)
17168 ptr := v_0
17169 if v_1.Op != OpAMD64FlagGT_UGT {
17170 break
17171 }
17172 mem := v_2
17173 v.reset(OpAMD64MOVBstore)
17174 v.AuxInt = int32ToAuxInt(off)
17175 v.Aux = symToAux(sym)
17176 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17177 v0.AuxInt = int32ToAuxInt(0)
17178 v.AddArg3(ptr, v0, mem)
17179 return true
17180 }
17181 return false
17182 }
17183 func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool {
17184 v_2 := v.Args[2]
17185 v_1 := v.Args[1]
17186 v_0 := v.Args[0]
17187 b := v.Block
17188 typ := &b.Func.Config.Types
17189
17190
17191 for {
17192 off := auxIntToInt32(v.AuxInt)
17193 sym := auxToSym(v.Aux)
17194 ptr := v_0
17195 if v_1.Op != OpAMD64InvertFlags {
17196 break
17197 }
17198 x := v_1.Args[0]
17199 mem := v_2
17200 v.reset(OpAMD64SETAstore)
17201 v.AuxInt = int32ToAuxInt(off)
17202 v.Aux = symToAux(sym)
17203 v.AddArg3(ptr, x, mem)
17204 return true
17205 }
17206
17207
17208
17209 for {
17210 off1 := auxIntToInt32(v.AuxInt)
17211 sym := auxToSym(v.Aux)
17212 if v_0.Op != OpAMD64ADDQconst {
17213 break
17214 }
17215 off2 := auxIntToInt32(v_0.AuxInt)
17216 base := v_0.Args[0]
17217 val := v_1
17218 mem := v_2
17219 if !(is32Bit(int64(off1) + int64(off2))) {
17220 break
17221 }
17222 v.reset(OpAMD64SETBstore)
17223 v.AuxInt = int32ToAuxInt(off1 + off2)
17224 v.Aux = symToAux(sym)
17225 v.AddArg3(base, val, mem)
17226 return true
17227 }
17228
17229
17230
17231 for {
17232 off1 := auxIntToInt32(v.AuxInt)
17233 sym1 := auxToSym(v.Aux)
17234 if v_0.Op != OpAMD64LEAQ {
17235 break
17236 }
17237 off2 := auxIntToInt32(v_0.AuxInt)
17238 sym2 := auxToSym(v_0.Aux)
17239 base := v_0.Args[0]
17240 val := v_1
17241 mem := v_2
17242 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
17243 break
17244 }
17245 v.reset(OpAMD64SETBstore)
17246 v.AuxInt = int32ToAuxInt(off1 + off2)
17247 v.Aux = symToAux(mergeSym(sym1, sym2))
17248 v.AddArg3(base, val, mem)
17249 return true
17250 }
17251
17252
17253 for {
17254 off := auxIntToInt32(v.AuxInt)
17255 sym := auxToSym(v.Aux)
17256 ptr := v_0
17257 if v_1.Op != OpAMD64FlagEQ {
17258 break
17259 }
17260 mem := v_2
17261 v.reset(OpAMD64MOVBstore)
17262 v.AuxInt = int32ToAuxInt(off)
17263 v.Aux = symToAux(sym)
17264 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17265 v0.AuxInt = int32ToAuxInt(0)
17266 v.AddArg3(ptr, v0, mem)
17267 return true
17268 }
17269
17270
17271 for {
17272 off := auxIntToInt32(v.AuxInt)
17273 sym := auxToSym(v.Aux)
17274 ptr := v_0
17275 if v_1.Op != OpAMD64FlagLT_ULT {
17276 break
17277 }
17278 mem := v_2
17279 v.reset(OpAMD64MOVBstore)
17280 v.AuxInt = int32ToAuxInt(off)
17281 v.Aux = symToAux(sym)
17282 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17283 v0.AuxInt = int32ToAuxInt(1)
17284 v.AddArg3(ptr, v0, mem)
17285 return true
17286 }
17287
17288
17289 for {
17290 off := auxIntToInt32(v.AuxInt)
17291 sym := auxToSym(v.Aux)
17292 ptr := v_0
17293 if v_1.Op != OpAMD64FlagLT_UGT {
17294 break
17295 }
17296 mem := v_2
17297 v.reset(OpAMD64MOVBstore)
17298 v.AuxInt = int32ToAuxInt(off)
17299 v.Aux = symToAux(sym)
17300 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17301 v0.AuxInt = int32ToAuxInt(0)
17302 v.AddArg3(ptr, v0, mem)
17303 return true
17304 }
17305
17306
17307 for {
17308 off := auxIntToInt32(v.AuxInt)
17309 sym := auxToSym(v.Aux)
17310 ptr := v_0
17311 if v_1.Op != OpAMD64FlagGT_ULT {
17312 break
17313 }
17314 mem := v_2
17315 v.reset(OpAMD64MOVBstore)
17316 v.AuxInt = int32ToAuxInt(off)
17317 v.Aux = symToAux(sym)
17318 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17319 v0.AuxInt = int32ToAuxInt(1)
17320 v.AddArg3(ptr, v0, mem)
17321 return true
17322 }
17323
17324
17325 for {
17326 off := auxIntToInt32(v.AuxInt)
17327 sym := auxToSym(v.Aux)
17328 ptr := v_0
17329 if v_1.Op != OpAMD64FlagGT_UGT {
17330 break
17331 }
17332 mem := v_2
17333 v.reset(OpAMD64MOVBstore)
17334 v.AuxInt = int32ToAuxInt(off)
17335 v.Aux = symToAux(sym)
17336 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
17337 v0.AuxInt = int32ToAuxInt(0)
17338 v.AddArg3(ptr, v0, mem)
17339 return true
17340 }
17341 return false
17342 }
17343 func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
17344 v_0 := v.Args[0]
17345 b := v.Block
17346
17347
17348 for {
17349 if v_0.Op != OpAMD64TESTL {
17350 break
17351 }
17352 _ = v_0.Args[1]
17353 v_0_0 := v_0.Args[0]
17354 v_0_1 := v_0.Args[1]
17355 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17356 if v_0_0.Op != OpAMD64SHLL {
17357 continue
17358 }
17359 x := v_0_0.Args[1]
17360 v_0_0_0 := v_0_0.Args[0]
17361 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
17362 continue
17363 }
17364 y := v_0_1
17365 v.reset(OpAMD64SETAE)
17366 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
17367 v0.AddArg2(x, y)
17368 v.AddArg(v0)
17369 return true
17370 }
17371 break
17372 }
17373
17374
17375 for {
17376 if v_0.Op != OpAMD64TESTQ {
17377 break
17378 }
17379 _ = v_0.Args[1]
17380 v_0_0 := v_0.Args[0]
17381 v_0_1 := v_0.Args[1]
17382 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17383 if v_0_0.Op != OpAMD64SHLQ {
17384 continue
17385 }
17386 x := v_0_0.Args[1]
17387 v_0_0_0 := v_0_0.Args[0]
17388 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
17389 continue
17390 }
17391 y := v_0_1
17392 v.reset(OpAMD64SETAE)
17393 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
17394 v0.AddArg2(x, y)
17395 v.AddArg(v0)
17396 return true
17397 }
17398 break
17399 }
17400
17401
17402
17403 for {
17404 if v_0.Op != OpAMD64TESTLconst {
17405 break
17406 }
17407 c := auxIntToInt32(v_0.AuxInt)
17408 x := v_0.Args[0]
17409 if !(isUint32PowerOfTwo(int64(c))) {
17410 break
17411 }
17412 v.reset(OpAMD64SETAE)
17413 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
17414 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
17415 v0.AddArg(x)
17416 v.AddArg(v0)
17417 return true
17418 }
17419
17420
17421
17422 for {
17423 if v_0.Op != OpAMD64TESTQconst {
17424 break
17425 }
17426 c := auxIntToInt32(v_0.AuxInt)
17427 x := v_0.Args[0]
17428 if !(isUint64PowerOfTwo(int64(c))) {
17429 break
17430 }
17431 v.reset(OpAMD64SETAE)
17432 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
17433 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
17434 v0.AddArg(x)
17435 v.AddArg(v0)
17436 return true
17437 }
17438
17439
17440
17441 for {
17442 if v_0.Op != OpAMD64TESTQ {
17443 break
17444 }
17445 _ = v_0.Args[1]
17446 v_0_0 := v_0.Args[0]
17447 v_0_1 := v_0.Args[1]
17448 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17449 if v_0_0.Op != OpAMD64MOVQconst {
17450 continue
17451 }
17452 c := auxIntToInt64(v_0_0.AuxInt)
17453 x := v_0_1
17454 if !(isUint64PowerOfTwo(c)) {
17455 continue
17456 }
17457 v.reset(OpAMD64SETAE)
17458 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
17459 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
17460 v0.AddArg(x)
17461 v.AddArg(v0)
17462 return true
17463 }
17464 break
17465 }
17466
17467
17468 for {
17469 if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
17470 break
17471 }
17472 s := v_0.Args[0]
17473 if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
17474 break
17475 }
17476 v.reset(OpAMD64SETNE)
17477 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
17478 v0.AuxInt = int32ToAuxInt(0)
17479 v0.AddArg(s)
17480 v.AddArg(v0)
17481 return true
17482 }
17483
17484
17485 for {
17486 if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
17487 break
17488 }
17489 s := v_0.Args[0]
17490 if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
17491 break
17492 }
17493 v.reset(OpAMD64SETNE)
17494 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
17495 v0.AuxInt = int32ToAuxInt(0)
17496 v0.AddArg(s)
17497 v.AddArg(v0)
17498 return true
17499 }
17500
17501
17502
17503 for {
17504 if v_0.Op != OpAMD64TESTQ {
17505 break
17506 }
17507 _ = v_0.Args[1]
17508 v_0_0 := v_0.Args[0]
17509 v_0_1 := v_0.Args[1]
17510 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17511 z1 := v_0_0
17512 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
17513 continue
17514 }
17515 z1_0 := z1.Args[0]
17516 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
17517 continue
17518 }
17519 x := z1_0.Args[0]
17520 z2 := v_0_1
17521 if !(z1 == z2) {
17522 continue
17523 }
17524 v.reset(OpAMD64SETAE)
17525 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
17526 v0.AuxInt = int8ToAuxInt(63)
17527 v0.AddArg(x)
17528 v.AddArg(v0)
17529 return true
17530 }
17531 break
17532 }
17533
17534
17535
17536 for {
17537 if v_0.Op != OpAMD64TESTL {
17538 break
17539 }
17540 _ = v_0.Args[1]
17541 v_0_0 := v_0.Args[0]
17542 v_0_1 := v_0.Args[1]
17543 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17544 z1 := v_0_0
17545 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
17546 continue
17547 }
17548 z1_0 := z1.Args[0]
17549 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
17550 continue
17551 }
17552 x := z1_0.Args[0]
17553 z2 := v_0_1
17554 if !(z1 == z2) {
17555 continue
17556 }
17557 v.reset(OpAMD64SETAE)
17558 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
17559 v0.AuxInt = int8ToAuxInt(31)
17560 v0.AddArg(x)
17561 v.AddArg(v0)
17562 return true
17563 }
17564 break
17565 }
17566
17567
17568
17569 for {
17570 if v_0.Op != OpAMD64TESTQ {
17571 break
17572 }
17573 _ = v_0.Args[1]
17574 v_0_0 := v_0.Args[0]
17575 v_0_1 := v_0.Args[1]
17576 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17577 z1 := v_0_0
17578 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
17579 continue
17580 }
17581 z1_0 := z1.Args[0]
17582 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
17583 continue
17584 }
17585 x := z1_0.Args[0]
17586 z2 := v_0_1
17587 if !(z1 == z2) {
17588 continue
17589 }
17590 v.reset(OpAMD64SETAE)
17591 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
17592 v0.AuxInt = int8ToAuxInt(0)
17593 v0.AddArg(x)
17594 v.AddArg(v0)
17595 return true
17596 }
17597 break
17598 }
17599
17600
17601
17602 for {
17603 if v_0.Op != OpAMD64TESTL {
17604 break
17605 }
17606 _ = v_0.Args[1]
17607 v_0_0 := v_0.Args[0]
17608 v_0_1 := v_0.Args[1]
17609 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17610 z1 := v_0_0
17611 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
17612 continue
17613 }
17614 z1_0 := z1.Args[0]
17615 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
17616 continue
17617 }
17618 x := z1_0.Args[0]
17619 z2 := v_0_1
17620 if !(z1 == z2) {
17621 continue
17622 }
17623 v.reset(OpAMD64SETAE)
17624 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
17625 v0.AuxInt = int8ToAuxInt(0)
17626 v0.AddArg(x)
17627 v.AddArg(v0)
17628 return true
17629 }
17630 break
17631 }
17632
17633
17634
17635 for {
17636 if v_0.Op != OpAMD64TESTQ {
17637 break
17638 }
17639 _ = v_0.Args[1]
17640 v_0_0 := v_0.Args[0]
17641 v_0_1 := v_0.Args[1]
17642 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17643 z1 := v_0_0
17644 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
17645 continue
17646 }
17647 x := z1.Args[0]
17648 z2 := v_0_1
17649 if !(z1 == z2) {
17650 continue
17651 }
17652 v.reset(OpAMD64SETAE)
17653 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
17654 v0.AuxInt = int8ToAuxInt(63)
17655 v0.AddArg(x)
17656 v.AddArg(v0)
17657 return true
17658 }
17659 break
17660 }
17661
17662
17663
17664 for {
17665 if v_0.Op != OpAMD64TESTL {
17666 break
17667 }
17668 _ = v_0.Args[1]
17669 v_0_0 := v_0.Args[0]
17670 v_0_1 := v_0.Args[1]
17671 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17672 z1 := v_0_0
17673 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
17674 continue
17675 }
17676 x := z1.Args[0]
17677 z2 := v_0_1
17678 if !(z1 == z2) {
17679 continue
17680 }
17681 v.reset(OpAMD64SETAE)
17682 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
17683 v0.AuxInt = int8ToAuxInt(31)
17684 v0.AddArg(x)
17685 v.AddArg(v0)
17686 return true
17687 }
17688 break
17689 }
17690
17691
17692 for {
17693 if v_0.Op != OpAMD64InvertFlags {
17694 break
17695 }
17696 x := v_0.Args[0]
17697 v.reset(OpAMD64SETEQ)
17698 v.AddArg(x)
17699 return true
17700 }
17701
17702
17703 for {
17704 if v_0.Op != OpAMD64FlagEQ {
17705 break
17706 }
17707 v.reset(OpAMD64MOVLconst)
17708 v.AuxInt = int32ToAuxInt(1)
17709 return true
17710 }
17711
17712
17713 for {
17714 if v_0.Op != OpAMD64FlagLT_ULT {
17715 break
17716 }
17717 v.reset(OpAMD64MOVLconst)
17718 v.AuxInt = int32ToAuxInt(0)
17719 return true
17720 }
17721
17722
17723 for {
17724 if v_0.Op != OpAMD64FlagLT_UGT {
17725 break
17726 }
17727 v.reset(OpAMD64MOVLconst)
17728 v.AuxInt = int32ToAuxInt(0)
17729 return true
17730 }
17731
17732
17733 for {
17734 if v_0.Op != OpAMD64FlagGT_ULT {
17735 break
17736 }
17737 v.reset(OpAMD64MOVLconst)
17738 v.AuxInt = int32ToAuxInt(0)
17739 return true
17740 }
17741
17742
17743 for {
17744 if v_0.Op != OpAMD64FlagGT_UGT {
17745 break
17746 }
17747 v.reset(OpAMD64MOVLconst)
17748 v.AuxInt = int32ToAuxInt(0)
17749 return true
17750 }
17751
17752
17753 for {
17754 if v_0.Op != OpAMD64TESTQ {
17755 break
17756 }
17757 _ = v_0.Args[1]
17758 v_0_0 := v_0.Args[0]
17759 v_0_1 := v_0.Args[1]
17760 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17761 s := v_0_0
17762 if s.Op != OpSelect0 {
17763 continue
17764 }
17765 blsr := s.Args[0]
17766 if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
17767 continue
17768 }
17769 v.reset(OpAMD64SETEQ)
17770 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
17771 v0.AddArg(blsr)
17772 v.AddArg(v0)
17773 return true
17774 }
17775 break
17776 }
17777
17778
17779 for {
17780 if v_0.Op != OpAMD64TESTL {
17781 break
17782 }
17783 _ = v_0.Args[1]
17784 v_0_0 := v_0.Args[0]
17785 v_0_1 := v_0.Args[1]
17786 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
17787 s := v_0_0
17788 if s.Op != OpSelect0 {
17789 continue
17790 }
17791 blsr := s.Args[0]
17792 if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
17793 continue
17794 }
17795 v.reset(OpAMD64SETEQ)
17796 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
17797 v0.AddArg(blsr)
17798 v.AddArg(v0)
17799 return true
17800 }
17801 break
17802 }
17803 return false
17804 }
17805 func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
17806 v_2 := v.Args[2]
17807 v_1 := v.Args[1]
17808 v_0 := v.Args[0]
17809 b := v.Block
17810 typ := &b.Func.Config.Types
17811
17812
17813 for {
17814 off := auxIntToInt32(v.AuxInt)
17815 sym := auxToSym(v.Aux)
17816 ptr := v_0
17817 if v_1.Op != OpAMD64TESTL {
17818 break
17819 }
17820 _ = v_1.Args[1]
17821 v_1_0 := v_1.Args[0]
17822 v_1_1 := v_1.Args[1]
17823 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
17824 if v_1_0.Op != OpAMD64SHLL {
17825 continue
17826 }
17827 x := v_1_0.Args[1]
17828 v_1_0_0 := v_1_0.Args[0]
17829 if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
17830 continue
17831 }
17832 y := v_1_1
17833 mem := v_2
17834 v.reset(OpAMD64SETAEstore)
17835 v.AuxInt = int32ToAuxInt(off)
17836 v.Aux = symToAux(sym)
17837 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
17838 v0.AddArg2(x, y)
17839 v.AddArg3(ptr, v0, mem)
17840 return true
17841 }
17842 break
17843 }
17844
17845
17846 for {
17847 off := auxIntToInt32(v.AuxInt)
17848 sym := auxToSym(v.Aux)
17849 ptr := v_0
17850 if v_1.Op != OpAMD64TESTQ {
17851 break
17852 }
17853 _ = v_1.Args[1]
17854 v_1_0 := v_1.Args[0]
17855 v_1_1 := v_1.Args[1]
17856 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
17857 if v_1_0.Op != OpAMD64SHLQ {
17858 continue
17859 }
17860 x := v_1_0.Args[1]
17861 v_1_0_0 := v_1_0.Args[0]
17862 if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
17863 continue
17864 }
17865 y := v_1_1
17866 mem := v_2
17867 v.reset(OpAMD64SETAEstore)
17868 v.AuxInt = int32ToAuxInt(off)
17869 v.Aux = symToAux(sym)
17870 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
17871 v0.AddArg2(x, y)
17872 v.AddArg3(ptr, v0, mem)
17873 return true
17874 }
17875 break
17876 }
17877
17878
17879
17880 for {
17881 off := auxIntToInt32(v.AuxInt)
17882 sym := auxToSym(v.Aux)
17883 ptr := v_0
17884 if v_1.Op != OpAMD64TESTLconst {
17885 break
17886 }
17887 c := auxIntToInt32(v_1.AuxInt)
17888 x := v_1.Args[0]
17889 mem := v_2
17890 if !(isUint32PowerOfTwo(int64(c))) {
17891 break
17892 }
17893 v.reset(OpAMD64SETAEstore)
17894 v.AuxInt = int32ToAuxInt(off)
17895 v.Aux = symToAux(sym)
17896 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
17897 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
17898 v0.AddArg(x)
17899 v.AddArg3(ptr, v0, mem)
17900 return true
17901 }
17902
17903
17904
17905 for {
17906 off := auxIntToInt32(v.AuxInt)
17907 sym := auxToSym(v.Aux)
17908 ptr := v_0
17909 if v_1.Op != OpAMD64TESTQconst {
17910 break
17911 }
17912 c := auxIntToInt32(v_1.AuxInt)
17913 x := v_1.Args[0]
17914 mem := v_2
17915 if !(isUint64PowerOfTwo(int64(c))) {
17916 break
17917 }
17918 v.reset(OpAMD64SETAEstore)
17919 v.AuxInt = int32ToAuxInt(off)
17920 v.Aux = symToAux(sym)
17921 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
17922 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
17923 v0.AddArg(x)
17924 v.AddArg3(ptr, v0, mem)
17925 return true
17926 }
17927
17928
17929
17930 for {
17931 off := auxIntToInt32(v.AuxInt)
17932 sym := auxToSym(v.Aux)
17933 ptr := v_0
17934 if v_1.Op != OpAMD64TESTQ {
17935 break
17936 }
17937 _ = v_1.Args[1]
17938 v_1_0 := v_1.Args[0]
17939 v_1_1 := v_1.Args[1]
17940 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
17941 if v_1_0.Op != OpAMD64MOVQconst {
17942 continue
17943 }
17944 c := auxIntToInt64(v_1_0.AuxInt)
17945 x := v_1_1
17946 mem := v_2
17947 if !(isUint64PowerOfTwo(c)) {
17948 continue
17949 }
17950 v.reset(OpAMD64SETAEstore)
17951 v.AuxInt = int32ToAuxInt(off)
17952 v.Aux = symToAux(sym)
17953 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
17954 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
17955 v0.AddArg(x)
17956 v.AddArg3(ptr, v0, mem)
17957 return true
17958 }
17959 break
17960 }
17961
17962
17963 for {
17964 off := auxIntToInt32(v.AuxInt)
17965 sym := auxToSym(v.Aux)
17966 ptr := v_0
17967 if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
17968 break
17969 }
17970 s := v_1.Args[0]
17971 if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
17972 break
17973 }
17974 mem := v_2
17975 v.reset(OpAMD64SETNEstore)
17976 v.AuxInt = int32ToAuxInt(off)
17977 v.Aux = symToAux(sym)
17978 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
17979 v0.AuxInt = int32ToAuxInt(0)
17980 v0.AddArg(s)
17981 v.AddArg3(ptr, v0, mem)
17982 return true
17983 }
17984
17985
17986 for {
17987 off := auxIntToInt32(v.AuxInt)
17988 sym := auxToSym(v.Aux)
17989 ptr := v_0
17990 if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
17991 break
17992 }
17993 s := v_1.Args[0]
17994 if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
17995 break
17996 }
17997 mem := v_2
17998 v.reset(OpAMD64SETNEstore)
17999 v.AuxInt = int32ToAuxInt(off)
18000 v.Aux = symToAux(sym)
18001 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
18002 v0.AuxInt = int32ToAuxInt(0)
18003 v0.AddArg(s)
18004 v.AddArg3(ptr, v0, mem)
18005 return true
18006 }
18007
18008
18009
18010 for {
18011 off := auxIntToInt32(v.AuxInt)
18012 sym := auxToSym(v.Aux)
18013 ptr := v_0
18014 if v_1.Op != OpAMD64TESTQ {
18015 break
18016 }
18017 _ = v_1.Args[1]
18018 v_1_0 := v_1.Args[0]
18019 v_1_1 := v_1.Args[1]
18020 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
18021 z1 := v_1_0
18022 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
18023 continue
18024 }
18025 z1_0 := z1.Args[0]
18026 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
18027 continue
18028 }
18029 x := z1_0.Args[0]
18030 z2 := v_1_1
18031 mem := v_2
18032 if !(z1 == z2) {
18033 continue
18034 }
18035 v.reset(OpAMD64SETAEstore)
18036 v.AuxInt = int32ToAuxInt(off)
18037 v.Aux = symToAux(sym)
18038 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
18039 v0.AuxInt = int8ToAuxInt(63)
18040 v0.AddArg(x)
18041 v.AddArg3(ptr, v0, mem)
18042 return true
18043 }
18044 break
18045 }
18046
18047
18048
18049 for {
18050 off := auxIntToInt32(v.AuxInt)
18051 sym := auxToSym(v.Aux)
18052 ptr := v_0
18053 if v_1.Op != OpAMD64TESTL {
18054 break
18055 }
18056 _ = v_1.Args[1]
18057 v_1_0 := v_1.Args[0]
18058 v_1_1 := v_1.Args[1]
18059 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
18060 z1 := v_1_0
18061 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
18062 continue
18063 }
18064 z1_0 := z1.Args[0]
18065 if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
18066 continue
18067 }
18068 x := z1_0.Args[0]
18069 z2 := v_1_1
18070 mem := v_2
18071 if !(z1 == z2) {
18072 continue
18073 }
18074 v.reset(OpAMD64SETAEstore)
18075 v.AuxInt = int32ToAuxInt(off)
18076 v.Aux = symToAux(sym)
18077 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
18078 v0.AuxInt = int8ToAuxInt(31)
18079 v0.AddArg(x)
18080 v.AddArg3(ptr, v0, mem)
18081 return true
18082 }
18083 break
18084 }
18085
18086
18087
18088 for {
18089 off := auxIntToInt32(v.AuxInt)
18090 sym := auxToSym(v.Aux)
18091 ptr := v_0
18092 if v_1.Op != OpAMD64TESTQ {
18093 break
18094 }
18095 _ = v_1.Args[1]
18096 v_1_0 := v_1.Args[0]
18097 v_1_1 := v_1.Args[1]
18098 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
18099 z1 := v_1_0
18100 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
18101 continue
18102 }
18103 z1_0 := z1.Args[0]
18104 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
18105 continue
18106 }
18107 x := z1_0.Args[0]
18108 z2 := v_1_1
18109 mem := v_2
18110 if !(z1 == z2) {
18111 continue
18112 }
18113 v.reset(OpAMD64SETAEstore)
18114 v.AuxInt = int32ToAuxInt(off)
18115 v.Aux = symToAux(sym)
18116 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
18117 v0.AuxInt = int8ToAuxInt(0)
18118 v0.AddArg(x)
18119 v.AddArg3(ptr, v0, mem)
18120 return true
18121 }
18122 break
18123 }
18124
18125
18126
18127 for {
18128 off := auxIntToInt32(v.AuxInt)
18129 sym := auxToSym(v.Aux)
18130 ptr := v_0
18131 if v_1.Op != OpAMD64TESTL {
18132 break
18133 }
18134 _ = v_1.Args[1]
18135 v_1_0 := v_1.Args[0]
18136 v_1_1 := v_1.Args[1]
18137 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
18138 z1 := v_1_0
18139 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
18140 continue
18141 }
18142 z1_0 := z1.Args[0]
18143 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
18144 continue
18145 }
18146 x := z1_0.Args[0]
18147 z2 := v_1_1
18148 mem := v_2
18149 if !(z1 == z2) {
18150 continue
18151 }
18152 v.reset(OpAMD64SETAEstore)
18153 v.AuxInt = int32ToAuxInt(off)
18154 v.Aux = symToAux(sym)
18155 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
18156 v0.AuxInt = int8ToAuxInt(0)
18157 v0.AddArg(x)
18158 v.AddArg3(ptr, v0, mem)
18159 return true
18160 }
18161 break
18162 }
18163
18164
18165
18166 for {
18167 off := auxIntToInt32(v.AuxInt)
18168 sym := auxToSym(v.Aux)
18169 ptr := v_0
18170 if v_1.Op != OpAMD64TESTQ {
18171 break
18172 }
18173 _ = v_1.Args[1]
18174 v_1_0 := v_1.Args[0]
18175 v_1_1 := v_1.Args[1]
18176 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
18177 z1 := v_1_0
18178 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
18179 continue
18180 }
18181 x := z1.Args[0]
18182 z2 := v_1_1
18183 mem := v_2
18184 if !(z1 == z2) {
18185 continue
18186 }
18187 v.reset(OpAMD64SETAEstore)
18188 v.AuxInt = int32ToAuxInt(off)
18189 v.Aux = symToAux(sym)
18190 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
18191 v0.AuxInt = int8ToAuxInt(63)
18192 v0.AddArg(x)
18193 v.AddArg3(ptr, v0, mem)
18194 return true
18195 }
18196 break
18197 }
18198
18199
18200
18201 for {
18202 off := auxIntToInt32(v.AuxInt)
18203 sym := auxToSym(v.Aux)
18204 ptr := v_0
18205 if v_1.Op != OpAMD64TESTL {
18206 break
18207 }
18208 _ = v_1.Args[1]
18209 v_1_0 := v_1.Args[0]
18210 v_1_1 := v_1.Args[1]
18211 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
18212 z1 := v_1_0
18213 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
18214 continue
18215 }
18216 x := z1.Args[0]
18217 z2 := v_1_1
18218 mem := v_2
18219 if !(z1 == z2) {
18220 continue
18221 }
18222 v.reset(OpAMD64SETAEstore)
18223 v.AuxInt = int32ToAuxInt(off)
18224 v.Aux = symToAux(sym)
18225 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
18226 v0.AuxInt = int8ToAuxInt(31)
18227 v0.AddArg(x)
18228 v.AddArg3(ptr, v0, mem)
18229 return true
18230 }
18231 break
18232 }
18233
18234
18235 for {
18236 off := auxIntToInt32(v.AuxInt)
18237 sym := auxToSym(v.Aux)
18238 ptr := v_0
18239 if v_1.Op != OpAMD64InvertFlags {
18240 break
18241 }
18242 x := v_1.Args[0]
18243 mem := v_2
18244 v.reset(OpAMD64SETEQstore)
18245 v.AuxInt = int32ToAuxInt(off)
18246 v.Aux = symToAux(sym)
18247 v.AddArg3(ptr, x, mem)
18248 return true
18249 }
18250
18251
18252
18253 for {
18254 off1 := auxIntToInt32(v.AuxInt)
18255 sym := auxToSym(v.Aux)
18256 if v_0.Op != OpAMD64ADDQconst {
18257 break
18258 }
18259 off2 := auxIntToInt32(v_0.AuxInt)
18260 base := v_0.Args[0]
18261 val := v_1
18262 mem := v_2
18263 if !(is32Bit(int64(off1) + int64(off2))) {
18264 break
18265 }
18266 v.reset(OpAMD64SETEQstore)
18267 v.AuxInt = int32ToAuxInt(off1 + off2)
18268 v.Aux = symToAux(sym)
18269 v.AddArg3(base, val, mem)
18270 return true
18271 }
18272
18273
18274
18275 for {
18276 off1 := auxIntToInt32(v.AuxInt)
18277 sym1 := auxToSym(v.Aux)
18278 if v_0.Op != OpAMD64LEAQ {
18279 break
18280 }
18281 off2 := auxIntToInt32(v_0.AuxInt)
18282 sym2 := auxToSym(v_0.Aux)
18283 base := v_0.Args[0]
18284 val := v_1
18285 mem := v_2
18286 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
18287 break
18288 }
18289 v.reset(OpAMD64SETEQstore)
18290 v.AuxInt = int32ToAuxInt(off1 + off2)
18291 v.Aux = symToAux(mergeSym(sym1, sym2))
18292 v.AddArg3(base, val, mem)
18293 return true
18294 }
18295
18296
18297 for {
18298 off := auxIntToInt32(v.AuxInt)
18299 sym := auxToSym(v.Aux)
18300 ptr := v_0
18301 if v_1.Op != OpAMD64FlagEQ {
18302 break
18303 }
18304 mem := v_2
18305 v.reset(OpAMD64MOVBstore)
18306 v.AuxInt = int32ToAuxInt(off)
18307 v.Aux = symToAux(sym)
18308 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18309 v0.AuxInt = int32ToAuxInt(1)
18310 v.AddArg3(ptr, v0, mem)
18311 return true
18312 }
18313
18314
18315 for {
18316 off := auxIntToInt32(v.AuxInt)
18317 sym := auxToSym(v.Aux)
18318 ptr := v_0
18319 if v_1.Op != OpAMD64FlagLT_ULT {
18320 break
18321 }
18322 mem := v_2
18323 v.reset(OpAMD64MOVBstore)
18324 v.AuxInt = int32ToAuxInt(off)
18325 v.Aux = symToAux(sym)
18326 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18327 v0.AuxInt = int32ToAuxInt(0)
18328 v.AddArg3(ptr, v0, mem)
18329 return true
18330 }
18331
18332
18333 for {
18334 off := auxIntToInt32(v.AuxInt)
18335 sym := auxToSym(v.Aux)
18336 ptr := v_0
18337 if v_1.Op != OpAMD64FlagLT_UGT {
18338 break
18339 }
18340 mem := v_2
18341 v.reset(OpAMD64MOVBstore)
18342 v.AuxInt = int32ToAuxInt(off)
18343 v.Aux = symToAux(sym)
18344 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18345 v0.AuxInt = int32ToAuxInt(0)
18346 v.AddArg3(ptr, v0, mem)
18347 return true
18348 }
18349
18350
18351 for {
18352 off := auxIntToInt32(v.AuxInt)
18353 sym := auxToSym(v.Aux)
18354 ptr := v_0
18355 if v_1.Op != OpAMD64FlagGT_ULT {
18356 break
18357 }
18358 mem := v_2
18359 v.reset(OpAMD64MOVBstore)
18360 v.AuxInt = int32ToAuxInt(off)
18361 v.Aux = symToAux(sym)
18362 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18363 v0.AuxInt = int32ToAuxInt(0)
18364 v.AddArg3(ptr, v0, mem)
18365 return true
18366 }
18367
18368
18369 for {
18370 off := auxIntToInt32(v.AuxInt)
18371 sym := auxToSym(v.Aux)
18372 ptr := v_0
18373 if v_1.Op != OpAMD64FlagGT_UGT {
18374 break
18375 }
18376 mem := v_2
18377 v.reset(OpAMD64MOVBstore)
18378 v.AuxInt = int32ToAuxInt(off)
18379 v.Aux = symToAux(sym)
18380 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18381 v0.AuxInt = int32ToAuxInt(0)
18382 v.AddArg3(ptr, v0, mem)
18383 return true
18384 }
18385 return false
18386 }
18387 func rewriteValueAMD64_OpAMD64SETG(v *Value) bool {
18388 v_0 := v.Args[0]
18389
18390
18391 for {
18392 if v_0.Op != OpAMD64InvertFlags {
18393 break
18394 }
18395 x := v_0.Args[0]
18396 v.reset(OpAMD64SETL)
18397 v.AddArg(x)
18398 return true
18399 }
18400
18401
18402 for {
18403 if v_0.Op != OpAMD64FlagEQ {
18404 break
18405 }
18406 v.reset(OpAMD64MOVLconst)
18407 v.AuxInt = int32ToAuxInt(0)
18408 return true
18409 }
18410
18411
18412 for {
18413 if v_0.Op != OpAMD64FlagLT_ULT {
18414 break
18415 }
18416 v.reset(OpAMD64MOVLconst)
18417 v.AuxInt = int32ToAuxInt(0)
18418 return true
18419 }
18420
18421
18422 for {
18423 if v_0.Op != OpAMD64FlagLT_UGT {
18424 break
18425 }
18426 v.reset(OpAMD64MOVLconst)
18427 v.AuxInt = int32ToAuxInt(0)
18428 return true
18429 }
18430
18431
18432 for {
18433 if v_0.Op != OpAMD64FlagGT_ULT {
18434 break
18435 }
18436 v.reset(OpAMD64MOVLconst)
18437 v.AuxInt = int32ToAuxInt(1)
18438 return true
18439 }
18440
18441
18442 for {
18443 if v_0.Op != OpAMD64FlagGT_UGT {
18444 break
18445 }
18446 v.reset(OpAMD64MOVLconst)
18447 v.AuxInt = int32ToAuxInt(1)
18448 return true
18449 }
18450 return false
18451 }
18452 func rewriteValueAMD64_OpAMD64SETGE(v *Value) bool {
18453 v_0 := v.Args[0]
18454 b := v.Block
18455
18456
18457
18458 for {
18459 c := v_0
18460 if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
18461 break
18462 }
18463 x := c.Args[0]
18464 if !(c.Uses == 1) {
18465 break
18466 }
18467 v.reset(OpAMD64SETG)
18468 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
18469 v0.AuxInt = int32ToAuxInt(127)
18470 v0.AddArg(x)
18471 v.AddArg(v0)
18472 return true
18473 }
18474
18475
18476
18477 for {
18478 c := v_0
18479 if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
18480 break
18481 }
18482 x := c.Args[0]
18483 if !(c.Uses == 1) {
18484 break
18485 }
18486 v.reset(OpAMD64SETG)
18487 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
18488 v0.AuxInt = int32ToAuxInt(127)
18489 v0.AddArg(x)
18490 v.AddArg(v0)
18491 return true
18492 }
18493
18494
18495 for {
18496 if v_0.Op != OpAMD64InvertFlags {
18497 break
18498 }
18499 x := v_0.Args[0]
18500 v.reset(OpAMD64SETLE)
18501 v.AddArg(x)
18502 return true
18503 }
18504
18505
18506 for {
18507 if v_0.Op != OpAMD64FlagEQ {
18508 break
18509 }
18510 v.reset(OpAMD64MOVLconst)
18511 v.AuxInt = int32ToAuxInt(1)
18512 return true
18513 }
18514
18515
18516 for {
18517 if v_0.Op != OpAMD64FlagLT_ULT {
18518 break
18519 }
18520 v.reset(OpAMD64MOVLconst)
18521 v.AuxInt = int32ToAuxInt(0)
18522 return true
18523 }
18524
18525
18526 for {
18527 if v_0.Op != OpAMD64FlagLT_UGT {
18528 break
18529 }
18530 v.reset(OpAMD64MOVLconst)
18531 v.AuxInt = int32ToAuxInt(0)
18532 return true
18533 }
18534
18535
18536 for {
18537 if v_0.Op != OpAMD64FlagGT_ULT {
18538 break
18539 }
18540 v.reset(OpAMD64MOVLconst)
18541 v.AuxInt = int32ToAuxInt(1)
18542 return true
18543 }
18544
18545
18546 for {
18547 if v_0.Op != OpAMD64FlagGT_UGT {
18548 break
18549 }
18550 v.reset(OpAMD64MOVLconst)
18551 v.AuxInt = int32ToAuxInt(1)
18552 return true
18553 }
18554 return false
18555 }
18556 func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool {
18557 v_2 := v.Args[2]
18558 v_1 := v.Args[1]
18559 v_0 := v.Args[0]
18560 b := v.Block
18561 typ := &b.Func.Config.Types
18562
18563
18564 for {
18565 off := auxIntToInt32(v.AuxInt)
18566 sym := auxToSym(v.Aux)
18567 ptr := v_0
18568 if v_1.Op != OpAMD64InvertFlags {
18569 break
18570 }
18571 x := v_1.Args[0]
18572 mem := v_2
18573 v.reset(OpAMD64SETLEstore)
18574 v.AuxInt = int32ToAuxInt(off)
18575 v.Aux = symToAux(sym)
18576 v.AddArg3(ptr, x, mem)
18577 return true
18578 }
18579
18580
18581
18582 for {
18583 off1 := auxIntToInt32(v.AuxInt)
18584 sym := auxToSym(v.Aux)
18585 if v_0.Op != OpAMD64ADDQconst {
18586 break
18587 }
18588 off2 := auxIntToInt32(v_0.AuxInt)
18589 base := v_0.Args[0]
18590 val := v_1
18591 mem := v_2
18592 if !(is32Bit(int64(off1) + int64(off2))) {
18593 break
18594 }
18595 v.reset(OpAMD64SETGEstore)
18596 v.AuxInt = int32ToAuxInt(off1 + off2)
18597 v.Aux = symToAux(sym)
18598 v.AddArg3(base, val, mem)
18599 return true
18600 }
18601
18602
18603
18604 for {
18605 off1 := auxIntToInt32(v.AuxInt)
18606 sym1 := auxToSym(v.Aux)
18607 if v_0.Op != OpAMD64LEAQ {
18608 break
18609 }
18610 off2 := auxIntToInt32(v_0.AuxInt)
18611 sym2 := auxToSym(v_0.Aux)
18612 base := v_0.Args[0]
18613 val := v_1
18614 mem := v_2
18615 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
18616 break
18617 }
18618 v.reset(OpAMD64SETGEstore)
18619 v.AuxInt = int32ToAuxInt(off1 + off2)
18620 v.Aux = symToAux(mergeSym(sym1, sym2))
18621 v.AddArg3(base, val, mem)
18622 return true
18623 }
18624
18625
18626 for {
18627 off := auxIntToInt32(v.AuxInt)
18628 sym := auxToSym(v.Aux)
18629 ptr := v_0
18630 if v_1.Op != OpAMD64FlagEQ {
18631 break
18632 }
18633 mem := v_2
18634 v.reset(OpAMD64MOVBstore)
18635 v.AuxInt = int32ToAuxInt(off)
18636 v.Aux = symToAux(sym)
18637 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18638 v0.AuxInt = int32ToAuxInt(1)
18639 v.AddArg3(ptr, v0, mem)
18640 return true
18641 }
18642
18643
18644 for {
18645 off := auxIntToInt32(v.AuxInt)
18646 sym := auxToSym(v.Aux)
18647 ptr := v_0
18648 if v_1.Op != OpAMD64FlagLT_ULT {
18649 break
18650 }
18651 mem := v_2
18652 v.reset(OpAMD64MOVBstore)
18653 v.AuxInt = int32ToAuxInt(off)
18654 v.Aux = symToAux(sym)
18655 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18656 v0.AuxInt = int32ToAuxInt(0)
18657 v.AddArg3(ptr, v0, mem)
18658 return true
18659 }
18660
18661
18662 for {
18663 off := auxIntToInt32(v.AuxInt)
18664 sym := auxToSym(v.Aux)
18665 ptr := v_0
18666 if v_1.Op != OpAMD64FlagLT_UGT {
18667 break
18668 }
18669 mem := v_2
18670 v.reset(OpAMD64MOVBstore)
18671 v.AuxInt = int32ToAuxInt(off)
18672 v.Aux = symToAux(sym)
18673 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18674 v0.AuxInt = int32ToAuxInt(0)
18675 v.AddArg3(ptr, v0, mem)
18676 return true
18677 }
18678
18679
18680 for {
18681 off := auxIntToInt32(v.AuxInt)
18682 sym := auxToSym(v.Aux)
18683 ptr := v_0
18684 if v_1.Op != OpAMD64FlagGT_ULT {
18685 break
18686 }
18687 mem := v_2
18688 v.reset(OpAMD64MOVBstore)
18689 v.AuxInt = int32ToAuxInt(off)
18690 v.Aux = symToAux(sym)
18691 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18692 v0.AuxInt = int32ToAuxInt(1)
18693 v.AddArg3(ptr, v0, mem)
18694 return true
18695 }
18696
18697
18698 for {
18699 off := auxIntToInt32(v.AuxInt)
18700 sym := auxToSym(v.Aux)
18701 ptr := v_0
18702 if v_1.Op != OpAMD64FlagGT_UGT {
18703 break
18704 }
18705 mem := v_2
18706 v.reset(OpAMD64MOVBstore)
18707 v.AuxInt = int32ToAuxInt(off)
18708 v.Aux = symToAux(sym)
18709 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18710 v0.AuxInt = int32ToAuxInt(1)
18711 v.AddArg3(ptr, v0, mem)
18712 return true
18713 }
18714 return false
18715 }
18716 func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool {
18717 v_2 := v.Args[2]
18718 v_1 := v.Args[1]
18719 v_0 := v.Args[0]
18720 b := v.Block
18721 typ := &b.Func.Config.Types
18722
18723
18724 for {
18725 off := auxIntToInt32(v.AuxInt)
18726 sym := auxToSym(v.Aux)
18727 ptr := v_0
18728 if v_1.Op != OpAMD64InvertFlags {
18729 break
18730 }
18731 x := v_1.Args[0]
18732 mem := v_2
18733 v.reset(OpAMD64SETLstore)
18734 v.AuxInt = int32ToAuxInt(off)
18735 v.Aux = symToAux(sym)
18736 v.AddArg3(ptr, x, mem)
18737 return true
18738 }
18739
18740
18741
18742 for {
18743 off1 := auxIntToInt32(v.AuxInt)
18744 sym := auxToSym(v.Aux)
18745 if v_0.Op != OpAMD64ADDQconst {
18746 break
18747 }
18748 off2 := auxIntToInt32(v_0.AuxInt)
18749 base := v_0.Args[0]
18750 val := v_1
18751 mem := v_2
18752 if !(is32Bit(int64(off1) + int64(off2))) {
18753 break
18754 }
18755 v.reset(OpAMD64SETGstore)
18756 v.AuxInt = int32ToAuxInt(off1 + off2)
18757 v.Aux = symToAux(sym)
18758 v.AddArg3(base, val, mem)
18759 return true
18760 }
18761
18762
18763
18764 for {
18765 off1 := auxIntToInt32(v.AuxInt)
18766 sym1 := auxToSym(v.Aux)
18767 if v_0.Op != OpAMD64LEAQ {
18768 break
18769 }
18770 off2 := auxIntToInt32(v_0.AuxInt)
18771 sym2 := auxToSym(v_0.Aux)
18772 base := v_0.Args[0]
18773 val := v_1
18774 mem := v_2
18775 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
18776 break
18777 }
18778 v.reset(OpAMD64SETGstore)
18779 v.AuxInt = int32ToAuxInt(off1 + off2)
18780 v.Aux = symToAux(mergeSym(sym1, sym2))
18781 v.AddArg3(base, val, mem)
18782 return true
18783 }
18784
18785
18786 for {
18787 off := auxIntToInt32(v.AuxInt)
18788 sym := auxToSym(v.Aux)
18789 ptr := v_0
18790 if v_1.Op != OpAMD64FlagEQ {
18791 break
18792 }
18793 mem := v_2
18794 v.reset(OpAMD64MOVBstore)
18795 v.AuxInt = int32ToAuxInt(off)
18796 v.Aux = symToAux(sym)
18797 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18798 v0.AuxInt = int32ToAuxInt(0)
18799 v.AddArg3(ptr, v0, mem)
18800 return true
18801 }
18802
18803
18804 for {
18805 off := auxIntToInt32(v.AuxInt)
18806 sym := auxToSym(v.Aux)
18807 ptr := v_0
18808 if v_1.Op != OpAMD64FlagLT_ULT {
18809 break
18810 }
18811 mem := v_2
18812 v.reset(OpAMD64MOVBstore)
18813 v.AuxInt = int32ToAuxInt(off)
18814 v.Aux = symToAux(sym)
18815 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18816 v0.AuxInt = int32ToAuxInt(0)
18817 v.AddArg3(ptr, v0, mem)
18818 return true
18819 }
18820
18821
18822 for {
18823 off := auxIntToInt32(v.AuxInt)
18824 sym := auxToSym(v.Aux)
18825 ptr := v_0
18826 if v_1.Op != OpAMD64FlagLT_UGT {
18827 break
18828 }
18829 mem := v_2
18830 v.reset(OpAMD64MOVBstore)
18831 v.AuxInt = int32ToAuxInt(off)
18832 v.Aux = symToAux(sym)
18833 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18834 v0.AuxInt = int32ToAuxInt(0)
18835 v.AddArg3(ptr, v0, mem)
18836 return true
18837 }
18838
18839
18840 for {
18841 off := auxIntToInt32(v.AuxInt)
18842 sym := auxToSym(v.Aux)
18843 ptr := v_0
18844 if v_1.Op != OpAMD64FlagGT_ULT {
18845 break
18846 }
18847 mem := v_2
18848 v.reset(OpAMD64MOVBstore)
18849 v.AuxInt = int32ToAuxInt(off)
18850 v.Aux = symToAux(sym)
18851 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18852 v0.AuxInt = int32ToAuxInt(1)
18853 v.AddArg3(ptr, v0, mem)
18854 return true
18855 }
18856
18857
18858 for {
18859 off := auxIntToInt32(v.AuxInt)
18860 sym := auxToSym(v.Aux)
18861 ptr := v_0
18862 if v_1.Op != OpAMD64FlagGT_UGT {
18863 break
18864 }
18865 mem := v_2
18866 v.reset(OpAMD64MOVBstore)
18867 v.AuxInt = int32ToAuxInt(off)
18868 v.Aux = symToAux(sym)
18869 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
18870 v0.AuxInt = int32ToAuxInt(1)
18871 v.AddArg3(ptr, v0, mem)
18872 return true
18873 }
18874 return false
18875 }
18876 func rewriteValueAMD64_OpAMD64SETL(v *Value) bool {
18877 v_0 := v.Args[0]
18878 b := v.Block
18879
18880
18881
18882 for {
18883 c := v_0
18884 if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
18885 break
18886 }
18887 x := c.Args[0]
18888 if !(c.Uses == 1) {
18889 break
18890 }
18891 v.reset(OpAMD64SETLE)
18892 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
18893 v0.AuxInt = int32ToAuxInt(127)
18894 v0.AddArg(x)
18895 v.AddArg(v0)
18896 return true
18897 }
18898
18899
18900
18901 for {
18902 c := v_0
18903 if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
18904 break
18905 }
18906 x := c.Args[0]
18907 if !(c.Uses == 1) {
18908 break
18909 }
18910 v.reset(OpAMD64SETLE)
18911 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
18912 v0.AuxInt = int32ToAuxInt(127)
18913 v0.AddArg(x)
18914 v.AddArg(v0)
18915 return true
18916 }
18917
18918
18919 for {
18920 if v_0.Op != OpAMD64InvertFlags {
18921 break
18922 }
18923 x := v_0.Args[0]
18924 v.reset(OpAMD64SETG)
18925 v.AddArg(x)
18926 return true
18927 }
18928
18929
18930 for {
18931 if v_0.Op != OpAMD64FlagEQ {
18932 break
18933 }
18934 v.reset(OpAMD64MOVLconst)
18935 v.AuxInt = int32ToAuxInt(0)
18936 return true
18937 }
18938
18939
18940 for {
18941 if v_0.Op != OpAMD64FlagLT_ULT {
18942 break
18943 }
18944 v.reset(OpAMD64MOVLconst)
18945 v.AuxInt = int32ToAuxInt(1)
18946 return true
18947 }
18948
18949
18950 for {
18951 if v_0.Op != OpAMD64FlagLT_UGT {
18952 break
18953 }
18954 v.reset(OpAMD64MOVLconst)
18955 v.AuxInt = int32ToAuxInt(1)
18956 return true
18957 }
18958
18959
18960 for {
18961 if v_0.Op != OpAMD64FlagGT_ULT {
18962 break
18963 }
18964 v.reset(OpAMD64MOVLconst)
18965 v.AuxInt = int32ToAuxInt(0)
18966 return true
18967 }
18968
18969
18970 for {
18971 if v_0.Op != OpAMD64FlagGT_UGT {
18972 break
18973 }
18974 v.reset(OpAMD64MOVLconst)
18975 v.AuxInt = int32ToAuxInt(0)
18976 return true
18977 }
18978 return false
18979 }
18980 func rewriteValueAMD64_OpAMD64SETLE(v *Value) bool {
18981 v_0 := v.Args[0]
18982
18983
18984 for {
18985 if v_0.Op != OpAMD64InvertFlags {
18986 break
18987 }
18988 x := v_0.Args[0]
18989 v.reset(OpAMD64SETGE)
18990 v.AddArg(x)
18991 return true
18992 }
18993
18994
18995 for {
18996 if v_0.Op != OpAMD64FlagEQ {
18997 break
18998 }
18999 v.reset(OpAMD64MOVLconst)
19000 v.AuxInt = int32ToAuxInt(1)
19001 return true
19002 }
19003
19004
19005 for {
19006 if v_0.Op != OpAMD64FlagLT_ULT {
19007 break
19008 }
19009 v.reset(OpAMD64MOVLconst)
19010 v.AuxInt = int32ToAuxInt(1)
19011 return true
19012 }
19013
19014
19015 for {
19016 if v_0.Op != OpAMD64FlagLT_UGT {
19017 break
19018 }
19019 v.reset(OpAMD64MOVLconst)
19020 v.AuxInt = int32ToAuxInt(1)
19021 return true
19022 }
19023
19024
19025 for {
19026 if v_0.Op != OpAMD64FlagGT_ULT {
19027 break
19028 }
19029 v.reset(OpAMD64MOVLconst)
19030 v.AuxInt = int32ToAuxInt(0)
19031 return true
19032 }
19033
19034
19035 for {
19036 if v_0.Op != OpAMD64FlagGT_UGT {
19037 break
19038 }
19039 v.reset(OpAMD64MOVLconst)
19040 v.AuxInt = int32ToAuxInt(0)
19041 return true
19042 }
19043 return false
19044 }
19045 func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool {
19046 v_2 := v.Args[2]
19047 v_1 := v.Args[1]
19048 v_0 := v.Args[0]
19049 b := v.Block
19050 typ := &b.Func.Config.Types
19051
19052
19053 for {
19054 off := auxIntToInt32(v.AuxInt)
19055 sym := auxToSym(v.Aux)
19056 ptr := v_0
19057 if v_1.Op != OpAMD64InvertFlags {
19058 break
19059 }
19060 x := v_1.Args[0]
19061 mem := v_2
19062 v.reset(OpAMD64SETGEstore)
19063 v.AuxInt = int32ToAuxInt(off)
19064 v.Aux = symToAux(sym)
19065 v.AddArg3(ptr, x, mem)
19066 return true
19067 }
19068
19069
19070
19071 for {
19072 off1 := auxIntToInt32(v.AuxInt)
19073 sym := auxToSym(v.Aux)
19074 if v_0.Op != OpAMD64ADDQconst {
19075 break
19076 }
19077 off2 := auxIntToInt32(v_0.AuxInt)
19078 base := v_0.Args[0]
19079 val := v_1
19080 mem := v_2
19081 if !(is32Bit(int64(off1) + int64(off2))) {
19082 break
19083 }
19084 v.reset(OpAMD64SETLEstore)
19085 v.AuxInt = int32ToAuxInt(off1 + off2)
19086 v.Aux = symToAux(sym)
19087 v.AddArg3(base, val, mem)
19088 return true
19089 }
19090
19091
19092
19093 for {
19094 off1 := auxIntToInt32(v.AuxInt)
19095 sym1 := auxToSym(v.Aux)
19096 if v_0.Op != OpAMD64LEAQ {
19097 break
19098 }
19099 off2 := auxIntToInt32(v_0.AuxInt)
19100 sym2 := auxToSym(v_0.Aux)
19101 base := v_0.Args[0]
19102 val := v_1
19103 mem := v_2
19104 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
19105 break
19106 }
19107 v.reset(OpAMD64SETLEstore)
19108 v.AuxInt = int32ToAuxInt(off1 + off2)
19109 v.Aux = symToAux(mergeSym(sym1, sym2))
19110 v.AddArg3(base, val, mem)
19111 return true
19112 }
19113
19114
19115 for {
19116 off := auxIntToInt32(v.AuxInt)
19117 sym := auxToSym(v.Aux)
19118 ptr := v_0
19119 if v_1.Op != OpAMD64FlagEQ {
19120 break
19121 }
19122 mem := v_2
19123 v.reset(OpAMD64MOVBstore)
19124 v.AuxInt = int32ToAuxInt(off)
19125 v.Aux = symToAux(sym)
19126 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19127 v0.AuxInt = int32ToAuxInt(1)
19128 v.AddArg3(ptr, v0, mem)
19129 return true
19130 }
19131
19132
19133 for {
19134 off := auxIntToInt32(v.AuxInt)
19135 sym := auxToSym(v.Aux)
19136 ptr := v_0
19137 if v_1.Op != OpAMD64FlagLT_ULT {
19138 break
19139 }
19140 mem := v_2
19141 v.reset(OpAMD64MOVBstore)
19142 v.AuxInt = int32ToAuxInt(off)
19143 v.Aux = symToAux(sym)
19144 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19145 v0.AuxInt = int32ToAuxInt(1)
19146 v.AddArg3(ptr, v0, mem)
19147 return true
19148 }
19149
19150
19151 for {
19152 off := auxIntToInt32(v.AuxInt)
19153 sym := auxToSym(v.Aux)
19154 ptr := v_0
19155 if v_1.Op != OpAMD64FlagLT_UGT {
19156 break
19157 }
19158 mem := v_2
19159 v.reset(OpAMD64MOVBstore)
19160 v.AuxInt = int32ToAuxInt(off)
19161 v.Aux = symToAux(sym)
19162 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19163 v0.AuxInt = int32ToAuxInt(1)
19164 v.AddArg3(ptr, v0, mem)
19165 return true
19166 }
19167
19168
19169 for {
19170 off := auxIntToInt32(v.AuxInt)
19171 sym := auxToSym(v.Aux)
19172 ptr := v_0
19173 if v_1.Op != OpAMD64FlagGT_ULT {
19174 break
19175 }
19176 mem := v_2
19177 v.reset(OpAMD64MOVBstore)
19178 v.AuxInt = int32ToAuxInt(off)
19179 v.Aux = symToAux(sym)
19180 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19181 v0.AuxInt = int32ToAuxInt(0)
19182 v.AddArg3(ptr, v0, mem)
19183 return true
19184 }
19185
19186
19187 for {
19188 off := auxIntToInt32(v.AuxInt)
19189 sym := auxToSym(v.Aux)
19190 ptr := v_0
19191 if v_1.Op != OpAMD64FlagGT_UGT {
19192 break
19193 }
19194 mem := v_2
19195 v.reset(OpAMD64MOVBstore)
19196 v.AuxInt = int32ToAuxInt(off)
19197 v.Aux = symToAux(sym)
19198 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19199 v0.AuxInt = int32ToAuxInt(0)
19200 v.AddArg3(ptr, v0, mem)
19201 return true
19202 }
19203 return false
19204 }
19205 func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool {
19206 v_2 := v.Args[2]
19207 v_1 := v.Args[1]
19208 v_0 := v.Args[0]
19209 b := v.Block
19210 typ := &b.Func.Config.Types
19211
19212
19213 for {
19214 off := auxIntToInt32(v.AuxInt)
19215 sym := auxToSym(v.Aux)
19216 ptr := v_0
19217 if v_1.Op != OpAMD64InvertFlags {
19218 break
19219 }
19220 x := v_1.Args[0]
19221 mem := v_2
19222 v.reset(OpAMD64SETGstore)
19223 v.AuxInt = int32ToAuxInt(off)
19224 v.Aux = symToAux(sym)
19225 v.AddArg3(ptr, x, mem)
19226 return true
19227 }
19228
19229
19230
19231 for {
19232 off1 := auxIntToInt32(v.AuxInt)
19233 sym := auxToSym(v.Aux)
19234 if v_0.Op != OpAMD64ADDQconst {
19235 break
19236 }
19237 off2 := auxIntToInt32(v_0.AuxInt)
19238 base := v_0.Args[0]
19239 val := v_1
19240 mem := v_2
19241 if !(is32Bit(int64(off1) + int64(off2))) {
19242 break
19243 }
19244 v.reset(OpAMD64SETLstore)
19245 v.AuxInt = int32ToAuxInt(off1 + off2)
19246 v.Aux = symToAux(sym)
19247 v.AddArg3(base, val, mem)
19248 return true
19249 }
19250
19251
19252
19253 for {
19254 off1 := auxIntToInt32(v.AuxInt)
19255 sym1 := auxToSym(v.Aux)
19256 if v_0.Op != OpAMD64LEAQ {
19257 break
19258 }
19259 off2 := auxIntToInt32(v_0.AuxInt)
19260 sym2 := auxToSym(v_0.Aux)
19261 base := v_0.Args[0]
19262 val := v_1
19263 mem := v_2
19264 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
19265 break
19266 }
19267 v.reset(OpAMD64SETLstore)
19268 v.AuxInt = int32ToAuxInt(off1 + off2)
19269 v.Aux = symToAux(mergeSym(sym1, sym2))
19270 v.AddArg3(base, val, mem)
19271 return true
19272 }
19273
19274
19275 for {
19276 off := auxIntToInt32(v.AuxInt)
19277 sym := auxToSym(v.Aux)
19278 ptr := v_0
19279 if v_1.Op != OpAMD64FlagEQ {
19280 break
19281 }
19282 mem := v_2
19283 v.reset(OpAMD64MOVBstore)
19284 v.AuxInt = int32ToAuxInt(off)
19285 v.Aux = symToAux(sym)
19286 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19287 v0.AuxInt = int32ToAuxInt(0)
19288 v.AddArg3(ptr, v0, mem)
19289 return true
19290 }
19291
19292
19293 for {
19294 off := auxIntToInt32(v.AuxInt)
19295 sym := auxToSym(v.Aux)
19296 ptr := v_0
19297 if v_1.Op != OpAMD64FlagLT_ULT {
19298 break
19299 }
19300 mem := v_2
19301 v.reset(OpAMD64MOVBstore)
19302 v.AuxInt = int32ToAuxInt(off)
19303 v.Aux = symToAux(sym)
19304 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19305 v0.AuxInt = int32ToAuxInt(1)
19306 v.AddArg3(ptr, v0, mem)
19307 return true
19308 }
19309
19310
19311 for {
19312 off := auxIntToInt32(v.AuxInt)
19313 sym := auxToSym(v.Aux)
19314 ptr := v_0
19315 if v_1.Op != OpAMD64FlagLT_UGT {
19316 break
19317 }
19318 mem := v_2
19319 v.reset(OpAMD64MOVBstore)
19320 v.AuxInt = int32ToAuxInt(off)
19321 v.Aux = symToAux(sym)
19322 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19323 v0.AuxInt = int32ToAuxInt(1)
19324 v.AddArg3(ptr, v0, mem)
19325 return true
19326 }
19327
19328
19329 for {
19330 off := auxIntToInt32(v.AuxInt)
19331 sym := auxToSym(v.Aux)
19332 ptr := v_0
19333 if v_1.Op != OpAMD64FlagGT_ULT {
19334 break
19335 }
19336 mem := v_2
19337 v.reset(OpAMD64MOVBstore)
19338 v.AuxInt = int32ToAuxInt(off)
19339 v.Aux = symToAux(sym)
19340 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19341 v0.AuxInt = int32ToAuxInt(0)
19342 v.AddArg3(ptr, v0, mem)
19343 return true
19344 }
19345
19346
19347 for {
19348 off := auxIntToInt32(v.AuxInt)
19349 sym := auxToSym(v.Aux)
19350 ptr := v_0
19351 if v_1.Op != OpAMD64FlagGT_UGT {
19352 break
19353 }
19354 mem := v_2
19355 v.reset(OpAMD64MOVBstore)
19356 v.AuxInt = int32ToAuxInt(off)
19357 v.Aux = symToAux(sym)
19358 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19359 v0.AuxInt = int32ToAuxInt(0)
19360 v.AddArg3(ptr, v0, mem)
19361 return true
19362 }
19363 return false
19364 }
19365 func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool {
19366 v_0 := v.Args[0]
19367 b := v.Block
19368
19369
19370 for {
19371 if v_0.Op != OpAMD64TESTBconst || auxIntToInt8(v_0.AuxInt) != 1 {
19372 break
19373 }
19374 x := v_0.Args[0]
19375 v.reset(OpAMD64ANDLconst)
19376 v.AuxInt = int32ToAuxInt(1)
19377 v.AddArg(x)
19378 return true
19379 }
19380
19381
19382 for {
19383 if v_0.Op != OpAMD64TESTWconst || auxIntToInt16(v_0.AuxInt) != 1 {
19384 break
19385 }
19386 x := v_0.Args[0]
19387 v.reset(OpAMD64ANDLconst)
19388 v.AuxInt = int32ToAuxInt(1)
19389 v.AddArg(x)
19390 return true
19391 }
19392
19393
19394 for {
19395 if v_0.Op != OpAMD64TESTL {
19396 break
19397 }
19398 _ = v_0.Args[1]
19399 v_0_0 := v_0.Args[0]
19400 v_0_1 := v_0.Args[1]
19401 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19402 if v_0_0.Op != OpAMD64SHLL {
19403 continue
19404 }
19405 x := v_0_0.Args[1]
19406 v_0_0_0 := v_0_0.Args[0]
19407 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
19408 continue
19409 }
19410 y := v_0_1
19411 v.reset(OpAMD64SETB)
19412 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
19413 v0.AddArg2(x, y)
19414 v.AddArg(v0)
19415 return true
19416 }
19417 break
19418 }
19419
19420
19421 for {
19422 if v_0.Op != OpAMD64TESTQ {
19423 break
19424 }
19425 _ = v_0.Args[1]
19426 v_0_0 := v_0.Args[0]
19427 v_0_1 := v_0.Args[1]
19428 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19429 if v_0_0.Op != OpAMD64SHLQ {
19430 continue
19431 }
19432 x := v_0_0.Args[1]
19433 v_0_0_0 := v_0_0.Args[0]
19434 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
19435 continue
19436 }
19437 y := v_0_1
19438 v.reset(OpAMD64SETB)
19439 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
19440 v0.AddArg2(x, y)
19441 v.AddArg(v0)
19442 return true
19443 }
19444 break
19445 }
19446
19447
19448
19449 for {
19450 if v_0.Op != OpAMD64TESTLconst {
19451 break
19452 }
19453 c := auxIntToInt32(v_0.AuxInt)
19454 x := v_0.Args[0]
19455 if !(isUint32PowerOfTwo(int64(c))) {
19456 break
19457 }
19458 v.reset(OpAMD64SETB)
19459 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
19460 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
19461 v0.AddArg(x)
19462 v.AddArg(v0)
19463 return true
19464 }
19465
19466
19467
19468 for {
19469 if v_0.Op != OpAMD64TESTQconst {
19470 break
19471 }
19472 c := auxIntToInt32(v_0.AuxInt)
19473 x := v_0.Args[0]
19474 if !(isUint64PowerOfTwo(int64(c))) {
19475 break
19476 }
19477 v.reset(OpAMD64SETB)
19478 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
19479 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
19480 v0.AddArg(x)
19481 v.AddArg(v0)
19482 return true
19483 }
19484
19485
19486
19487 for {
19488 if v_0.Op != OpAMD64TESTQ {
19489 break
19490 }
19491 _ = v_0.Args[1]
19492 v_0_0 := v_0.Args[0]
19493 v_0_1 := v_0.Args[1]
19494 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19495 if v_0_0.Op != OpAMD64MOVQconst {
19496 continue
19497 }
19498 c := auxIntToInt64(v_0_0.AuxInt)
19499 x := v_0_1
19500 if !(isUint64PowerOfTwo(c)) {
19501 continue
19502 }
19503 v.reset(OpAMD64SETB)
19504 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
19505 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
19506 v0.AddArg(x)
19507 v.AddArg(v0)
19508 return true
19509 }
19510 break
19511 }
19512
19513
19514 for {
19515 if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
19516 break
19517 }
19518 s := v_0.Args[0]
19519 if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
19520 break
19521 }
19522 v.reset(OpAMD64SETEQ)
19523 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
19524 v0.AuxInt = int32ToAuxInt(0)
19525 v0.AddArg(s)
19526 v.AddArg(v0)
19527 return true
19528 }
19529
19530
19531 for {
19532 if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
19533 break
19534 }
19535 s := v_0.Args[0]
19536 if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
19537 break
19538 }
19539 v.reset(OpAMD64SETEQ)
19540 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
19541 v0.AuxInt = int32ToAuxInt(0)
19542 v0.AddArg(s)
19543 v.AddArg(v0)
19544 return true
19545 }
19546
19547
19548
19549 for {
19550 if v_0.Op != OpAMD64TESTQ {
19551 break
19552 }
19553 _ = v_0.Args[1]
19554 v_0_0 := v_0.Args[0]
19555 v_0_1 := v_0.Args[1]
19556 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19557 z1 := v_0_0
19558 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
19559 continue
19560 }
19561 z1_0 := z1.Args[0]
19562 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
19563 continue
19564 }
19565 x := z1_0.Args[0]
19566 z2 := v_0_1
19567 if !(z1 == z2) {
19568 continue
19569 }
19570 v.reset(OpAMD64SETB)
19571 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
19572 v0.AuxInt = int8ToAuxInt(63)
19573 v0.AddArg(x)
19574 v.AddArg(v0)
19575 return true
19576 }
19577 break
19578 }
19579
19580
19581
19582 for {
19583 if v_0.Op != OpAMD64TESTL {
19584 break
19585 }
19586 _ = v_0.Args[1]
19587 v_0_0 := v_0.Args[0]
19588 v_0_1 := v_0.Args[1]
19589 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19590 z1 := v_0_0
19591 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
19592 continue
19593 }
19594 z1_0 := z1.Args[0]
19595 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
19596 continue
19597 }
19598 x := z1_0.Args[0]
19599 z2 := v_0_1
19600 if !(z1 == z2) {
19601 continue
19602 }
19603 v.reset(OpAMD64SETB)
19604 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
19605 v0.AuxInt = int8ToAuxInt(31)
19606 v0.AddArg(x)
19607 v.AddArg(v0)
19608 return true
19609 }
19610 break
19611 }
19612
19613
19614
19615 for {
19616 if v_0.Op != OpAMD64TESTQ {
19617 break
19618 }
19619 _ = v_0.Args[1]
19620 v_0_0 := v_0.Args[0]
19621 v_0_1 := v_0.Args[1]
19622 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19623 z1 := v_0_0
19624 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
19625 continue
19626 }
19627 z1_0 := z1.Args[0]
19628 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
19629 continue
19630 }
19631 x := z1_0.Args[0]
19632 z2 := v_0_1
19633 if !(z1 == z2) {
19634 continue
19635 }
19636 v.reset(OpAMD64SETB)
19637 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
19638 v0.AuxInt = int8ToAuxInt(0)
19639 v0.AddArg(x)
19640 v.AddArg(v0)
19641 return true
19642 }
19643 break
19644 }
19645
19646
19647
19648 for {
19649 if v_0.Op != OpAMD64TESTL {
19650 break
19651 }
19652 _ = v_0.Args[1]
19653 v_0_0 := v_0.Args[0]
19654 v_0_1 := v_0.Args[1]
19655 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19656 z1 := v_0_0
19657 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
19658 continue
19659 }
19660 z1_0 := z1.Args[0]
19661 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
19662 continue
19663 }
19664 x := z1_0.Args[0]
19665 z2 := v_0_1
19666 if !(z1 == z2) {
19667 continue
19668 }
19669 v.reset(OpAMD64SETB)
19670 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
19671 v0.AuxInt = int8ToAuxInt(0)
19672 v0.AddArg(x)
19673 v.AddArg(v0)
19674 return true
19675 }
19676 break
19677 }
19678
19679
19680
19681 for {
19682 if v_0.Op != OpAMD64TESTQ {
19683 break
19684 }
19685 _ = v_0.Args[1]
19686 v_0_0 := v_0.Args[0]
19687 v_0_1 := v_0.Args[1]
19688 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19689 z1 := v_0_0
19690 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
19691 continue
19692 }
19693 x := z1.Args[0]
19694 z2 := v_0_1
19695 if !(z1 == z2) {
19696 continue
19697 }
19698 v.reset(OpAMD64SETB)
19699 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
19700 v0.AuxInt = int8ToAuxInt(63)
19701 v0.AddArg(x)
19702 v.AddArg(v0)
19703 return true
19704 }
19705 break
19706 }
19707
19708
19709
19710 for {
19711 if v_0.Op != OpAMD64TESTL {
19712 break
19713 }
19714 _ = v_0.Args[1]
19715 v_0_0 := v_0.Args[0]
19716 v_0_1 := v_0.Args[1]
19717 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19718 z1 := v_0_0
19719 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
19720 continue
19721 }
19722 x := z1.Args[0]
19723 z2 := v_0_1
19724 if !(z1 == z2) {
19725 continue
19726 }
19727 v.reset(OpAMD64SETB)
19728 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
19729 v0.AuxInt = int8ToAuxInt(31)
19730 v0.AddArg(x)
19731 v.AddArg(v0)
19732 return true
19733 }
19734 break
19735 }
19736
19737
19738 for {
19739 if v_0.Op != OpAMD64InvertFlags {
19740 break
19741 }
19742 x := v_0.Args[0]
19743 v.reset(OpAMD64SETNE)
19744 v.AddArg(x)
19745 return true
19746 }
19747
19748
19749 for {
19750 if v_0.Op != OpAMD64FlagEQ {
19751 break
19752 }
19753 v.reset(OpAMD64MOVLconst)
19754 v.AuxInt = int32ToAuxInt(0)
19755 return true
19756 }
19757
19758
19759 for {
19760 if v_0.Op != OpAMD64FlagLT_ULT {
19761 break
19762 }
19763 v.reset(OpAMD64MOVLconst)
19764 v.AuxInt = int32ToAuxInt(1)
19765 return true
19766 }
19767
19768
19769 for {
19770 if v_0.Op != OpAMD64FlagLT_UGT {
19771 break
19772 }
19773 v.reset(OpAMD64MOVLconst)
19774 v.AuxInt = int32ToAuxInt(1)
19775 return true
19776 }
19777
19778
19779 for {
19780 if v_0.Op != OpAMD64FlagGT_ULT {
19781 break
19782 }
19783 v.reset(OpAMD64MOVLconst)
19784 v.AuxInt = int32ToAuxInt(1)
19785 return true
19786 }
19787
19788
19789 for {
19790 if v_0.Op != OpAMD64FlagGT_UGT {
19791 break
19792 }
19793 v.reset(OpAMD64MOVLconst)
19794 v.AuxInt = int32ToAuxInt(1)
19795 return true
19796 }
19797
19798
19799 for {
19800 if v_0.Op != OpAMD64TESTQ {
19801 break
19802 }
19803 _ = v_0.Args[1]
19804 v_0_0 := v_0.Args[0]
19805 v_0_1 := v_0.Args[1]
19806 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19807 s := v_0_0
19808 if s.Op != OpSelect0 {
19809 continue
19810 }
19811 blsr := s.Args[0]
19812 if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
19813 continue
19814 }
19815 v.reset(OpAMD64SETNE)
19816 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
19817 v0.AddArg(blsr)
19818 v.AddArg(v0)
19819 return true
19820 }
19821 break
19822 }
19823
19824
19825 for {
19826 if v_0.Op != OpAMD64TESTL {
19827 break
19828 }
19829 _ = v_0.Args[1]
19830 v_0_0 := v_0.Args[0]
19831 v_0_1 := v_0.Args[1]
19832 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
19833 s := v_0_0
19834 if s.Op != OpSelect0 {
19835 continue
19836 }
19837 blsr := s.Args[0]
19838 if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
19839 continue
19840 }
19841 v.reset(OpAMD64SETNE)
19842 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
19843 v0.AddArg(blsr)
19844 v.AddArg(v0)
19845 return true
19846 }
19847 break
19848 }
19849 return false
19850 }
19851 func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
19852 v_2 := v.Args[2]
19853 v_1 := v.Args[1]
19854 v_0 := v.Args[0]
19855 b := v.Block
19856 typ := &b.Func.Config.Types
19857
19858
19859 for {
19860 off := auxIntToInt32(v.AuxInt)
19861 sym := auxToSym(v.Aux)
19862 ptr := v_0
19863 if v_1.Op != OpAMD64TESTL {
19864 break
19865 }
19866 _ = v_1.Args[1]
19867 v_1_0 := v_1.Args[0]
19868 v_1_1 := v_1.Args[1]
19869 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
19870 if v_1_0.Op != OpAMD64SHLL {
19871 continue
19872 }
19873 x := v_1_0.Args[1]
19874 v_1_0_0 := v_1_0.Args[0]
19875 if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
19876 continue
19877 }
19878 y := v_1_1
19879 mem := v_2
19880 v.reset(OpAMD64SETBstore)
19881 v.AuxInt = int32ToAuxInt(off)
19882 v.Aux = symToAux(sym)
19883 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
19884 v0.AddArg2(x, y)
19885 v.AddArg3(ptr, v0, mem)
19886 return true
19887 }
19888 break
19889 }
19890
19891
19892 for {
19893 off := auxIntToInt32(v.AuxInt)
19894 sym := auxToSym(v.Aux)
19895 ptr := v_0
19896 if v_1.Op != OpAMD64TESTQ {
19897 break
19898 }
19899 _ = v_1.Args[1]
19900 v_1_0 := v_1.Args[0]
19901 v_1_1 := v_1.Args[1]
19902 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
19903 if v_1_0.Op != OpAMD64SHLQ {
19904 continue
19905 }
19906 x := v_1_0.Args[1]
19907 v_1_0_0 := v_1_0.Args[0]
19908 if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
19909 continue
19910 }
19911 y := v_1_1
19912 mem := v_2
19913 v.reset(OpAMD64SETBstore)
19914 v.AuxInt = int32ToAuxInt(off)
19915 v.Aux = symToAux(sym)
19916 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
19917 v0.AddArg2(x, y)
19918 v.AddArg3(ptr, v0, mem)
19919 return true
19920 }
19921 break
19922 }
19923
19924
19925
19926 for {
19927 off := auxIntToInt32(v.AuxInt)
19928 sym := auxToSym(v.Aux)
19929 ptr := v_0
19930 if v_1.Op != OpAMD64TESTLconst {
19931 break
19932 }
19933 c := auxIntToInt32(v_1.AuxInt)
19934 x := v_1.Args[0]
19935 mem := v_2
19936 if !(isUint32PowerOfTwo(int64(c))) {
19937 break
19938 }
19939 v.reset(OpAMD64SETBstore)
19940 v.AuxInt = int32ToAuxInt(off)
19941 v.Aux = symToAux(sym)
19942 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
19943 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
19944 v0.AddArg(x)
19945 v.AddArg3(ptr, v0, mem)
19946 return true
19947 }
19948
19949
19950
19951 for {
19952 off := auxIntToInt32(v.AuxInt)
19953 sym := auxToSym(v.Aux)
19954 ptr := v_0
19955 if v_1.Op != OpAMD64TESTQconst {
19956 break
19957 }
19958 c := auxIntToInt32(v_1.AuxInt)
19959 x := v_1.Args[0]
19960 mem := v_2
19961 if !(isUint64PowerOfTwo(int64(c))) {
19962 break
19963 }
19964 v.reset(OpAMD64SETBstore)
19965 v.AuxInt = int32ToAuxInt(off)
19966 v.Aux = symToAux(sym)
19967 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
19968 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
19969 v0.AddArg(x)
19970 v.AddArg3(ptr, v0, mem)
19971 return true
19972 }
19973
19974
19975
19976 for {
19977 off := auxIntToInt32(v.AuxInt)
19978 sym := auxToSym(v.Aux)
19979 ptr := v_0
19980 if v_1.Op != OpAMD64TESTQ {
19981 break
19982 }
19983 _ = v_1.Args[1]
19984 v_1_0 := v_1.Args[0]
19985 v_1_1 := v_1.Args[1]
19986 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
19987 if v_1_0.Op != OpAMD64MOVQconst {
19988 continue
19989 }
19990 c := auxIntToInt64(v_1_0.AuxInt)
19991 x := v_1_1
19992 mem := v_2
19993 if !(isUint64PowerOfTwo(c)) {
19994 continue
19995 }
19996 v.reset(OpAMD64SETBstore)
19997 v.AuxInt = int32ToAuxInt(off)
19998 v.Aux = symToAux(sym)
19999 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
20000 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
20001 v0.AddArg(x)
20002 v.AddArg3(ptr, v0, mem)
20003 return true
20004 }
20005 break
20006 }
20007
20008
20009 for {
20010 off := auxIntToInt32(v.AuxInt)
20011 sym := auxToSym(v.Aux)
20012 ptr := v_0
20013 if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
20014 break
20015 }
20016 s := v_1.Args[0]
20017 if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
20018 break
20019 }
20020 mem := v_2
20021 v.reset(OpAMD64SETEQstore)
20022 v.AuxInt = int32ToAuxInt(off)
20023 v.Aux = symToAux(sym)
20024 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
20025 v0.AuxInt = int32ToAuxInt(0)
20026 v0.AddArg(s)
20027 v.AddArg3(ptr, v0, mem)
20028 return true
20029 }
20030
20031
20032 for {
20033 off := auxIntToInt32(v.AuxInt)
20034 sym := auxToSym(v.Aux)
20035 ptr := v_0
20036 if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
20037 break
20038 }
20039 s := v_1.Args[0]
20040 if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
20041 break
20042 }
20043 mem := v_2
20044 v.reset(OpAMD64SETEQstore)
20045 v.AuxInt = int32ToAuxInt(off)
20046 v.Aux = symToAux(sym)
20047 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
20048 v0.AuxInt = int32ToAuxInt(0)
20049 v0.AddArg(s)
20050 v.AddArg3(ptr, v0, mem)
20051 return true
20052 }
20053
20054
20055
20056 for {
20057 off := auxIntToInt32(v.AuxInt)
20058 sym := auxToSym(v.Aux)
20059 ptr := v_0
20060 if v_1.Op != OpAMD64TESTQ {
20061 break
20062 }
20063 _ = v_1.Args[1]
20064 v_1_0 := v_1.Args[0]
20065 v_1_1 := v_1.Args[1]
20066 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
20067 z1 := v_1_0
20068 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
20069 continue
20070 }
20071 z1_0 := z1.Args[0]
20072 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
20073 continue
20074 }
20075 x := z1_0.Args[0]
20076 z2 := v_1_1
20077 mem := v_2
20078 if !(z1 == z2) {
20079 continue
20080 }
20081 v.reset(OpAMD64SETBstore)
20082 v.AuxInt = int32ToAuxInt(off)
20083 v.Aux = symToAux(sym)
20084 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
20085 v0.AuxInt = int8ToAuxInt(63)
20086 v0.AddArg(x)
20087 v.AddArg3(ptr, v0, mem)
20088 return true
20089 }
20090 break
20091 }
20092
20093
20094
20095 for {
20096 off := auxIntToInt32(v.AuxInt)
20097 sym := auxToSym(v.Aux)
20098 ptr := v_0
20099 if v_1.Op != OpAMD64TESTL {
20100 break
20101 }
20102 _ = v_1.Args[1]
20103 v_1_0 := v_1.Args[0]
20104 v_1_1 := v_1.Args[1]
20105 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
20106 z1 := v_1_0
20107 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
20108 continue
20109 }
20110 z1_0 := z1.Args[0]
20111 if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
20112 continue
20113 }
20114 x := z1_0.Args[0]
20115 z2 := v_1_1
20116 mem := v_2
20117 if !(z1 == z2) {
20118 continue
20119 }
20120 v.reset(OpAMD64SETBstore)
20121 v.AuxInt = int32ToAuxInt(off)
20122 v.Aux = symToAux(sym)
20123 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
20124 v0.AuxInt = int8ToAuxInt(31)
20125 v0.AddArg(x)
20126 v.AddArg3(ptr, v0, mem)
20127 return true
20128 }
20129 break
20130 }
20131
20132
20133
20134 for {
20135 off := auxIntToInt32(v.AuxInt)
20136 sym := auxToSym(v.Aux)
20137 ptr := v_0
20138 if v_1.Op != OpAMD64TESTQ {
20139 break
20140 }
20141 _ = v_1.Args[1]
20142 v_1_0 := v_1.Args[0]
20143 v_1_1 := v_1.Args[1]
20144 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
20145 z1 := v_1_0
20146 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
20147 continue
20148 }
20149 z1_0 := z1.Args[0]
20150 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
20151 continue
20152 }
20153 x := z1_0.Args[0]
20154 z2 := v_1_1
20155 mem := v_2
20156 if !(z1 == z2) {
20157 continue
20158 }
20159 v.reset(OpAMD64SETBstore)
20160 v.AuxInt = int32ToAuxInt(off)
20161 v.Aux = symToAux(sym)
20162 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
20163 v0.AuxInt = int8ToAuxInt(0)
20164 v0.AddArg(x)
20165 v.AddArg3(ptr, v0, mem)
20166 return true
20167 }
20168 break
20169 }
20170
20171
20172
20173 for {
20174 off := auxIntToInt32(v.AuxInt)
20175 sym := auxToSym(v.Aux)
20176 ptr := v_0
20177 if v_1.Op != OpAMD64TESTL {
20178 break
20179 }
20180 _ = v_1.Args[1]
20181 v_1_0 := v_1.Args[0]
20182 v_1_1 := v_1.Args[1]
20183 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
20184 z1 := v_1_0
20185 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
20186 continue
20187 }
20188 z1_0 := z1.Args[0]
20189 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
20190 continue
20191 }
20192 x := z1_0.Args[0]
20193 z2 := v_1_1
20194 mem := v_2
20195 if !(z1 == z2) {
20196 continue
20197 }
20198 v.reset(OpAMD64SETBstore)
20199 v.AuxInt = int32ToAuxInt(off)
20200 v.Aux = symToAux(sym)
20201 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
20202 v0.AuxInt = int8ToAuxInt(0)
20203 v0.AddArg(x)
20204 v.AddArg3(ptr, v0, mem)
20205 return true
20206 }
20207 break
20208 }
20209
20210
20211
20212 for {
20213 off := auxIntToInt32(v.AuxInt)
20214 sym := auxToSym(v.Aux)
20215 ptr := v_0
20216 if v_1.Op != OpAMD64TESTQ {
20217 break
20218 }
20219 _ = v_1.Args[1]
20220 v_1_0 := v_1.Args[0]
20221 v_1_1 := v_1.Args[1]
20222 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
20223 z1 := v_1_0
20224 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
20225 continue
20226 }
20227 x := z1.Args[0]
20228 z2 := v_1_1
20229 mem := v_2
20230 if !(z1 == z2) {
20231 continue
20232 }
20233 v.reset(OpAMD64SETBstore)
20234 v.AuxInt = int32ToAuxInt(off)
20235 v.Aux = symToAux(sym)
20236 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
20237 v0.AuxInt = int8ToAuxInt(63)
20238 v0.AddArg(x)
20239 v.AddArg3(ptr, v0, mem)
20240 return true
20241 }
20242 break
20243 }
20244
20245
20246
20247 for {
20248 off := auxIntToInt32(v.AuxInt)
20249 sym := auxToSym(v.Aux)
20250 ptr := v_0
20251 if v_1.Op != OpAMD64TESTL {
20252 break
20253 }
20254 _ = v_1.Args[1]
20255 v_1_0 := v_1.Args[0]
20256 v_1_1 := v_1.Args[1]
20257 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
20258 z1 := v_1_0
20259 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
20260 continue
20261 }
20262 x := z1.Args[0]
20263 z2 := v_1_1
20264 mem := v_2
20265 if !(z1 == z2) {
20266 continue
20267 }
20268 v.reset(OpAMD64SETBstore)
20269 v.AuxInt = int32ToAuxInt(off)
20270 v.Aux = symToAux(sym)
20271 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
20272 v0.AuxInt = int8ToAuxInt(31)
20273 v0.AddArg(x)
20274 v.AddArg3(ptr, v0, mem)
20275 return true
20276 }
20277 break
20278 }
20279
20280
20281 for {
20282 off := auxIntToInt32(v.AuxInt)
20283 sym := auxToSym(v.Aux)
20284 ptr := v_0
20285 if v_1.Op != OpAMD64InvertFlags {
20286 break
20287 }
20288 x := v_1.Args[0]
20289 mem := v_2
20290 v.reset(OpAMD64SETNEstore)
20291 v.AuxInt = int32ToAuxInt(off)
20292 v.Aux = symToAux(sym)
20293 v.AddArg3(ptr, x, mem)
20294 return true
20295 }
20296
20297
20298
20299 for {
20300 off1 := auxIntToInt32(v.AuxInt)
20301 sym := auxToSym(v.Aux)
20302 if v_0.Op != OpAMD64ADDQconst {
20303 break
20304 }
20305 off2 := auxIntToInt32(v_0.AuxInt)
20306 base := v_0.Args[0]
20307 val := v_1
20308 mem := v_2
20309 if !(is32Bit(int64(off1) + int64(off2))) {
20310 break
20311 }
20312 v.reset(OpAMD64SETNEstore)
20313 v.AuxInt = int32ToAuxInt(off1 + off2)
20314 v.Aux = symToAux(sym)
20315 v.AddArg3(base, val, mem)
20316 return true
20317 }
20318
20319
20320
20321 for {
20322 off1 := auxIntToInt32(v.AuxInt)
20323 sym1 := auxToSym(v.Aux)
20324 if v_0.Op != OpAMD64LEAQ {
20325 break
20326 }
20327 off2 := auxIntToInt32(v_0.AuxInt)
20328 sym2 := auxToSym(v_0.Aux)
20329 base := v_0.Args[0]
20330 val := v_1
20331 mem := v_2
20332 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
20333 break
20334 }
20335 v.reset(OpAMD64SETNEstore)
20336 v.AuxInt = int32ToAuxInt(off1 + off2)
20337 v.Aux = symToAux(mergeSym(sym1, sym2))
20338 v.AddArg3(base, val, mem)
20339 return true
20340 }
20341
20342
20343 for {
20344 off := auxIntToInt32(v.AuxInt)
20345 sym := auxToSym(v.Aux)
20346 ptr := v_0
20347 if v_1.Op != OpAMD64FlagEQ {
20348 break
20349 }
20350 mem := v_2
20351 v.reset(OpAMD64MOVBstore)
20352 v.AuxInt = int32ToAuxInt(off)
20353 v.Aux = symToAux(sym)
20354 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
20355 v0.AuxInt = int32ToAuxInt(0)
20356 v.AddArg3(ptr, v0, mem)
20357 return true
20358 }
20359
20360
20361 for {
20362 off := auxIntToInt32(v.AuxInt)
20363 sym := auxToSym(v.Aux)
20364 ptr := v_0
20365 if v_1.Op != OpAMD64FlagLT_ULT {
20366 break
20367 }
20368 mem := v_2
20369 v.reset(OpAMD64MOVBstore)
20370 v.AuxInt = int32ToAuxInt(off)
20371 v.Aux = symToAux(sym)
20372 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
20373 v0.AuxInt = int32ToAuxInt(1)
20374 v.AddArg3(ptr, v0, mem)
20375 return true
20376 }
20377
20378
20379 for {
20380 off := auxIntToInt32(v.AuxInt)
20381 sym := auxToSym(v.Aux)
20382 ptr := v_0
20383 if v_1.Op != OpAMD64FlagLT_UGT {
20384 break
20385 }
20386 mem := v_2
20387 v.reset(OpAMD64MOVBstore)
20388 v.AuxInt = int32ToAuxInt(off)
20389 v.Aux = symToAux(sym)
20390 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
20391 v0.AuxInt = int32ToAuxInt(1)
20392 v.AddArg3(ptr, v0, mem)
20393 return true
20394 }
20395
20396
20397 for {
20398 off := auxIntToInt32(v.AuxInt)
20399 sym := auxToSym(v.Aux)
20400 ptr := v_0
20401 if v_1.Op != OpAMD64FlagGT_ULT {
20402 break
20403 }
20404 mem := v_2
20405 v.reset(OpAMD64MOVBstore)
20406 v.AuxInt = int32ToAuxInt(off)
20407 v.Aux = symToAux(sym)
20408 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
20409 v0.AuxInt = int32ToAuxInt(1)
20410 v.AddArg3(ptr, v0, mem)
20411 return true
20412 }
20413
20414
20415 for {
20416 off := auxIntToInt32(v.AuxInt)
20417 sym := auxToSym(v.Aux)
20418 ptr := v_0
20419 if v_1.Op != OpAMD64FlagGT_UGT {
20420 break
20421 }
20422 mem := v_2
20423 v.reset(OpAMD64MOVBstore)
20424 v.AuxInt = int32ToAuxInt(off)
20425 v.Aux = symToAux(sym)
20426 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
20427 v0.AuxInt = int32ToAuxInt(1)
20428 v.AddArg3(ptr, v0, mem)
20429 return true
20430 }
20431 return false
20432 }
20433 func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
20434 v_1 := v.Args[1]
20435 v_0 := v.Args[0]
20436 b := v.Block
20437
20438
20439 for {
20440 x := v_0
20441 if v_1.Op != OpAMD64MOVQconst {
20442 break
20443 }
20444 c := auxIntToInt64(v_1.AuxInt)
20445 v.reset(OpAMD64SHLLconst)
20446 v.AuxInt = int8ToAuxInt(int8(c & 31))
20447 v.AddArg(x)
20448 return true
20449 }
20450
20451
20452 for {
20453 x := v_0
20454 if v_1.Op != OpAMD64MOVLconst {
20455 break
20456 }
20457 c := auxIntToInt32(v_1.AuxInt)
20458 v.reset(OpAMD64SHLLconst)
20459 v.AuxInt = int8ToAuxInt(int8(c & 31))
20460 v.AddArg(x)
20461 return true
20462 }
20463
20464
20465
20466 for {
20467 x := v_0
20468 if v_1.Op != OpAMD64ADDQconst {
20469 break
20470 }
20471 c := auxIntToInt32(v_1.AuxInt)
20472 y := v_1.Args[0]
20473 if !(c&31 == 0) {
20474 break
20475 }
20476 v.reset(OpAMD64SHLL)
20477 v.AddArg2(x, y)
20478 return true
20479 }
20480
20481
20482
20483 for {
20484 x := v_0
20485 if v_1.Op != OpAMD64NEGQ {
20486 break
20487 }
20488 t := v_1.Type
20489 v_1_0 := v_1.Args[0]
20490 if v_1_0.Op != OpAMD64ADDQconst {
20491 break
20492 }
20493 c := auxIntToInt32(v_1_0.AuxInt)
20494 y := v_1_0.Args[0]
20495 if !(c&31 == 0) {
20496 break
20497 }
20498 v.reset(OpAMD64SHLL)
20499 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
20500 v0.AddArg(y)
20501 v.AddArg2(x, v0)
20502 return true
20503 }
20504
20505
20506
20507 for {
20508 x := v_0
20509 if v_1.Op != OpAMD64ANDQconst {
20510 break
20511 }
20512 c := auxIntToInt32(v_1.AuxInt)
20513 y := v_1.Args[0]
20514 if !(c&31 == 31) {
20515 break
20516 }
20517 v.reset(OpAMD64SHLL)
20518 v.AddArg2(x, y)
20519 return true
20520 }
20521
20522
20523
20524 for {
20525 x := v_0
20526 if v_1.Op != OpAMD64NEGQ {
20527 break
20528 }
20529 t := v_1.Type
20530 v_1_0 := v_1.Args[0]
20531 if v_1_0.Op != OpAMD64ANDQconst {
20532 break
20533 }
20534 c := auxIntToInt32(v_1_0.AuxInt)
20535 y := v_1_0.Args[0]
20536 if !(c&31 == 31) {
20537 break
20538 }
20539 v.reset(OpAMD64SHLL)
20540 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
20541 v0.AddArg(y)
20542 v.AddArg2(x, v0)
20543 return true
20544 }
20545
20546
20547
20548 for {
20549 x := v_0
20550 if v_1.Op != OpAMD64ADDLconst {
20551 break
20552 }
20553 c := auxIntToInt32(v_1.AuxInt)
20554 y := v_1.Args[0]
20555 if !(c&31 == 0) {
20556 break
20557 }
20558 v.reset(OpAMD64SHLL)
20559 v.AddArg2(x, y)
20560 return true
20561 }
20562
20563
20564
20565 for {
20566 x := v_0
20567 if v_1.Op != OpAMD64NEGL {
20568 break
20569 }
20570 t := v_1.Type
20571 v_1_0 := v_1.Args[0]
20572 if v_1_0.Op != OpAMD64ADDLconst {
20573 break
20574 }
20575 c := auxIntToInt32(v_1_0.AuxInt)
20576 y := v_1_0.Args[0]
20577 if !(c&31 == 0) {
20578 break
20579 }
20580 v.reset(OpAMD64SHLL)
20581 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
20582 v0.AddArg(y)
20583 v.AddArg2(x, v0)
20584 return true
20585 }
20586
20587
20588
20589 for {
20590 x := v_0
20591 if v_1.Op != OpAMD64ANDLconst {
20592 break
20593 }
20594 c := auxIntToInt32(v_1.AuxInt)
20595 y := v_1.Args[0]
20596 if !(c&31 == 31) {
20597 break
20598 }
20599 v.reset(OpAMD64SHLL)
20600 v.AddArg2(x, y)
20601 return true
20602 }
20603
20604
20605
20606 for {
20607 x := v_0
20608 if v_1.Op != OpAMD64NEGL {
20609 break
20610 }
20611 t := v_1.Type
20612 v_1_0 := v_1.Args[0]
20613 if v_1_0.Op != OpAMD64ANDLconst {
20614 break
20615 }
20616 c := auxIntToInt32(v_1_0.AuxInt)
20617 y := v_1_0.Args[0]
20618 if !(c&31 == 31) {
20619 break
20620 }
20621 v.reset(OpAMD64SHLL)
20622 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
20623 v0.AddArg(y)
20624 v.AddArg2(x, v0)
20625 return true
20626 }
20627
20628
20629
20630 for {
20631 l := v_0
20632 if l.Op != OpAMD64MOVLload {
20633 break
20634 }
20635 off := auxIntToInt32(l.AuxInt)
20636 sym := auxToSym(l.Aux)
20637 mem := l.Args[1]
20638 ptr := l.Args[0]
20639 x := v_1
20640 if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
20641 break
20642 }
20643 v.reset(OpAMD64SHLXLload)
20644 v.AuxInt = int32ToAuxInt(off)
20645 v.Aux = symToAux(sym)
20646 v.AddArg3(ptr, x, mem)
20647 return true
20648 }
20649 return false
20650 }
20651 func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
20652 v_0 := v.Args[0]
20653
20654
20655 for {
20656 if auxIntToInt8(v.AuxInt) != 0 {
20657 break
20658 }
20659 x := v_0
20660 v.copyOf(x)
20661 return true
20662 }
20663
20664
20665 for {
20666 if auxIntToInt8(v.AuxInt) != 1 {
20667 break
20668 }
20669 x := v_0
20670 v.reset(OpAMD64ADDL)
20671 v.AddArg2(x, x)
20672 return true
20673 }
20674
20675
20676 for {
20677 c := auxIntToInt8(v.AuxInt)
20678 if v_0.Op != OpAMD64ADDL {
20679 break
20680 }
20681 x := v_0.Args[1]
20682 if x != v_0.Args[0] {
20683 break
20684 }
20685 v.reset(OpAMD64SHLLconst)
20686 v.AuxInt = int8ToAuxInt(c + 1)
20687 v.AddArg(x)
20688 return true
20689 }
20690
20691
20692 for {
20693 d := auxIntToInt8(v.AuxInt)
20694 if v_0.Op != OpAMD64MOVLconst {
20695 break
20696 }
20697 c := auxIntToInt32(v_0.AuxInt)
20698 v.reset(OpAMD64MOVLconst)
20699 v.AuxInt = int32ToAuxInt(c << uint64(d))
20700 return true
20701 }
20702 return false
20703 }
20704 func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
20705 v_1 := v.Args[1]
20706 v_0 := v.Args[0]
20707 b := v.Block
20708
20709
20710 for {
20711 x := v_0
20712 if v_1.Op != OpAMD64MOVQconst {
20713 break
20714 }
20715 c := auxIntToInt64(v_1.AuxInt)
20716 v.reset(OpAMD64SHLQconst)
20717 v.AuxInt = int8ToAuxInt(int8(c & 63))
20718 v.AddArg(x)
20719 return true
20720 }
20721
20722
20723 for {
20724 x := v_0
20725 if v_1.Op != OpAMD64MOVLconst {
20726 break
20727 }
20728 c := auxIntToInt32(v_1.AuxInt)
20729 v.reset(OpAMD64SHLQconst)
20730 v.AuxInt = int8ToAuxInt(int8(c & 63))
20731 v.AddArg(x)
20732 return true
20733 }
20734
20735
20736
20737 for {
20738 x := v_0
20739 if v_1.Op != OpAMD64ADDQconst {
20740 break
20741 }
20742 c := auxIntToInt32(v_1.AuxInt)
20743 y := v_1.Args[0]
20744 if !(c&63 == 0) {
20745 break
20746 }
20747 v.reset(OpAMD64SHLQ)
20748 v.AddArg2(x, y)
20749 return true
20750 }
20751
20752
20753
20754 for {
20755 x := v_0
20756 if v_1.Op != OpAMD64NEGQ {
20757 break
20758 }
20759 t := v_1.Type
20760 v_1_0 := v_1.Args[0]
20761 if v_1_0.Op != OpAMD64ADDQconst {
20762 break
20763 }
20764 c := auxIntToInt32(v_1_0.AuxInt)
20765 y := v_1_0.Args[0]
20766 if !(c&63 == 0) {
20767 break
20768 }
20769 v.reset(OpAMD64SHLQ)
20770 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
20771 v0.AddArg(y)
20772 v.AddArg2(x, v0)
20773 return true
20774 }
20775
20776
20777
20778 for {
20779 x := v_0
20780 if v_1.Op != OpAMD64ANDQconst {
20781 break
20782 }
20783 c := auxIntToInt32(v_1.AuxInt)
20784 y := v_1.Args[0]
20785 if !(c&63 == 63) {
20786 break
20787 }
20788 v.reset(OpAMD64SHLQ)
20789 v.AddArg2(x, y)
20790 return true
20791 }
20792
20793
20794
20795 for {
20796 x := v_0
20797 if v_1.Op != OpAMD64NEGQ {
20798 break
20799 }
20800 t := v_1.Type
20801 v_1_0 := v_1.Args[0]
20802 if v_1_0.Op != OpAMD64ANDQconst {
20803 break
20804 }
20805 c := auxIntToInt32(v_1_0.AuxInt)
20806 y := v_1_0.Args[0]
20807 if !(c&63 == 63) {
20808 break
20809 }
20810 v.reset(OpAMD64SHLQ)
20811 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
20812 v0.AddArg(y)
20813 v.AddArg2(x, v0)
20814 return true
20815 }
20816
20817
20818
20819 for {
20820 x := v_0
20821 if v_1.Op != OpAMD64ADDLconst {
20822 break
20823 }
20824 c := auxIntToInt32(v_1.AuxInt)
20825 y := v_1.Args[0]
20826 if !(c&63 == 0) {
20827 break
20828 }
20829 v.reset(OpAMD64SHLQ)
20830 v.AddArg2(x, y)
20831 return true
20832 }
20833
20834
20835
20836 for {
20837 x := v_0
20838 if v_1.Op != OpAMD64NEGL {
20839 break
20840 }
20841 t := v_1.Type
20842 v_1_0 := v_1.Args[0]
20843 if v_1_0.Op != OpAMD64ADDLconst {
20844 break
20845 }
20846 c := auxIntToInt32(v_1_0.AuxInt)
20847 y := v_1_0.Args[0]
20848 if !(c&63 == 0) {
20849 break
20850 }
20851 v.reset(OpAMD64SHLQ)
20852 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
20853 v0.AddArg(y)
20854 v.AddArg2(x, v0)
20855 return true
20856 }
20857
20858
20859
20860 for {
20861 x := v_0
20862 if v_1.Op != OpAMD64ANDLconst {
20863 break
20864 }
20865 c := auxIntToInt32(v_1.AuxInt)
20866 y := v_1.Args[0]
20867 if !(c&63 == 63) {
20868 break
20869 }
20870 v.reset(OpAMD64SHLQ)
20871 v.AddArg2(x, y)
20872 return true
20873 }
20874
20875
20876
20877 for {
20878 x := v_0
20879 if v_1.Op != OpAMD64NEGL {
20880 break
20881 }
20882 t := v_1.Type
20883 v_1_0 := v_1.Args[0]
20884 if v_1_0.Op != OpAMD64ANDLconst {
20885 break
20886 }
20887 c := auxIntToInt32(v_1_0.AuxInt)
20888 y := v_1_0.Args[0]
20889 if !(c&63 == 63) {
20890 break
20891 }
20892 v.reset(OpAMD64SHLQ)
20893 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
20894 v0.AddArg(y)
20895 v.AddArg2(x, v0)
20896 return true
20897 }
20898
20899
20900
20901 for {
20902 l := v_0
20903 if l.Op != OpAMD64MOVQload {
20904 break
20905 }
20906 off := auxIntToInt32(l.AuxInt)
20907 sym := auxToSym(l.Aux)
20908 mem := l.Args[1]
20909 ptr := l.Args[0]
20910 x := v_1
20911 if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
20912 break
20913 }
20914 v.reset(OpAMD64SHLXQload)
20915 v.AuxInt = int32ToAuxInt(off)
20916 v.Aux = symToAux(sym)
20917 v.AddArg3(ptr, x, mem)
20918 return true
20919 }
20920 return false
20921 }
20922 func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
20923 v_0 := v.Args[0]
20924
20925
20926 for {
20927 if auxIntToInt8(v.AuxInt) != 0 {
20928 break
20929 }
20930 x := v_0
20931 v.copyOf(x)
20932 return true
20933 }
20934
20935
20936 for {
20937 if auxIntToInt8(v.AuxInt) != 1 {
20938 break
20939 }
20940 x := v_0
20941 v.reset(OpAMD64ADDQ)
20942 v.AddArg2(x, x)
20943 return true
20944 }
20945
20946
20947 for {
20948 c := auxIntToInt8(v.AuxInt)
20949 if v_0.Op != OpAMD64ADDQ {
20950 break
20951 }
20952 x := v_0.Args[1]
20953 if x != v_0.Args[0] {
20954 break
20955 }
20956 v.reset(OpAMD64SHLQconst)
20957 v.AuxInt = int8ToAuxInt(c + 1)
20958 v.AddArg(x)
20959 return true
20960 }
20961
20962
20963 for {
20964 d := auxIntToInt8(v.AuxInt)
20965 if v_0.Op != OpAMD64MOVQconst {
20966 break
20967 }
20968 c := auxIntToInt64(v_0.AuxInt)
20969 v.reset(OpAMD64MOVQconst)
20970 v.AuxInt = int64ToAuxInt(c << uint64(d))
20971 return true
20972 }
20973
20974
20975 for {
20976 d := auxIntToInt8(v.AuxInt)
20977 if v_0.Op != OpAMD64MOVLconst {
20978 break
20979 }
20980 c := auxIntToInt32(v_0.AuxInt)
20981 v.reset(OpAMD64MOVQconst)
20982 v.AuxInt = int64ToAuxInt(int64(c) << uint64(d))
20983 return true
20984 }
20985 return false
20986 }
20987 func rewriteValueAMD64_OpAMD64SHLXLload(v *Value) bool {
20988 v_2 := v.Args[2]
20989 v_1 := v.Args[1]
20990 v_0 := v.Args[0]
20991 b := v.Block
20992 typ := &b.Func.Config.Types
20993
20994
20995 for {
20996 off := auxIntToInt32(v.AuxInt)
20997 sym := auxToSym(v.Aux)
20998 ptr := v_0
20999 if v_1.Op != OpAMD64MOVLconst {
21000 break
21001 }
21002 c := auxIntToInt32(v_1.AuxInt)
21003 mem := v_2
21004 v.reset(OpAMD64SHLLconst)
21005 v.AuxInt = int8ToAuxInt(int8(c & 31))
21006 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
21007 v0.AuxInt = int32ToAuxInt(off)
21008 v0.Aux = symToAux(sym)
21009 v0.AddArg2(ptr, mem)
21010 v.AddArg(v0)
21011 return true
21012 }
21013 return false
21014 }
21015 func rewriteValueAMD64_OpAMD64SHLXQload(v *Value) bool {
21016 v_2 := v.Args[2]
21017 v_1 := v.Args[1]
21018 v_0 := v.Args[0]
21019 b := v.Block
21020 typ := &b.Func.Config.Types
21021
21022
21023 for {
21024 off := auxIntToInt32(v.AuxInt)
21025 sym := auxToSym(v.Aux)
21026 ptr := v_0
21027 if v_1.Op != OpAMD64MOVQconst {
21028 break
21029 }
21030 c := auxIntToInt64(v_1.AuxInt)
21031 mem := v_2
21032 v.reset(OpAMD64SHLQconst)
21033 v.AuxInt = int8ToAuxInt(int8(c & 63))
21034 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
21035 v0.AuxInt = int32ToAuxInt(off)
21036 v0.Aux = symToAux(sym)
21037 v0.AddArg2(ptr, mem)
21038 v.AddArg(v0)
21039 return true
21040 }
21041
21042
21043 for {
21044 off := auxIntToInt32(v.AuxInt)
21045 sym := auxToSym(v.Aux)
21046 ptr := v_0
21047 if v_1.Op != OpAMD64MOVLconst {
21048 break
21049 }
21050 c := auxIntToInt32(v_1.AuxInt)
21051 mem := v_2
21052 v.reset(OpAMD64SHLQconst)
21053 v.AuxInt = int8ToAuxInt(int8(c & 63))
21054 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
21055 v0.AuxInt = int32ToAuxInt(off)
21056 v0.Aux = symToAux(sym)
21057 v0.AddArg2(ptr, mem)
21058 v.AddArg(v0)
21059 return true
21060 }
21061 return false
21062 }
21063 func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool {
21064 v_1 := v.Args[1]
21065 v_0 := v.Args[0]
21066
21067
21068
21069 for {
21070 x := v_0
21071 if v_1.Op != OpAMD64MOVQconst {
21072 break
21073 }
21074 c := auxIntToInt64(v_1.AuxInt)
21075 if !(c&31 < 8) {
21076 break
21077 }
21078 v.reset(OpAMD64SHRBconst)
21079 v.AuxInt = int8ToAuxInt(int8(c & 31))
21080 v.AddArg(x)
21081 return true
21082 }
21083
21084
21085
21086 for {
21087 x := v_0
21088 if v_1.Op != OpAMD64MOVLconst {
21089 break
21090 }
21091 c := auxIntToInt32(v_1.AuxInt)
21092 if !(c&31 < 8) {
21093 break
21094 }
21095 v.reset(OpAMD64SHRBconst)
21096 v.AuxInt = int8ToAuxInt(int8(c & 31))
21097 v.AddArg(x)
21098 return true
21099 }
21100
21101
21102
21103 for {
21104 if v_1.Op != OpAMD64MOVQconst {
21105 break
21106 }
21107 c := auxIntToInt64(v_1.AuxInt)
21108 if !(c&31 >= 8) {
21109 break
21110 }
21111 v.reset(OpAMD64MOVLconst)
21112 v.AuxInt = int32ToAuxInt(0)
21113 return true
21114 }
21115
21116
21117
21118 for {
21119 if v_1.Op != OpAMD64MOVLconst {
21120 break
21121 }
21122 c := auxIntToInt32(v_1.AuxInt)
21123 if !(c&31 >= 8) {
21124 break
21125 }
21126 v.reset(OpAMD64MOVLconst)
21127 v.AuxInt = int32ToAuxInt(0)
21128 return true
21129 }
21130 return false
21131 }
21132 func rewriteValueAMD64_OpAMD64SHRBconst(v *Value) bool {
21133 v_0 := v.Args[0]
21134
21135
21136 for {
21137 if auxIntToInt8(v.AuxInt) != 0 {
21138 break
21139 }
21140 x := v_0
21141 v.copyOf(x)
21142 return true
21143 }
21144 return false
21145 }
21146 func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
21147 v_1 := v.Args[1]
21148 v_0 := v.Args[0]
21149 b := v.Block
21150
21151
21152 for {
21153 x := v_0
21154 if v_1.Op != OpAMD64MOVQconst {
21155 break
21156 }
21157 c := auxIntToInt64(v_1.AuxInt)
21158 v.reset(OpAMD64SHRLconst)
21159 v.AuxInt = int8ToAuxInt(int8(c & 31))
21160 v.AddArg(x)
21161 return true
21162 }
21163
21164
21165 for {
21166 x := v_0
21167 if v_1.Op != OpAMD64MOVLconst {
21168 break
21169 }
21170 c := auxIntToInt32(v_1.AuxInt)
21171 v.reset(OpAMD64SHRLconst)
21172 v.AuxInt = int8ToAuxInt(int8(c & 31))
21173 v.AddArg(x)
21174 return true
21175 }
21176
21177
21178
21179 for {
21180 x := v_0
21181 if v_1.Op != OpAMD64ADDQconst {
21182 break
21183 }
21184 c := auxIntToInt32(v_1.AuxInt)
21185 y := v_1.Args[0]
21186 if !(c&31 == 0) {
21187 break
21188 }
21189 v.reset(OpAMD64SHRL)
21190 v.AddArg2(x, y)
21191 return true
21192 }
21193
21194
21195
21196 for {
21197 x := v_0
21198 if v_1.Op != OpAMD64NEGQ {
21199 break
21200 }
21201 t := v_1.Type
21202 v_1_0 := v_1.Args[0]
21203 if v_1_0.Op != OpAMD64ADDQconst {
21204 break
21205 }
21206 c := auxIntToInt32(v_1_0.AuxInt)
21207 y := v_1_0.Args[0]
21208 if !(c&31 == 0) {
21209 break
21210 }
21211 v.reset(OpAMD64SHRL)
21212 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
21213 v0.AddArg(y)
21214 v.AddArg2(x, v0)
21215 return true
21216 }
21217
21218
21219
21220 for {
21221 x := v_0
21222 if v_1.Op != OpAMD64ANDQconst {
21223 break
21224 }
21225 c := auxIntToInt32(v_1.AuxInt)
21226 y := v_1.Args[0]
21227 if !(c&31 == 31) {
21228 break
21229 }
21230 v.reset(OpAMD64SHRL)
21231 v.AddArg2(x, y)
21232 return true
21233 }
21234
21235
21236
21237 for {
21238 x := v_0
21239 if v_1.Op != OpAMD64NEGQ {
21240 break
21241 }
21242 t := v_1.Type
21243 v_1_0 := v_1.Args[0]
21244 if v_1_0.Op != OpAMD64ANDQconst {
21245 break
21246 }
21247 c := auxIntToInt32(v_1_0.AuxInt)
21248 y := v_1_0.Args[0]
21249 if !(c&31 == 31) {
21250 break
21251 }
21252 v.reset(OpAMD64SHRL)
21253 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
21254 v0.AddArg(y)
21255 v.AddArg2(x, v0)
21256 return true
21257 }
21258
21259
21260
21261 for {
21262 x := v_0
21263 if v_1.Op != OpAMD64ADDLconst {
21264 break
21265 }
21266 c := auxIntToInt32(v_1.AuxInt)
21267 y := v_1.Args[0]
21268 if !(c&31 == 0) {
21269 break
21270 }
21271 v.reset(OpAMD64SHRL)
21272 v.AddArg2(x, y)
21273 return true
21274 }
21275
21276
21277
21278 for {
21279 x := v_0
21280 if v_1.Op != OpAMD64NEGL {
21281 break
21282 }
21283 t := v_1.Type
21284 v_1_0 := v_1.Args[0]
21285 if v_1_0.Op != OpAMD64ADDLconst {
21286 break
21287 }
21288 c := auxIntToInt32(v_1_0.AuxInt)
21289 y := v_1_0.Args[0]
21290 if !(c&31 == 0) {
21291 break
21292 }
21293 v.reset(OpAMD64SHRL)
21294 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
21295 v0.AddArg(y)
21296 v.AddArg2(x, v0)
21297 return true
21298 }
21299
21300
21301
21302 for {
21303 x := v_0
21304 if v_1.Op != OpAMD64ANDLconst {
21305 break
21306 }
21307 c := auxIntToInt32(v_1.AuxInt)
21308 y := v_1.Args[0]
21309 if !(c&31 == 31) {
21310 break
21311 }
21312 v.reset(OpAMD64SHRL)
21313 v.AddArg2(x, y)
21314 return true
21315 }
21316
21317
21318
21319 for {
21320 x := v_0
21321 if v_1.Op != OpAMD64NEGL {
21322 break
21323 }
21324 t := v_1.Type
21325 v_1_0 := v_1.Args[0]
21326 if v_1_0.Op != OpAMD64ANDLconst {
21327 break
21328 }
21329 c := auxIntToInt32(v_1_0.AuxInt)
21330 y := v_1_0.Args[0]
21331 if !(c&31 == 31) {
21332 break
21333 }
21334 v.reset(OpAMD64SHRL)
21335 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
21336 v0.AddArg(y)
21337 v.AddArg2(x, v0)
21338 return true
21339 }
21340
21341
21342
21343 for {
21344 l := v_0
21345 if l.Op != OpAMD64MOVLload {
21346 break
21347 }
21348 off := auxIntToInt32(l.AuxInt)
21349 sym := auxToSym(l.Aux)
21350 mem := l.Args[1]
21351 ptr := l.Args[0]
21352 x := v_1
21353 if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
21354 break
21355 }
21356 v.reset(OpAMD64SHRXLload)
21357 v.AuxInt = int32ToAuxInt(off)
21358 v.Aux = symToAux(sym)
21359 v.AddArg3(ptr, x, mem)
21360 return true
21361 }
21362 return false
21363 }
21364 func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool {
21365 v_0 := v.Args[0]
21366
21367
21368 for {
21369 if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64ADDL {
21370 break
21371 }
21372 x := v_0.Args[1]
21373 if x != v_0.Args[0] {
21374 break
21375 }
21376 v.reset(OpAMD64ANDLconst)
21377 v.AuxInt = int32ToAuxInt(0x7fffffff)
21378 v.AddArg(x)
21379 return true
21380 }
21381
21382
21383 for {
21384 if auxIntToInt8(v.AuxInt) != 0 {
21385 break
21386 }
21387 x := v_0
21388 v.copyOf(x)
21389 return true
21390 }
21391 return false
21392 }
21393 func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
21394 v_1 := v.Args[1]
21395 v_0 := v.Args[0]
21396 b := v.Block
21397
21398
21399 for {
21400 x := v_0
21401 if v_1.Op != OpAMD64MOVQconst {
21402 break
21403 }
21404 c := auxIntToInt64(v_1.AuxInt)
21405 v.reset(OpAMD64SHRQconst)
21406 v.AuxInt = int8ToAuxInt(int8(c & 63))
21407 v.AddArg(x)
21408 return true
21409 }
21410
21411
21412 for {
21413 x := v_0
21414 if v_1.Op != OpAMD64MOVLconst {
21415 break
21416 }
21417 c := auxIntToInt32(v_1.AuxInt)
21418 v.reset(OpAMD64SHRQconst)
21419 v.AuxInt = int8ToAuxInt(int8(c & 63))
21420 v.AddArg(x)
21421 return true
21422 }
21423
21424
21425
21426 for {
21427 x := v_0
21428 if v_1.Op != OpAMD64ADDQconst {
21429 break
21430 }
21431 c := auxIntToInt32(v_1.AuxInt)
21432 y := v_1.Args[0]
21433 if !(c&63 == 0) {
21434 break
21435 }
21436 v.reset(OpAMD64SHRQ)
21437 v.AddArg2(x, y)
21438 return true
21439 }
21440
21441
21442
21443 for {
21444 x := v_0
21445 if v_1.Op != OpAMD64NEGQ {
21446 break
21447 }
21448 t := v_1.Type
21449 v_1_0 := v_1.Args[0]
21450 if v_1_0.Op != OpAMD64ADDQconst {
21451 break
21452 }
21453 c := auxIntToInt32(v_1_0.AuxInt)
21454 y := v_1_0.Args[0]
21455 if !(c&63 == 0) {
21456 break
21457 }
21458 v.reset(OpAMD64SHRQ)
21459 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
21460 v0.AddArg(y)
21461 v.AddArg2(x, v0)
21462 return true
21463 }
21464
21465
21466
21467 for {
21468 x := v_0
21469 if v_1.Op != OpAMD64ANDQconst {
21470 break
21471 }
21472 c := auxIntToInt32(v_1.AuxInt)
21473 y := v_1.Args[0]
21474 if !(c&63 == 63) {
21475 break
21476 }
21477 v.reset(OpAMD64SHRQ)
21478 v.AddArg2(x, y)
21479 return true
21480 }
21481
21482
21483
21484 for {
21485 x := v_0
21486 if v_1.Op != OpAMD64NEGQ {
21487 break
21488 }
21489 t := v_1.Type
21490 v_1_0 := v_1.Args[0]
21491 if v_1_0.Op != OpAMD64ANDQconst {
21492 break
21493 }
21494 c := auxIntToInt32(v_1_0.AuxInt)
21495 y := v_1_0.Args[0]
21496 if !(c&63 == 63) {
21497 break
21498 }
21499 v.reset(OpAMD64SHRQ)
21500 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
21501 v0.AddArg(y)
21502 v.AddArg2(x, v0)
21503 return true
21504 }
21505
21506
21507
21508 for {
21509 x := v_0
21510 if v_1.Op != OpAMD64ADDLconst {
21511 break
21512 }
21513 c := auxIntToInt32(v_1.AuxInt)
21514 y := v_1.Args[0]
21515 if !(c&63 == 0) {
21516 break
21517 }
21518 v.reset(OpAMD64SHRQ)
21519 v.AddArg2(x, y)
21520 return true
21521 }
21522
21523
21524
21525 for {
21526 x := v_0
21527 if v_1.Op != OpAMD64NEGL {
21528 break
21529 }
21530 t := v_1.Type
21531 v_1_0 := v_1.Args[0]
21532 if v_1_0.Op != OpAMD64ADDLconst {
21533 break
21534 }
21535 c := auxIntToInt32(v_1_0.AuxInt)
21536 y := v_1_0.Args[0]
21537 if !(c&63 == 0) {
21538 break
21539 }
21540 v.reset(OpAMD64SHRQ)
21541 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
21542 v0.AddArg(y)
21543 v.AddArg2(x, v0)
21544 return true
21545 }
21546
21547
21548
21549 for {
21550 x := v_0
21551 if v_1.Op != OpAMD64ANDLconst {
21552 break
21553 }
21554 c := auxIntToInt32(v_1.AuxInt)
21555 y := v_1.Args[0]
21556 if !(c&63 == 63) {
21557 break
21558 }
21559 v.reset(OpAMD64SHRQ)
21560 v.AddArg2(x, y)
21561 return true
21562 }
21563
21564
21565
21566 for {
21567 x := v_0
21568 if v_1.Op != OpAMD64NEGL {
21569 break
21570 }
21571 t := v_1.Type
21572 v_1_0 := v_1.Args[0]
21573 if v_1_0.Op != OpAMD64ANDLconst {
21574 break
21575 }
21576 c := auxIntToInt32(v_1_0.AuxInt)
21577 y := v_1_0.Args[0]
21578 if !(c&63 == 63) {
21579 break
21580 }
21581 v.reset(OpAMD64SHRQ)
21582 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
21583 v0.AddArg(y)
21584 v.AddArg2(x, v0)
21585 return true
21586 }
21587
21588
21589
21590 for {
21591 l := v_0
21592 if l.Op != OpAMD64MOVQload {
21593 break
21594 }
21595 off := auxIntToInt32(l.AuxInt)
21596 sym := auxToSym(l.Aux)
21597 mem := l.Args[1]
21598 ptr := l.Args[0]
21599 x := v_1
21600 if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
21601 break
21602 }
21603 v.reset(OpAMD64SHRXQload)
21604 v.AuxInt = int32ToAuxInt(off)
21605 v.Aux = symToAux(sym)
21606 v.AddArg3(ptr, x, mem)
21607 return true
21608 }
21609 return false
21610 }
21611 func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool {
21612 v_0 := v.Args[0]
21613
21614
21615 for {
21616 if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64ADDQ {
21617 break
21618 }
21619 x := v_0.Args[1]
21620 if x != v_0.Args[0] {
21621 break
21622 }
21623 v.reset(OpAMD64BTRQconst)
21624 v.AuxInt = int8ToAuxInt(63)
21625 v.AddArg(x)
21626 return true
21627 }
21628
21629
21630 for {
21631 if auxIntToInt8(v.AuxInt) != 0 {
21632 break
21633 }
21634 x := v_0
21635 v.copyOf(x)
21636 return true
21637 }
21638 return false
21639 }
21640 func rewriteValueAMD64_OpAMD64SHRW(v *Value) bool {
21641 v_1 := v.Args[1]
21642 v_0 := v.Args[0]
21643
21644
21645
21646 for {
21647 x := v_0
21648 if v_1.Op != OpAMD64MOVQconst {
21649 break
21650 }
21651 c := auxIntToInt64(v_1.AuxInt)
21652 if !(c&31 < 16) {
21653 break
21654 }
21655 v.reset(OpAMD64SHRWconst)
21656 v.AuxInt = int8ToAuxInt(int8(c & 31))
21657 v.AddArg(x)
21658 return true
21659 }
21660
21661
21662
21663 for {
21664 x := v_0
21665 if v_1.Op != OpAMD64MOVLconst {
21666 break
21667 }
21668 c := auxIntToInt32(v_1.AuxInt)
21669 if !(c&31 < 16) {
21670 break
21671 }
21672 v.reset(OpAMD64SHRWconst)
21673 v.AuxInt = int8ToAuxInt(int8(c & 31))
21674 v.AddArg(x)
21675 return true
21676 }
21677
21678
21679
21680 for {
21681 if v_1.Op != OpAMD64MOVQconst {
21682 break
21683 }
21684 c := auxIntToInt64(v_1.AuxInt)
21685 if !(c&31 >= 16) {
21686 break
21687 }
21688 v.reset(OpAMD64MOVLconst)
21689 v.AuxInt = int32ToAuxInt(0)
21690 return true
21691 }
21692
21693
21694
21695 for {
21696 if v_1.Op != OpAMD64MOVLconst {
21697 break
21698 }
21699 c := auxIntToInt32(v_1.AuxInt)
21700 if !(c&31 >= 16) {
21701 break
21702 }
21703 v.reset(OpAMD64MOVLconst)
21704 v.AuxInt = int32ToAuxInt(0)
21705 return true
21706 }
21707 return false
21708 }
21709 func rewriteValueAMD64_OpAMD64SHRWconst(v *Value) bool {
21710 v_0 := v.Args[0]
21711
21712
21713 for {
21714 if auxIntToInt8(v.AuxInt) != 0 {
21715 break
21716 }
21717 x := v_0
21718 v.copyOf(x)
21719 return true
21720 }
21721 return false
21722 }
21723 func rewriteValueAMD64_OpAMD64SHRXLload(v *Value) bool {
21724 v_2 := v.Args[2]
21725 v_1 := v.Args[1]
21726 v_0 := v.Args[0]
21727 b := v.Block
21728 typ := &b.Func.Config.Types
21729
21730
21731 for {
21732 off := auxIntToInt32(v.AuxInt)
21733 sym := auxToSym(v.Aux)
21734 ptr := v_0
21735 if v_1.Op != OpAMD64MOVLconst {
21736 break
21737 }
21738 c := auxIntToInt32(v_1.AuxInt)
21739 mem := v_2
21740 v.reset(OpAMD64SHRLconst)
21741 v.AuxInt = int8ToAuxInt(int8(c & 31))
21742 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
21743 v0.AuxInt = int32ToAuxInt(off)
21744 v0.Aux = symToAux(sym)
21745 v0.AddArg2(ptr, mem)
21746 v.AddArg(v0)
21747 return true
21748 }
21749 return false
21750 }
21751 func rewriteValueAMD64_OpAMD64SHRXQload(v *Value) bool {
21752 v_2 := v.Args[2]
21753 v_1 := v.Args[1]
21754 v_0 := v.Args[0]
21755 b := v.Block
21756 typ := &b.Func.Config.Types
21757
21758
21759 for {
21760 off := auxIntToInt32(v.AuxInt)
21761 sym := auxToSym(v.Aux)
21762 ptr := v_0
21763 if v_1.Op != OpAMD64MOVQconst {
21764 break
21765 }
21766 c := auxIntToInt64(v_1.AuxInt)
21767 mem := v_2
21768 v.reset(OpAMD64SHRQconst)
21769 v.AuxInt = int8ToAuxInt(int8(c & 63))
21770 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
21771 v0.AuxInt = int32ToAuxInt(off)
21772 v0.Aux = symToAux(sym)
21773 v0.AddArg2(ptr, mem)
21774 v.AddArg(v0)
21775 return true
21776 }
21777
21778
21779 for {
21780 off := auxIntToInt32(v.AuxInt)
21781 sym := auxToSym(v.Aux)
21782 ptr := v_0
21783 if v_1.Op != OpAMD64MOVLconst {
21784 break
21785 }
21786 c := auxIntToInt32(v_1.AuxInt)
21787 mem := v_2
21788 v.reset(OpAMD64SHRQconst)
21789 v.AuxInt = int8ToAuxInt(int8(c & 63))
21790 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
21791 v0.AuxInt = int32ToAuxInt(off)
21792 v0.Aux = symToAux(sym)
21793 v0.AddArg2(ptr, mem)
21794 v.AddArg(v0)
21795 return true
21796 }
21797 return false
21798 }
21799 func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool {
21800 v_1 := v.Args[1]
21801 v_0 := v.Args[0]
21802 b := v.Block
21803
21804
21805 for {
21806 x := v_0
21807 if v_1.Op != OpAMD64MOVLconst {
21808 break
21809 }
21810 c := auxIntToInt32(v_1.AuxInt)
21811 v.reset(OpAMD64SUBLconst)
21812 v.AuxInt = int32ToAuxInt(c)
21813 v.AddArg(x)
21814 return true
21815 }
21816
21817
21818 for {
21819 if v_0.Op != OpAMD64MOVLconst {
21820 break
21821 }
21822 c := auxIntToInt32(v_0.AuxInt)
21823 x := v_1
21824 v.reset(OpAMD64NEGL)
21825 v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
21826 v0.AuxInt = int32ToAuxInt(c)
21827 v0.AddArg(x)
21828 v.AddArg(v0)
21829 return true
21830 }
21831
21832
21833 for {
21834 x := v_0
21835 if x != v_1 {
21836 break
21837 }
21838 v.reset(OpAMD64MOVLconst)
21839 v.AuxInt = int32ToAuxInt(0)
21840 return true
21841 }
21842
21843
21844
21845 for {
21846 x := v_0
21847 l := v_1
21848 if l.Op != OpAMD64MOVLload {
21849 break
21850 }
21851 off := auxIntToInt32(l.AuxInt)
21852 sym := auxToSym(l.Aux)
21853 mem := l.Args[1]
21854 ptr := l.Args[0]
21855 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
21856 break
21857 }
21858 v.reset(OpAMD64SUBLload)
21859 v.AuxInt = int32ToAuxInt(off)
21860 v.Aux = symToAux(sym)
21861 v.AddArg3(x, ptr, mem)
21862 return true
21863 }
21864 return false
21865 }
21866 func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool {
21867 v_0 := v.Args[0]
21868
21869
21870
21871 for {
21872 c := auxIntToInt32(v.AuxInt)
21873 x := v_0
21874 if !(c == 0) {
21875 break
21876 }
21877 v.copyOf(x)
21878 return true
21879 }
21880
21881
21882 for {
21883 c := auxIntToInt32(v.AuxInt)
21884 x := v_0
21885 v.reset(OpAMD64ADDLconst)
21886 v.AuxInt = int32ToAuxInt(-c)
21887 v.AddArg(x)
21888 return true
21889 }
21890 }
21891 func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool {
21892 v_2 := v.Args[2]
21893 v_1 := v.Args[1]
21894 v_0 := v.Args[0]
21895 b := v.Block
21896 typ := &b.Func.Config.Types
21897
21898
21899
21900 for {
21901 off1 := auxIntToInt32(v.AuxInt)
21902 sym := auxToSym(v.Aux)
21903 val := v_0
21904 if v_1.Op != OpAMD64ADDQconst {
21905 break
21906 }
21907 off2 := auxIntToInt32(v_1.AuxInt)
21908 base := v_1.Args[0]
21909 mem := v_2
21910 if !(is32Bit(int64(off1) + int64(off2))) {
21911 break
21912 }
21913 v.reset(OpAMD64SUBLload)
21914 v.AuxInt = int32ToAuxInt(off1 + off2)
21915 v.Aux = symToAux(sym)
21916 v.AddArg3(val, base, mem)
21917 return true
21918 }
21919
21920
21921
21922 for {
21923 off1 := auxIntToInt32(v.AuxInt)
21924 sym1 := auxToSym(v.Aux)
21925 val := v_0
21926 if v_1.Op != OpAMD64LEAQ {
21927 break
21928 }
21929 off2 := auxIntToInt32(v_1.AuxInt)
21930 sym2 := auxToSym(v_1.Aux)
21931 base := v_1.Args[0]
21932 mem := v_2
21933 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
21934 break
21935 }
21936 v.reset(OpAMD64SUBLload)
21937 v.AuxInt = int32ToAuxInt(off1 + off2)
21938 v.Aux = symToAux(mergeSym(sym1, sym2))
21939 v.AddArg3(val, base, mem)
21940 return true
21941 }
21942
21943
21944 for {
21945 off := auxIntToInt32(v.AuxInt)
21946 sym := auxToSym(v.Aux)
21947 x := v_0
21948 ptr := v_1
21949 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
21950 break
21951 }
21952 y := v_2.Args[1]
21953 if ptr != v_2.Args[0] {
21954 break
21955 }
21956 v.reset(OpAMD64SUBL)
21957 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
21958 v0.AddArg(y)
21959 v.AddArg2(x, v0)
21960 return true
21961 }
21962 return false
21963 }
21964 func rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool {
21965 v_2 := v.Args[2]
21966 v_1 := v.Args[1]
21967 v_0 := v.Args[0]
21968
21969
21970
21971 for {
21972 off1 := auxIntToInt32(v.AuxInt)
21973 sym := auxToSym(v.Aux)
21974 if v_0.Op != OpAMD64ADDQconst {
21975 break
21976 }
21977 off2 := auxIntToInt32(v_0.AuxInt)
21978 base := v_0.Args[0]
21979 val := v_1
21980 mem := v_2
21981 if !(is32Bit(int64(off1) + int64(off2))) {
21982 break
21983 }
21984 v.reset(OpAMD64SUBLmodify)
21985 v.AuxInt = int32ToAuxInt(off1 + off2)
21986 v.Aux = symToAux(sym)
21987 v.AddArg3(base, val, mem)
21988 return true
21989 }
21990
21991
21992
21993 for {
21994 off1 := auxIntToInt32(v.AuxInt)
21995 sym1 := auxToSym(v.Aux)
21996 if v_0.Op != OpAMD64LEAQ {
21997 break
21998 }
21999 off2 := auxIntToInt32(v_0.AuxInt)
22000 sym2 := auxToSym(v_0.Aux)
22001 base := v_0.Args[0]
22002 val := v_1
22003 mem := v_2
22004 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
22005 break
22006 }
22007 v.reset(OpAMD64SUBLmodify)
22008 v.AuxInt = int32ToAuxInt(off1 + off2)
22009 v.Aux = symToAux(mergeSym(sym1, sym2))
22010 v.AddArg3(base, val, mem)
22011 return true
22012 }
22013 return false
22014 }
22015 func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool {
22016 v_1 := v.Args[1]
22017 v_0 := v.Args[0]
22018 b := v.Block
22019
22020
22021
22022 for {
22023 x := v_0
22024 if v_1.Op != OpAMD64MOVQconst {
22025 break
22026 }
22027 c := auxIntToInt64(v_1.AuxInt)
22028 if !(is32Bit(c)) {
22029 break
22030 }
22031 v.reset(OpAMD64SUBQconst)
22032 v.AuxInt = int32ToAuxInt(int32(c))
22033 v.AddArg(x)
22034 return true
22035 }
22036
22037
22038
22039 for {
22040 if v_0.Op != OpAMD64MOVQconst {
22041 break
22042 }
22043 c := auxIntToInt64(v_0.AuxInt)
22044 x := v_1
22045 if !(is32Bit(c)) {
22046 break
22047 }
22048 v.reset(OpAMD64NEGQ)
22049 v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
22050 v0.AuxInt = int32ToAuxInt(int32(c))
22051 v0.AddArg(x)
22052 v.AddArg(v0)
22053 return true
22054 }
22055
22056
22057 for {
22058 x := v_0
22059 if x != v_1 {
22060 break
22061 }
22062 v.reset(OpAMD64MOVQconst)
22063 v.AuxInt = int64ToAuxInt(0)
22064 return true
22065 }
22066
22067
22068
22069 for {
22070 x := v_0
22071 l := v_1
22072 if l.Op != OpAMD64MOVQload {
22073 break
22074 }
22075 off := auxIntToInt32(l.AuxInt)
22076 sym := auxToSym(l.Aux)
22077 mem := l.Args[1]
22078 ptr := l.Args[0]
22079 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
22080 break
22081 }
22082 v.reset(OpAMD64SUBQload)
22083 v.AuxInt = int32ToAuxInt(off)
22084 v.Aux = symToAux(sym)
22085 v.AddArg3(x, ptr, mem)
22086 return true
22087 }
22088 return false
22089 }
22090 func rewriteValueAMD64_OpAMD64SUBQborrow(v *Value) bool {
22091 v_1 := v.Args[1]
22092 v_0 := v.Args[0]
22093
22094
22095
22096 for {
22097 x := v_0
22098 if v_1.Op != OpAMD64MOVQconst {
22099 break
22100 }
22101 c := auxIntToInt64(v_1.AuxInt)
22102 if !(is32Bit(c)) {
22103 break
22104 }
22105 v.reset(OpAMD64SUBQconstborrow)
22106 v.AuxInt = int32ToAuxInt(int32(c))
22107 v.AddArg(x)
22108 return true
22109 }
22110 return false
22111 }
22112 func rewriteValueAMD64_OpAMD64SUBQconst(v *Value) bool {
22113 v_0 := v.Args[0]
22114
22115
22116 for {
22117 if auxIntToInt32(v.AuxInt) != 0 {
22118 break
22119 }
22120 x := v_0
22121 v.copyOf(x)
22122 return true
22123 }
22124
22125
22126
22127 for {
22128 c := auxIntToInt32(v.AuxInt)
22129 x := v_0
22130 if !(c != -(1 << 31)) {
22131 break
22132 }
22133 v.reset(OpAMD64ADDQconst)
22134 v.AuxInt = int32ToAuxInt(-c)
22135 v.AddArg(x)
22136 return true
22137 }
22138
22139
22140 for {
22141 c := auxIntToInt32(v.AuxInt)
22142 if v_0.Op != OpAMD64MOVQconst {
22143 break
22144 }
22145 d := auxIntToInt64(v_0.AuxInt)
22146 v.reset(OpAMD64MOVQconst)
22147 v.AuxInt = int64ToAuxInt(d - int64(c))
22148 return true
22149 }
22150
22151
22152
22153 for {
22154 c := auxIntToInt32(v.AuxInt)
22155 if v_0.Op != OpAMD64SUBQconst {
22156 break
22157 }
22158 d := auxIntToInt32(v_0.AuxInt)
22159 x := v_0.Args[0]
22160 if !(is32Bit(int64(-c) - int64(d))) {
22161 break
22162 }
22163 v.reset(OpAMD64ADDQconst)
22164 v.AuxInt = int32ToAuxInt(-c - d)
22165 v.AddArg(x)
22166 return true
22167 }
22168 return false
22169 }
22170 func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool {
22171 v_2 := v.Args[2]
22172 v_1 := v.Args[1]
22173 v_0 := v.Args[0]
22174 b := v.Block
22175 typ := &b.Func.Config.Types
22176
22177
22178
22179 for {
22180 off1 := auxIntToInt32(v.AuxInt)
22181 sym := auxToSym(v.Aux)
22182 val := v_0
22183 if v_1.Op != OpAMD64ADDQconst {
22184 break
22185 }
22186 off2 := auxIntToInt32(v_1.AuxInt)
22187 base := v_1.Args[0]
22188 mem := v_2
22189 if !(is32Bit(int64(off1) + int64(off2))) {
22190 break
22191 }
22192 v.reset(OpAMD64SUBQload)
22193 v.AuxInt = int32ToAuxInt(off1 + off2)
22194 v.Aux = symToAux(sym)
22195 v.AddArg3(val, base, mem)
22196 return true
22197 }
22198
22199
22200
22201 for {
22202 off1 := auxIntToInt32(v.AuxInt)
22203 sym1 := auxToSym(v.Aux)
22204 val := v_0
22205 if v_1.Op != OpAMD64LEAQ {
22206 break
22207 }
22208 off2 := auxIntToInt32(v_1.AuxInt)
22209 sym2 := auxToSym(v_1.Aux)
22210 base := v_1.Args[0]
22211 mem := v_2
22212 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
22213 break
22214 }
22215 v.reset(OpAMD64SUBQload)
22216 v.AuxInt = int32ToAuxInt(off1 + off2)
22217 v.Aux = symToAux(mergeSym(sym1, sym2))
22218 v.AddArg3(val, base, mem)
22219 return true
22220 }
22221
22222
22223 for {
22224 off := auxIntToInt32(v.AuxInt)
22225 sym := auxToSym(v.Aux)
22226 x := v_0
22227 ptr := v_1
22228 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
22229 break
22230 }
22231 y := v_2.Args[1]
22232 if ptr != v_2.Args[0] {
22233 break
22234 }
22235 v.reset(OpAMD64SUBQ)
22236 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
22237 v0.AddArg(y)
22238 v.AddArg2(x, v0)
22239 return true
22240 }
22241 return false
22242 }
22243 func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool {
22244 v_2 := v.Args[2]
22245 v_1 := v.Args[1]
22246 v_0 := v.Args[0]
22247
22248
22249
22250 for {
22251 off1 := auxIntToInt32(v.AuxInt)
22252 sym := auxToSym(v.Aux)
22253 if v_0.Op != OpAMD64ADDQconst {
22254 break
22255 }
22256 off2 := auxIntToInt32(v_0.AuxInt)
22257 base := v_0.Args[0]
22258 val := v_1
22259 mem := v_2
22260 if !(is32Bit(int64(off1) + int64(off2))) {
22261 break
22262 }
22263 v.reset(OpAMD64SUBQmodify)
22264 v.AuxInt = int32ToAuxInt(off1 + off2)
22265 v.Aux = symToAux(sym)
22266 v.AddArg3(base, val, mem)
22267 return true
22268 }
22269
22270
22271
22272 for {
22273 off1 := auxIntToInt32(v.AuxInt)
22274 sym1 := auxToSym(v.Aux)
22275 if v_0.Op != OpAMD64LEAQ {
22276 break
22277 }
22278 off2 := auxIntToInt32(v_0.AuxInt)
22279 sym2 := auxToSym(v_0.Aux)
22280 base := v_0.Args[0]
22281 val := v_1
22282 mem := v_2
22283 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
22284 break
22285 }
22286 v.reset(OpAMD64SUBQmodify)
22287 v.AuxInt = int32ToAuxInt(off1 + off2)
22288 v.Aux = symToAux(mergeSym(sym1, sym2))
22289 v.AddArg3(base, val, mem)
22290 return true
22291 }
22292 return false
22293 }
22294 func rewriteValueAMD64_OpAMD64SUBSD(v *Value) bool {
22295 v_1 := v.Args[1]
22296 v_0 := v.Args[0]
22297
22298
22299
22300 for {
22301 x := v_0
22302 l := v_1
22303 if l.Op != OpAMD64MOVSDload {
22304 break
22305 }
22306 off := auxIntToInt32(l.AuxInt)
22307 sym := auxToSym(l.Aux)
22308 mem := l.Args[1]
22309 ptr := l.Args[0]
22310 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
22311 break
22312 }
22313 v.reset(OpAMD64SUBSDload)
22314 v.AuxInt = int32ToAuxInt(off)
22315 v.Aux = symToAux(sym)
22316 v.AddArg3(x, ptr, mem)
22317 return true
22318 }
22319 return false
22320 }
22321 func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool {
22322 v_2 := v.Args[2]
22323 v_1 := v.Args[1]
22324 v_0 := v.Args[0]
22325 b := v.Block
22326 typ := &b.Func.Config.Types
22327
22328
22329
22330 for {
22331 off1 := auxIntToInt32(v.AuxInt)
22332 sym := auxToSym(v.Aux)
22333 val := v_0
22334 if v_1.Op != OpAMD64ADDQconst {
22335 break
22336 }
22337 off2 := auxIntToInt32(v_1.AuxInt)
22338 base := v_1.Args[0]
22339 mem := v_2
22340 if !(is32Bit(int64(off1) + int64(off2))) {
22341 break
22342 }
22343 v.reset(OpAMD64SUBSDload)
22344 v.AuxInt = int32ToAuxInt(off1 + off2)
22345 v.Aux = symToAux(sym)
22346 v.AddArg3(val, base, mem)
22347 return true
22348 }
22349
22350
22351
22352 for {
22353 off1 := auxIntToInt32(v.AuxInt)
22354 sym1 := auxToSym(v.Aux)
22355 val := v_0
22356 if v_1.Op != OpAMD64LEAQ {
22357 break
22358 }
22359 off2 := auxIntToInt32(v_1.AuxInt)
22360 sym2 := auxToSym(v_1.Aux)
22361 base := v_1.Args[0]
22362 mem := v_2
22363 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
22364 break
22365 }
22366 v.reset(OpAMD64SUBSDload)
22367 v.AuxInt = int32ToAuxInt(off1 + off2)
22368 v.Aux = symToAux(mergeSym(sym1, sym2))
22369 v.AddArg3(val, base, mem)
22370 return true
22371 }
22372
22373
22374 for {
22375 off := auxIntToInt32(v.AuxInt)
22376 sym := auxToSym(v.Aux)
22377 x := v_0
22378 ptr := v_1
22379 if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
22380 break
22381 }
22382 y := v_2.Args[1]
22383 if ptr != v_2.Args[0] {
22384 break
22385 }
22386 v.reset(OpAMD64SUBSD)
22387 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
22388 v0.AddArg(y)
22389 v.AddArg2(x, v0)
22390 return true
22391 }
22392 return false
22393 }
22394 func rewriteValueAMD64_OpAMD64SUBSS(v *Value) bool {
22395 v_1 := v.Args[1]
22396 v_0 := v.Args[0]
22397
22398
22399
22400 for {
22401 x := v_0
22402 l := v_1
22403 if l.Op != OpAMD64MOVSSload {
22404 break
22405 }
22406 off := auxIntToInt32(l.AuxInt)
22407 sym := auxToSym(l.Aux)
22408 mem := l.Args[1]
22409 ptr := l.Args[0]
22410 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
22411 break
22412 }
22413 v.reset(OpAMD64SUBSSload)
22414 v.AuxInt = int32ToAuxInt(off)
22415 v.Aux = symToAux(sym)
22416 v.AddArg3(x, ptr, mem)
22417 return true
22418 }
22419 return false
22420 }
22421 func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool {
22422 v_2 := v.Args[2]
22423 v_1 := v.Args[1]
22424 v_0 := v.Args[0]
22425 b := v.Block
22426 typ := &b.Func.Config.Types
22427
22428
22429
22430 for {
22431 off1 := auxIntToInt32(v.AuxInt)
22432 sym := auxToSym(v.Aux)
22433 val := v_0
22434 if v_1.Op != OpAMD64ADDQconst {
22435 break
22436 }
22437 off2 := auxIntToInt32(v_1.AuxInt)
22438 base := v_1.Args[0]
22439 mem := v_2
22440 if !(is32Bit(int64(off1) + int64(off2))) {
22441 break
22442 }
22443 v.reset(OpAMD64SUBSSload)
22444 v.AuxInt = int32ToAuxInt(off1 + off2)
22445 v.Aux = symToAux(sym)
22446 v.AddArg3(val, base, mem)
22447 return true
22448 }
22449
22450
22451
22452 for {
22453 off1 := auxIntToInt32(v.AuxInt)
22454 sym1 := auxToSym(v.Aux)
22455 val := v_0
22456 if v_1.Op != OpAMD64LEAQ {
22457 break
22458 }
22459 off2 := auxIntToInt32(v_1.AuxInt)
22460 sym2 := auxToSym(v_1.Aux)
22461 base := v_1.Args[0]
22462 mem := v_2
22463 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
22464 break
22465 }
22466 v.reset(OpAMD64SUBSSload)
22467 v.AuxInt = int32ToAuxInt(off1 + off2)
22468 v.Aux = symToAux(mergeSym(sym1, sym2))
22469 v.AddArg3(val, base, mem)
22470 return true
22471 }
22472
22473
22474 for {
22475 off := auxIntToInt32(v.AuxInt)
22476 sym := auxToSym(v.Aux)
22477 x := v_0
22478 ptr := v_1
22479 if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
22480 break
22481 }
22482 y := v_2.Args[1]
22483 if ptr != v_2.Args[0] {
22484 break
22485 }
22486 v.reset(OpAMD64SUBSS)
22487 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
22488 v0.AddArg(y)
22489 v.AddArg2(x, v0)
22490 return true
22491 }
22492 return false
22493 }
22494 func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool {
22495 v_1 := v.Args[1]
22496 v_0 := v.Args[0]
22497 b := v.Block
22498
22499
22500 for {
22501 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22502 if v_0.Op != OpAMD64MOVLconst {
22503 continue
22504 }
22505 c := auxIntToInt32(v_0.AuxInt)
22506 x := v_1
22507 v.reset(OpAMD64TESTBconst)
22508 v.AuxInt = int8ToAuxInt(int8(c))
22509 v.AddArg(x)
22510 return true
22511 }
22512 break
22513 }
22514
22515
22516
22517 for {
22518 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22519 l := v_0
22520 if l.Op != OpAMD64MOVBload {
22521 continue
22522 }
22523 off := auxIntToInt32(l.AuxInt)
22524 sym := auxToSym(l.Aux)
22525 mem := l.Args[1]
22526 ptr := l.Args[0]
22527 l2 := v_1
22528 if !(l == l2 && l.Uses == 2 && clobber(l)) {
22529 continue
22530 }
22531 b = l.Block
22532 v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
22533 v.copyOf(v0)
22534 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
22535 v0.Aux = symToAux(sym)
22536 v0.AddArg2(ptr, mem)
22537 return true
22538 }
22539 break
22540 }
22541 return false
22542 }
22543 func rewriteValueAMD64_OpAMD64TESTBconst(v *Value) bool {
22544 v_0 := v.Args[0]
22545
22546
22547
22548 for {
22549 if auxIntToInt8(v.AuxInt) != -1 {
22550 break
22551 }
22552 x := v_0
22553 if !(x.Op != OpAMD64MOVLconst) {
22554 break
22555 }
22556 v.reset(OpAMD64TESTB)
22557 v.AddArg2(x, x)
22558 return true
22559 }
22560 return false
22561 }
22562 func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool {
22563 v_1 := v.Args[1]
22564 v_0 := v.Args[0]
22565 b := v.Block
22566
22567
22568 for {
22569 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22570 if v_0.Op != OpAMD64MOVLconst {
22571 continue
22572 }
22573 c := auxIntToInt32(v_0.AuxInt)
22574 x := v_1
22575 v.reset(OpAMD64TESTLconst)
22576 v.AuxInt = int32ToAuxInt(c)
22577 v.AddArg(x)
22578 return true
22579 }
22580 break
22581 }
22582
22583
22584
22585 for {
22586 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22587 l := v_0
22588 if l.Op != OpAMD64MOVLload {
22589 continue
22590 }
22591 off := auxIntToInt32(l.AuxInt)
22592 sym := auxToSym(l.Aux)
22593 mem := l.Args[1]
22594 ptr := l.Args[0]
22595 l2 := v_1
22596 if !(l == l2 && l.Uses == 2 && clobber(l)) {
22597 continue
22598 }
22599 b = l.Block
22600 v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
22601 v.copyOf(v0)
22602 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
22603 v0.Aux = symToAux(sym)
22604 v0.AddArg2(ptr, mem)
22605 return true
22606 }
22607 break
22608 }
22609
22610
22611
22612 for {
22613 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22614 a := v_0
22615 if a.Op != OpAMD64ANDLload {
22616 continue
22617 }
22618 off := auxIntToInt32(a.AuxInt)
22619 sym := auxToSym(a.Aux)
22620 mem := a.Args[2]
22621 x := a.Args[0]
22622 ptr := a.Args[1]
22623 if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
22624 continue
22625 }
22626 v.reset(OpAMD64TESTL)
22627 v0 := b.NewValue0(a.Pos, OpAMD64MOVLload, a.Type)
22628 v0.AuxInt = int32ToAuxInt(off)
22629 v0.Aux = symToAux(sym)
22630 v0.AddArg2(ptr, mem)
22631 v.AddArg2(v0, x)
22632 return true
22633 }
22634 break
22635 }
22636 return false
22637 }
22638 func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool {
22639 v_0 := v.Args[0]
22640
22641
22642
22643 for {
22644 c := auxIntToInt32(v.AuxInt)
22645 if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c == 0) {
22646 break
22647 }
22648 v.reset(OpAMD64FlagEQ)
22649 return true
22650 }
22651
22652
22653
22654 for {
22655 c := auxIntToInt32(v.AuxInt)
22656 if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c < 0) {
22657 break
22658 }
22659 v.reset(OpAMD64FlagLT_UGT)
22660 return true
22661 }
22662
22663
22664
22665 for {
22666 c := auxIntToInt32(v.AuxInt)
22667 if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c > 0) {
22668 break
22669 }
22670 v.reset(OpAMD64FlagGT_UGT)
22671 return true
22672 }
22673
22674
22675
22676 for {
22677 if auxIntToInt32(v.AuxInt) != -1 {
22678 break
22679 }
22680 x := v_0
22681 if !(x.Op != OpAMD64MOVLconst) {
22682 break
22683 }
22684 v.reset(OpAMD64TESTL)
22685 v.AddArg2(x, x)
22686 return true
22687 }
22688 return false
22689 }
22690 func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool {
22691 v_1 := v.Args[1]
22692 v_0 := v.Args[0]
22693 b := v.Block
22694
22695
22696
22697 for {
22698 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22699 if v_0.Op != OpAMD64MOVQconst {
22700 continue
22701 }
22702 c := auxIntToInt64(v_0.AuxInt)
22703 x := v_1
22704 if !(is32Bit(c)) {
22705 continue
22706 }
22707 v.reset(OpAMD64TESTQconst)
22708 v.AuxInt = int32ToAuxInt(int32(c))
22709 v.AddArg(x)
22710 return true
22711 }
22712 break
22713 }
22714
22715
22716
22717 for {
22718 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22719 l := v_0
22720 if l.Op != OpAMD64MOVQload {
22721 continue
22722 }
22723 off := auxIntToInt32(l.AuxInt)
22724 sym := auxToSym(l.Aux)
22725 mem := l.Args[1]
22726 ptr := l.Args[0]
22727 l2 := v_1
22728 if !(l == l2 && l.Uses == 2 && clobber(l)) {
22729 continue
22730 }
22731 b = l.Block
22732 v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
22733 v.copyOf(v0)
22734 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
22735 v0.Aux = symToAux(sym)
22736 v0.AddArg2(ptr, mem)
22737 return true
22738 }
22739 break
22740 }
22741
22742
22743
22744 for {
22745 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22746 a := v_0
22747 if a.Op != OpAMD64ANDQload {
22748 continue
22749 }
22750 off := auxIntToInt32(a.AuxInt)
22751 sym := auxToSym(a.Aux)
22752 mem := a.Args[2]
22753 x := a.Args[0]
22754 ptr := a.Args[1]
22755 if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
22756 continue
22757 }
22758 v.reset(OpAMD64TESTQ)
22759 v0 := b.NewValue0(a.Pos, OpAMD64MOVQload, a.Type)
22760 v0.AuxInt = int32ToAuxInt(off)
22761 v0.Aux = symToAux(sym)
22762 v0.AddArg2(ptr, mem)
22763 v.AddArg2(v0, x)
22764 return true
22765 }
22766 break
22767 }
22768 return false
22769 }
22770 func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool {
22771 v_0 := v.Args[0]
22772
22773
22774
22775 for {
22776 c := auxIntToInt32(v.AuxInt)
22777 if v_0.Op != OpAMD64MOVQconst {
22778 break
22779 }
22780 d := auxIntToInt64(v_0.AuxInt)
22781 if !(int64(c) == d && c == 0) {
22782 break
22783 }
22784 v.reset(OpAMD64FlagEQ)
22785 return true
22786 }
22787
22788
22789
22790 for {
22791 c := auxIntToInt32(v.AuxInt)
22792 if v_0.Op != OpAMD64MOVQconst {
22793 break
22794 }
22795 d := auxIntToInt64(v_0.AuxInt)
22796 if !(int64(c) == d && c < 0) {
22797 break
22798 }
22799 v.reset(OpAMD64FlagLT_UGT)
22800 return true
22801 }
22802
22803
22804
22805 for {
22806 c := auxIntToInt32(v.AuxInt)
22807 if v_0.Op != OpAMD64MOVQconst {
22808 break
22809 }
22810 d := auxIntToInt64(v_0.AuxInt)
22811 if !(int64(c) == d && c > 0) {
22812 break
22813 }
22814 v.reset(OpAMD64FlagGT_UGT)
22815 return true
22816 }
22817
22818
22819
22820 for {
22821 if auxIntToInt32(v.AuxInt) != -1 {
22822 break
22823 }
22824 x := v_0
22825 if !(x.Op != OpAMD64MOVQconst) {
22826 break
22827 }
22828 v.reset(OpAMD64TESTQ)
22829 v.AddArg2(x, x)
22830 return true
22831 }
22832 return false
22833 }
22834 func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool {
22835 v_1 := v.Args[1]
22836 v_0 := v.Args[0]
22837 b := v.Block
22838
22839
22840 for {
22841 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22842 if v_0.Op != OpAMD64MOVLconst {
22843 continue
22844 }
22845 c := auxIntToInt32(v_0.AuxInt)
22846 x := v_1
22847 v.reset(OpAMD64TESTWconst)
22848 v.AuxInt = int16ToAuxInt(int16(c))
22849 v.AddArg(x)
22850 return true
22851 }
22852 break
22853 }
22854
22855
22856
22857 for {
22858 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22859 l := v_0
22860 if l.Op != OpAMD64MOVWload {
22861 continue
22862 }
22863 off := auxIntToInt32(l.AuxInt)
22864 sym := auxToSym(l.Aux)
22865 mem := l.Args[1]
22866 ptr := l.Args[0]
22867 l2 := v_1
22868 if !(l == l2 && l.Uses == 2 && clobber(l)) {
22869 continue
22870 }
22871 b = l.Block
22872 v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
22873 v.copyOf(v0)
22874 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
22875 v0.Aux = symToAux(sym)
22876 v0.AddArg2(ptr, mem)
22877 return true
22878 }
22879 break
22880 }
22881 return false
22882 }
22883 func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool {
22884 v_0 := v.Args[0]
22885
22886
22887
22888 for {
22889 if auxIntToInt16(v.AuxInt) != -1 {
22890 break
22891 }
22892 x := v_0
22893 if !(x.Op != OpAMD64MOVLconst) {
22894 break
22895 }
22896 v.reset(OpAMD64TESTW)
22897 v.AddArg2(x, x)
22898 return true
22899 }
22900 return false
22901 }
22902 func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool {
22903 v_2 := v.Args[2]
22904 v_1 := v.Args[1]
22905 v_0 := v.Args[0]
22906
22907
22908
22909 for {
22910 off1 := auxIntToInt32(v.AuxInt)
22911 sym := auxToSym(v.Aux)
22912 val := v_0
22913 if v_1.Op != OpAMD64ADDQconst {
22914 break
22915 }
22916 off2 := auxIntToInt32(v_1.AuxInt)
22917 ptr := v_1.Args[0]
22918 mem := v_2
22919 if !(is32Bit(int64(off1) + int64(off2))) {
22920 break
22921 }
22922 v.reset(OpAMD64XADDLlock)
22923 v.AuxInt = int32ToAuxInt(off1 + off2)
22924 v.Aux = symToAux(sym)
22925 v.AddArg3(val, ptr, mem)
22926 return true
22927 }
22928 return false
22929 }
22930 func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool {
22931 v_2 := v.Args[2]
22932 v_1 := v.Args[1]
22933 v_0 := v.Args[0]
22934
22935
22936
22937 for {
22938 off1 := auxIntToInt32(v.AuxInt)
22939 sym := auxToSym(v.Aux)
22940 val := v_0
22941 if v_1.Op != OpAMD64ADDQconst {
22942 break
22943 }
22944 off2 := auxIntToInt32(v_1.AuxInt)
22945 ptr := v_1.Args[0]
22946 mem := v_2
22947 if !(is32Bit(int64(off1) + int64(off2))) {
22948 break
22949 }
22950 v.reset(OpAMD64XADDQlock)
22951 v.AuxInt = int32ToAuxInt(off1 + off2)
22952 v.Aux = symToAux(sym)
22953 v.AddArg3(val, ptr, mem)
22954 return true
22955 }
22956 return false
22957 }
22958 func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool {
22959 v_2 := v.Args[2]
22960 v_1 := v.Args[1]
22961 v_0 := v.Args[0]
22962
22963
22964
22965 for {
22966 off1 := auxIntToInt32(v.AuxInt)
22967 sym := auxToSym(v.Aux)
22968 val := v_0
22969 if v_1.Op != OpAMD64ADDQconst {
22970 break
22971 }
22972 off2 := auxIntToInt32(v_1.AuxInt)
22973 ptr := v_1.Args[0]
22974 mem := v_2
22975 if !(is32Bit(int64(off1) + int64(off2))) {
22976 break
22977 }
22978 v.reset(OpAMD64XCHGL)
22979 v.AuxInt = int32ToAuxInt(off1 + off2)
22980 v.Aux = symToAux(sym)
22981 v.AddArg3(val, ptr, mem)
22982 return true
22983 }
22984
22985
22986
22987 for {
22988 off1 := auxIntToInt32(v.AuxInt)
22989 sym1 := auxToSym(v.Aux)
22990 val := v_0
22991 if v_1.Op != OpAMD64LEAQ {
22992 break
22993 }
22994 off2 := auxIntToInt32(v_1.AuxInt)
22995 sym2 := auxToSym(v_1.Aux)
22996 ptr := v_1.Args[0]
22997 mem := v_2
22998 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
22999 break
23000 }
23001 v.reset(OpAMD64XCHGL)
23002 v.AuxInt = int32ToAuxInt(off1 + off2)
23003 v.Aux = symToAux(mergeSym(sym1, sym2))
23004 v.AddArg3(val, ptr, mem)
23005 return true
23006 }
23007 return false
23008 }
23009 func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool {
23010 v_2 := v.Args[2]
23011 v_1 := v.Args[1]
23012 v_0 := v.Args[0]
23013
23014
23015
23016 for {
23017 off1 := auxIntToInt32(v.AuxInt)
23018 sym := auxToSym(v.Aux)
23019 val := v_0
23020 if v_1.Op != OpAMD64ADDQconst {
23021 break
23022 }
23023 off2 := auxIntToInt32(v_1.AuxInt)
23024 ptr := v_1.Args[0]
23025 mem := v_2
23026 if !(is32Bit(int64(off1) + int64(off2))) {
23027 break
23028 }
23029 v.reset(OpAMD64XCHGQ)
23030 v.AuxInt = int32ToAuxInt(off1 + off2)
23031 v.Aux = symToAux(sym)
23032 v.AddArg3(val, ptr, mem)
23033 return true
23034 }
23035
23036
23037
23038 for {
23039 off1 := auxIntToInt32(v.AuxInt)
23040 sym1 := auxToSym(v.Aux)
23041 val := v_0
23042 if v_1.Op != OpAMD64LEAQ {
23043 break
23044 }
23045 off2 := auxIntToInt32(v_1.AuxInt)
23046 sym2 := auxToSym(v_1.Aux)
23047 ptr := v_1.Args[0]
23048 mem := v_2
23049 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
23050 break
23051 }
23052 v.reset(OpAMD64XCHGQ)
23053 v.AuxInt = int32ToAuxInt(off1 + off2)
23054 v.Aux = symToAux(mergeSym(sym1, sym2))
23055 v.AddArg3(val, ptr, mem)
23056 return true
23057 }
23058 return false
23059 }
23060 func rewriteValueAMD64_OpAMD64XORL(v *Value) bool {
23061 v_1 := v.Args[1]
23062 v_0 := v.Args[0]
23063
23064
23065 for {
23066 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23067 if v_0.Op != OpAMD64SHLL {
23068 continue
23069 }
23070 y := v_0.Args[1]
23071 v_0_0 := v_0.Args[0]
23072 if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
23073 continue
23074 }
23075 x := v_1
23076 v.reset(OpAMD64BTCL)
23077 v.AddArg2(x, y)
23078 return true
23079 }
23080 break
23081 }
23082
23083
23084 for {
23085 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23086 x := v_0
23087 if v_1.Op != OpAMD64MOVLconst {
23088 continue
23089 }
23090 c := auxIntToInt32(v_1.AuxInt)
23091 v.reset(OpAMD64XORLconst)
23092 v.AuxInt = int32ToAuxInt(c)
23093 v.AddArg(x)
23094 return true
23095 }
23096 break
23097 }
23098
23099
23100 for {
23101 x := v_0
23102 if x != v_1 {
23103 break
23104 }
23105 v.reset(OpAMD64MOVLconst)
23106 v.AuxInt = int32ToAuxInt(0)
23107 return true
23108 }
23109
23110
23111
23112 for {
23113 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23114 x := v_0
23115 l := v_1
23116 if l.Op != OpAMD64MOVLload {
23117 continue
23118 }
23119 off := auxIntToInt32(l.AuxInt)
23120 sym := auxToSym(l.Aux)
23121 mem := l.Args[1]
23122 ptr := l.Args[0]
23123 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
23124 continue
23125 }
23126 v.reset(OpAMD64XORLload)
23127 v.AuxInt = int32ToAuxInt(off)
23128 v.Aux = symToAux(sym)
23129 v.AddArg3(x, ptr, mem)
23130 return true
23131 }
23132 break
23133 }
23134
23135
23136
23137 for {
23138 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23139 x := v_0
23140 if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
23141 continue
23142 }
23143 v.reset(OpAMD64BLSMSKL)
23144 v.AddArg(x)
23145 return true
23146 }
23147 break
23148 }
23149 return false
23150 }
23151 func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool {
23152 v_0 := v.Args[0]
23153
23154
23155 for {
23156 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETNE {
23157 break
23158 }
23159 x := v_0.Args[0]
23160 v.reset(OpAMD64SETEQ)
23161 v.AddArg(x)
23162 return true
23163 }
23164
23165
23166 for {
23167 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETEQ {
23168 break
23169 }
23170 x := v_0.Args[0]
23171 v.reset(OpAMD64SETNE)
23172 v.AddArg(x)
23173 return true
23174 }
23175
23176
23177 for {
23178 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETL {
23179 break
23180 }
23181 x := v_0.Args[0]
23182 v.reset(OpAMD64SETGE)
23183 v.AddArg(x)
23184 return true
23185 }
23186
23187
23188 for {
23189 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETGE {
23190 break
23191 }
23192 x := v_0.Args[0]
23193 v.reset(OpAMD64SETL)
23194 v.AddArg(x)
23195 return true
23196 }
23197
23198
23199 for {
23200 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETLE {
23201 break
23202 }
23203 x := v_0.Args[0]
23204 v.reset(OpAMD64SETG)
23205 v.AddArg(x)
23206 return true
23207 }
23208
23209
23210 for {
23211 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETG {
23212 break
23213 }
23214 x := v_0.Args[0]
23215 v.reset(OpAMD64SETLE)
23216 v.AddArg(x)
23217 return true
23218 }
23219
23220
23221 for {
23222 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETB {
23223 break
23224 }
23225 x := v_0.Args[0]
23226 v.reset(OpAMD64SETAE)
23227 v.AddArg(x)
23228 return true
23229 }
23230
23231
23232 for {
23233 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETAE {
23234 break
23235 }
23236 x := v_0.Args[0]
23237 v.reset(OpAMD64SETB)
23238 v.AddArg(x)
23239 return true
23240 }
23241
23242
23243 for {
23244 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETBE {
23245 break
23246 }
23247 x := v_0.Args[0]
23248 v.reset(OpAMD64SETA)
23249 v.AddArg(x)
23250 return true
23251 }
23252
23253
23254 for {
23255 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETA {
23256 break
23257 }
23258 x := v_0.Args[0]
23259 v.reset(OpAMD64SETBE)
23260 v.AddArg(x)
23261 return true
23262 }
23263
23264
23265 for {
23266 c := auxIntToInt32(v.AuxInt)
23267 if v_0.Op != OpAMD64XORLconst {
23268 break
23269 }
23270 d := auxIntToInt32(v_0.AuxInt)
23271 x := v_0.Args[0]
23272 v.reset(OpAMD64XORLconst)
23273 v.AuxInt = int32ToAuxInt(c ^ d)
23274 v.AddArg(x)
23275 return true
23276 }
23277
23278
23279
23280 for {
23281 c := auxIntToInt32(v.AuxInt)
23282 x := v_0
23283 if !(c == 0) {
23284 break
23285 }
23286 v.copyOf(x)
23287 return true
23288 }
23289
23290
23291 for {
23292 c := auxIntToInt32(v.AuxInt)
23293 if v_0.Op != OpAMD64MOVLconst {
23294 break
23295 }
23296 d := auxIntToInt32(v_0.AuxInt)
23297 v.reset(OpAMD64MOVLconst)
23298 v.AuxInt = int32ToAuxInt(c ^ d)
23299 return true
23300 }
23301 return false
23302 }
23303 func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool {
23304 v_1 := v.Args[1]
23305 v_0 := v.Args[0]
23306
23307
23308
23309 for {
23310 valoff1 := auxIntToValAndOff(v.AuxInt)
23311 sym := auxToSym(v.Aux)
23312 if v_0.Op != OpAMD64ADDQconst {
23313 break
23314 }
23315 off2 := auxIntToInt32(v_0.AuxInt)
23316 base := v_0.Args[0]
23317 mem := v_1
23318 if !(ValAndOff(valoff1).canAdd32(off2)) {
23319 break
23320 }
23321 v.reset(OpAMD64XORLconstmodify)
23322 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
23323 v.Aux = symToAux(sym)
23324 v.AddArg2(base, mem)
23325 return true
23326 }
23327
23328
23329
23330 for {
23331 valoff1 := auxIntToValAndOff(v.AuxInt)
23332 sym1 := auxToSym(v.Aux)
23333 if v_0.Op != OpAMD64LEAQ {
23334 break
23335 }
23336 off2 := auxIntToInt32(v_0.AuxInt)
23337 sym2 := auxToSym(v_0.Aux)
23338 base := v_0.Args[0]
23339 mem := v_1
23340 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
23341 break
23342 }
23343 v.reset(OpAMD64XORLconstmodify)
23344 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
23345 v.Aux = symToAux(mergeSym(sym1, sym2))
23346 v.AddArg2(base, mem)
23347 return true
23348 }
23349 return false
23350 }
23351 func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool {
23352 v_2 := v.Args[2]
23353 v_1 := v.Args[1]
23354 v_0 := v.Args[0]
23355 b := v.Block
23356 typ := &b.Func.Config.Types
23357
23358
23359
23360 for {
23361 off1 := auxIntToInt32(v.AuxInt)
23362 sym := auxToSym(v.Aux)
23363 val := v_0
23364 if v_1.Op != OpAMD64ADDQconst {
23365 break
23366 }
23367 off2 := auxIntToInt32(v_1.AuxInt)
23368 base := v_1.Args[0]
23369 mem := v_2
23370 if !(is32Bit(int64(off1) + int64(off2))) {
23371 break
23372 }
23373 v.reset(OpAMD64XORLload)
23374 v.AuxInt = int32ToAuxInt(off1 + off2)
23375 v.Aux = symToAux(sym)
23376 v.AddArg3(val, base, mem)
23377 return true
23378 }
23379
23380
23381
23382 for {
23383 off1 := auxIntToInt32(v.AuxInt)
23384 sym1 := auxToSym(v.Aux)
23385 val := v_0
23386 if v_1.Op != OpAMD64LEAQ {
23387 break
23388 }
23389 off2 := auxIntToInt32(v_1.AuxInt)
23390 sym2 := auxToSym(v_1.Aux)
23391 base := v_1.Args[0]
23392 mem := v_2
23393 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23394 break
23395 }
23396 v.reset(OpAMD64XORLload)
23397 v.AuxInt = int32ToAuxInt(off1 + off2)
23398 v.Aux = symToAux(mergeSym(sym1, sym2))
23399 v.AddArg3(val, base, mem)
23400 return true
23401 }
23402
23403
23404 for {
23405 off := auxIntToInt32(v.AuxInt)
23406 sym := auxToSym(v.Aux)
23407 x := v_0
23408 ptr := v_1
23409 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
23410 break
23411 }
23412 y := v_2.Args[1]
23413 if ptr != v_2.Args[0] {
23414 break
23415 }
23416 v.reset(OpAMD64XORL)
23417 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
23418 v0.AddArg(y)
23419 v.AddArg2(x, v0)
23420 return true
23421 }
23422 return false
23423 }
23424 func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool {
23425 v_2 := v.Args[2]
23426 v_1 := v.Args[1]
23427 v_0 := v.Args[0]
23428
23429
23430
23431 for {
23432 off1 := auxIntToInt32(v.AuxInt)
23433 sym := auxToSym(v.Aux)
23434 if v_0.Op != OpAMD64ADDQconst {
23435 break
23436 }
23437 off2 := auxIntToInt32(v_0.AuxInt)
23438 base := v_0.Args[0]
23439 val := v_1
23440 mem := v_2
23441 if !(is32Bit(int64(off1) + int64(off2))) {
23442 break
23443 }
23444 v.reset(OpAMD64XORLmodify)
23445 v.AuxInt = int32ToAuxInt(off1 + off2)
23446 v.Aux = symToAux(sym)
23447 v.AddArg3(base, val, mem)
23448 return true
23449 }
23450
23451
23452
23453 for {
23454 off1 := auxIntToInt32(v.AuxInt)
23455 sym1 := auxToSym(v.Aux)
23456 if v_0.Op != OpAMD64LEAQ {
23457 break
23458 }
23459 off2 := auxIntToInt32(v_0.AuxInt)
23460 sym2 := auxToSym(v_0.Aux)
23461 base := v_0.Args[0]
23462 val := v_1
23463 mem := v_2
23464 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23465 break
23466 }
23467 v.reset(OpAMD64XORLmodify)
23468 v.AuxInt = int32ToAuxInt(off1 + off2)
23469 v.Aux = symToAux(mergeSym(sym1, sym2))
23470 v.AddArg3(base, val, mem)
23471 return true
23472 }
23473 return false
23474 }
23475 func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
23476 v_1 := v.Args[1]
23477 v_0 := v.Args[0]
23478
23479
23480 for {
23481 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23482 if v_0.Op != OpAMD64SHLQ {
23483 continue
23484 }
23485 y := v_0.Args[1]
23486 v_0_0 := v_0.Args[0]
23487 if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
23488 continue
23489 }
23490 x := v_1
23491 v.reset(OpAMD64BTCQ)
23492 v.AddArg2(x, y)
23493 return true
23494 }
23495 break
23496 }
23497
23498
23499
23500 for {
23501 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23502 if v_0.Op != OpAMD64MOVQconst {
23503 continue
23504 }
23505 c := auxIntToInt64(v_0.AuxInt)
23506 x := v_1
23507 if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
23508 continue
23509 }
23510 v.reset(OpAMD64BTCQconst)
23511 v.AuxInt = int8ToAuxInt(int8(log64(c)))
23512 v.AddArg(x)
23513 return true
23514 }
23515 break
23516 }
23517
23518
23519
23520 for {
23521 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23522 x := v_0
23523 if v_1.Op != OpAMD64MOVQconst {
23524 continue
23525 }
23526 c := auxIntToInt64(v_1.AuxInt)
23527 if !(is32Bit(c)) {
23528 continue
23529 }
23530 v.reset(OpAMD64XORQconst)
23531 v.AuxInt = int32ToAuxInt(int32(c))
23532 v.AddArg(x)
23533 return true
23534 }
23535 break
23536 }
23537
23538
23539 for {
23540 x := v_0
23541 if x != v_1 {
23542 break
23543 }
23544 v.reset(OpAMD64MOVQconst)
23545 v.AuxInt = int64ToAuxInt(0)
23546 return true
23547 }
23548
23549
23550
23551 for {
23552 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23553 x := v_0
23554 l := v_1
23555 if l.Op != OpAMD64MOVQload {
23556 continue
23557 }
23558 off := auxIntToInt32(l.AuxInt)
23559 sym := auxToSym(l.Aux)
23560 mem := l.Args[1]
23561 ptr := l.Args[0]
23562 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
23563 continue
23564 }
23565 v.reset(OpAMD64XORQload)
23566 v.AuxInt = int32ToAuxInt(off)
23567 v.Aux = symToAux(sym)
23568 v.AddArg3(x, ptr, mem)
23569 return true
23570 }
23571 break
23572 }
23573
23574
23575
23576 for {
23577 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23578 x := v_0
23579 if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
23580 continue
23581 }
23582 v.reset(OpAMD64BLSMSKQ)
23583 v.AddArg(x)
23584 return true
23585 }
23586 break
23587 }
23588 return false
23589 }
23590 func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool {
23591 v_0 := v.Args[0]
23592
23593
23594 for {
23595 c := auxIntToInt32(v.AuxInt)
23596 if v_0.Op != OpAMD64XORQconst {
23597 break
23598 }
23599 d := auxIntToInt32(v_0.AuxInt)
23600 x := v_0.Args[0]
23601 v.reset(OpAMD64XORQconst)
23602 v.AuxInt = int32ToAuxInt(c ^ d)
23603 v.AddArg(x)
23604 return true
23605 }
23606
23607
23608 for {
23609 if auxIntToInt32(v.AuxInt) != 0 {
23610 break
23611 }
23612 x := v_0
23613 v.copyOf(x)
23614 return true
23615 }
23616
23617
23618 for {
23619 c := auxIntToInt32(v.AuxInt)
23620 if v_0.Op != OpAMD64MOVQconst {
23621 break
23622 }
23623 d := auxIntToInt64(v_0.AuxInt)
23624 v.reset(OpAMD64MOVQconst)
23625 v.AuxInt = int64ToAuxInt(int64(c) ^ d)
23626 return true
23627 }
23628 return false
23629 }
23630 func rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool {
23631 v_1 := v.Args[1]
23632 v_0 := v.Args[0]
23633
23634
23635
23636 for {
23637 valoff1 := auxIntToValAndOff(v.AuxInt)
23638 sym := auxToSym(v.Aux)
23639 if v_0.Op != OpAMD64ADDQconst {
23640 break
23641 }
23642 off2 := auxIntToInt32(v_0.AuxInt)
23643 base := v_0.Args[0]
23644 mem := v_1
23645 if !(ValAndOff(valoff1).canAdd32(off2)) {
23646 break
23647 }
23648 v.reset(OpAMD64XORQconstmodify)
23649 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
23650 v.Aux = symToAux(sym)
23651 v.AddArg2(base, mem)
23652 return true
23653 }
23654
23655
23656
23657 for {
23658 valoff1 := auxIntToValAndOff(v.AuxInt)
23659 sym1 := auxToSym(v.Aux)
23660 if v_0.Op != OpAMD64LEAQ {
23661 break
23662 }
23663 off2 := auxIntToInt32(v_0.AuxInt)
23664 sym2 := auxToSym(v_0.Aux)
23665 base := v_0.Args[0]
23666 mem := v_1
23667 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
23668 break
23669 }
23670 v.reset(OpAMD64XORQconstmodify)
23671 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
23672 v.Aux = symToAux(mergeSym(sym1, sym2))
23673 v.AddArg2(base, mem)
23674 return true
23675 }
23676 return false
23677 }
23678 func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool {
23679 v_2 := v.Args[2]
23680 v_1 := v.Args[1]
23681 v_0 := v.Args[0]
23682 b := v.Block
23683 typ := &b.Func.Config.Types
23684
23685
23686
23687 for {
23688 off1 := auxIntToInt32(v.AuxInt)
23689 sym := auxToSym(v.Aux)
23690 val := v_0
23691 if v_1.Op != OpAMD64ADDQconst {
23692 break
23693 }
23694 off2 := auxIntToInt32(v_1.AuxInt)
23695 base := v_1.Args[0]
23696 mem := v_2
23697 if !(is32Bit(int64(off1) + int64(off2))) {
23698 break
23699 }
23700 v.reset(OpAMD64XORQload)
23701 v.AuxInt = int32ToAuxInt(off1 + off2)
23702 v.Aux = symToAux(sym)
23703 v.AddArg3(val, base, mem)
23704 return true
23705 }
23706
23707
23708
23709 for {
23710 off1 := auxIntToInt32(v.AuxInt)
23711 sym1 := auxToSym(v.Aux)
23712 val := v_0
23713 if v_1.Op != OpAMD64LEAQ {
23714 break
23715 }
23716 off2 := auxIntToInt32(v_1.AuxInt)
23717 sym2 := auxToSym(v_1.Aux)
23718 base := v_1.Args[0]
23719 mem := v_2
23720 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23721 break
23722 }
23723 v.reset(OpAMD64XORQload)
23724 v.AuxInt = int32ToAuxInt(off1 + off2)
23725 v.Aux = symToAux(mergeSym(sym1, sym2))
23726 v.AddArg3(val, base, mem)
23727 return true
23728 }
23729
23730
23731 for {
23732 off := auxIntToInt32(v.AuxInt)
23733 sym := auxToSym(v.Aux)
23734 x := v_0
23735 ptr := v_1
23736 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
23737 break
23738 }
23739 y := v_2.Args[1]
23740 if ptr != v_2.Args[0] {
23741 break
23742 }
23743 v.reset(OpAMD64XORQ)
23744 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
23745 v0.AddArg(y)
23746 v.AddArg2(x, v0)
23747 return true
23748 }
23749 return false
23750 }
23751 func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool {
23752 v_2 := v.Args[2]
23753 v_1 := v.Args[1]
23754 v_0 := v.Args[0]
23755
23756
23757
23758 for {
23759 off1 := auxIntToInt32(v.AuxInt)
23760 sym := auxToSym(v.Aux)
23761 if v_0.Op != OpAMD64ADDQconst {
23762 break
23763 }
23764 off2 := auxIntToInt32(v_0.AuxInt)
23765 base := v_0.Args[0]
23766 val := v_1
23767 mem := v_2
23768 if !(is32Bit(int64(off1) + int64(off2))) {
23769 break
23770 }
23771 v.reset(OpAMD64XORQmodify)
23772 v.AuxInt = int32ToAuxInt(off1 + off2)
23773 v.Aux = symToAux(sym)
23774 v.AddArg3(base, val, mem)
23775 return true
23776 }
23777
23778
23779
23780 for {
23781 off1 := auxIntToInt32(v.AuxInt)
23782 sym1 := auxToSym(v.Aux)
23783 if v_0.Op != OpAMD64LEAQ {
23784 break
23785 }
23786 off2 := auxIntToInt32(v_0.AuxInt)
23787 sym2 := auxToSym(v_0.Aux)
23788 base := v_0.Args[0]
23789 val := v_1
23790 mem := v_2
23791 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23792 break
23793 }
23794 v.reset(OpAMD64XORQmodify)
23795 v.AuxInt = int32ToAuxInt(off1 + off2)
23796 v.Aux = symToAux(mergeSym(sym1, sym2))
23797 v.AddArg3(base, val, mem)
23798 return true
23799 }
23800 return false
23801 }
23802 func rewriteValueAMD64_OpAddr(v *Value) bool {
23803 v_0 := v.Args[0]
23804
23805
23806 for {
23807 sym := auxToSym(v.Aux)
23808 base := v_0
23809 v.reset(OpAMD64LEAQ)
23810 v.Aux = symToAux(sym)
23811 v.AddArg(base)
23812 return true
23813 }
23814 }
23815 func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool {
23816 v_2 := v.Args[2]
23817 v_1 := v.Args[1]
23818 v_0 := v.Args[0]
23819 b := v.Block
23820 typ := &b.Func.Config.Types
23821
23822
23823 for {
23824 ptr := v_0
23825 val := v_1
23826 mem := v_2
23827 v.reset(OpAMD64AddTupleFirst32)
23828 v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
23829 v0.AddArg3(val, ptr, mem)
23830 v.AddArg2(val, v0)
23831 return true
23832 }
23833 }
23834 func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool {
23835 v_2 := v.Args[2]
23836 v_1 := v.Args[1]
23837 v_0 := v.Args[0]
23838 b := v.Block
23839 typ := &b.Func.Config.Types
23840
23841
23842 for {
23843 ptr := v_0
23844 val := v_1
23845 mem := v_2
23846 v.reset(OpAMD64AddTupleFirst64)
23847 v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
23848 v0.AddArg3(val, ptr, mem)
23849 v.AddArg2(val, v0)
23850 return true
23851 }
23852 }
23853 func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool {
23854 v_2 := v.Args[2]
23855 v_1 := v.Args[1]
23856 v_0 := v.Args[0]
23857
23858
23859 for {
23860 ptr := v_0
23861 val := v_1
23862 mem := v_2
23863 v.reset(OpAMD64ANDLlock)
23864 v.AddArg3(ptr, val, mem)
23865 return true
23866 }
23867 }
23868 func rewriteValueAMD64_OpAtomicAnd32value(v *Value) bool {
23869 v_2 := v.Args[2]
23870 v_1 := v.Args[1]
23871 v_0 := v.Args[0]
23872
23873
23874 for {
23875 ptr := v_0
23876 val := v_1
23877 mem := v_2
23878 v.reset(OpAMD64LoweredAtomicAnd32)
23879 v.AddArg3(ptr, val, mem)
23880 return true
23881 }
23882 }
23883 func rewriteValueAMD64_OpAtomicAnd64value(v *Value) bool {
23884 v_2 := v.Args[2]
23885 v_1 := v.Args[1]
23886 v_0 := v.Args[0]
23887
23888
23889 for {
23890 ptr := v_0
23891 val := v_1
23892 mem := v_2
23893 v.reset(OpAMD64LoweredAtomicAnd64)
23894 v.AddArg3(ptr, val, mem)
23895 return true
23896 }
23897 }
23898 func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool {
23899 v_2 := v.Args[2]
23900 v_1 := v.Args[1]
23901 v_0 := v.Args[0]
23902
23903
23904 for {
23905 ptr := v_0
23906 val := v_1
23907 mem := v_2
23908 v.reset(OpAMD64ANDBlock)
23909 v.AddArg3(ptr, val, mem)
23910 return true
23911 }
23912 }
23913 func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool {
23914 v_3 := v.Args[3]
23915 v_2 := v.Args[2]
23916 v_1 := v.Args[1]
23917 v_0 := v.Args[0]
23918
23919
23920 for {
23921 ptr := v_0
23922 old := v_1
23923 new_ := v_2
23924 mem := v_3
23925 v.reset(OpAMD64CMPXCHGLlock)
23926 v.AddArg4(ptr, old, new_, mem)
23927 return true
23928 }
23929 }
23930 func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool {
23931 v_3 := v.Args[3]
23932 v_2 := v.Args[2]
23933 v_1 := v.Args[1]
23934 v_0 := v.Args[0]
23935
23936
23937 for {
23938 ptr := v_0
23939 old := v_1
23940 new_ := v_2
23941 mem := v_3
23942 v.reset(OpAMD64CMPXCHGQlock)
23943 v.AddArg4(ptr, old, new_, mem)
23944 return true
23945 }
23946 }
23947 func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool {
23948 v_2 := v.Args[2]
23949 v_1 := v.Args[1]
23950 v_0 := v.Args[0]
23951
23952
23953 for {
23954 ptr := v_0
23955 val := v_1
23956 mem := v_2
23957 v.reset(OpAMD64XCHGL)
23958 v.AddArg3(val, ptr, mem)
23959 return true
23960 }
23961 }
23962 func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool {
23963 v_2 := v.Args[2]
23964 v_1 := v.Args[1]
23965 v_0 := v.Args[0]
23966
23967
23968 for {
23969 ptr := v_0
23970 val := v_1
23971 mem := v_2
23972 v.reset(OpAMD64XCHGQ)
23973 v.AddArg3(val, ptr, mem)
23974 return true
23975 }
23976 }
23977 func rewriteValueAMD64_OpAtomicExchange8(v *Value) bool {
23978 v_2 := v.Args[2]
23979 v_1 := v.Args[1]
23980 v_0 := v.Args[0]
23981
23982
23983 for {
23984 ptr := v_0
23985 val := v_1
23986 mem := v_2
23987 v.reset(OpAMD64XCHGB)
23988 v.AddArg3(val, ptr, mem)
23989 return true
23990 }
23991 }
23992 func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool {
23993 v_1 := v.Args[1]
23994 v_0 := v.Args[0]
23995
23996
23997 for {
23998 ptr := v_0
23999 mem := v_1
24000 v.reset(OpAMD64MOVLatomicload)
24001 v.AddArg2(ptr, mem)
24002 return true
24003 }
24004 }
24005 func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool {
24006 v_1 := v.Args[1]
24007 v_0 := v.Args[0]
24008
24009
24010 for {
24011 ptr := v_0
24012 mem := v_1
24013 v.reset(OpAMD64MOVQatomicload)
24014 v.AddArg2(ptr, mem)
24015 return true
24016 }
24017 }
24018 func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool {
24019 v_1 := v.Args[1]
24020 v_0 := v.Args[0]
24021
24022
24023 for {
24024 ptr := v_0
24025 mem := v_1
24026 v.reset(OpAMD64MOVBatomicload)
24027 v.AddArg2(ptr, mem)
24028 return true
24029 }
24030 }
24031 func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool {
24032 v_1 := v.Args[1]
24033 v_0 := v.Args[0]
24034
24035
24036 for {
24037 ptr := v_0
24038 mem := v_1
24039 v.reset(OpAMD64MOVQatomicload)
24040 v.AddArg2(ptr, mem)
24041 return true
24042 }
24043 }
24044 func rewriteValueAMD64_OpAtomicOr32(v *Value) bool {
24045 v_2 := v.Args[2]
24046 v_1 := v.Args[1]
24047 v_0 := v.Args[0]
24048
24049
24050 for {
24051 ptr := v_0
24052 val := v_1
24053 mem := v_2
24054 v.reset(OpAMD64ORLlock)
24055 v.AddArg3(ptr, val, mem)
24056 return true
24057 }
24058 }
24059 func rewriteValueAMD64_OpAtomicOr32value(v *Value) bool {
24060 v_2 := v.Args[2]
24061 v_1 := v.Args[1]
24062 v_0 := v.Args[0]
24063
24064
24065 for {
24066 ptr := v_0
24067 val := v_1
24068 mem := v_2
24069 v.reset(OpAMD64LoweredAtomicOr32)
24070 v.AddArg3(ptr, val, mem)
24071 return true
24072 }
24073 }
24074 func rewriteValueAMD64_OpAtomicOr64value(v *Value) bool {
24075 v_2 := v.Args[2]
24076 v_1 := v.Args[1]
24077 v_0 := v.Args[0]
24078
24079
24080 for {
24081 ptr := v_0
24082 val := v_1
24083 mem := v_2
24084 v.reset(OpAMD64LoweredAtomicOr64)
24085 v.AddArg3(ptr, val, mem)
24086 return true
24087 }
24088 }
24089 func rewriteValueAMD64_OpAtomicOr8(v *Value) bool {
24090 v_2 := v.Args[2]
24091 v_1 := v.Args[1]
24092 v_0 := v.Args[0]
24093
24094
24095 for {
24096 ptr := v_0
24097 val := v_1
24098 mem := v_2
24099 v.reset(OpAMD64ORBlock)
24100 v.AddArg3(ptr, val, mem)
24101 return true
24102 }
24103 }
24104 func rewriteValueAMD64_OpAtomicStore32(v *Value) bool {
24105 v_2 := v.Args[2]
24106 v_1 := v.Args[1]
24107 v_0 := v.Args[0]
24108 b := v.Block
24109 typ := &b.Func.Config.Types
24110
24111
24112 for {
24113 ptr := v_0
24114 val := v_1
24115 mem := v_2
24116 v.reset(OpSelect1)
24117 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
24118 v0.AddArg3(val, ptr, mem)
24119 v.AddArg(v0)
24120 return true
24121 }
24122 }
24123 func rewriteValueAMD64_OpAtomicStore64(v *Value) bool {
24124 v_2 := v.Args[2]
24125 v_1 := v.Args[1]
24126 v_0 := v.Args[0]
24127 b := v.Block
24128 typ := &b.Func.Config.Types
24129
24130
24131 for {
24132 ptr := v_0
24133 val := v_1
24134 mem := v_2
24135 v.reset(OpSelect1)
24136 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
24137 v0.AddArg3(val, ptr, mem)
24138 v.AddArg(v0)
24139 return true
24140 }
24141 }
24142 func rewriteValueAMD64_OpAtomicStore8(v *Value) bool {
24143 v_2 := v.Args[2]
24144 v_1 := v.Args[1]
24145 v_0 := v.Args[0]
24146 b := v.Block
24147 typ := &b.Func.Config.Types
24148
24149
24150 for {
24151 ptr := v_0
24152 val := v_1
24153 mem := v_2
24154 v.reset(OpSelect1)
24155 v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem))
24156 v0.AddArg3(val, ptr, mem)
24157 v.AddArg(v0)
24158 return true
24159 }
24160 }
24161 func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool {
24162 v_2 := v.Args[2]
24163 v_1 := v.Args[1]
24164 v_0 := v.Args[0]
24165 b := v.Block
24166 typ := &b.Func.Config.Types
24167
24168
24169 for {
24170 ptr := v_0
24171 val := v_1
24172 mem := v_2
24173 v.reset(OpSelect1)
24174 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
24175 v0.AddArg3(val, ptr, mem)
24176 v.AddArg(v0)
24177 return true
24178 }
24179 }
24180 func rewriteValueAMD64_OpBitLen16(v *Value) bool {
24181 v_0 := v.Args[0]
24182 b := v.Block
24183 typ := &b.Func.Config.Types
24184
24185
24186
24187 for {
24188 x := v_0
24189 if !(buildcfg.GOAMD64 < 3) {
24190 break
24191 }
24192 v.reset(OpAMD64BSRL)
24193 v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
24194 v0.AuxInt = int32ToAuxInt(1)
24195 v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
24196 v1.AddArg(x)
24197 v0.AddArg2(v1, v1)
24198 v.AddArg(v0)
24199 return true
24200 }
24201
24202
24203
24204 for {
24205 t := v.Type
24206 x := v_0
24207 if !(buildcfg.GOAMD64 >= 3) {
24208 break
24209 }
24210 v.reset(OpAMD64NEGQ)
24211 v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
24212 v0.AuxInt = int32ToAuxInt(-32)
24213 v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
24214 v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type)
24215 v2.AddArg(x)
24216 v1.AddArg(v2)
24217 v0.AddArg(v1)
24218 v.AddArg(v0)
24219 return true
24220 }
24221 return false
24222 }
24223 func rewriteValueAMD64_OpBitLen32(v *Value) bool {
24224 v_0 := v.Args[0]
24225 b := v.Block
24226 typ := &b.Func.Config.Types
24227
24228
24229
24230 for {
24231 x := v_0
24232 if !(buildcfg.GOAMD64 < 3) {
24233 break
24234 }
24235 v.reset(OpSelect0)
24236 v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
24237 v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64)
24238 v1.AuxInt = int32ToAuxInt(1)
24239 v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
24240 v2.AddArg(x)
24241 v1.AddArg2(v2, v2)
24242 v0.AddArg(v1)
24243 v.AddArg(v0)
24244 return true
24245 }
24246
24247
24248
24249 for {
24250 t := v.Type
24251 x := v_0
24252 if !(buildcfg.GOAMD64 >= 3) {
24253 break
24254 }
24255 v.reset(OpAMD64NEGQ)
24256 v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
24257 v0.AuxInt = int32ToAuxInt(-32)
24258 v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
24259 v1.AddArg(x)
24260 v0.AddArg(v1)
24261 v.AddArg(v0)
24262 return true
24263 }
24264 return false
24265 }
24266 func rewriteValueAMD64_OpBitLen64(v *Value) bool {
24267 v_0 := v.Args[0]
24268 b := v.Block
24269 typ := &b.Func.Config.Types
24270
24271
24272
24273 for {
24274 t := v.Type
24275 x := v_0
24276 if !(buildcfg.GOAMD64 < 3) {
24277 break
24278 }
24279 v.reset(OpAMD64ADDQconst)
24280 v.AuxInt = int32ToAuxInt(1)
24281 v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
24282 v1 := b.NewValue0(v.Pos, OpSelect0, t)
24283 v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
24284 v2.AddArg(x)
24285 v1.AddArg(v2)
24286 v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
24287 v3.AuxInt = int64ToAuxInt(-1)
24288 v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
24289 v4.AddArg(v2)
24290 v0.AddArg3(v1, v3, v4)
24291 v.AddArg(v0)
24292 return true
24293 }
24294
24295
24296
24297 for {
24298 t := v.Type
24299 x := v_0
24300 if !(buildcfg.GOAMD64 >= 3) {
24301 break
24302 }
24303 v.reset(OpAMD64NEGQ)
24304 v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
24305 v0.AuxInt = int32ToAuxInt(-64)
24306 v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64)
24307 v1.AddArg(x)
24308 v0.AddArg(v1)
24309 v.AddArg(v0)
24310 return true
24311 }
24312 return false
24313 }
24314 func rewriteValueAMD64_OpBitLen8(v *Value) bool {
24315 v_0 := v.Args[0]
24316 b := v.Block
24317 typ := &b.Func.Config.Types
24318
24319
24320
24321 for {
24322 x := v_0
24323 if !(buildcfg.GOAMD64 < 3) {
24324 break
24325 }
24326 v.reset(OpAMD64BSRL)
24327 v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
24328 v0.AuxInt = int32ToAuxInt(1)
24329 v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
24330 v1.AddArg(x)
24331 v0.AddArg2(v1, v1)
24332 v.AddArg(v0)
24333 return true
24334 }
24335
24336
24337
24338 for {
24339 t := v.Type
24340 x := v_0
24341 if !(buildcfg.GOAMD64 >= 3) {
24342 break
24343 }
24344 v.reset(OpAMD64NEGQ)
24345 v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
24346 v0.AuxInt = int32ToAuxInt(-32)
24347 v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
24348 v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type)
24349 v2.AddArg(x)
24350 v1.AddArg(v2)
24351 v0.AddArg(v1)
24352 v.AddArg(v0)
24353 return true
24354 }
24355 return false
24356 }
24357 func rewriteValueAMD64_OpBswap16(v *Value) bool {
24358 v_0 := v.Args[0]
24359
24360
24361 for {
24362 x := v_0
24363 v.reset(OpAMD64ROLWconst)
24364 v.AuxInt = int8ToAuxInt(8)
24365 v.AddArg(x)
24366 return true
24367 }
24368 }
24369 func rewriteValueAMD64_OpCeil(v *Value) bool {
24370 v_0 := v.Args[0]
24371
24372
24373 for {
24374 x := v_0
24375 v.reset(OpAMD64ROUNDSD)
24376 v.AuxInt = int8ToAuxInt(2)
24377 v.AddArg(x)
24378 return true
24379 }
24380 }
24381 func rewriteValueAMD64_OpCondSelect(v *Value) bool {
24382 v_2 := v.Args[2]
24383 v_1 := v.Args[1]
24384 v_0 := v.Args[0]
24385 b := v.Block
24386 typ := &b.Func.Config.Types
24387
24388
24389
24390 for {
24391 t := v.Type
24392 x := v_0
24393 y := v_1
24394 if v_2.Op != OpAMD64SETEQ {
24395 break
24396 }
24397 cond := v_2.Args[0]
24398 if !(is64BitInt(t) || isPtr(t)) {
24399 break
24400 }
24401 v.reset(OpAMD64CMOVQEQ)
24402 v.AddArg3(y, x, cond)
24403 return true
24404 }
24405
24406
24407
24408 for {
24409 t := v.Type
24410 x := v_0
24411 y := v_1
24412 if v_2.Op != OpAMD64SETNE {
24413 break
24414 }
24415 cond := v_2.Args[0]
24416 if !(is64BitInt(t) || isPtr(t)) {
24417 break
24418 }
24419 v.reset(OpAMD64CMOVQNE)
24420 v.AddArg3(y, x, cond)
24421 return true
24422 }
24423
24424
24425
24426 for {
24427 t := v.Type
24428 x := v_0
24429 y := v_1
24430 if v_2.Op != OpAMD64SETL {
24431 break
24432 }
24433 cond := v_2.Args[0]
24434 if !(is64BitInt(t) || isPtr(t)) {
24435 break
24436 }
24437 v.reset(OpAMD64CMOVQLT)
24438 v.AddArg3(y, x, cond)
24439 return true
24440 }
24441
24442
24443
24444 for {
24445 t := v.Type
24446 x := v_0
24447 y := v_1
24448 if v_2.Op != OpAMD64SETG {
24449 break
24450 }
24451 cond := v_2.Args[0]
24452 if !(is64BitInt(t) || isPtr(t)) {
24453 break
24454 }
24455 v.reset(OpAMD64CMOVQGT)
24456 v.AddArg3(y, x, cond)
24457 return true
24458 }
24459
24460
24461
24462 for {
24463 t := v.Type
24464 x := v_0
24465 y := v_1
24466 if v_2.Op != OpAMD64SETLE {
24467 break
24468 }
24469 cond := v_2.Args[0]
24470 if !(is64BitInt(t) || isPtr(t)) {
24471 break
24472 }
24473 v.reset(OpAMD64CMOVQLE)
24474 v.AddArg3(y, x, cond)
24475 return true
24476 }
24477
24478
24479
24480 for {
24481 t := v.Type
24482 x := v_0
24483 y := v_1
24484 if v_2.Op != OpAMD64SETGE {
24485 break
24486 }
24487 cond := v_2.Args[0]
24488 if !(is64BitInt(t) || isPtr(t)) {
24489 break
24490 }
24491 v.reset(OpAMD64CMOVQGE)
24492 v.AddArg3(y, x, cond)
24493 return true
24494 }
24495
24496
24497
24498 for {
24499 t := v.Type
24500 x := v_0
24501 y := v_1
24502 if v_2.Op != OpAMD64SETA {
24503 break
24504 }
24505 cond := v_2.Args[0]
24506 if !(is64BitInt(t) || isPtr(t)) {
24507 break
24508 }
24509 v.reset(OpAMD64CMOVQHI)
24510 v.AddArg3(y, x, cond)
24511 return true
24512 }
24513
24514
24515
24516 for {
24517 t := v.Type
24518 x := v_0
24519 y := v_1
24520 if v_2.Op != OpAMD64SETB {
24521 break
24522 }
24523 cond := v_2.Args[0]
24524 if !(is64BitInt(t) || isPtr(t)) {
24525 break
24526 }
24527 v.reset(OpAMD64CMOVQCS)
24528 v.AddArg3(y, x, cond)
24529 return true
24530 }
24531
24532
24533
24534 for {
24535 t := v.Type
24536 x := v_0
24537 y := v_1
24538 if v_2.Op != OpAMD64SETAE {
24539 break
24540 }
24541 cond := v_2.Args[0]
24542 if !(is64BitInt(t) || isPtr(t)) {
24543 break
24544 }
24545 v.reset(OpAMD64CMOVQCC)
24546 v.AddArg3(y, x, cond)
24547 return true
24548 }
24549
24550
24551
24552 for {
24553 t := v.Type
24554 x := v_0
24555 y := v_1
24556 if v_2.Op != OpAMD64SETBE {
24557 break
24558 }
24559 cond := v_2.Args[0]
24560 if !(is64BitInt(t) || isPtr(t)) {
24561 break
24562 }
24563 v.reset(OpAMD64CMOVQLS)
24564 v.AddArg3(y, x, cond)
24565 return true
24566 }
24567
24568
24569
24570 for {
24571 t := v.Type
24572 x := v_0
24573 y := v_1
24574 if v_2.Op != OpAMD64SETEQF {
24575 break
24576 }
24577 cond := v_2.Args[0]
24578 if !(is64BitInt(t) || isPtr(t)) {
24579 break
24580 }
24581 v.reset(OpAMD64CMOVQEQF)
24582 v.AddArg3(y, x, cond)
24583 return true
24584 }
24585
24586
24587
24588 for {
24589 t := v.Type
24590 x := v_0
24591 y := v_1
24592 if v_2.Op != OpAMD64SETNEF {
24593 break
24594 }
24595 cond := v_2.Args[0]
24596 if !(is64BitInt(t) || isPtr(t)) {
24597 break
24598 }
24599 v.reset(OpAMD64CMOVQNEF)
24600 v.AddArg3(y, x, cond)
24601 return true
24602 }
24603
24604
24605
24606 for {
24607 t := v.Type
24608 x := v_0
24609 y := v_1
24610 if v_2.Op != OpAMD64SETGF {
24611 break
24612 }
24613 cond := v_2.Args[0]
24614 if !(is64BitInt(t) || isPtr(t)) {
24615 break
24616 }
24617 v.reset(OpAMD64CMOVQGTF)
24618 v.AddArg3(y, x, cond)
24619 return true
24620 }
24621
24622
24623
24624 for {
24625 t := v.Type
24626 x := v_0
24627 y := v_1
24628 if v_2.Op != OpAMD64SETGEF {
24629 break
24630 }
24631 cond := v_2.Args[0]
24632 if !(is64BitInt(t) || isPtr(t)) {
24633 break
24634 }
24635 v.reset(OpAMD64CMOVQGEF)
24636 v.AddArg3(y, x, cond)
24637 return true
24638 }
24639
24640
24641
24642 for {
24643 t := v.Type
24644 x := v_0
24645 y := v_1
24646 if v_2.Op != OpAMD64SETEQ {
24647 break
24648 }
24649 cond := v_2.Args[0]
24650 if !(is32BitInt(t)) {
24651 break
24652 }
24653 v.reset(OpAMD64CMOVLEQ)
24654 v.AddArg3(y, x, cond)
24655 return true
24656 }
24657
24658
24659
24660 for {
24661 t := v.Type
24662 x := v_0
24663 y := v_1
24664 if v_2.Op != OpAMD64SETNE {
24665 break
24666 }
24667 cond := v_2.Args[0]
24668 if !(is32BitInt(t)) {
24669 break
24670 }
24671 v.reset(OpAMD64CMOVLNE)
24672 v.AddArg3(y, x, cond)
24673 return true
24674 }
24675
24676
24677
24678 for {
24679 t := v.Type
24680 x := v_0
24681 y := v_1
24682 if v_2.Op != OpAMD64SETL {
24683 break
24684 }
24685 cond := v_2.Args[0]
24686 if !(is32BitInt(t)) {
24687 break
24688 }
24689 v.reset(OpAMD64CMOVLLT)
24690 v.AddArg3(y, x, cond)
24691 return true
24692 }
24693
24694
24695
24696 for {
24697 t := v.Type
24698 x := v_0
24699 y := v_1
24700 if v_2.Op != OpAMD64SETG {
24701 break
24702 }
24703 cond := v_2.Args[0]
24704 if !(is32BitInt(t)) {
24705 break
24706 }
24707 v.reset(OpAMD64CMOVLGT)
24708 v.AddArg3(y, x, cond)
24709 return true
24710 }
24711
24712
24713
24714 for {
24715 t := v.Type
24716 x := v_0
24717 y := v_1
24718 if v_2.Op != OpAMD64SETLE {
24719 break
24720 }
24721 cond := v_2.Args[0]
24722 if !(is32BitInt(t)) {
24723 break
24724 }
24725 v.reset(OpAMD64CMOVLLE)
24726 v.AddArg3(y, x, cond)
24727 return true
24728 }
24729
24730
24731
24732 for {
24733 t := v.Type
24734 x := v_0
24735 y := v_1
24736 if v_2.Op != OpAMD64SETGE {
24737 break
24738 }
24739 cond := v_2.Args[0]
24740 if !(is32BitInt(t)) {
24741 break
24742 }
24743 v.reset(OpAMD64CMOVLGE)
24744 v.AddArg3(y, x, cond)
24745 return true
24746 }
24747
24748
24749
24750 for {
24751 t := v.Type
24752 x := v_0
24753 y := v_1
24754 if v_2.Op != OpAMD64SETA {
24755 break
24756 }
24757 cond := v_2.Args[0]
24758 if !(is32BitInt(t)) {
24759 break
24760 }
24761 v.reset(OpAMD64CMOVLHI)
24762 v.AddArg3(y, x, cond)
24763 return true
24764 }
24765
24766
24767
24768 for {
24769 t := v.Type
24770 x := v_0
24771 y := v_1
24772 if v_2.Op != OpAMD64SETB {
24773 break
24774 }
24775 cond := v_2.Args[0]
24776 if !(is32BitInt(t)) {
24777 break
24778 }
24779 v.reset(OpAMD64CMOVLCS)
24780 v.AddArg3(y, x, cond)
24781 return true
24782 }
24783
24784
24785
24786 for {
24787 t := v.Type
24788 x := v_0
24789 y := v_1
24790 if v_2.Op != OpAMD64SETAE {
24791 break
24792 }
24793 cond := v_2.Args[0]
24794 if !(is32BitInt(t)) {
24795 break
24796 }
24797 v.reset(OpAMD64CMOVLCC)
24798 v.AddArg3(y, x, cond)
24799 return true
24800 }
24801
24802
24803
24804 for {
24805 t := v.Type
24806 x := v_0
24807 y := v_1
24808 if v_2.Op != OpAMD64SETBE {
24809 break
24810 }
24811 cond := v_2.Args[0]
24812 if !(is32BitInt(t)) {
24813 break
24814 }
24815 v.reset(OpAMD64CMOVLLS)
24816 v.AddArg3(y, x, cond)
24817 return true
24818 }
24819
24820
24821
24822 for {
24823 t := v.Type
24824 x := v_0
24825 y := v_1
24826 if v_2.Op != OpAMD64SETEQF {
24827 break
24828 }
24829 cond := v_2.Args[0]
24830 if !(is32BitInt(t)) {
24831 break
24832 }
24833 v.reset(OpAMD64CMOVLEQF)
24834 v.AddArg3(y, x, cond)
24835 return true
24836 }
24837
24838
24839
24840 for {
24841 t := v.Type
24842 x := v_0
24843 y := v_1
24844 if v_2.Op != OpAMD64SETNEF {
24845 break
24846 }
24847 cond := v_2.Args[0]
24848 if !(is32BitInt(t)) {
24849 break
24850 }
24851 v.reset(OpAMD64CMOVLNEF)
24852 v.AddArg3(y, x, cond)
24853 return true
24854 }
24855
24856
24857
24858 for {
24859 t := v.Type
24860 x := v_0
24861 y := v_1
24862 if v_2.Op != OpAMD64SETGF {
24863 break
24864 }
24865 cond := v_2.Args[0]
24866 if !(is32BitInt(t)) {
24867 break
24868 }
24869 v.reset(OpAMD64CMOVLGTF)
24870 v.AddArg3(y, x, cond)
24871 return true
24872 }
24873
24874
24875
24876 for {
24877 t := v.Type
24878 x := v_0
24879 y := v_1
24880 if v_2.Op != OpAMD64SETGEF {
24881 break
24882 }
24883 cond := v_2.Args[0]
24884 if !(is32BitInt(t)) {
24885 break
24886 }
24887 v.reset(OpAMD64CMOVLGEF)
24888 v.AddArg3(y, x, cond)
24889 return true
24890 }
24891
24892
24893
24894 for {
24895 t := v.Type
24896 x := v_0
24897 y := v_1
24898 if v_2.Op != OpAMD64SETEQ {
24899 break
24900 }
24901 cond := v_2.Args[0]
24902 if !(is16BitInt(t)) {
24903 break
24904 }
24905 v.reset(OpAMD64CMOVWEQ)
24906 v.AddArg3(y, x, cond)
24907 return true
24908 }
24909
24910
24911
24912 for {
24913 t := v.Type
24914 x := v_0
24915 y := v_1
24916 if v_2.Op != OpAMD64SETNE {
24917 break
24918 }
24919 cond := v_2.Args[0]
24920 if !(is16BitInt(t)) {
24921 break
24922 }
24923 v.reset(OpAMD64CMOVWNE)
24924 v.AddArg3(y, x, cond)
24925 return true
24926 }
24927
24928
24929
24930 for {
24931 t := v.Type
24932 x := v_0
24933 y := v_1
24934 if v_2.Op != OpAMD64SETL {
24935 break
24936 }
24937 cond := v_2.Args[0]
24938 if !(is16BitInt(t)) {
24939 break
24940 }
24941 v.reset(OpAMD64CMOVWLT)
24942 v.AddArg3(y, x, cond)
24943 return true
24944 }
24945
24946
24947
24948 for {
24949 t := v.Type
24950 x := v_0
24951 y := v_1
24952 if v_2.Op != OpAMD64SETG {
24953 break
24954 }
24955 cond := v_2.Args[0]
24956 if !(is16BitInt(t)) {
24957 break
24958 }
24959 v.reset(OpAMD64CMOVWGT)
24960 v.AddArg3(y, x, cond)
24961 return true
24962 }
24963
24964
24965
24966 for {
24967 t := v.Type
24968 x := v_0
24969 y := v_1
24970 if v_2.Op != OpAMD64SETLE {
24971 break
24972 }
24973 cond := v_2.Args[0]
24974 if !(is16BitInt(t)) {
24975 break
24976 }
24977 v.reset(OpAMD64CMOVWLE)
24978 v.AddArg3(y, x, cond)
24979 return true
24980 }
24981
24982
24983
24984 for {
24985 t := v.Type
24986 x := v_0
24987 y := v_1
24988 if v_2.Op != OpAMD64SETGE {
24989 break
24990 }
24991 cond := v_2.Args[0]
24992 if !(is16BitInt(t)) {
24993 break
24994 }
24995 v.reset(OpAMD64CMOVWGE)
24996 v.AddArg3(y, x, cond)
24997 return true
24998 }
24999
25000
25001
25002 for {
25003 t := v.Type
25004 x := v_0
25005 y := v_1
25006 if v_2.Op != OpAMD64SETA {
25007 break
25008 }
25009 cond := v_2.Args[0]
25010 if !(is16BitInt(t)) {
25011 break
25012 }
25013 v.reset(OpAMD64CMOVWHI)
25014 v.AddArg3(y, x, cond)
25015 return true
25016 }
25017
25018
25019
25020 for {
25021 t := v.Type
25022 x := v_0
25023 y := v_1
25024 if v_2.Op != OpAMD64SETB {
25025 break
25026 }
25027 cond := v_2.Args[0]
25028 if !(is16BitInt(t)) {
25029 break
25030 }
25031 v.reset(OpAMD64CMOVWCS)
25032 v.AddArg3(y, x, cond)
25033 return true
25034 }
25035
25036
25037
25038 for {
25039 t := v.Type
25040 x := v_0
25041 y := v_1
25042 if v_2.Op != OpAMD64SETAE {
25043 break
25044 }
25045 cond := v_2.Args[0]
25046 if !(is16BitInt(t)) {
25047 break
25048 }
25049 v.reset(OpAMD64CMOVWCC)
25050 v.AddArg3(y, x, cond)
25051 return true
25052 }
25053
25054
25055
25056 for {
25057 t := v.Type
25058 x := v_0
25059 y := v_1
25060 if v_2.Op != OpAMD64SETBE {
25061 break
25062 }
25063 cond := v_2.Args[0]
25064 if !(is16BitInt(t)) {
25065 break
25066 }
25067 v.reset(OpAMD64CMOVWLS)
25068 v.AddArg3(y, x, cond)
25069 return true
25070 }
25071
25072
25073
25074 for {
25075 t := v.Type
25076 x := v_0
25077 y := v_1
25078 if v_2.Op != OpAMD64SETEQF {
25079 break
25080 }
25081 cond := v_2.Args[0]
25082 if !(is16BitInt(t)) {
25083 break
25084 }
25085 v.reset(OpAMD64CMOVWEQF)
25086 v.AddArg3(y, x, cond)
25087 return true
25088 }
25089
25090
25091
25092 for {
25093 t := v.Type
25094 x := v_0
25095 y := v_1
25096 if v_2.Op != OpAMD64SETNEF {
25097 break
25098 }
25099 cond := v_2.Args[0]
25100 if !(is16BitInt(t)) {
25101 break
25102 }
25103 v.reset(OpAMD64CMOVWNEF)
25104 v.AddArg3(y, x, cond)
25105 return true
25106 }
25107
25108
25109
25110 for {
25111 t := v.Type
25112 x := v_0
25113 y := v_1
25114 if v_2.Op != OpAMD64SETGF {
25115 break
25116 }
25117 cond := v_2.Args[0]
25118 if !(is16BitInt(t)) {
25119 break
25120 }
25121 v.reset(OpAMD64CMOVWGTF)
25122 v.AddArg3(y, x, cond)
25123 return true
25124 }
25125
25126
25127
25128 for {
25129 t := v.Type
25130 x := v_0
25131 y := v_1
25132 if v_2.Op != OpAMD64SETGEF {
25133 break
25134 }
25135 cond := v_2.Args[0]
25136 if !(is16BitInt(t)) {
25137 break
25138 }
25139 v.reset(OpAMD64CMOVWGEF)
25140 v.AddArg3(y, x, cond)
25141 return true
25142 }
25143
25144
25145
25146 for {
25147 t := v.Type
25148 x := v_0
25149 y := v_1
25150 check := v_2
25151 if !(!check.Type.IsFlags() && check.Type.Size() == 1) {
25152 break
25153 }
25154 v.reset(OpCondSelect)
25155 v.Type = t
25156 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64)
25157 v0.AddArg(check)
25158 v.AddArg3(x, y, v0)
25159 return true
25160 }
25161
25162
25163
25164 for {
25165 t := v.Type
25166 x := v_0
25167 y := v_1
25168 check := v_2
25169 if !(!check.Type.IsFlags() && check.Type.Size() == 2) {
25170 break
25171 }
25172 v.reset(OpCondSelect)
25173 v.Type = t
25174 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64)
25175 v0.AddArg(check)
25176 v.AddArg3(x, y, v0)
25177 return true
25178 }
25179
25180
25181
25182 for {
25183 t := v.Type
25184 x := v_0
25185 y := v_1
25186 check := v_2
25187 if !(!check.Type.IsFlags() && check.Type.Size() == 4) {
25188 break
25189 }
25190 v.reset(OpCondSelect)
25191 v.Type = t
25192 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
25193 v0.AddArg(check)
25194 v.AddArg3(x, y, v0)
25195 return true
25196 }
25197
25198
25199
25200 for {
25201 t := v.Type
25202 x := v_0
25203 y := v_1
25204 check := v_2
25205 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) {
25206 break
25207 }
25208 v.reset(OpAMD64CMOVQNE)
25209 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
25210 v0.AuxInt = int32ToAuxInt(0)
25211 v0.AddArg(check)
25212 v.AddArg3(y, x, v0)
25213 return true
25214 }
25215
25216
25217
25218 for {
25219 t := v.Type
25220 x := v_0
25221 y := v_1
25222 check := v_2
25223 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) {
25224 break
25225 }
25226 v.reset(OpAMD64CMOVLNE)
25227 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
25228 v0.AuxInt = int32ToAuxInt(0)
25229 v0.AddArg(check)
25230 v.AddArg3(y, x, v0)
25231 return true
25232 }
25233
25234
25235
25236 for {
25237 t := v.Type
25238 x := v_0
25239 y := v_1
25240 check := v_2
25241 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) {
25242 break
25243 }
25244 v.reset(OpAMD64CMOVWNE)
25245 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
25246 v0.AuxInt = int32ToAuxInt(0)
25247 v0.AddArg(check)
25248 v.AddArg3(y, x, v0)
25249 return true
25250 }
25251 return false
25252 }
25253 func rewriteValueAMD64_OpConst16(v *Value) bool {
25254
25255
25256 for {
25257 c := auxIntToInt16(v.AuxInt)
25258 v.reset(OpAMD64MOVLconst)
25259 v.AuxInt = int32ToAuxInt(int32(c))
25260 return true
25261 }
25262 }
25263 func rewriteValueAMD64_OpConst8(v *Value) bool {
25264
25265
25266 for {
25267 c := auxIntToInt8(v.AuxInt)
25268 v.reset(OpAMD64MOVLconst)
25269 v.AuxInt = int32ToAuxInt(int32(c))
25270 return true
25271 }
25272 }
25273 func rewriteValueAMD64_OpConstBool(v *Value) bool {
25274
25275
25276 for {
25277 c := auxIntToBool(v.AuxInt)
25278 v.reset(OpAMD64MOVLconst)
25279 v.AuxInt = int32ToAuxInt(b2i32(c))
25280 return true
25281 }
25282 }
25283 func rewriteValueAMD64_OpConstNil(v *Value) bool {
25284
25285
25286 for {
25287 v.reset(OpAMD64MOVQconst)
25288 v.AuxInt = int64ToAuxInt(0)
25289 return true
25290 }
25291 }
25292 func rewriteValueAMD64_OpCtz16(v *Value) bool {
25293 v_0 := v.Args[0]
25294 b := v.Block
25295 typ := &b.Func.Config.Types
25296
25297
25298 for {
25299 x := v_0
25300 v.reset(OpAMD64BSFL)
25301 v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32)
25302 v0.AuxInt = int32ToAuxInt(1 << 16)
25303 v0.AddArg(x)
25304 v.AddArg(v0)
25305 return true
25306 }
25307 }
25308 func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool {
25309 v_0 := v.Args[0]
25310
25311
25312
25313 for {
25314 x := v_0
25315 if !(buildcfg.GOAMD64 >= 3) {
25316 break
25317 }
25318 v.reset(OpAMD64TZCNTL)
25319 v.AddArg(x)
25320 return true
25321 }
25322
25323
25324
25325 for {
25326 x := v_0
25327 if !(buildcfg.GOAMD64 < 3) {
25328 break
25329 }
25330 v.reset(OpAMD64BSFL)
25331 v.AddArg(x)
25332 return true
25333 }
25334 return false
25335 }
25336 func rewriteValueAMD64_OpCtz32(v *Value) bool {
25337 v_0 := v.Args[0]
25338 b := v.Block
25339 typ := &b.Func.Config.Types
25340
25341
25342
25343 for {
25344 x := v_0
25345 if !(buildcfg.GOAMD64 >= 3) {
25346 break
25347 }
25348 v.reset(OpAMD64TZCNTL)
25349 v.AddArg(x)
25350 return true
25351 }
25352
25353
25354
25355 for {
25356 x := v_0
25357 if !(buildcfg.GOAMD64 < 3) {
25358 break
25359 }
25360 v.reset(OpSelect0)
25361 v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
25362 v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64)
25363 v1.AuxInt = int8ToAuxInt(32)
25364 v1.AddArg(x)
25365 v0.AddArg(v1)
25366 v.AddArg(v0)
25367 return true
25368 }
25369 return false
25370 }
25371 func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool {
25372 v_0 := v.Args[0]
25373
25374
25375
25376 for {
25377 x := v_0
25378 if !(buildcfg.GOAMD64 >= 3) {
25379 break
25380 }
25381 v.reset(OpAMD64TZCNTL)
25382 v.AddArg(x)
25383 return true
25384 }
25385
25386
25387
25388 for {
25389 x := v_0
25390 if !(buildcfg.GOAMD64 < 3) {
25391 break
25392 }
25393 v.reset(OpAMD64BSFL)
25394 v.AddArg(x)
25395 return true
25396 }
25397 return false
25398 }
25399 func rewriteValueAMD64_OpCtz64(v *Value) bool {
25400 v_0 := v.Args[0]
25401 b := v.Block
25402 typ := &b.Func.Config.Types
25403
25404
25405
25406 for {
25407 x := v_0
25408 if !(buildcfg.GOAMD64 >= 3) {
25409 break
25410 }
25411 v.reset(OpAMD64TZCNTQ)
25412 v.AddArg(x)
25413 return true
25414 }
25415
25416
25417
25418 for {
25419 t := v.Type
25420 x := v_0
25421 if !(buildcfg.GOAMD64 < 3) {
25422 break
25423 }
25424 v.reset(OpAMD64CMOVQEQ)
25425 v0 := b.NewValue0(v.Pos, OpSelect0, t)
25426 v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
25427 v1.AddArg(x)
25428 v0.AddArg(v1)
25429 v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
25430 v2.AuxInt = int64ToAuxInt(64)
25431 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
25432 v3.AddArg(v1)
25433 v.AddArg3(v0, v2, v3)
25434 return true
25435 }
25436 return false
25437 }
25438 func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool {
25439 v_0 := v.Args[0]
25440 b := v.Block
25441 typ := &b.Func.Config.Types
25442
25443
25444
25445 for {
25446 x := v_0
25447 if !(buildcfg.GOAMD64 >= 3) {
25448 break
25449 }
25450 v.reset(OpAMD64TZCNTQ)
25451 v.AddArg(x)
25452 return true
25453 }
25454
25455
25456
25457 for {
25458 x := v_0
25459 if !(buildcfg.GOAMD64 < 3) {
25460 break
25461 }
25462 v.reset(OpSelect0)
25463 v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
25464 v0.AddArg(x)
25465 v.AddArg(v0)
25466 return true
25467 }
25468 return false
25469 }
25470 func rewriteValueAMD64_OpCtz8(v *Value) bool {
25471 v_0 := v.Args[0]
25472 b := v.Block
25473 typ := &b.Func.Config.Types
25474
25475
25476 for {
25477 x := v_0
25478 v.reset(OpAMD64BSFL)
25479 v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32)
25480 v0.AuxInt = int32ToAuxInt(1 << 8)
25481 v0.AddArg(x)
25482 v.AddArg(v0)
25483 return true
25484 }
25485 }
25486 func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool {
25487 v_0 := v.Args[0]
25488
25489
25490
25491 for {
25492 x := v_0
25493 if !(buildcfg.GOAMD64 >= 3) {
25494 break
25495 }
25496 v.reset(OpAMD64TZCNTL)
25497 v.AddArg(x)
25498 return true
25499 }
25500
25501
25502
25503 for {
25504 x := v_0
25505 if !(buildcfg.GOAMD64 < 3) {
25506 break
25507 }
25508 v.reset(OpAMD64BSFL)
25509 v.AddArg(x)
25510 return true
25511 }
25512 return false
25513 }
25514 func rewriteValueAMD64_OpDiv16(v *Value) bool {
25515 v_1 := v.Args[1]
25516 v_0 := v.Args[0]
25517 b := v.Block
25518 typ := &b.Func.Config.Types
25519
25520
25521 for {
25522 a := auxIntToBool(v.AuxInt)
25523 x := v_0
25524 y := v_1
25525 v.reset(OpSelect0)
25526 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
25527 v0.AuxInt = boolToAuxInt(a)
25528 v0.AddArg2(x, y)
25529 v.AddArg(v0)
25530 return true
25531 }
25532 }
25533 func rewriteValueAMD64_OpDiv16u(v *Value) bool {
25534 v_1 := v.Args[1]
25535 v_0 := v.Args[0]
25536 b := v.Block
25537 typ := &b.Func.Config.Types
25538
25539
25540 for {
25541 x := v_0
25542 y := v_1
25543 v.reset(OpSelect0)
25544 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
25545 v0.AddArg2(x, y)
25546 v.AddArg(v0)
25547 return true
25548 }
25549 }
25550 func rewriteValueAMD64_OpDiv32(v *Value) bool {
25551 v_1 := v.Args[1]
25552 v_0 := v.Args[0]
25553 b := v.Block
25554 typ := &b.Func.Config.Types
25555
25556
25557 for {
25558 a := auxIntToBool(v.AuxInt)
25559 x := v_0
25560 y := v_1
25561 v.reset(OpSelect0)
25562 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
25563 v0.AuxInt = boolToAuxInt(a)
25564 v0.AddArg2(x, y)
25565 v.AddArg(v0)
25566 return true
25567 }
25568 }
25569 func rewriteValueAMD64_OpDiv32u(v *Value) bool {
25570 v_1 := v.Args[1]
25571 v_0 := v.Args[0]
25572 b := v.Block
25573 typ := &b.Func.Config.Types
25574
25575
25576 for {
25577 x := v_0
25578 y := v_1
25579 v.reset(OpSelect0)
25580 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
25581 v0.AddArg2(x, y)
25582 v.AddArg(v0)
25583 return true
25584 }
25585 }
25586 func rewriteValueAMD64_OpDiv64(v *Value) bool {
25587 v_1 := v.Args[1]
25588 v_0 := v.Args[0]
25589 b := v.Block
25590 typ := &b.Func.Config.Types
25591
25592
25593 for {
25594 a := auxIntToBool(v.AuxInt)
25595 x := v_0
25596 y := v_1
25597 v.reset(OpSelect0)
25598 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
25599 v0.AuxInt = boolToAuxInt(a)
25600 v0.AddArg2(x, y)
25601 v.AddArg(v0)
25602 return true
25603 }
25604 }
25605 func rewriteValueAMD64_OpDiv64u(v *Value) bool {
25606 v_1 := v.Args[1]
25607 v_0 := v.Args[0]
25608 b := v.Block
25609 typ := &b.Func.Config.Types
25610
25611
25612 for {
25613 x := v_0
25614 y := v_1
25615 v.reset(OpSelect0)
25616 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
25617 v0.AddArg2(x, y)
25618 v.AddArg(v0)
25619 return true
25620 }
25621 }
25622 func rewriteValueAMD64_OpDiv8(v *Value) bool {
25623 v_1 := v.Args[1]
25624 v_0 := v.Args[0]
25625 b := v.Block
25626 typ := &b.Func.Config.Types
25627
25628
25629 for {
25630 x := v_0
25631 y := v_1
25632 v.reset(OpSelect0)
25633 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
25634 v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
25635 v1.AddArg(x)
25636 v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
25637 v2.AddArg(y)
25638 v0.AddArg2(v1, v2)
25639 v.AddArg(v0)
25640 return true
25641 }
25642 }
25643 func rewriteValueAMD64_OpDiv8u(v *Value) bool {
25644 v_1 := v.Args[1]
25645 v_0 := v.Args[0]
25646 b := v.Block
25647 typ := &b.Func.Config.Types
25648
25649
25650 for {
25651 x := v_0
25652 y := v_1
25653 v.reset(OpSelect0)
25654 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
25655 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
25656 v1.AddArg(x)
25657 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
25658 v2.AddArg(y)
25659 v0.AddArg2(v1, v2)
25660 v.AddArg(v0)
25661 return true
25662 }
25663 }
25664 func rewriteValueAMD64_OpEq16(v *Value) bool {
25665 v_1 := v.Args[1]
25666 v_0 := v.Args[0]
25667 b := v.Block
25668
25669
25670 for {
25671 x := v_0
25672 y := v_1
25673 v.reset(OpAMD64SETEQ)
25674 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
25675 v0.AddArg2(x, y)
25676 v.AddArg(v0)
25677 return true
25678 }
25679 }
25680 func rewriteValueAMD64_OpEq32(v *Value) bool {
25681 v_1 := v.Args[1]
25682 v_0 := v.Args[0]
25683 b := v.Block
25684
25685
25686 for {
25687 x := v_0
25688 y := v_1
25689 v.reset(OpAMD64SETEQ)
25690 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
25691 v0.AddArg2(x, y)
25692 v.AddArg(v0)
25693 return true
25694 }
25695 }
25696 func rewriteValueAMD64_OpEq32F(v *Value) bool {
25697 v_1 := v.Args[1]
25698 v_0 := v.Args[0]
25699 b := v.Block
25700
25701
25702 for {
25703 x := v_0
25704 y := v_1
25705 v.reset(OpAMD64SETEQF)
25706 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
25707 v0.AddArg2(x, y)
25708 v.AddArg(v0)
25709 return true
25710 }
25711 }
25712 func rewriteValueAMD64_OpEq64(v *Value) bool {
25713 v_1 := v.Args[1]
25714 v_0 := v.Args[0]
25715 b := v.Block
25716
25717
25718 for {
25719 x := v_0
25720 y := v_1
25721 v.reset(OpAMD64SETEQ)
25722 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
25723 v0.AddArg2(x, y)
25724 v.AddArg(v0)
25725 return true
25726 }
25727 }
25728 func rewriteValueAMD64_OpEq64F(v *Value) bool {
25729 v_1 := v.Args[1]
25730 v_0 := v.Args[0]
25731 b := v.Block
25732
25733
25734 for {
25735 x := v_0
25736 y := v_1
25737 v.reset(OpAMD64SETEQF)
25738 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
25739 v0.AddArg2(x, y)
25740 v.AddArg(v0)
25741 return true
25742 }
25743 }
25744 func rewriteValueAMD64_OpEq8(v *Value) bool {
25745 v_1 := v.Args[1]
25746 v_0 := v.Args[0]
25747 b := v.Block
25748
25749
25750 for {
25751 x := v_0
25752 y := v_1
25753 v.reset(OpAMD64SETEQ)
25754 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
25755 v0.AddArg2(x, y)
25756 v.AddArg(v0)
25757 return true
25758 }
25759 }
25760 func rewriteValueAMD64_OpEqB(v *Value) bool {
25761 v_1 := v.Args[1]
25762 v_0 := v.Args[0]
25763 b := v.Block
25764
25765
25766 for {
25767 x := v_0
25768 y := v_1
25769 v.reset(OpAMD64SETEQ)
25770 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
25771 v0.AddArg2(x, y)
25772 v.AddArg(v0)
25773 return true
25774 }
25775 }
25776 func rewriteValueAMD64_OpEqPtr(v *Value) bool {
25777 v_1 := v.Args[1]
25778 v_0 := v.Args[0]
25779 b := v.Block
25780
25781
25782 for {
25783 x := v_0
25784 y := v_1
25785 v.reset(OpAMD64SETEQ)
25786 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
25787 v0.AddArg2(x, y)
25788 v.AddArg(v0)
25789 return true
25790 }
25791 }
25792 func rewriteValueAMD64_OpFMA(v *Value) bool {
25793 v_2 := v.Args[2]
25794 v_1 := v.Args[1]
25795 v_0 := v.Args[0]
25796
25797
25798 for {
25799 x := v_0
25800 y := v_1
25801 z := v_2
25802 v.reset(OpAMD64VFMADD231SD)
25803 v.AddArg3(z, x, y)
25804 return true
25805 }
25806 }
25807 func rewriteValueAMD64_OpFloor(v *Value) bool {
25808 v_0 := v.Args[0]
25809
25810
25811 for {
25812 x := v_0
25813 v.reset(OpAMD64ROUNDSD)
25814 v.AuxInt = int8ToAuxInt(1)
25815 v.AddArg(x)
25816 return true
25817 }
25818 }
25819 func rewriteValueAMD64_OpGetG(v *Value) bool {
25820 v_0 := v.Args[0]
25821
25822
25823
25824 for {
25825 mem := v_0
25826 if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) {
25827 break
25828 }
25829 v.reset(OpAMD64LoweredGetG)
25830 v.AddArg(mem)
25831 return true
25832 }
25833 return false
25834 }
25835 func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool {
25836 b := v.Block
25837 typ := &b.Func.Config.Types
25838
25839
25840 for {
25841 s := auxToSym(v.Aux)
25842 v.reset(OpAMD64SETNE)
25843 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
25844 v0.AuxInt = int32ToAuxInt(0)
25845 v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64)
25846 v1.Aux = symToAux(s)
25847 v0.AddArg(v1)
25848 v.AddArg(v0)
25849 return true
25850 }
25851 }
25852 func rewriteValueAMD64_OpIsInBounds(v *Value) bool {
25853 v_1 := v.Args[1]
25854 v_0 := v.Args[0]
25855 b := v.Block
25856
25857
25858 for {
25859 idx := v_0
25860 len := v_1
25861 v.reset(OpAMD64SETB)
25862 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
25863 v0.AddArg2(idx, len)
25864 v.AddArg(v0)
25865 return true
25866 }
25867 }
25868 func rewriteValueAMD64_OpIsNonNil(v *Value) bool {
25869 v_0 := v.Args[0]
25870 b := v.Block
25871
25872
25873 for {
25874 p := v_0
25875 v.reset(OpAMD64SETNE)
25876 v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
25877 v0.AddArg2(p, p)
25878 v.AddArg(v0)
25879 return true
25880 }
25881 }
25882 func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool {
25883 v_1 := v.Args[1]
25884 v_0 := v.Args[0]
25885 b := v.Block
25886
25887
25888 for {
25889 idx := v_0
25890 len := v_1
25891 v.reset(OpAMD64SETBE)
25892 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
25893 v0.AddArg2(idx, len)
25894 v.AddArg(v0)
25895 return true
25896 }
25897 }
25898 func rewriteValueAMD64_OpLeq16(v *Value) bool {
25899 v_1 := v.Args[1]
25900 v_0 := v.Args[0]
25901 b := v.Block
25902
25903
25904 for {
25905 x := v_0
25906 y := v_1
25907 v.reset(OpAMD64SETLE)
25908 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
25909 v0.AddArg2(x, y)
25910 v.AddArg(v0)
25911 return true
25912 }
25913 }
25914 func rewriteValueAMD64_OpLeq16U(v *Value) bool {
25915 v_1 := v.Args[1]
25916 v_0 := v.Args[0]
25917 b := v.Block
25918
25919
25920 for {
25921 x := v_0
25922 y := v_1
25923 v.reset(OpAMD64SETBE)
25924 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
25925 v0.AddArg2(x, y)
25926 v.AddArg(v0)
25927 return true
25928 }
25929 }
25930 func rewriteValueAMD64_OpLeq32(v *Value) bool {
25931 v_1 := v.Args[1]
25932 v_0 := v.Args[0]
25933 b := v.Block
25934
25935
25936 for {
25937 x := v_0
25938 y := v_1
25939 v.reset(OpAMD64SETLE)
25940 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
25941 v0.AddArg2(x, y)
25942 v.AddArg(v0)
25943 return true
25944 }
25945 }
25946 func rewriteValueAMD64_OpLeq32F(v *Value) bool {
25947 v_1 := v.Args[1]
25948 v_0 := v.Args[0]
25949 b := v.Block
25950
25951
25952 for {
25953 x := v_0
25954 y := v_1
25955 v.reset(OpAMD64SETGEF)
25956 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
25957 v0.AddArg2(y, x)
25958 v.AddArg(v0)
25959 return true
25960 }
25961 }
25962 func rewriteValueAMD64_OpLeq32U(v *Value) bool {
25963 v_1 := v.Args[1]
25964 v_0 := v.Args[0]
25965 b := v.Block
25966
25967
25968 for {
25969 x := v_0
25970 y := v_1
25971 v.reset(OpAMD64SETBE)
25972 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
25973 v0.AddArg2(x, y)
25974 v.AddArg(v0)
25975 return true
25976 }
25977 }
25978 func rewriteValueAMD64_OpLeq64(v *Value) bool {
25979 v_1 := v.Args[1]
25980 v_0 := v.Args[0]
25981 b := v.Block
25982
25983
25984 for {
25985 x := v_0
25986 y := v_1
25987 v.reset(OpAMD64SETLE)
25988 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
25989 v0.AddArg2(x, y)
25990 v.AddArg(v0)
25991 return true
25992 }
25993 }
25994 func rewriteValueAMD64_OpLeq64F(v *Value) bool {
25995 v_1 := v.Args[1]
25996 v_0 := v.Args[0]
25997 b := v.Block
25998
25999
26000 for {
26001 x := v_0
26002 y := v_1
26003 v.reset(OpAMD64SETGEF)
26004 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
26005 v0.AddArg2(y, x)
26006 v.AddArg(v0)
26007 return true
26008 }
26009 }
26010 func rewriteValueAMD64_OpLeq64U(v *Value) bool {
26011 v_1 := v.Args[1]
26012 v_0 := v.Args[0]
26013 b := v.Block
26014
26015
26016 for {
26017 x := v_0
26018 y := v_1
26019 v.reset(OpAMD64SETBE)
26020 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
26021 v0.AddArg2(x, y)
26022 v.AddArg(v0)
26023 return true
26024 }
26025 }
26026 func rewriteValueAMD64_OpLeq8(v *Value) bool {
26027 v_1 := v.Args[1]
26028 v_0 := v.Args[0]
26029 b := v.Block
26030
26031
26032 for {
26033 x := v_0
26034 y := v_1
26035 v.reset(OpAMD64SETLE)
26036 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
26037 v0.AddArg2(x, y)
26038 v.AddArg(v0)
26039 return true
26040 }
26041 }
26042 func rewriteValueAMD64_OpLeq8U(v *Value) bool {
26043 v_1 := v.Args[1]
26044 v_0 := v.Args[0]
26045 b := v.Block
26046
26047
26048 for {
26049 x := v_0
26050 y := v_1
26051 v.reset(OpAMD64SETBE)
26052 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
26053 v0.AddArg2(x, y)
26054 v.AddArg(v0)
26055 return true
26056 }
26057 }
26058 func rewriteValueAMD64_OpLess16(v *Value) bool {
26059 v_1 := v.Args[1]
26060 v_0 := v.Args[0]
26061 b := v.Block
26062
26063
26064 for {
26065 x := v_0
26066 y := v_1
26067 v.reset(OpAMD64SETL)
26068 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
26069 v0.AddArg2(x, y)
26070 v.AddArg(v0)
26071 return true
26072 }
26073 }
26074 func rewriteValueAMD64_OpLess16U(v *Value) bool {
26075 v_1 := v.Args[1]
26076 v_0 := v.Args[0]
26077 b := v.Block
26078
26079
26080 for {
26081 x := v_0
26082 y := v_1
26083 v.reset(OpAMD64SETB)
26084 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
26085 v0.AddArg2(x, y)
26086 v.AddArg(v0)
26087 return true
26088 }
26089 }
26090 func rewriteValueAMD64_OpLess32(v *Value) bool {
26091 v_1 := v.Args[1]
26092 v_0 := v.Args[0]
26093 b := v.Block
26094
26095
26096 for {
26097 x := v_0
26098 y := v_1
26099 v.reset(OpAMD64SETL)
26100 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
26101 v0.AddArg2(x, y)
26102 v.AddArg(v0)
26103 return true
26104 }
26105 }
26106 func rewriteValueAMD64_OpLess32F(v *Value) bool {
26107 v_1 := v.Args[1]
26108 v_0 := v.Args[0]
26109 b := v.Block
26110
26111
26112 for {
26113 x := v_0
26114 y := v_1
26115 v.reset(OpAMD64SETGF)
26116 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
26117 v0.AddArg2(y, x)
26118 v.AddArg(v0)
26119 return true
26120 }
26121 }
26122 func rewriteValueAMD64_OpLess32U(v *Value) bool {
26123 v_1 := v.Args[1]
26124 v_0 := v.Args[0]
26125 b := v.Block
26126
26127
26128 for {
26129 x := v_0
26130 y := v_1
26131 v.reset(OpAMD64SETB)
26132 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
26133 v0.AddArg2(x, y)
26134 v.AddArg(v0)
26135 return true
26136 }
26137 }
26138 func rewriteValueAMD64_OpLess64(v *Value) bool {
26139 v_1 := v.Args[1]
26140 v_0 := v.Args[0]
26141 b := v.Block
26142
26143
26144 for {
26145 x := v_0
26146 y := v_1
26147 v.reset(OpAMD64SETL)
26148 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
26149 v0.AddArg2(x, y)
26150 v.AddArg(v0)
26151 return true
26152 }
26153 }
26154 func rewriteValueAMD64_OpLess64F(v *Value) bool {
26155 v_1 := v.Args[1]
26156 v_0 := v.Args[0]
26157 b := v.Block
26158
26159
26160 for {
26161 x := v_0
26162 y := v_1
26163 v.reset(OpAMD64SETGF)
26164 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
26165 v0.AddArg2(y, x)
26166 v.AddArg(v0)
26167 return true
26168 }
26169 }
26170 func rewriteValueAMD64_OpLess64U(v *Value) bool {
26171 v_1 := v.Args[1]
26172 v_0 := v.Args[0]
26173 b := v.Block
26174
26175
26176 for {
26177 x := v_0
26178 y := v_1
26179 v.reset(OpAMD64SETB)
26180 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
26181 v0.AddArg2(x, y)
26182 v.AddArg(v0)
26183 return true
26184 }
26185 }
26186 func rewriteValueAMD64_OpLess8(v *Value) bool {
26187 v_1 := v.Args[1]
26188 v_0 := v.Args[0]
26189 b := v.Block
26190
26191
26192 for {
26193 x := v_0
26194 y := v_1
26195 v.reset(OpAMD64SETL)
26196 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
26197 v0.AddArg2(x, y)
26198 v.AddArg(v0)
26199 return true
26200 }
26201 }
26202 func rewriteValueAMD64_OpLess8U(v *Value) bool {
26203 v_1 := v.Args[1]
26204 v_0 := v.Args[0]
26205 b := v.Block
26206
26207
26208 for {
26209 x := v_0
26210 y := v_1
26211 v.reset(OpAMD64SETB)
26212 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
26213 v0.AddArg2(x, y)
26214 v.AddArg(v0)
26215 return true
26216 }
26217 }
26218 func rewriteValueAMD64_OpLoad(v *Value) bool {
26219 v_1 := v.Args[1]
26220 v_0 := v.Args[0]
26221
26222
26223
26224 for {
26225 t := v.Type
26226 ptr := v_0
26227 mem := v_1
26228 if !(is64BitInt(t) || isPtr(t)) {
26229 break
26230 }
26231 v.reset(OpAMD64MOVQload)
26232 v.AddArg2(ptr, mem)
26233 return true
26234 }
26235
26236
26237
26238 for {
26239 t := v.Type
26240 ptr := v_0
26241 mem := v_1
26242 if !(is32BitInt(t)) {
26243 break
26244 }
26245 v.reset(OpAMD64MOVLload)
26246 v.AddArg2(ptr, mem)
26247 return true
26248 }
26249
26250
26251
26252 for {
26253 t := v.Type
26254 ptr := v_0
26255 mem := v_1
26256 if !(is16BitInt(t)) {
26257 break
26258 }
26259 v.reset(OpAMD64MOVWload)
26260 v.AddArg2(ptr, mem)
26261 return true
26262 }
26263
26264
26265
26266 for {
26267 t := v.Type
26268 ptr := v_0
26269 mem := v_1
26270 if !(t.IsBoolean() || is8BitInt(t)) {
26271 break
26272 }
26273 v.reset(OpAMD64MOVBload)
26274 v.AddArg2(ptr, mem)
26275 return true
26276 }
26277
26278
26279
26280 for {
26281 t := v.Type
26282 ptr := v_0
26283 mem := v_1
26284 if !(is32BitFloat(t)) {
26285 break
26286 }
26287 v.reset(OpAMD64MOVSSload)
26288 v.AddArg2(ptr, mem)
26289 return true
26290 }
26291
26292
26293
26294 for {
26295 t := v.Type
26296 ptr := v_0
26297 mem := v_1
26298 if !(is64BitFloat(t)) {
26299 break
26300 }
26301 v.reset(OpAMD64MOVSDload)
26302 v.AddArg2(ptr, mem)
26303 return true
26304 }
26305 return false
26306 }
26307 func rewriteValueAMD64_OpLocalAddr(v *Value) bool {
26308 v_1 := v.Args[1]
26309 v_0 := v.Args[0]
26310 b := v.Block
26311 typ := &b.Func.Config.Types
26312
26313
26314
26315 for {
26316 t := v.Type
26317 sym := auxToSym(v.Aux)
26318 base := v_0
26319 mem := v_1
26320 if !(t.Elem().HasPointers()) {
26321 break
26322 }
26323 v.reset(OpAMD64LEAQ)
26324 v.Aux = symToAux(sym)
26325 v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr)
26326 v0.AddArg2(base, mem)
26327 v.AddArg(v0)
26328 return true
26329 }
26330
26331
26332
26333 for {
26334 t := v.Type
26335 sym := auxToSym(v.Aux)
26336 base := v_0
26337 if !(!t.Elem().HasPointers()) {
26338 break
26339 }
26340 v.reset(OpAMD64LEAQ)
26341 v.Aux = symToAux(sym)
26342 v.AddArg(base)
26343 return true
26344 }
26345 return false
26346 }
26347 func rewriteValueAMD64_OpLsh16x16(v *Value) bool {
26348 v_1 := v.Args[1]
26349 v_0 := v.Args[0]
26350 b := v.Block
26351
26352
26353
26354 for {
26355 t := v.Type
26356 x := v_0
26357 y := v_1
26358 if !(!shiftIsBounded(v)) {
26359 break
26360 }
26361 v.reset(OpAMD64ANDL)
26362 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26363 v0.AddArg2(x, y)
26364 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26365 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
26366 v2.AuxInt = int16ToAuxInt(32)
26367 v2.AddArg(y)
26368 v1.AddArg(v2)
26369 v.AddArg2(v0, v1)
26370 return true
26371 }
26372
26373
26374
26375 for {
26376 x := v_0
26377 y := v_1
26378 if !(shiftIsBounded(v)) {
26379 break
26380 }
26381 v.reset(OpAMD64SHLL)
26382 v.AddArg2(x, y)
26383 return true
26384 }
26385 return false
26386 }
26387 func rewriteValueAMD64_OpLsh16x32(v *Value) bool {
26388 v_1 := v.Args[1]
26389 v_0 := v.Args[0]
26390 b := v.Block
26391
26392
26393
26394 for {
26395 t := v.Type
26396 x := v_0
26397 y := v_1
26398 if !(!shiftIsBounded(v)) {
26399 break
26400 }
26401 v.reset(OpAMD64ANDL)
26402 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26403 v0.AddArg2(x, y)
26404 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26405 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
26406 v2.AuxInt = int32ToAuxInt(32)
26407 v2.AddArg(y)
26408 v1.AddArg(v2)
26409 v.AddArg2(v0, v1)
26410 return true
26411 }
26412
26413
26414
26415 for {
26416 x := v_0
26417 y := v_1
26418 if !(shiftIsBounded(v)) {
26419 break
26420 }
26421 v.reset(OpAMD64SHLL)
26422 v.AddArg2(x, y)
26423 return true
26424 }
26425 return false
26426 }
26427 func rewriteValueAMD64_OpLsh16x64(v *Value) bool {
26428 v_1 := v.Args[1]
26429 v_0 := v.Args[0]
26430 b := v.Block
26431
26432
26433
26434 for {
26435 t := v.Type
26436 x := v_0
26437 y := v_1
26438 if !(!shiftIsBounded(v)) {
26439 break
26440 }
26441 v.reset(OpAMD64ANDL)
26442 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26443 v0.AddArg2(x, y)
26444 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26445 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
26446 v2.AuxInt = int32ToAuxInt(32)
26447 v2.AddArg(y)
26448 v1.AddArg(v2)
26449 v.AddArg2(v0, v1)
26450 return true
26451 }
26452
26453
26454
26455 for {
26456 x := v_0
26457 y := v_1
26458 if !(shiftIsBounded(v)) {
26459 break
26460 }
26461 v.reset(OpAMD64SHLL)
26462 v.AddArg2(x, y)
26463 return true
26464 }
26465 return false
26466 }
26467 func rewriteValueAMD64_OpLsh16x8(v *Value) bool {
26468 v_1 := v.Args[1]
26469 v_0 := v.Args[0]
26470 b := v.Block
26471
26472
26473
26474 for {
26475 t := v.Type
26476 x := v_0
26477 y := v_1
26478 if !(!shiftIsBounded(v)) {
26479 break
26480 }
26481 v.reset(OpAMD64ANDL)
26482 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26483 v0.AddArg2(x, y)
26484 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26485 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
26486 v2.AuxInt = int8ToAuxInt(32)
26487 v2.AddArg(y)
26488 v1.AddArg(v2)
26489 v.AddArg2(v0, v1)
26490 return true
26491 }
26492
26493
26494
26495 for {
26496 x := v_0
26497 y := v_1
26498 if !(shiftIsBounded(v)) {
26499 break
26500 }
26501 v.reset(OpAMD64SHLL)
26502 v.AddArg2(x, y)
26503 return true
26504 }
26505 return false
26506 }
26507 func rewriteValueAMD64_OpLsh32x16(v *Value) bool {
26508 v_1 := v.Args[1]
26509 v_0 := v.Args[0]
26510 b := v.Block
26511
26512
26513
26514 for {
26515 t := v.Type
26516 x := v_0
26517 y := v_1
26518 if !(!shiftIsBounded(v)) {
26519 break
26520 }
26521 v.reset(OpAMD64ANDL)
26522 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26523 v0.AddArg2(x, y)
26524 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26525 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
26526 v2.AuxInt = int16ToAuxInt(32)
26527 v2.AddArg(y)
26528 v1.AddArg(v2)
26529 v.AddArg2(v0, v1)
26530 return true
26531 }
26532
26533
26534
26535 for {
26536 x := v_0
26537 y := v_1
26538 if !(shiftIsBounded(v)) {
26539 break
26540 }
26541 v.reset(OpAMD64SHLL)
26542 v.AddArg2(x, y)
26543 return true
26544 }
26545 return false
26546 }
26547 func rewriteValueAMD64_OpLsh32x32(v *Value) bool {
26548 v_1 := v.Args[1]
26549 v_0 := v.Args[0]
26550 b := v.Block
26551
26552
26553
26554 for {
26555 t := v.Type
26556 x := v_0
26557 y := v_1
26558 if !(!shiftIsBounded(v)) {
26559 break
26560 }
26561 v.reset(OpAMD64ANDL)
26562 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26563 v0.AddArg2(x, y)
26564 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26565 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
26566 v2.AuxInt = int32ToAuxInt(32)
26567 v2.AddArg(y)
26568 v1.AddArg(v2)
26569 v.AddArg2(v0, v1)
26570 return true
26571 }
26572
26573
26574
26575 for {
26576 x := v_0
26577 y := v_1
26578 if !(shiftIsBounded(v)) {
26579 break
26580 }
26581 v.reset(OpAMD64SHLL)
26582 v.AddArg2(x, y)
26583 return true
26584 }
26585 return false
26586 }
26587 func rewriteValueAMD64_OpLsh32x64(v *Value) bool {
26588 v_1 := v.Args[1]
26589 v_0 := v.Args[0]
26590 b := v.Block
26591
26592
26593
26594 for {
26595 t := v.Type
26596 x := v_0
26597 y := v_1
26598 if !(!shiftIsBounded(v)) {
26599 break
26600 }
26601 v.reset(OpAMD64ANDL)
26602 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26603 v0.AddArg2(x, y)
26604 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26605 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
26606 v2.AuxInt = int32ToAuxInt(32)
26607 v2.AddArg(y)
26608 v1.AddArg(v2)
26609 v.AddArg2(v0, v1)
26610 return true
26611 }
26612
26613
26614
26615 for {
26616 x := v_0
26617 y := v_1
26618 if !(shiftIsBounded(v)) {
26619 break
26620 }
26621 v.reset(OpAMD64SHLL)
26622 v.AddArg2(x, y)
26623 return true
26624 }
26625 return false
26626 }
26627 func rewriteValueAMD64_OpLsh32x8(v *Value) bool {
26628 v_1 := v.Args[1]
26629 v_0 := v.Args[0]
26630 b := v.Block
26631
26632
26633
26634 for {
26635 t := v.Type
26636 x := v_0
26637 y := v_1
26638 if !(!shiftIsBounded(v)) {
26639 break
26640 }
26641 v.reset(OpAMD64ANDL)
26642 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26643 v0.AddArg2(x, y)
26644 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26645 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
26646 v2.AuxInt = int8ToAuxInt(32)
26647 v2.AddArg(y)
26648 v1.AddArg(v2)
26649 v.AddArg2(v0, v1)
26650 return true
26651 }
26652
26653
26654
26655 for {
26656 x := v_0
26657 y := v_1
26658 if !(shiftIsBounded(v)) {
26659 break
26660 }
26661 v.reset(OpAMD64SHLL)
26662 v.AddArg2(x, y)
26663 return true
26664 }
26665 return false
26666 }
26667 func rewriteValueAMD64_OpLsh64x16(v *Value) bool {
26668 v_1 := v.Args[1]
26669 v_0 := v.Args[0]
26670 b := v.Block
26671
26672
26673
26674 for {
26675 t := v.Type
26676 x := v_0
26677 y := v_1
26678 if !(!shiftIsBounded(v)) {
26679 break
26680 }
26681 v.reset(OpAMD64ANDQ)
26682 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
26683 v0.AddArg2(x, y)
26684 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
26685 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
26686 v2.AuxInt = int16ToAuxInt(64)
26687 v2.AddArg(y)
26688 v1.AddArg(v2)
26689 v.AddArg2(v0, v1)
26690 return true
26691 }
26692
26693
26694
26695 for {
26696 x := v_0
26697 y := v_1
26698 if !(shiftIsBounded(v)) {
26699 break
26700 }
26701 v.reset(OpAMD64SHLQ)
26702 v.AddArg2(x, y)
26703 return true
26704 }
26705 return false
26706 }
26707 func rewriteValueAMD64_OpLsh64x32(v *Value) bool {
26708 v_1 := v.Args[1]
26709 v_0 := v.Args[0]
26710 b := v.Block
26711
26712
26713
26714 for {
26715 t := v.Type
26716 x := v_0
26717 y := v_1
26718 if !(!shiftIsBounded(v)) {
26719 break
26720 }
26721 v.reset(OpAMD64ANDQ)
26722 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
26723 v0.AddArg2(x, y)
26724 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
26725 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
26726 v2.AuxInt = int32ToAuxInt(64)
26727 v2.AddArg(y)
26728 v1.AddArg(v2)
26729 v.AddArg2(v0, v1)
26730 return true
26731 }
26732
26733
26734
26735 for {
26736 x := v_0
26737 y := v_1
26738 if !(shiftIsBounded(v)) {
26739 break
26740 }
26741 v.reset(OpAMD64SHLQ)
26742 v.AddArg2(x, y)
26743 return true
26744 }
26745 return false
26746 }
26747 func rewriteValueAMD64_OpLsh64x64(v *Value) bool {
26748 v_1 := v.Args[1]
26749 v_0 := v.Args[0]
26750 b := v.Block
26751
26752
26753
26754 for {
26755 t := v.Type
26756 x := v_0
26757 y := v_1
26758 if !(!shiftIsBounded(v)) {
26759 break
26760 }
26761 v.reset(OpAMD64ANDQ)
26762 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
26763 v0.AddArg2(x, y)
26764 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
26765 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
26766 v2.AuxInt = int32ToAuxInt(64)
26767 v2.AddArg(y)
26768 v1.AddArg(v2)
26769 v.AddArg2(v0, v1)
26770 return true
26771 }
26772
26773
26774
26775 for {
26776 x := v_0
26777 y := v_1
26778 if !(shiftIsBounded(v)) {
26779 break
26780 }
26781 v.reset(OpAMD64SHLQ)
26782 v.AddArg2(x, y)
26783 return true
26784 }
26785 return false
26786 }
26787 func rewriteValueAMD64_OpLsh64x8(v *Value) bool {
26788 v_1 := v.Args[1]
26789 v_0 := v.Args[0]
26790 b := v.Block
26791
26792
26793
26794 for {
26795 t := v.Type
26796 x := v_0
26797 y := v_1
26798 if !(!shiftIsBounded(v)) {
26799 break
26800 }
26801 v.reset(OpAMD64ANDQ)
26802 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
26803 v0.AddArg2(x, y)
26804 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
26805 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
26806 v2.AuxInt = int8ToAuxInt(64)
26807 v2.AddArg(y)
26808 v1.AddArg(v2)
26809 v.AddArg2(v0, v1)
26810 return true
26811 }
26812
26813
26814
26815 for {
26816 x := v_0
26817 y := v_1
26818 if !(shiftIsBounded(v)) {
26819 break
26820 }
26821 v.reset(OpAMD64SHLQ)
26822 v.AddArg2(x, y)
26823 return true
26824 }
26825 return false
26826 }
26827 func rewriteValueAMD64_OpLsh8x16(v *Value) bool {
26828 v_1 := v.Args[1]
26829 v_0 := v.Args[0]
26830 b := v.Block
26831
26832
26833
26834 for {
26835 t := v.Type
26836 x := v_0
26837 y := v_1
26838 if !(!shiftIsBounded(v)) {
26839 break
26840 }
26841 v.reset(OpAMD64ANDL)
26842 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26843 v0.AddArg2(x, y)
26844 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26845 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
26846 v2.AuxInt = int16ToAuxInt(32)
26847 v2.AddArg(y)
26848 v1.AddArg(v2)
26849 v.AddArg2(v0, v1)
26850 return true
26851 }
26852
26853
26854
26855 for {
26856 x := v_0
26857 y := v_1
26858 if !(shiftIsBounded(v)) {
26859 break
26860 }
26861 v.reset(OpAMD64SHLL)
26862 v.AddArg2(x, y)
26863 return true
26864 }
26865 return false
26866 }
26867 func rewriteValueAMD64_OpLsh8x32(v *Value) bool {
26868 v_1 := v.Args[1]
26869 v_0 := v.Args[0]
26870 b := v.Block
26871
26872
26873
26874 for {
26875 t := v.Type
26876 x := v_0
26877 y := v_1
26878 if !(!shiftIsBounded(v)) {
26879 break
26880 }
26881 v.reset(OpAMD64ANDL)
26882 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26883 v0.AddArg2(x, y)
26884 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26885 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
26886 v2.AuxInt = int32ToAuxInt(32)
26887 v2.AddArg(y)
26888 v1.AddArg(v2)
26889 v.AddArg2(v0, v1)
26890 return true
26891 }
26892
26893
26894
26895 for {
26896 x := v_0
26897 y := v_1
26898 if !(shiftIsBounded(v)) {
26899 break
26900 }
26901 v.reset(OpAMD64SHLL)
26902 v.AddArg2(x, y)
26903 return true
26904 }
26905 return false
26906 }
26907 func rewriteValueAMD64_OpLsh8x64(v *Value) bool {
26908 v_1 := v.Args[1]
26909 v_0 := v.Args[0]
26910 b := v.Block
26911
26912
26913
26914 for {
26915 t := v.Type
26916 x := v_0
26917 y := v_1
26918 if !(!shiftIsBounded(v)) {
26919 break
26920 }
26921 v.reset(OpAMD64ANDL)
26922 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26923 v0.AddArg2(x, y)
26924 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26925 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
26926 v2.AuxInt = int32ToAuxInt(32)
26927 v2.AddArg(y)
26928 v1.AddArg(v2)
26929 v.AddArg2(v0, v1)
26930 return true
26931 }
26932
26933
26934
26935 for {
26936 x := v_0
26937 y := v_1
26938 if !(shiftIsBounded(v)) {
26939 break
26940 }
26941 v.reset(OpAMD64SHLL)
26942 v.AddArg2(x, y)
26943 return true
26944 }
26945 return false
26946 }
26947 func rewriteValueAMD64_OpLsh8x8(v *Value) bool {
26948 v_1 := v.Args[1]
26949 v_0 := v.Args[0]
26950 b := v.Block
26951
26952
26953
26954 for {
26955 t := v.Type
26956 x := v_0
26957 y := v_1
26958 if !(!shiftIsBounded(v)) {
26959 break
26960 }
26961 v.reset(OpAMD64ANDL)
26962 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26963 v0.AddArg2(x, y)
26964 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26965 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
26966 v2.AuxInt = int8ToAuxInt(32)
26967 v2.AddArg(y)
26968 v1.AddArg(v2)
26969 v.AddArg2(v0, v1)
26970 return true
26971 }
26972
26973
26974
26975 for {
26976 x := v_0
26977 y := v_1
26978 if !(shiftIsBounded(v)) {
26979 break
26980 }
26981 v.reset(OpAMD64SHLL)
26982 v.AddArg2(x, y)
26983 return true
26984 }
26985 return false
26986 }
26987 func rewriteValueAMD64_OpMax32F(v *Value) bool {
26988 v_1 := v.Args[1]
26989 v_0 := v.Args[0]
26990 b := v.Block
26991
26992
26993 for {
26994 t := v.Type
26995 x := v_0
26996 y := v_1
26997 v.reset(OpNeg32F)
26998 v.Type = t
26999 v0 := b.NewValue0(v.Pos, OpMin32F, t)
27000 v1 := b.NewValue0(v.Pos, OpNeg32F, t)
27001 v1.AddArg(x)
27002 v2 := b.NewValue0(v.Pos, OpNeg32F, t)
27003 v2.AddArg(y)
27004 v0.AddArg2(v1, v2)
27005 v.AddArg(v0)
27006 return true
27007 }
27008 }
27009 func rewriteValueAMD64_OpMax64F(v *Value) bool {
27010 v_1 := v.Args[1]
27011 v_0 := v.Args[0]
27012 b := v.Block
27013
27014
27015 for {
27016 t := v.Type
27017 x := v_0
27018 y := v_1
27019 v.reset(OpNeg64F)
27020 v.Type = t
27021 v0 := b.NewValue0(v.Pos, OpMin64F, t)
27022 v1 := b.NewValue0(v.Pos, OpNeg64F, t)
27023 v1.AddArg(x)
27024 v2 := b.NewValue0(v.Pos, OpNeg64F, t)
27025 v2.AddArg(y)
27026 v0.AddArg2(v1, v2)
27027 v.AddArg(v0)
27028 return true
27029 }
27030 }
27031 func rewriteValueAMD64_OpMin32F(v *Value) bool {
27032 v_1 := v.Args[1]
27033 v_0 := v.Args[0]
27034 b := v.Block
27035
27036
27037 for {
27038 t := v.Type
27039 x := v_0
27040 y := v_1
27041 v.reset(OpAMD64POR)
27042 v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t)
27043 v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t)
27044 v1.AddArg2(x, y)
27045 v0.AddArg2(v1, x)
27046 v.AddArg2(v0, v1)
27047 return true
27048 }
27049 }
27050 func rewriteValueAMD64_OpMin64F(v *Value) bool {
27051 v_1 := v.Args[1]
27052 v_0 := v.Args[0]
27053 b := v.Block
27054
27055
27056 for {
27057 t := v.Type
27058 x := v_0
27059 y := v_1
27060 v.reset(OpAMD64POR)
27061 v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t)
27062 v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t)
27063 v1.AddArg2(x, y)
27064 v0.AddArg2(v1, x)
27065 v.AddArg2(v0, v1)
27066 return true
27067 }
27068 }
27069 func rewriteValueAMD64_OpMod16(v *Value) bool {
27070 v_1 := v.Args[1]
27071 v_0 := v.Args[0]
27072 b := v.Block
27073 typ := &b.Func.Config.Types
27074
27075
27076 for {
27077 a := auxIntToBool(v.AuxInt)
27078 x := v_0
27079 y := v_1
27080 v.reset(OpSelect1)
27081 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
27082 v0.AuxInt = boolToAuxInt(a)
27083 v0.AddArg2(x, y)
27084 v.AddArg(v0)
27085 return true
27086 }
27087 }
27088 func rewriteValueAMD64_OpMod16u(v *Value) bool {
27089 v_1 := v.Args[1]
27090 v_0 := v.Args[0]
27091 b := v.Block
27092 typ := &b.Func.Config.Types
27093
27094
27095 for {
27096 x := v_0
27097 y := v_1
27098 v.reset(OpSelect1)
27099 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
27100 v0.AddArg2(x, y)
27101 v.AddArg(v0)
27102 return true
27103 }
27104 }
27105 func rewriteValueAMD64_OpMod32(v *Value) bool {
27106 v_1 := v.Args[1]
27107 v_0 := v.Args[0]
27108 b := v.Block
27109 typ := &b.Func.Config.Types
27110
27111
27112 for {
27113 a := auxIntToBool(v.AuxInt)
27114 x := v_0
27115 y := v_1
27116 v.reset(OpSelect1)
27117 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
27118 v0.AuxInt = boolToAuxInt(a)
27119 v0.AddArg2(x, y)
27120 v.AddArg(v0)
27121 return true
27122 }
27123 }
27124 func rewriteValueAMD64_OpMod32u(v *Value) bool {
27125 v_1 := v.Args[1]
27126 v_0 := v.Args[0]
27127 b := v.Block
27128 typ := &b.Func.Config.Types
27129
27130
27131 for {
27132 x := v_0
27133 y := v_1
27134 v.reset(OpSelect1)
27135 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
27136 v0.AddArg2(x, y)
27137 v.AddArg(v0)
27138 return true
27139 }
27140 }
27141 func rewriteValueAMD64_OpMod64(v *Value) bool {
27142 v_1 := v.Args[1]
27143 v_0 := v.Args[0]
27144 b := v.Block
27145 typ := &b.Func.Config.Types
27146
27147
27148 for {
27149 a := auxIntToBool(v.AuxInt)
27150 x := v_0
27151 y := v_1
27152 v.reset(OpSelect1)
27153 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
27154 v0.AuxInt = boolToAuxInt(a)
27155 v0.AddArg2(x, y)
27156 v.AddArg(v0)
27157 return true
27158 }
27159 }
27160 func rewriteValueAMD64_OpMod64u(v *Value) bool {
27161 v_1 := v.Args[1]
27162 v_0 := v.Args[0]
27163 b := v.Block
27164 typ := &b.Func.Config.Types
27165
27166
27167 for {
27168 x := v_0
27169 y := v_1
27170 v.reset(OpSelect1)
27171 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
27172 v0.AddArg2(x, y)
27173 v.AddArg(v0)
27174 return true
27175 }
27176 }
27177 func rewriteValueAMD64_OpMod8(v *Value) bool {
27178 v_1 := v.Args[1]
27179 v_0 := v.Args[0]
27180 b := v.Block
27181 typ := &b.Func.Config.Types
27182
27183
27184 for {
27185 x := v_0
27186 y := v_1
27187 v.reset(OpSelect1)
27188 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
27189 v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
27190 v1.AddArg(x)
27191 v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
27192 v2.AddArg(y)
27193 v0.AddArg2(v1, v2)
27194 v.AddArg(v0)
27195 return true
27196 }
27197 }
27198 func rewriteValueAMD64_OpMod8u(v *Value) bool {
27199 v_1 := v.Args[1]
27200 v_0 := v.Args[0]
27201 b := v.Block
27202 typ := &b.Func.Config.Types
27203
27204
27205 for {
27206 x := v_0
27207 y := v_1
27208 v.reset(OpSelect1)
27209 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
27210 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
27211 v1.AddArg(x)
27212 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
27213 v2.AddArg(y)
27214 v0.AddArg2(v1, v2)
27215 v.AddArg(v0)
27216 return true
27217 }
27218 }
27219 func rewriteValueAMD64_OpMove(v *Value) bool {
27220 v_2 := v.Args[2]
27221 v_1 := v.Args[1]
27222 v_0 := v.Args[0]
27223 b := v.Block
27224 typ := &b.Func.Config.Types
27225
27226
27227 for {
27228 if auxIntToInt64(v.AuxInt) != 0 {
27229 break
27230 }
27231 mem := v_2
27232 v.copyOf(mem)
27233 return true
27234 }
27235
27236
27237 for {
27238 if auxIntToInt64(v.AuxInt) != 1 {
27239 break
27240 }
27241 dst := v_0
27242 src := v_1
27243 mem := v_2
27244 v.reset(OpAMD64MOVBstore)
27245 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
27246 v0.AddArg2(src, mem)
27247 v.AddArg3(dst, v0, mem)
27248 return true
27249 }
27250
27251
27252 for {
27253 if auxIntToInt64(v.AuxInt) != 2 {
27254 break
27255 }
27256 dst := v_0
27257 src := v_1
27258 mem := v_2
27259 v.reset(OpAMD64MOVWstore)
27260 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
27261 v0.AddArg2(src, mem)
27262 v.AddArg3(dst, v0, mem)
27263 return true
27264 }
27265
27266
27267 for {
27268 if auxIntToInt64(v.AuxInt) != 4 {
27269 break
27270 }
27271 dst := v_0
27272 src := v_1
27273 mem := v_2
27274 v.reset(OpAMD64MOVLstore)
27275 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27276 v0.AddArg2(src, mem)
27277 v.AddArg3(dst, v0, mem)
27278 return true
27279 }
27280
27281
27282 for {
27283 if auxIntToInt64(v.AuxInt) != 8 {
27284 break
27285 }
27286 dst := v_0
27287 src := v_1
27288 mem := v_2
27289 v.reset(OpAMD64MOVQstore)
27290 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27291 v0.AddArg2(src, mem)
27292 v.AddArg3(dst, v0, mem)
27293 return true
27294 }
27295
27296
27297 for {
27298 if auxIntToInt64(v.AuxInt) != 16 {
27299 break
27300 }
27301 dst := v_0
27302 src := v_1
27303 mem := v_2
27304 v.reset(OpAMD64MOVOstore)
27305 v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
27306 v0.AddArg2(src, mem)
27307 v.AddArg3(dst, v0, mem)
27308 return true
27309 }
27310
27311
27312 for {
27313 if auxIntToInt64(v.AuxInt) != 32 {
27314 break
27315 }
27316 dst := v_0
27317 src := v_1
27318 mem := v_2
27319 v.reset(OpMove)
27320 v.AuxInt = int64ToAuxInt(16)
27321 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
27322 v0.AuxInt = int64ToAuxInt(16)
27323 v0.AddArg(dst)
27324 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
27325 v1.AuxInt = int64ToAuxInt(16)
27326 v1.AddArg(src)
27327 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
27328 v2.AuxInt = int64ToAuxInt(16)
27329 v2.AddArg3(dst, src, mem)
27330 v.AddArg3(v0, v1, v2)
27331 return true
27332 }
27333
27334
27335 for {
27336 if auxIntToInt64(v.AuxInt) != 48 {
27337 break
27338 }
27339 dst := v_0
27340 src := v_1
27341 mem := v_2
27342 v.reset(OpMove)
27343 v.AuxInt = int64ToAuxInt(32)
27344 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
27345 v0.AuxInt = int64ToAuxInt(16)
27346 v0.AddArg(dst)
27347 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
27348 v1.AuxInt = int64ToAuxInt(16)
27349 v1.AddArg(src)
27350 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
27351 v2.AuxInt = int64ToAuxInt(16)
27352 v2.AddArg3(dst, src, mem)
27353 v.AddArg3(v0, v1, v2)
27354 return true
27355 }
27356
27357
27358 for {
27359 if auxIntToInt64(v.AuxInt) != 64 {
27360 break
27361 }
27362 dst := v_0
27363 src := v_1
27364 mem := v_2
27365 v.reset(OpMove)
27366 v.AuxInt = int64ToAuxInt(32)
27367 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
27368 v0.AuxInt = int64ToAuxInt(32)
27369 v0.AddArg(dst)
27370 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
27371 v1.AuxInt = int64ToAuxInt(32)
27372 v1.AddArg(src)
27373 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
27374 v2.AuxInt = int64ToAuxInt(32)
27375 v2.AddArg3(dst, src, mem)
27376 v.AddArg3(v0, v1, v2)
27377 return true
27378 }
27379
27380
27381 for {
27382 if auxIntToInt64(v.AuxInt) != 3 {
27383 break
27384 }
27385 dst := v_0
27386 src := v_1
27387 mem := v_2
27388 v.reset(OpAMD64MOVBstore)
27389 v.AuxInt = int32ToAuxInt(2)
27390 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
27391 v0.AuxInt = int32ToAuxInt(2)
27392 v0.AddArg2(src, mem)
27393 v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
27394 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
27395 v2.AddArg2(src, mem)
27396 v1.AddArg3(dst, v2, mem)
27397 v.AddArg3(dst, v0, v1)
27398 return true
27399 }
27400
27401
27402 for {
27403 if auxIntToInt64(v.AuxInt) != 5 {
27404 break
27405 }
27406 dst := v_0
27407 src := v_1
27408 mem := v_2
27409 v.reset(OpAMD64MOVBstore)
27410 v.AuxInt = int32ToAuxInt(4)
27411 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
27412 v0.AuxInt = int32ToAuxInt(4)
27413 v0.AddArg2(src, mem)
27414 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
27415 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27416 v2.AddArg2(src, mem)
27417 v1.AddArg3(dst, v2, mem)
27418 v.AddArg3(dst, v0, v1)
27419 return true
27420 }
27421
27422
27423 for {
27424 if auxIntToInt64(v.AuxInt) != 6 {
27425 break
27426 }
27427 dst := v_0
27428 src := v_1
27429 mem := v_2
27430 v.reset(OpAMD64MOVWstore)
27431 v.AuxInt = int32ToAuxInt(4)
27432 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
27433 v0.AuxInt = int32ToAuxInt(4)
27434 v0.AddArg2(src, mem)
27435 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
27436 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27437 v2.AddArg2(src, mem)
27438 v1.AddArg3(dst, v2, mem)
27439 v.AddArg3(dst, v0, v1)
27440 return true
27441 }
27442
27443
27444 for {
27445 if auxIntToInt64(v.AuxInt) != 7 {
27446 break
27447 }
27448 dst := v_0
27449 src := v_1
27450 mem := v_2
27451 v.reset(OpAMD64MOVLstore)
27452 v.AuxInt = int32ToAuxInt(3)
27453 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27454 v0.AuxInt = int32ToAuxInt(3)
27455 v0.AddArg2(src, mem)
27456 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
27457 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27458 v2.AddArg2(src, mem)
27459 v1.AddArg3(dst, v2, mem)
27460 v.AddArg3(dst, v0, v1)
27461 return true
27462 }
27463
27464
27465 for {
27466 if auxIntToInt64(v.AuxInt) != 9 {
27467 break
27468 }
27469 dst := v_0
27470 src := v_1
27471 mem := v_2
27472 v.reset(OpAMD64MOVBstore)
27473 v.AuxInt = int32ToAuxInt(8)
27474 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
27475 v0.AuxInt = int32ToAuxInt(8)
27476 v0.AddArg2(src, mem)
27477 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27478 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27479 v2.AddArg2(src, mem)
27480 v1.AddArg3(dst, v2, mem)
27481 v.AddArg3(dst, v0, v1)
27482 return true
27483 }
27484
27485
27486 for {
27487 if auxIntToInt64(v.AuxInt) != 10 {
27488 break
27489 }
27490 dst := v_0
27491 src := v_1
27492 mem := v_2
27493 v.reset(OpAMD64MOVWstore)
27494 v.AuxInt = int32ToAuxInt(8)
27495 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
27496 v0.AuxInt = int32ToAuxInt(8)
27497 v0.AddArg2(src, mem)
27498 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27499 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27500 v2.AddArg2(src, mem)
27501 v1.AddArg3(dst, v2, mem)
27502 v.AddArg3(dst, v0, v1)
27503 return true
27504 }
27505
27506
27507 for {
27508 if auxIntToInt64(v.AuxInt) != 11 {
27509 break
27510 }
27511 dst := v_0
27512 src := v_1
27513 mem := v_2
27514 v.reset(OpAMD64MOVLstore)
27515 v.AuxInt = int32ToAuxInt(7)
27516 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27517 v0.AuxInt = int32ToAuxInt(7)
27518 v0.AddArg2(src, mem)
27519 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27520 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27521 v2.AddArg2(src, mem)
27522 v1.AddArg3(dst, v2, mem)
27523 v.AddArg3(dst, v0, v1)
27524 return true
27525 }
27526
27527
27528 for {
27529 if auxIntToInt64(v.AuxInt) != 12 {
27530 break
27531 }
27532 dst := v_0
27533 src := v_1
27534 mem := v_2
27535 v.reset(OpAMD64MOVLstore)
27536 v.AuxInt = int32ToAuxInt(8)
27537 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27538 v0.AuxInt = int32ToAuxInt(8)
27539 v0.AddArg2(src, mem)
27540 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27541 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27542 v2.AddArg2(src, mem)
27543 v1.AddArg3(dst, v2, mem)
27544 v.AddArg3(dst, v0, v1)
27545 return true
27546 }
27547
27548
27549
27550 for {
27551 s := auxIntToInt64(v.AuxInt)
27552 dst := v_0
27553 src := v_1
27554 mem := v_2
27555 if !(s >= 13 && s <= 15) {
27556 break
27557 }
27558 v.reset(OpAMD64MOVQstore)
27559 v.AuxInt = int32ToAuxInt(int32(s - 8))
27560 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27561 v0.AuxInt = int32ToAuxInt(int32(s - 8))
27562 v0.AddArg2(src, mem)
27563 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27564 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27565 v2.AddArg2(src, mem)
27566 v1.AddArg3(dst, v2, mem)
27567 v.AddArg3(dst, v0, v1)
27568 return true
27569 }
27570
27571
27572
27573 for {
27574 s := auxIntToInt64(v.AuxInt)
27575 dst := v_0
27576 src := v_1
27577 mem := v_2
27578 if !(s > 16 && s%16 != 0 && s%16 <= 8) {
27579 break
27580 }
27581 v.reset(OpMove)
27582 v.AuxInt = int64ToAuxInt(s - s%16)
27583 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
27584 v0.AuxInt = int64ToAuxInt(s % 16)
27585 v0.AddArg(dst)
27586 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
27587 v1.AuxInt = int64ToAuxInt(s % 16)
27588 v1.AddArg(src)
27589 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27590 v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27591 v3.AddArg2(src, mem)
27592 v2.AddArg3(dst, v3, mem)
27593 v.AddArg3(v0, v1, v2)
27594 return true
27595 }
27596
27597
27598
27599 for {
27600 s := auxIntToInt64(v.AuxInt)
27601 dst := v_0
27602 src := v_1
27603 mem := v_2
27604 if !(s > 16 && s%16 != 0 && s%16 > 8) {
27605 break
27606 }
27607 v.reset(OpMove)
27608 v.AuxInt = int64ToAuxInt(s - s%16)
27609 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
27610 v0.AuxInt = int64ToAuxInt(s % 16)
27611 v0.AddArg(dst)
27612 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
27613 v1.AuxInt = int64ToAuxInt(s % 16)
27614 v1.AddArg(src)
27615 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
27616 v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
27617 v3.AddArg2(src, mem)
27618 v2.AddArg3(dst, v3, mem)
27619 v.AddArg3(v0, v1, v2)
27620 return true
27621 }
27622
27623
27624
27625 for {
27626 s := auxIntToInt64(v.AuxInt)
27627 dst := v_0
27628 src := v_1
27629 mem := v_2
27630 if !(s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)) {
27631 break
27632 }
27633 v.reset(OpAMD64DUFFCOPY)
27634 v.AuxInt = int64ToAuxInt(s)
27635 v.AddArg3(dst, src, mem)
27636 return true
27637 }
27638
27639
27640
27641 for {
27642 s := auxIntToInt64(v.AuxInt)
27643 dst := v_0
27644 src := v_1
27645 mem := v_2
27646 if !(s > 16*64 && s%8 == 0 && logLargeCopy(v, s)) {
27647 break
27648 }
27649 v.reset(OpAMD64REPMOVSQ)
27650 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
27651 v0.AuxInt = int64ToAuxInt(s / 8)
27652 v.AddArg4(dst, src, v0, mem)
27653 return true
27654 }
27655 return false
27656 }
27657 func rewriteValueAMD64_OpNeg32F(v *Value) bool {
27658 v_0 := v.Args[0]
27659 b := v.Block
27660 typ := &b.Func.Config.Types
27661
27662
27663 for {
27664 x := v_0
27665 v.reset(OpAMD64PXOR)
27666 v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
27667 v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1)))
27668 v.AddArg2(x, v0)
27669 return true
27670 }
27671 }
27672 func rewriteValueAMD64_OpNeg64F(v *Value) bool {
27673 v_0 := v.Args[0]
27674 b := v.Block
27675 typ := &b.Func.Config.Types
27676
27677
27678 for {
27679 x := v_0
27680 v.reset(OpAMD64PXOR)
27681 v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
27682 v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1))
27683 v.AddArg2(x, v0)
27684 return true
27685 }
27686 }
27687 func rewriteValueAMD64_OpNeq16(v *Value) bool {
27688 v_1 := v.Args[1]
27689 v_0 := v.Args[0]
27690 b := v.Block
27691
27692
27693 for {
27694 x := v_0
27695 y := v_1
27696 v.reset(OpAMD64SETNE)
27697 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
27698 v0.AddArg2(x, y)
27699 v.AddArg(v0)
27700 return true
27701 }
27702 }
27703 func rewriteValueAMD64_OpNeq32(v *Value) bool {
27704 v_1 := v.Args[1]
27705 v_0 := v.Args[0]
27706 b := v.Block
27707
27708
27709 for {
27710 x := v_0
27711 y := v_1
27712 v.reset(OpAMD64SETNE)
27713 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
27714 v0.AddArg2(x, y)
27715 v.AddArg(v0)
27716 return true
27717 }
27718 }
27719 func rewriteValueAMD64_OpNeq32F(v *Value) bool {
27720 v_1 := v.Args[1]
27721 v_0 := v.Args[0]
27722 b := v.Block
27723
27724
27725 for {
27726 x := v_0
27727 y := v_1
27728 v.reset(OpAMD64SETNEF)
27729 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
27730 v0.AddArg2(x, y)
27731 v.AddArg(v0)
27732 return true
27733 }
27734 }
27735 func rewriteValueAMD64_OpNeq64(v *Value) bool {
27736 v_1 := v.Args[1]
27737 v_0 := v.Args[0]
27738 b := v.Block
27739
27740
27741 for {
27742 x := v_0
27743 y := v_1
27744 v.reset(OpAMD64SETNE)
27745 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
27746 v0.AddArg2(x, y)
27747 v.AddArg(v0)
27748 return true
27749 }
27750 }
27751 func rewriteValueAMD64_OpNeq64F(v *Value) bool {
27752 v_1 := v.Args[1]
27753 v_0 := v.Args[0]
27754 b := v.Block
27755
27756
27757 for {
27758 x := v_0
27759 y := v_1
27760 v.reset(OpAMD64SETNEF)
27761 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
27762 v0.AddArg2(x, y)
27763 v.AddArg(v0)
27764 return true
27765 }
27766 }
27767 func rewriteValueAMD64_OpNeq8(v *Value) bool {
27768 v_1 := v.Args[1]
27769 v_0 := v.Args[0]
27770 b := v.Block
27771
27772
27773 for {
27774 x := v_0
27775 y := v_1
27776 v.reset(OpAMD64SETNE)
27777 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
27778 v0.AddArg2(x, y)
27779 v.AddArg(v0)
27780 return true
27781 }
27782 }
27783 func rewriteValueAMD64_OpNeqB(v *Value) bool {
27784 v_1 := v.Args[1]
27785 v_0 := v.Args[0]
27786 b := v.Block
27787
27788
27789 for {
27790 x := v_0
27791 y := v_1
27792 v.reset(OpAMD64SETNE)
27793 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
27794 v0.AddArg2(x, y)
27795 v.AddArg(v0)
27796 return true
27797 }
27798 }
27799 func rewriteValueAMD64_OpNeqPtr(v *Value) bool {
27800 v_1 := v.Args[1]
27801 v_0 := v.Args[0]
27802 b := v.Block
27803
27804
27805 for {
27806 x := v_0
27807 y := v_1
27808 v.reset(OpAMD64SETNE)
27809 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
27810 v0.AddArg2(x, y)
27811 v.AddArg(v0)
27812 return true
27813 }
27814 }
27815 func rewriteValueAMD64_OpNot(v *Value) bool {
27816 v_0 := v.Args[0]
27817
27818
27819 for {
27820 x := v_0
27821 v.reset(OpAMD64XORLconst)
27822 v.AuxInt = int32ToAuxInt(1)
27823 v.AddArg(x)
27824 return true
27825 }
27826 }
27827 func rewriteValueAMD64_OpOffPtr(v *Value) bool {
27828 v_0 := v.Args[0]
27829 b := v.Block
27830 typ := &b.Func.Config.Types
27831
27832
27833
27834 for {
27835 off := auxIntToInt64(v.AuxInt)
27836 ptr := v_0
27837 if !(is32Bit(off)) {
27838 break
27839 }
27840 v.reset(OpAMD64ADDQconst)
27841 v.AuxInt = int32ToAuxInt(int32(off))
27842 v.AddArg(ptr)
27843 return true
27844 }
27845
27846
27847 for {
27848 off := auxIntToInt64(v.AuxInt)
27849 ptr := v_0
27850 v.reset(OpAMD64ADDQ)
27851 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
27852 v0.AuxInt = int64ToAuxInt(off)
27853 v.AddArg2(v0, ptr)
27854 return true
27855 }
27856 }
27857 func rewriteValueAMD64_OpPopCount16(v *Value) bool {
27858 v_0 := v.Args[0]
27859 b := v.Block
27860 typ := &b.Func.Config.Types
27861
27862
27863 for {
27864 x := v_0
27865 v.reset(OpAMD64POPCNTL)
27866 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
27867 v0.AddArg(x)
27868 v.AddArg(v0)
27869 return true
27870 }
27871 }
27872 func rewriteValueAMD64_OpPopCount8(v *Value) bool {
27873 v_0 := v.Args[0]
27874 b := v.Block
27875 typ := &b.Func.Config.Types
27876
27877
27878 for {
27879 x := v_0
27880 v.reset(OpAMD64POPCNTL)
27881 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
27882 v0.AddArg(x)
27883 v.AddArg(v0)
27884 return true
27885 }
27886 }
27887 func rewriteValueAMD64_OpRoundToEven(v *Value) bool {
27888 v_0 := v.Args[0]
27889
27890
27891 for {
27892 x := v_0
27893 v.reset(OpAMD64ROUNDSD)
27894 v.AuxInt = int8ToAuxInt(0)
27895 v.AddArg(x)
27896 return true
27897 }
27898 }
27899 func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool {
27900 v_1 := v.Args[1]
27901 v_0 := v.Args[0]
27902 b := v.Block
27903
27904
27905
27906 for {
27907 t := v.Type
27908 x := v_0
27909 y := v_1
27910 if !(!shiftIsBounded(v)) {
27911 break
27912 }
27913 v.reset(OpAMD64ANDL)
27914 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
27915 v0.AddArg2(x, y)
27916 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
27917 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
27918 v2.AuxInt = int16ToAuxInt(16)
27919 v2.AddArg(y)
27920 v1.AddArg(v2)
27921 v.AddArg2(v0, v1)
27922 return true
27923 }
27924
27925
27926
27927 for {
27928 x := v_0
27929 y := v_1
27930 if !(shiftIsBounded(v)) {
27931 break
27932 }
27933 v.reset(OpAMD64SHRW)
27934 v.AddArg2(x, y)
27935 return true
27936 }
27937 return false
27938 }
27939 func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool {
27940 v_1 := v.Args[1]
27941 v_0 := v.Args[0]
27942 b := v.Block
27943
27944
27945
27946 for {
27947 t := v.Type
27948 x := v_0
27949 y := v_1
27950 if !(!shiftIsBounded(v)) {
27951 break
27952 }
27953 v.reset(OpAMD64ANDL)
27954 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
27955 v0.AddArg2(x, y)
27956 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
27957 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
27958 v2.AuxInt = int32ToAuxInt(16)
27959 v2.AddArg(y)
27960 v1.AddArg(v2)
27961 v.AddArg2(v0, v1)
27962 return true
27963 }
27964
27965
27966
27967 for {
27968 x := v_0
27969 y := v_1
27970 if !(shiftIsBounded(v)) {
27971 break
27972 }
27973 v.reset(OpAMD64SHRW)
27974 v.AddArg2(x, y)
27975 return true
27976 }
27977 return false
27978 }
27979 func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool {
27980 v_1 := v.Args[1]
27981 v_0 := v.Args[0]
27982 b := v.Block
27983
27984
27985
27986 for {
27987 t := v.Type
27988 x := v_0
27989 y := v_1
27990 if !(!shiftIsBounded(v)) {
27991 break
27992 }
27993 v.reset(OpAMD64ANDL)
27994 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
27995 v0.AddArg2(x, y)
27996 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
27997 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
27998 v2.AuxInt = int32ToAuxInt(16)
27999 v2.AddArg(y)
28000 v1.AddArg(v2)
28001 v.AddArg2(v0, v1)
28002 return true
28003 }
28004
28005
28006
28007 for {
28008 x := v_0
28009 y := v_1
28010 if !(shiftIsBounded(v)) {
28011 break
28012 }
28013 v.reset(OpAMD64SHRW)
28014 v.AddArg2(x, y)
28015 return true
28016 }
28017 return false
28018 }
28019 func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool {
28020 v_1 := v.Args[1]
28021 v_0 := v.Args[0]
28022 b := v.Block
28023
28024
28025
28026 for {
28027 t := v.Type
28028 x := v_0
28029 y := v_1
28030 if !(!shiftIsBounded(v)) {
28031 break
28032 }
28033 v.reset(OpAMD64ANDL)
28034 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
28035 v0.AddArg2(x, y)
28036 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28037 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28038 v2.AuxInt = int8ToAuxInt(16)
28039 v2.AddArg(y)
28040 v1.AddArg(v2)
28041 v.AddArg2(v0, v1)
28042 return true
28043 }
28044
28045
28046
28047 for {
28048 x := v_0
28049 y := v_1
28050 if !(shiftIsBounded(v)) {
28051 break
28052 }
28053 v.reset(OpAMD64SHRW)
28054 v.AddArg2(x, y)
28055 return true
28056 }
28057 return false
28058 }
28059 func rewriteValueAMD64_OpRsh16x16(v *Value) bool {
28060 v_1 := v.Args[1]
28061 v_0 := v.Args[0]
28062 b := v.Block
28063
28064
28065
28066 for {
28067 t := v.Type
28068 x := v_0
28069 y := v_1
28070 if !(!shiftIsBounded(v)) {
28071 break
28072 }
28073 v.reset(OpAMD64SARW)
28074 v.Type = t
28075 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28076 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28077 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28078 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28079 v3.AuxInt = int16ToAuxInt(16)
28080 v3.AddArg(y)
28081 v2.AddArg(v3)
28082 v1.AddArg(v2)
28083 v0.AddArg2(y, v1)
28084 v.AddArg2(x, v0)
28085 return true
28086 }
28087
28088
28089
28090 for {
28091 x := v_0
28092 y := v_1
28093 if !(shiftIsBounded(v)) {
28094 break
28095 }
28096 v.reset(OpAMD64SARW)
28097 v.AddArg2(x, y)
28098 return true
28099 }
28100 return false
28101 }
28102 func rewriteValueAMD64_OpRsh16x32(v *Value) bool {
28103 v_1 := v.Args[1]
28104 v_0 := v.Args[0]
28105 b := v.Block
28106
28107
28108
28109 for {
28110 t := v.Type
28111 x := v_0
28112 y := v_1
28113 if !(!shiftIsBounded(v)) {
28114 break
28115 }
28116 v.reset(OpAMD64SARW)
28117 v.Type = t
28118 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28119 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28120 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28121 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28122 v3.AuxInt = int32ToAuxInt(16)
28123 v3.AddArg(y)
28124 v2.AddArg(v3)
28125 v1.AddArg(v2)
28126 v0.AddArg2(y, v1)
28127 v.AddArg2(x, v0)
28128 return true
28129 }
28130
28131
28132
28133 for {
28134 x := v_0
28135 y := v_1
28136 if !(shiftIsBounded(v)) {
28137 break
28138 }
28139 v.reset(OpAMD64SARW)
28140 v.AddArg2(x, y)
28141 return true
28142 }
28143 return false
28144 }
28145 func rewriteValueAMD64_OpRsh16x64(v *Value) bool {
28146 v_1 := v.Args[1]
28147 v_0 := v.Args[0]
28148 b := v.Block
28149
28150
28151
28152 for {
28153 t := v.Type
28154 x := v_0
28155 y := v_1
28156 if !(!shiftIsBounded(v)) {
28157 break
28158 }
28159 v.reset(OpAMD64SARW)
28160 v.Type = t
28161 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
28162 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
28163 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
28164 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28165 v3.AuxInt = int32ToAuxInt(16)
28166 v3.AddArg(y)
28167 v2.AddArg(v3)
28168 v1.AddArg(v2)
28169 v0.AddArg2(y, v1)
28170 v.AddArg2(x, v0)
28171 return true
28172 }
28173
28174
28175
28176 for {
28177 x := v_0
28178 y := v_1
28179 if !(shiftIsBounded(v)) {
28180 break
28181 }
28182 v.reset(OpAMD64SARW)
28183 v.AddArg2(x, y)
28184 return true
28185 }
28186 return false
28187 }
28188 func rewriteValueAMD64_OpRsh16x8(v *Value) bool {
28189 v_1 := v.Args[1]
28190 v_0 := v.Args[0]
28191 b := v.Block
28192
28193
28194
28195 for {
28196 t := v.Type
28197 x := v_0
28198 y := v_1
28199 if !(!shiftIsBounded(v)) {
28200 break
28201 }
28202 v.reset(OpAMD64SARW)
28203 v.Type = t
28204 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28205 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28206 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28207 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28208 v3.AuxInt = int8ToAuxInt(16)
28209 v3.AddArg(y)
28210 v2.AddArg(v3)
28211 v1.AddArg(v2)
28212 v0.AddArg2(y, v1)
28213 v.AddArg2(x, v0)
28214 return true
28215 }
28216
28217
28218
28219 for {
28220 x := v_0
28221 y := v_1
28222 if !(shiftIsBounded(v)) {
28223 break
28224 }
28225 v.reset(OpAMD64SARW)
28226 v.AddArg2(x, y)
28227 return true
28228 }
28229 return false
28230 }
28231 func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool {
28232 v_1 := v.Args[1]
28233 v_0 := v.Args[0]
28234 b := v.Block
28235
28236
28237
28238 for {
28239 t := v.Type
28240 x := v_0
28241 y := v_1
28242 if !(!shiftIsBounded(v)) {
28243 break
28244 }
28245 v.reset(OpAMD64ANDL)
28246 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
28247 v0.AddArg2(x, y)
28248 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28249 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28250 v2.AuxInt = int16ToAuxInt(32)
28251 v2.AddArg(y)
28252 v1.AddArg(v2)
28253 v.AddArg2(v0, v1)
28254 return true
28255 }
28256
28257
28258
28259 for {
28260 x := v_0
28261 y := v_1
28262 if !(shiftIsBounded(v)) {
28263 break
28264 }
28265 v.reset(OpAMD64SHRL)
28266 v.AddArg2(x, y)
28267 return true
28268 }
28269 return false
28270 }
28271 func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool {
28272 v_1 := v.Args[1]
28273 v_0 := v.Args[0]
28274 b := v.Block
28275
28276
28277
28278 for {
28279 t := v.Type
28280 x := v_0
28281 y := v_1
28282 if !(!shiftIsBounded(v)) {
28283 break
28284 }
28285 v.reset(OpAMD64ANDL)
28286 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
28287 v0.AddArg2(x, y)
28288 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28289 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28290 v2.AuxInt = int32ToAuxInt(32)
28291 v2.AddArg(y)
28292 v1.AddArg(v2)
28293 v.AddArg2(v0, v1)
28294 return true
28295 }
28296
28297
28298
28299 for {
28300 x := v_0
28301 y := v_1
28302 if !(shiftIsBounded(v)) {
28303 break
28304 }
28305 v.reset(OpAMD64SHRL)
28306 v.AddArg2(x, y)
28307 return true
28308 }
28309 return false
28310 }
28311 func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool {
28312 v_1 := v.Args[1]
28313 v_0 := v.Args[0]
28314 b := v.Block
28315
28316
28317
28318 for {
28319 t := v.Type
28320 x := v_0
28321 y := v_1
28322 if !(!shiftIsBounded(v)) {
28323 break
28324 }
28325 v.reset(OpAMD64ANDL)
28326 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
28327 v0.AddArg2(x, y)
28328 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28329 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28330 v2.AuxInt = int32ToAuxInt(32)
28331 v2.AddArg(y)
28332 v1.AddArg(v2)
28333 v.AddArg2(v0, v1)
28334 return true
28335 }
28336
28337
28338
28339 for {
28340 x := v_0
28341 y := v_1
28342 if !(shiftIsBounded(v)) {
28343 break
28344 }
28345 v.reset(OpAMD64SHRL)
28346 v.AddArg2(x, y)
28347 return true
28348 }
28349 return false
28350 }
28351 func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool {
28352 v_1 := v.Args[1]
28353 v_0 := v.Args[0]
28354 b := v.Block
28355
28356
28357
28358 for {
28359 t := v.Type
28360 x := v_0
28361 y := v_1
28362 if !(!shiftIsBounded(v)) {
28363 break
28364 }
28365 v.reset(OpAMD64ANDL)
28366 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
28367 v0.AddArg2(x, y)
28368 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28369 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28370 v2.AuxInt = int8ToAuxInt(32)
28371 v2.AddArg(y)
28372 v1.AddArg(v2)
28373 v.AddArg2(v0, v1)
28374 return true
28375 }
28376
28377
28378
28379 for {
28380 x := v_0
28381 y := v_1
28382 if !(shiftIsBounded(v)) {
28383 break
28384 }
28385 v.reset(OpAMD64SHRL)
28386 v.AddArg2(x, y)
28387 return true
28388 }
28389 return false
28390 }
28391 func rewriteValueAMD64_OpRsh32x16(v *Value) bool {
28392 v_1 := v.Args[1]
28393 v_0 := v.Args[0]
28394 b := v.Block
28395
28396
28397
28398 for {
28399 t := v.Type
28400 x := v_0
28401 y := v_1
28402 if !(!shiftIsBounded(v)) {
28403 break
28404 }
28405 v.reset(OpAMD64SARL)
28406 v.Type = t
28407 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28408 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28409 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28410 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28411 v3.AuxInt = int16ToAuxInt(32)
28412 v3.AddArg(y)
28413 v2.AddArg(v3)
28414 v1.AddArg(v2)
28415 v0.AddArg2(y, v1)
28416 v.AddArg2(x, v0)
28417 return true
28418 }
28419
28420
28421
28422 for {
28423 x := v_0
28424 y := v_1
28425 if !(shiftIsBounded(v)) {
28426 break
28427 }
28428 v.reset(OpAMD64SARL)
28429 v.AddArg2(x, y)
28430 return true
28431 }
28432 return false
28433 }
28434 func rewriteValueAMD64_OpRsh32x32(v *Value) bool {
28435 v_1 := v.Args[1]
28436 v_0 := v.Args[0]
28437 b := v.Block
28438
28439
28440
28441 for {
28442 t := v.Type
28443 x := v_0
28444 y := v_1
28445 if !(!shiftIsBounded(v)) {
28446 break
28447 }
28448 v.reset(OpAMD64SARL)
28449 v.Type = t
28450 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28451 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28452 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28453 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28454 v3.AuxInt = int32ToAuxInt(32)
28455 v3.AddArg(y)
28456 v2.AddArg(v3)
28457 v1.AddArg(v2)
28458 v0.AddArg2(y, v1)
28459 v.AddArg2(x, v0)
28460 return true
28461 }
28462
28463
28464
28465 for {
28466 x := v_0
28467 y := v_1
28468 if !(shiftIsBounded(v)) {
28469 break
28470 }
28471 v.reset(OpAMD64SARL)
28472 v.AddArg2(x, y)
28473 return true
28474 }
28475 return false
28476 }
28477 func rewriteValueAMD64_OpRsh32x64(v *Value) bool {
28478 v_1 := v.Args[1]
28479 v_0 := v.Args[0]
28480 b := v.Block
28481
28482
28483
28484 for {
28485 t := v.Type
28486 x := v_0
28487 y := v_1
28488 if !(!shiftIsBounded(v)) {
28489 break
28490 }
28491 v.reset(OpAMD64SARL)
28492 v.Type = t
28493 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
28494 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
28495 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
28496 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28497 v3.AuxInt = int32ToAuxInt(32)
28498 v3.AddArg(y)
28499 v2.AddArg(v3)
28500 v1.AddArg(v2)
28501 v0.AddArg2(y, v1)
28502 v.AddArg2(x, v0)
28503 return true
28504 }
28505
28506
28507
28508 for {
28509 x := v_0
28510 y := v_1
28511 if !(shiftIsBounded(v)) {
28512 break
28513 }
28514 v.reset(OpAMD64SARL)
28515 v.AddArg2(x, y)
28516 return true
28517 }
28518 return false
28519 }
28520 func rewriteValueAMD64_OpRsh32x8(v *Value) bool {
28521 v_1 := v.Args[1]
28522 v_0 := v.Args[0]
28523 b := v.Block
28524
28525
28526
28527 for {
28528 t := v.Type
28529 x := v_0
28530 y := v_1
28531 if !(!shiftIsBounded(v)) {
28532 break
28533 }
28534 v.reset(OpAMD64SARL)
28535 v.Type = t
28536 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28537 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28538 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28539 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28540 v3.AuxInt = int8ToAuxInt(32)
28541 v3.AddArg(y)
28542 v2.AddArg(v3)
28543 v1.AddArg(v2)
28544 v0.AddArg2(y, v1)
28545 v.AddArg2(x, v0)
28546 return true
28547 }
28548
28549
28550
28551 for {
28552 x := v_0
28553 y := v_1
28554 if !(shiftIsBounded(v)) {
28555 break
28556 }
28557 v.reset(OpAMD64SARL)
28558 v.AddArg2(x, y)
28559 return true
28560 }
28561 return false
28562 }
28563 func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool {
28564 v_1 := v.Args[1]
28565 v_0 := v.Args[0]
28566 b := v.Block
28567
28568
28569
28570 for {
28571 t := v.Type
28572 x := v_0
28573 y := v_1
28574 if !(!shiftIsBounded(v)) {
28575 break
28576 }
28577 v.reset(OpAMD64ANDQ)
28578 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
28579 v0.AddArg2(x, y)
28580 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
28581 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28582 v2.AuxInt = int16ToAuxInt(64)
28583 v2.AddArg(y)
28584 v1.AddArg(v2)
28585 v.AddArg2(v0, v1)
28586 return true
28587 }
28588
28589
28590
28591 for {
28592 x := v_0
28593 y := v_1
28594 if !(shiftIsBounded(v)) {
28595 break
28596 }
28597 v.reset(OpAMD64SHRQ)
28598 v.AddArg2(x, y)
28599 return true
28600 }
28601 return false
28602 }
28603 func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool {
28604 v_1 := v.Args[1]
28605 v_0 := v.Args[0]
28606 b := v.Block
28607
28608
28609
28610 for {
28611 t := v.Type
28612 x := v_0
28613 y := v_1
28614 if !(!shiftIsBounded(v)) {
28615 break
28616 }
28617 v.reset(OpAMD64ANDQ)
28618 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
28619 v0.AddArg2(x, y)
28620 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
28621 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28622 v2.AuxInt = int32ToAuxInt(64)
28623 v2.AddArg(y)
28624 v1.AddArg(v2)
28625 v.AddArg2(v0, v1)
28626 return true
28627 }
28628
28629
28630
28631 for {
28632 x := v_0
28633 y := v_1
28634 if !(shiftIsBounded(v)) {
28635 break
28636 }
28637 v.reset(OpAMD64SHRQ)
28638 v.AddArg2(x, y)
28639 return true
28640 }
28641 return false
28642 }
28643 func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool {
28644 v_1 := v.Args[1]
28645 v_0 := v.Args[0]
28646 b := v.Block
28647
28648
28649
28650 for {
28651 t := v.Type
28652 x := v_0
28653 y := v_1
28654 if !(!shiftIsBounded(v)) {
28655 break
28656 }
28657 v.reset(OpAMD64ANDQ)
28658 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
28659 v0.AddArg2(x, y)
28660 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
28661 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28662 v2.AuxInt = int32ToAuxInt(64)
28663 v2.AddArg(y)
28664 v1.AddArg(v2)
28665 v.AddArg2(v0, v1)
28666 return true
28667 }
28668
28669
28670
28671 for {
28672 x := v_0
28673 y := v_1
28674 if !(shiftIsBounded(v)) {
28675 break
28676 }
28677 v.reset(OpAMD64SHRQ)
28678 v.AddArg2(x, y)
28679 return true
28680 }
28681 return false
28682 }
28683 func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool {
28684 v_1 := v.Args[1]
28685 v_0 := v.Args[0]
28686 b := v.Block
28687
28688
28689
28690 for {
28691 t := v.Type
28692 x := v_0
28693 y := v_1
28694 if !(!shiftIsBounded(v)) {
28695 break
28696 }
28697 v.reset(OpAMD64ANDQ)
28698 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
28699 v0.AddArg2(x, y)
28700 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
28701 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28702 v2.AuxInt = int8ToAuxInt(64)
28703 v2.AddArg(y)
28704 v1.AddArg(v2)
28705 v.AddArg2(v0, v1)
28706 return true
28707 }
28708
28709
28710
28711 for {
28712 x := v_0
28713 y := v_1
28714 if !(shiftIsBounded(v)) {
28715 break
28716 }
28717 v.reset(OpAMD64SHRQ)
28718 v.AddArg2(x, y)
28719 return true
28720 }
28721 return false
28722 }
28723 func rewriteValueAMD64_OpRsh64x16(v *Value) bool {
28724 v_1 := v.Args[1]
28725 v_0 := v.Args[0]
28726 b := v.Block
28727
28728
28729
28730 for {
28731 t := v.Type
28732 x := v_0
28733 y := v_1
28734 if !(!shiftIsBounded(v)) {
28735 break
28736 }
28737 v.reset(OpAMD64SARQ)
28738 v.Type = t
28739 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28740 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28741 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28742 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28743 v3.AuxInt = int16ToAuxInt(64)
28744 v3.AddArg(y)
28745 v2.AddArg(v3)
28746 v1.AddArg(v2)
28747 v0.AddArg2(y, v1)
28748 v.AddArg2(x, v0)
28749 return true
28750 }
28751
28752
28753
28754 for {
28755 x := v_0
28756 y := v_1
28757 if !(shiftIsBounded(v)) {
28758 break
28759 }
28760 v.reset(OpAMD64SARQ)
28761 v.AddArg2(x, y)
28762 return true
28763 }
28764 return false
28765 }
28766 func rewriteValueAMD64_OpRsh64x32(v *Value) bool {
28767 v_1 := v.Args[1]
28768 v_0 := v.Args[0]
28769 b := v.Block
28770
28771
28772
28773 for {
28774 t := v.Type
28775 x := v_0
28776 y := v_1
28777 if !(!shiftIsBounded(v)) {
28778 break
28779 }
28780 v.reset(OpAMD64SARQ)
28781 v.Type = t
28782 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28783 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28784 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28785 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28786 v3.AuxInt = int32ToAuxInt(64)
28787 v3.AddArg(y)
28788 v2.AddArg(v3)
28789 v1.AddArg(v2)
28790 v0.AddArg2(y, v1)
28791 v.AddArg2(x, v0)
28792 return true
28793 }
28794
28795
28796
28797 for {
28798 x := v_0
28799 y := v_1
28800 if !(shiftIsBounded(v)) {
28801 break
28802 }
28803 v.reset(OpAMD64SARQ)
28804 v.AddArg2(x, y)
28805 return true
28806 }
28807 return false
28808 }
28809 func rewriteValueAMD64_OpRsh64x64(v *Value) bool {
28810 v_1 := v.Args[1]
28811 v_0 := v.Args[0]
28812 b := v.Block
28813
28814
28815
28816 for {
28817 t := v.Type
28818 x := v_0
28819 y := v_1
28820 if !(!shiftIsBounded(v)) {
28821 break
28822 }
28823 v.reset(OpAMD64SARQ)
28824 v.Type = t
28825 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
28826 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
28827 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
28828 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28829 v3.AuxInt = int32ToAuxInt(64)
28830 v3.AddArg(y)
28831 v2.AddArg(v3)
28832 v1.AddArg(v2)
28833 v0.AddArg2(y, v1)
28834 v.AddArg2(x, v0)
28835 return true
28836 }
28837
28838
28839
28840 for {
28841 x := v_0
28842 y := v_1
28843 if !(shiftIsBounded(v)) {
28844 break
28845 }
28846 v.reset(OpAMD64SARQ)
28847 v.AddArg2(x, y)
28848 return true
28849 }
28850 return false
28851 }
28852 func rewriteValueAMD64_OpRsh64x8(v *Value) bool {
28853 v_1 := v.Args[1]
28854 v_0 := v.Args[0]
28855 b := v.Block
28856
28857
28858
28859 for {
28860 t := v.Type
28861 x := v_0
28862 y := v_1
28863 if !(!shiftIsBounded(v)) {
28864 break
28865 }
28866 v.reset(OpAMD64SARQ)
28867 v.Type = t
28868 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28869 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28870 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28871 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28872 v3.AuxInt = int8ToAuxInt(64)
28873 v3.AddArg(y)
28874 v2.AddArg(v3)
28875 v1.AddArg(v2)
28876 v0.AddArg2(y, v1)
28877 v.AddArg2(x, v0)
28878 return true
28879 }
28880
28881
28882
28883 for {
28884 x := v_0
28885 y := v_1
28886 if !(shiftIsBounded(v)) {
28887 break
28888 }
28889 v.reset(OpAMD64SARQ)
28890 v.AddArg2(x, y)
28891 return true
28892 }
28893 return false
28894 }
28895 func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool {
28896 v_1 := v.Args[1]
28897 v_0 := v.Args[0]
28898 b := v.Block
28899
28900
28901
28902 for {
28903 t := v.Type
28904 x := v_0
28905 y := v_1
28906 if !(!shiftIsBounded(v)) {
28907 break
28908 }
28909 v.reset(OpAMD64ANDL)
28910 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
28911 v0.AddArg2(x, y)
28912 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28913 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28914 v2.AuxInt = int16ToAuxInt(8)
28915 v2.AddArg(y)
28916 v1.AddArg(v2)
28917 v.AddArg2(v0, v1)
28918 return true
28919 }
28920
28921
28922
28923 for {
28924 x := v_0
28925 y := v_1
28926 if !(shiftIsBounded(v)) {
28927 break
28928 }
28929 v.reset(OpAMD64SHRB)
28930 v.AddArg2(x, y)
28931 return true
28932 }
28933 return false
28934 }
28935 func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool {
28936 v_1 := v.Args[1]
28937 v_0 := v.Args[0]
28938 b := v.Block
28939
28940
28941
28942 for {
28943 t := v.Type
28944 x := v_0
28945 y := v_1
28946 if !(!shiftIsBounded(v)) {
28947 break
28948 }
28949 v.reset(OpAMD64ANDL)
28950 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
28951 v0.AddArg2(x, y)
28952 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28953 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28954 v2.AuxInt = int32ToAuxInt(8)
28955 v2.AddArg(y)
28956 v1.AddArg(v2)
28957 v.AddArg2(v0, v1)
28958 return true
28959 }
28960
28961
28962
28963 for {
28964 x := v_0
28965 y := v_1
28966 if !(shiftIsBounded(v)) {
28967 break
28968 }
28969 v.reset(OpAMD64SHRB)
28970 v.AddArg2(x, y)
28971 return true
28972 }
28973 return false
28974 }
28975 func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool {
28976 v_1 := v.Args[1]
28977 v_0 := v.Args[0]
28978 b := v.Block
28979
28980
28981
28982 for {
28983 t := v.Type
28984 x := v_0
28985 y := v_1
28986 if !(!shiftIsBounded(v)) {
28987 break
28988 }
28989 v.reset(OpAMD64ANDL)
28990 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
28991 v0.AddArg2(x, y)
28992 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28993 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28994 v2.AuxInt = int32ToAuxInt(8)
28995 v2.AddArg(y)
28996 v1.AddArg(v2)
28997 v.AddArg2(v0, v1)
28998 return true
28999 }
29000
29001
29002
29003 for {
29004 x := v_0
29005 y := v_1
29006 if !(shiftIsBounded(v)) {
29007 break
29008 }
29009 v.reset(OpAMD64SHRB)
29010 v.AddArg2(x, y)
29011 return true
29012 }
29013 return false
29014 }
29015 func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool {
29016 v_1 := v.Args[1]
29017 v_0 := v.Args[0]
29018 b := v.Block
29019
29020
29021
29022 for {
29023 t := v.Type
29024 x := v_0
29025 y := v_1
29026 if !(!shiftIsBounded(v)) {
29027 break
29028 }
29029 v.reset(OpAMD64ANDL)
29030 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
29031 v0.AddArg2(x, y)
29032 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
29033 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
29034 v2.AuxInt = int8ToAuxInt(8)
29035 v2.AddArg(y)
29036 v1.AddArg(v2)
29037 v.AddArg2(v0, v1)
29038 return true
29039 }
29040
29041
29042
29043 for {
29044 x := v_0
29045 y := v_1
29046 if !(shiftIsBounded(v)) {
29047 break
29048 }
29049 v.reset(OpAMD64SHRB)
29050 v.AddArg2(x, y)
29051 return true
29052 }
29053 return false
29054 }
29055 func rewriteValueAMD64_OpRsh8x16(v *Value) bool {
29056 v_1 := v.Args[1]
29057 v_0 := v.Args[0]
29058 b := v.Block
29059
29060
29061
29062 for {
29063 t := v.Type
29064 x := v_0
29065 y := v_1
29066 if !(!shiftIsBounded(v)) {
29067 break
29068 }
29069 v.reset(OpAMD64SARB)
29070 v.Type = t
29071 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
29072 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
29073 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
29074 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
29075 v3.AuxInt = int16ToAuxInt(8)
29076 v3.AddArg(y)
29077 v2.AddArg(v3)
29078 v1.AddArg(v2)
29079 v0.AddArg2(y, v1)
29080 v.AddArg2(x, v0)
29081 return true
29082 }
29083
29084
29085
29086 for {
29087 x := v_0
29088 y := v_1
29089 if !(shiftIsBounded(v)) {
29090 break
29091 }
29092 v.reset(OpAMD64SARB)
29093 v.AddArg2(x, y)
29094 return true
29095 }
29096 return false
29097 }
29098 func rewriteValueAMD64_OpRsh8x32(v *Value) bool {
29099 v_1 := v.Args[1]
29100 v_0 := v.Args[0]
29101 b := v.Block
29102
29103
29104
29105 for {
29106 t := v.Type
29107 x := v_0
29108 y := v_1
29109 if !(!shiftIsBounded(v)) {
29110 break
29111 }
29112 v.reset(OpAMD64SARB)
29113 v.Type = t
29114 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
29115 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
29116 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
29117 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
29118 v3.AuxInt = int32ToAuxInt(8)
29119 v3.AddArg(y)
29120 v2.AddArg(v3)
29121 v1.AddArg(v2)
29122 v0.AddArg2(y, v1)
29123 v.AddArg2(x, v0)
29124 return true
29125 }
29126
29127
29128
29129 for {
29130 x := v_0
29131 y := v_1
29132 if !(shiftIsBounded(v)) {
29133 break
29134 }
29135 v.reset(OpAMD64SARB)
29136 v.AddArg2(x, y)
29137 return true
29138 }
29139 return false
29140 }
29141 func rewriteValueAMD64_OpRsh8x64(v *Value) bool {
29142 v_1 := v.Args[1]
29143 v_0 := v.Args[0]
29144 b := v.Block
29145
29146
29147
29148 for {
29149 t := v.Type
29150 x := v_0
29151 y := v_1
29152 if !(!shiftIsBounded(v)) {
29153 break
29154 }
29155 v.reset(OpAMD64SARB)
29156 v.Type = t
29157 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
29158 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
29159 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
29160 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
29161 v3.AuxInt = int32ToAuxInt(8)
29162 v3.AddArg(y)
29163 v2.AddArg(v3)
29164 v1.AddArg(v2)
29165 v0.AddArg2(y, v1)
29166 v.AddArg2(x, v0)
29167 return true
29168 }
29169
29170
29171
29172 for {
29173 x := v_0
29174 y := v_1
29175 if !(shiftIsBounded(v)) {
29176 break
29177 }
29178 v.reset(OpAMD64SARB)
29179 v.AddArg2(x, y)
29180 return true
29181 }
29182 return false
29183 }
29184 func rewriteValueAMD64_OpRsh8x8(v *Value) bool {
29185 v_1 := v.Args[1]
29186 v_0 := v.Args[0]
29187 b := v.Block
29188
29189
29190
29191 for {
29192 t := v.Type
29193 x := v_0
29194 y := v_1
29195 if !(!shiftIsBounded(v)) {
29196 break
29197 }
29198 v.reset(OpAMD64SARB)
29199 v.Type = t
29200 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
29201 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
29202 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
29203 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
29204 v3.AuxInt = int8ToAuxInt(8)
29205 v3.AddArg(y)
29206 v2.AddArg(v3)
29207 v1.AddArg(v2)
29208 v0.AddArg2(y, v1)
29209 v.AddArg2(x, v0)
29210 return true
29211 }
29212
29213
29214
29215 for {
29216 x := v_0
29217 y := v_1
29218 if !(shiftIsBounded(v)) {
29219 break
29220 }
29221 v.reset(OpAMD64SARB)
29222 v.AddArg2(x, y)
29223 return true
29224 }
29225 return false
29226 }
29227 func rewriteValueAMD64_OpSelect0(v *Value) bool {
29228 v_0 := v.Args[0]
29229 b := v.Block
29230 typ := &b.Func.Config.Types
29231
29232
29233 for {
29234 if v_0.Op != OpMul64uover {
29235 break
29236 }
29237 y := v_0.Args[1]
29238 x := v_0.Args[0]
29239 v.reset(OpSelect0)
29240 v.Type = typ.UInt64
29241 v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
29242 v0.AddArg2(x, y)
29243 v.AddArg(v0)
29244 return true
29245 }
29246
29247
29248 for {
29249 if v_0.Op != OpMul32uover {
29250 break
29251 }
29252 y := v_0.Args[1]
29253 x := v_0.Args[0]
29254 v.reset(OpSelect0)
29255 v.Type = typ.UInt32
29256 v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
29257 v0.AddArg2(x, y)
29258 v.AddArg(v0)
29259 return true
29260 }
29261
29262
29263 for {
29264 if v_0.Op != OpAdd64carry {
29265 break
29266 }
29267 c := v_0.Args[2]
29268 x := v_0.Args[0]
29269 y := v_0.Args[1]
29270 v.reset(OpSelect0)
29271 v.Type = typ.UInt64
29272 v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29273 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29274 v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
29275 v2.AddArg(c)
29276 v1.AddArg(v2)
29277 v0.AddArg3(x, y, v1)
29278 v.AddArg(v0)
29279 return true
29280 }
29281
29282
29283 for {
29284 if v_0.Op != OpSub64borrow {
29285 break
29286 }
29287 c := v_0.Args[2]
29288 x := v_0.Args[0]
29289 y := v_0.Args[1]
29290 v.reset(OpSelect0)
29291 v.Type = typ.UInt64
29292 v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29293 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29294 v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
29295 v2.AddArg(c)
29296 v1.AddArg(v2)
29297 v0.AddArg3(x, y, v1)
29298 v.AddArg(v0)
29299 return true
29300 }
29301
29302
29303 for {
29304 t := v.Type
29305 if v_0.Op != OpAMD64AddTupleFirst32 {
29306 break
29307 }
29308 tuple := v_0.Args[1]
29309 val := v_0.Args[0]
29310 v.reset(OpAMD64ADDL)
29311 v0 := b.NewValue0(v.Pos, OpSelect0, t)
29312 v0.AddArg(tuple)
29313 v.AddArg2(val, v0)
29314 return true
29315 }
29316
29317
29318 for {
29319 t := v.Type
29320 if v_0.Op != OpAMD64AddTupleFirst64 {
29321 break
29322 }
29323 tuple := v_0.Args[1]
29324 val := v_0.Args[0]
29325 v.reset(OpAMD64ADDQ)
29326 v0 := b.NewValue0(v.Pos, OpSelect0, t)
29327 v0.AddArg(tuple)
29328 v.AddArg2(val, v0)
29329 return true
29330 }
29331
29332
29333
29334 for {
29335 a := v_0
29336 if a.Op != OpAMD64ADDQconstflags {
29337 break
29338 }
29339 c := auxIntToInt32(a.AuxInt)
29340 x := a.Args[0]
29341 if !(a.Uses == 1) {
29342 break
29343 }
29344 v.reset(OpAMD64ADDQconst)
29345 v.AuxInt = int32ToAuxInt(c)
29346 v.AddArg(x)
29347 return true
29348 }
29349
29350
29351
29352 for {
29353 a := v_0
29354 if a.Op != OpAMD64ADDLconstflags {
29355 break
29356 }
29357 c := auxIntToInt32(a.AuxInt)
29358 x := a.Args[0]
29359 if !(a.Uses == 1) {
29360 break
29361 }
29362 v.reset(OpAMD64ADDLconst)
29363 v.AuxInt = int32ToAuxInt(c)
29364 v.AddArg(x)
29365 return true
29366 }
29367 return false
29368 }
29369 func rewriteValueAMD64_OpSelect1(v *Value) bool {
29370 v_0 := v.Args[0]
29371 b := v.Block
29372 typ := &b.Func.Config.Types
29373
29374
29375 for {
29376 if v_0.Op != OpMul64uover {
29377 break
29378 }
29379 y := v_0.Args[1]
29380 x := v_0.Args[0]
29381 v.reset(OpAMD64SETO)
29382 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29383 v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
29384 v1.AddArg2(x, y)
29385 v0.AddArg(v1)
29386 v.AddArg(v0)
29387 return true
29388 }
29389
29390
29391 for {
29392 if v_0.Op != OpMul32uover {
29393 break
29394 }
29395 y := v_0.Args[1]
29396 x := v_0.Args[0]
29397 v.reset(OpAMD64SETO)
29398 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29399 v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
29400 v1.AddArg2(x, y)
29401 v0.AddArg(v1)
29402 v.AddArg(v0)
29403 return true
29404 }
29405
29406
29407 for {
29408 if v_0.Op != OpAdd64carry {
29409 break
29410 }
29411 c := v_0.Args[2]
29412 x := v_0.Args[0]
29413 y := v_0.Args[1]
29414 v.reset(OpAMD64NEGQ)
29415 v.Type = typ.UInt64
29416 v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
29417 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29418 v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29419 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29420 v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
29421 v4.AddArg(c)
29422 v3.AddArg(v4)
29423 v2.AddArg3(x, y, v3)
29424 v1.AddArg(v2)
29425 v0.AddArg(v1)
29426 v.AddArg(v0)
29427 return true
29428 }
29429
29430
29431 for {
29432 if v_0.Op != OpSub64borrow {
29433 break
29434 }
29435 c := v_0.Args[2]
29436 x := v_0.Args[0]
29437 y := v_0.Args[1]
29438 v.reset(OpAMD64NEGQ)
29439 v.Type = typ.UInt64
29440 v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
29441 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29442 v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29443 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29444 v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
29445 v4.AddArg(c)
29446 v3.AddArg(v4)
29447 v2.AddArg3(x, y, v3)
29448 v1.AddArg(v2)
29449 v0.AddArg(v1)
29450 v.AddArg(v0)
29451 return true
29452 }
29453
29454
29455 for {
29456 if v_0.Op != OpAMD64NEGLflags {
29457 break
29458 }
29459 v_0_0 := v_0.Args[0]
29460 if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 0 {
29461 break
29462 }
29463 v.reset(OpAMD64FlagEQ)
29464 return true
29465 }
29466
29467
29468 for {
29469 if v_0.Op != OpAMD64NEGLflags {
29470 break
29471 }
29472 v_0_0 := v_0.Args[0]
29473 if v_0_0.Op != OpAMD64NEGQ {
29474 break
29475 }
29476 v_0_0_0 := v_0_0.Args[0]
29477 if v_0_0_0.Op != OpAMD64SBBQcarrymask {
29478 break
29479 }
29480 x := v_0_0_0.Args[0]
29481 v.copyOf(x)
29482 return true
29483 }
29484
29485
29486 for {
29487 if v_0.Op != OpAMD64AddTupleFirst32 {
29488 break
29489 }
29490 tuple := v_0.Args[1]
29491 v.reset(OpSelect1)
29492 v.AddArg(tuple)
29493 return true
29494 }
29495
29496
29497 for {
29498 if v_0.Op != OpAMD64AddTupleFirst64 {
29499 break
29500 }
29501 tuple := v_0.Args[1]
29502 v.reset(OpSelect1)
29503 v.AddArg(tuple)
29504 return true
29505 }
29506
29507
29508
29509 for {
29510 a := v_0
29511 if a.Op != OpAMD64LoweredAtomicAnd64 {
29512 break
29513 }
29514 mem := a.Args[2]
29515 ptr := a.Args[0]
29516 val := a.Args[1]
29517 if !(a.Uses == 1 && clobber(a)) {
29518 break
29519 }
29520 v.reset(OpAMD64ANDQlock)
29521 v.AddArg3(ptr, val, mem)
29522 return true
29523 }
29524
29525
29526
29527 for {
29528 a := v_0
29529 if a.Op != OpAMD64LoweredAtomicAnd32 {
29530 break
29531 }
29532 mem := a.Args[2]
29533 ptr := a.Args[0]
29534 val := a.Args[1]
29535 if !(a.Uses == 1 && clobber(a)) {
29536 break
29537 }
29538 v.reset(OpAMD64ANDLlock)
29539 v.AddArg3(ptr, val, mem)
29540 return true
29541 }
29542
29543
29544
29545 for {
29546 a := v_0
29547 if a.Op != OpAMD64LoweredAtomicOr64 {
29548 break
29549 }
29550 mem := a.Args[2]
29551 ptr := a.Args[0]
29552 val := a.Args[1]
29553 if !(a.Uses == 1 && clobber(a)) {
29554 break
29555 }
29556 v.reset(OpAMD64ORQlock)
29557 v.AddArg3(ptr, val, mem)
29558 return true
29559 }
29560
29561
29562
29563 for {
29564 a := v_0
29565 if a.Op != OpAMD64LoweredAtomicOr32 {
29566 break
29567 }
29568 mem := a.Args[2]
29569 ptr := a.Args[0]
29570 val := a.Args[1]
29571 if !(a.Uses == 1 && clobber(a)) {
29572 break
29573 }
29574 v.reset(OpAMD64ORLlock)
29575 v.AddArg3(ptr, val, mem)
29576 return true
29577 }
29578 return false
29579 }
29580 func rewriteValueAMD64_OpSelectN(v *Value) bool {
29581 v_0 := v.Args[0]
29582 b := v.Block
29583 config := b.Func.Config
29584
29585
29586
29587 for {
29588 if auxIntToInt64(v.AuxInt) != 0 {
29589 break
29590 }
29591 call := v_0
29592 if call.Op != OpAMD64CALLstatic || len(call.Args) != 1 {
29593 break
29594 }
29595 sym := auxToCall(call.Aux)
29596 s1 := call.Args[0]
29597 if s1.Op != OpAMD64MOVQstoreconst {
29598 break
29599 }
29600 sc := auxIntToValAndOff(s1.AuxInt)
29601 _ = s1.Args[1]
29602 s2 := s1.Args[1]
29603 if s2.Op != OpAMD64MOVQstore {
29604 break
29605 }
29606 _ = s2.Args[2]
29607 src := s2.Args[1]
29608 s3 := s2.Args[2]
29609 if s3.Op != OpAMD64MOVQstore {
29610 break
29611 }
29612 mem := s3.Args[2]
29613 dst := s3.Args[1]
29614 if !(sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)) {
29615 break
29616 }
29617 v.reset(OpMove)
29618 v.AuxInt = int64ToAuxInt(sc.Val64())
29619 v.AddArg3(dst, src, mem)
29620 return true
29621 }
29622
29623
29624
29625 for {
29626 if auxIntToInt64(v.AuxInt) != 0 {
29627 break
29628 }
29629 call := v_0
29630 if call.Op != OpAMD64CALLstatic || len(call.Args) != 4 {
29631 break
29632 }
29633 sym := auxToCall(call.Aux)
29634 mem := call.Args[3]
29635 dst := call.Args[0]
29636 src := call.Args[1]
29637 call_2 := call.Args[2]
29638 if call_2.Op != OpAMD64MOVQconst {
29639 break
29640 }
29641 sz := auxIntToInt64(call_2.AuxInt)
29642 if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) {
29643 break
29644 }
29645 v.reset(OpMove)
29646 v.AuxInt = int64ToAuxInt(sz)
29647 v.AddArg3(dst, src, mem)
29648 return true
29649 }
29650 return false
29651 }
29652 func rewriteValueAMD64_OpSlicemask(v *Value) bool {
29653 v_0 := v.Args[0]
29654 b := v.Block
29655
29656
29657 for {
29658 t := v.Type
29659 x := v_0
29660 v.reset(OpAMD64SARQconst)
29661 v.AuxInt = int8ToAuxInt(63)
29662 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
29663 v0.AddArg(x)
29664 v.AddArg(v0)
29665 return true
29666 }
29667 }
29668 func rewriteValueAMD64_OpSpectreIndex(v *Value) bool {
29669 v_1 := v.Args[1]
29670 v_0 := v.Args[0]
29671 b := v.Block
29672 typ := &b.Func.Config.Types
29673
29674
29675 for {
29676 x := v_0
29677 y := v_1
29678 v.reset(OpAMD64CMOVQCC)
29679 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
29680 v0.AuxInt = int64ToAuxInt(0)
29681 v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
29682 v1.AddArg2(x, y)
29683 v.AddArg3(x, v0, v1)
29684 return true
29685 }
29686 }
29687 func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool {
29688 v_1 := v.Args[1]
29689 v_0 := v.Args[0]
29690 b := v.Block
29691 typ := &b.Func.Config.Types
29692
29693
29694 for {
29695 x := v_0
29696 y := v_1
29697 v.reset(OpAMD64CMOVQHI)
29698 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
29699 v0.AuxInt = int64ToAuxInt(0)
29700 v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
29701 v1.AddArg2(x, y)
29702 v.AddArg3(x, v0, v1)
29703 return true
29704 }
29705 }
29706 func rewriteValueAMD64_OpStore(v *Value) bool {
29707 v_2 := v.Args[2]
29708 v_1 := v.Args[1]
29709 v_0 := v.Args[0]
29710
29711
29712
29713 for {
29714 t := auxToType(v.Aux)
29715 ptr := v_0
29716 val := v_1
29717 mem := v_2
29718 if !(t.Size() == 8 && t.IsFloat()) {
29719 break
29720 }
29721 v.reset(OpAMD64MOVSDstore)
29722 v.AddArg3(ptr, val, mem)
29723 return true
29724 }
29725
29726
29727
29728 for {
29729 t := auxToType(v.Aux)
29730 ptr := v_0
29731 val := v_1
29732 mem := v_2
29733 if !(t.Size() == 4 && t.IsFloat()) {
29734 break
29735 }
29736 v.reset(OpAMD64MOVSSstore)
29737 v.AddArg3(ptr, val, mem)
29738 return true
29739 }
29740
29741
29742
29743 for {
29744 t := auxToType(v.Aux)
29745 ptr := v_0
29746 val := v_1
29747 mem := v_2
29748 if !(t.Size() == 8 && !t.IsFloat()) {
29749 break
29750 }
29751 v.reset(OpAMD64MOVQstore)
29752 v.AddArg3(ptr, val, mem)
29753 return true
29754 }
29755
29756
29757
29758 for {
29759 t := auxToType(v.Aux)
29760 ptr := v_0
29761 val := v_1
29762 mem := v_2
29763 if !(t.Size() == 4 && !t.IsFloat()) {
29764 break
29765 }
29766 v.reset(OpAMD64MOVLstore)
29767 v.AddArg3(ptr, val, mem)
29768 return true
29769 }
29770
29771
29772
29773 for {
29774 t := auxToType(v.Aux)
29775 ptr := v_0
29776 val := v_1
29777 mem := v_2
29778 if !(t.Size() == 2) {
29779 break
29780 }
29781 v.reset(OpAMD64MOVWstore)
29782 v.AddArg3(ptr, val, mem)
29783 return true
29784 }
29785
29786
29787
29788 for {
29789 t := auxToType(v.Aux)
29790 ptr := v_0
29791 val := v_1
29792 mem := v_2
29793 if !(t.Size() == 1) {
29794 break
29795 }
29796 v.reset(OpAMD64MOVBstore)
29797 v.AddArg3(ptr, val, mem)
29798 return true
29799 }
29800 return false
29801 }
29802 func rewriteValueAMD64_OpTrunc(v *Value) bool {
29803 v_0 := v.Args[0]
29804
29805
29806 for {
29807 x := v_0
29808 v.reset(OpAMD64ROUNDSD)
29809 v.AuxInt = int8ToAuxInt(3)
29810 v.AddArg(x)
29811 return true
29812 }
29813 }
29814 func rewriteValueAMD64_OpZero(v *Value) bool {
29815 v_1 := v.Args[1]
29816 v_0 := v.Args[0]
29817 b := v.Block
29818 typ := &b.Func.Config.Types
29819
29820
29821 for {
29822 if auxIntToInt64(v.AuxInt) != 0 {
29823 break
29824 }
29825 mem := v_1
29826 v.copyOf(mem)
29827 return true
29828 }
29829
29830
29831 for {
29832 if auxIntToInt64(v.AuxInt) != 1 {
29833 break
29834 }
29835 destptr := v_0
29836 mem := v_1
29837 v.reset(OpAMD64MOVBstoreconst)
29838 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29839 v.AddArg2(destptr, mem)
29840 return true
29841 }
29842
29843
29844 for {
29845 if auxIntToInt64(v.AuxInt) != 2 {
29846 break
29847 }
29848 destptr := v_0
29849 mem := v_1
29850 v.reset(OpAMD64MOVWstoreconst)
29851 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29852 v.AddArg2(destptr, mem)
29853 return true
29854 }
29855
29856
29857 for {
29858 if auxIntToInt64(v.AuxInt) != 4 {
29859 break
29860 }
29861 destptr := v_0
29862 mem := v_1
29863 v.reset(OpAMD64MOVLstoreconst)
29864 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29865 v.AddArg2(destptr, mem)
29866 return true
29867 }
29868
29869
29870 for {
29871 if auxIntToInt64(v.AuxInt) != 8 {
29872 break
29873 }
29874 destptr := v_0
29875 mem := v_1
29876 v.reset(OpAMD64MOVQstoreconst)
29877 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29878 v.AddArg2(destptr, mem)
29879 return true
29880 }
29881
29882
29883 for {
29884 if auxIntToInt64(v.AuxInt) != 3 {
29885 break
29886 }
29887 destptr := v_0
29888 mem := v_1
29889 v.reset(OpAMD64MOVBstoreconst)
29890 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2))
29891 v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
29892 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29893 v0.AddArg2(destptr, mem)
29894 v.AddArg2(destptr, v0)
29895 return true
29896 }
29897
29898
29899 for {
29900 if auxIntToInt64(v.AuxInt) != 5 {
29901 break
29902 }
29903 destptr := v_0
29904 mem := v_1
29905 v.reset(OpAMD64MOVBstoreconst)
29906 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
29907 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
29908 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29909 v0.AddArg2(destptr, mem)
29910 v.AddArg2(destptr, v0)
29911 return true
29912 }
29913
29914
29915 for {
29916 if auxIntToInt64(v.AuxInt) != 6 {
29917 break
29918 }
29919 destptr := v_0
29920 mem := v_1
29921 v.reset(OpAMD64MOVWstoreconst)
29922 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
29923 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
29924 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29925 v0.AddArg2(destptr, mem)
29926 v.AddArg2(destptr, v0)
29927 return true
29928 }
29929
29930
29931 for {
29932 if auxIntToInt64(v.AuxInt) != 7 {
29933 break
29934 }
29935 destptr := v_0
29936 mem := v_1
29937 v.reset(OpAMD64MOVLstoreconst)
29938 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3))
29939 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
29940 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29941 v0.AddArg2(destptr, mem)
29942 v.AddArg2(destptr, v0)
29943 return true
29944 }
29945
29946
29947 for {
29948 if auxIntToInt64(v.AuxInt) != 9 {
29949 break
29950 }
29951 destptr := v_0
29952 mem := v_1
29953 v.reset(OpAMD64MOVBstoreconst)
29954 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
29955 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29956 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29957 v0.AddArg2(destptr, mem)
29958 v.AddArg2(destptr, v0)
29959 return true
29960 }
29961
29962
29963 for {
29964 if auxIntToInt64(v.AuxInt) != 10 {
29965 break
29966 }
29967 destptr := v_0
29968 mem := v_1
29969 v.reset(OpAMD64MOVWstoreconst)
29970 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
29971 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29972 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29973 v0.AddArg2(destptr, mem)
29974 v.AddArg2(destptr, v0)
29975 return true
29976 }
29977
29978
29979 for {
29980 if auxIntToInt64(v.AuxInt) != 11 {
29981 break
29982 }
29983 destptr := v_0
29984 mem := v_1
29985 v.reset(OpAMD64MOVLstoreconst)
29986 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 7))
29987 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29988 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29989 v0.AddArg2(destptr, mem)
29990 v.AddArg2(destptr, v0)
29991 return true
29992 }
29993
29994
29995 for {
29996 if auxIntToInt64(v.AuxInt) != 12 {
29997 break
29998 }
29999 destptr := v_0
30000 mem := v_1
30001 v.reset(OpAMD64MOVLstoreconst)
30002 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
30003 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
30004 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30005 v0.AddArg2(destptr, mem)
30006 v.AddArg2(destptr, v0)
30007 return true
30008 }
30009
30010
30011
30012 for {
30013 s := auxIntToInt64(v.AuxInt)
30014 destptr := v_0
30015 mem := v_1
30016 if !(s > 12 && s < 16) {
30017 break
30018 }
30019 v.reset(OpAMD64MOVQstoreconst)
30020 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, int32(s-8)))
30021 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
30022 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30023 v0.AddArg2(destptr, mem)
30024 v.AddArg2(destptr, v0)
30025 return true
30026 }
30027
30028
30029
30030 for {
30031 s := auxIntToInt64(v.AuxInt)
30032 destptr := v_0
30033 mem := v_1
30034 if !(s%16 != 0 && s > 16) {
30035 break
30036 }
30037 v.reset(OpZero)
30038 v.AuxInt = int64ToAuxInt(s - s%16)
30039 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
30040 v0.AuxInt = int64ToAuxInt(s % 16)
30041 v0.AddArg(destptr)
30042 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30043 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30044 v1.AddArg2(destptr, mem)
30045 v.AddArg2(v0, v1)
30046 return true
30047 }
30048
30049
30050 for {
30051 if auxIntToInt64(v.AuxInt) != 16 {
30052 break
30053 }
30054 destptr := v_0
30055 mem := v_1
30056 v.reset(OpAMD64MOVOstoreconst)
30057 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30058 v.AddArg2(destptr, mem)
30059 return true
30060 }
30061
30062
30063 for {
30064 if auxIntToInt64(v.AuxInt) != 32 {
30065 break
30066 }
30067 destptr := v_0
30068 mem := v_1
30069 v.reset(OpAMD64MOVOstoreconst)
30070 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
30071 v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30072 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30073 v0.AddArg2(destptr, mem)
30074 v.AddArg2(destptr, v0)
30075 return true
30076 }
30077
30078
30079 for {
30080 if auxIntToInt64(v.AuxInt) != 48 {
30081 break
30082 }
30083 destptr := v_0
30084 mem := v_1
30085 v.reset(OpAMD64MOVOstoreconst)
30086 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
30087 v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30088 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
30089 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30090 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30091 v1.AddArg2(destptr, mem)
30092 v0.AddArg2(destptr, v1)
30093 v.AddArg2(destptr, v0)
30094 return true
30095 }
30096
30097
30098 for {
30099 if auxIntToInt64(v.AuxInt) != 64 {
30100 break
30101 }
30102 destptr := v_0
30103 mem := v_1
30104 v.reset(OpAMD64MOVOstoreconst)
30105 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 48))
30106 v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30107 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
30108 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30109 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
30110 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30111 v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30112 v2.AddArg2(destptr, mem)
30113 v1.AddArg2(destptr, v2)
30114 v0.AddArg2(destptr, v1)
30115 v.AddArg2(destptr, v0)
30116 return true
30117 }
30118
30119
30120
30121 for {
30122 s := auxIntToInt64(v.AuxInt)
30123 destptr := v_0
30124 mem := v_1
30125 if !(s > 64 && s <= 1024 && s%16 == 0) {
30126 break
30127 }
30128 v.reset(OpAMD64DUFFZERO)
30129 v.AuxInt = int64ToAuxInt(s)
30130 v.AddArg2(destptr, mem)
30131 return true
30132 }
30133
30134
30135
30136 for {
30137 s := auxIntToInt64(v.AuxInt)
30138 destptr := v_0
30139 mem := v_1
30140 if !(s > 1024 && s%8 == 0) {
30141 break
30142 }
30143 v.reset(OpAMD64REPSTOSQ)
30144 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
30145 v0.AuxInt = int64ToAuxInt(s / 8)
30146 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
30147 v1.AuxInt = int64ToAuxInt(0)
30148 v.AddArg4(destptr, v0, v1, mem)
30149 return true
30150 }
30151 return false
30152 }
30153 func rewriteBlockAMD64(b *Block) bool {
30154 typ := &b.Func.Config.Types
30155 switch b.Kind {
30156 case BlockAMD64EQ:
30157
30158
30159 for b.Controls[0].Op == OpAMD64TESTL {
30160 v_0 := b.Controls[0]
30161 _ = v_0.Args[1]
30162 v_0_0 := v_0.Args[0]
30163 v_0_1 := v_0.Args[1]
30164 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30165 if v_0_0.Op != OpAMD64SHLL {
30166 continue
30167 }
30168 x := v_0_0.Args[1]
30169 v_0_0_0 := v_0_0.Args[0]
30170 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
30171 continue
30172 }
30173 y := v_0_1
30174 v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
30175 v0.AddArg2(x, y)
30176 b.resetWithControl(BlockAMD64UGE, v0)
30177 return true
30178 }
30179 break
30180 }
30181
30182
30183 for b.Controls[0].Op == OpAMD64TESTQ {
30184 v_0 := b.Controls[0]
30185 _ = v_0.Args[1]
30186 v_0_0 := v_0.Args[0]
30187 v_0_1 := v_0.Args[1]
30188 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30189 if v_0_0.Op != OpAMD64SHLQ {
30190 continue
30191 }
30192 x := v_0_0.Args[1]
30193 v_0_0_0 := v_0_0.Args[0]
30194 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
30195 continue
30196 }
30197 y := v_0_1
30198 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
30199 v0.AddArg2(x, y)
30200 b.resetWithControl(BlockAMD64UGE, v0)
30201 return true
30202 }
30203 break
30204 }
30205
30206
30207
30208 for b.Controls[0].Op == OpAMD64TESTLconst {
30209 v_0 := b.Controls[0]
30210 c := auxIntToInt32(v_0.AuxInt)
30211 x := v_0.Args[0]
30212 if !(isUint32PowerOfTwo(int64(c))) {
30213 break
30214 }
30215 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
30216 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
30217 v0.AddArg(x)
30218 b.resetWithControl(BlockAMD64UGE, v0)
30219 return true
30220 }
30221
30222
30223
30224 for b.Controls[0].Op == OpAMD64TESTQconst {
30225 v_0 := b.Controls[0]
30226 c := auxIntToInt32(v_0.AuxInt)
30227 x := v_0.Args[0]
30228 if !(isUint64PowerOfTwo(int64(c))) {
30229 break
30230 }
30231 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30232 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
30233 v0.AddArg(x)
30234 b.resetWithControl(BlockAMD64UGE, v0)
30235 return true
30236 }
30237
30238
30239
30240 for b.Controls[0].Op == OpAMD64TESTQ {
30241 v_0 := b.Controls[0]
30242 _ = v_0.Args[1]
30243 v_0_0 := v_0.Args[0]
30244 v_0_1 := v_0.Args[1]
30245 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30246 if v_0_0.Op != OpAMD64MOVQconst {
30247 continue
30248 }
30249 c := auxIntToInt64(v_0_0.AuxInt)
30250 x := v_0_1
30251 if !(isUint64PowerOfTwo(c)) {
30252 continue
30253 }
30254 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30255 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
30256 v0.AddArg(x)
30257 b.resetWithControl(BlockAMD64UGE, v0)
30258 return true
30259 }
30260 break
30261 }
30262
30263
30264
30265 for b.Controls[0].Op == OpAMD64TESTQ {
30266 v_0 := b.Controls[0]
30267 _ = v_0.Args[1]
30268 v_0_0 := v_0.Args[0]
30269 v_0_1 := v_0.Args[1]
30270 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30271 z1 := v_0_0
30272 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
30273 continue
30274 }
30275 z1_0 := z1.Args[0]
30276 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
30277 continue
30278 }
30279 x := z1_0.Args[0]
30280 z2 := v_0_1
30281 if !(z1 == z2) {
30282 continue
30283 }
30284 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30285 v0.AuxInt = int8ToAuxInt(63)
30286 v0.AddArg(x)
30287 b.resetWithControl(BlockAMD64UGE, v0)
30288 return true
30289 }
30290 break
30291 }
30292
30293
30294
30295 for b.Controls[0].Op == OpAMD64TESTL {
30296 v_0 := b.Controls[0]
30297 _ = v_0.Args[1]
30298 v_0_0 := v_0.Args[0]
30299 v_0_1 := v_0.Args[1]
30300 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30301 z1 := v_0_0
30302 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
30303 continue
30304 }
30305 z1_0 := z1.Args[0]
30306 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
30307 continue
30308 }
30309 x := z1_0.Args[0]
30310 z2 := v_0_1
30311 if !(z1 == z2) {
30312 continue
30313 }
30314 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30315 v0.AuxInt = int8ToAuxInt(31)
30316 v0.AddArg(x)
30317 b.resetWithControl(BlockAMD64UGE, v0)
30318 return true
30319 }
30320 break
30321 }
30322
30323
30324
30325 for b.Controls[0].Op == OpAMD64TESTQ {
30326 v_0 := b.Controls[0]
30327 _ = v_0.Args[1]
30328 v_0_0 := v_0.Args[0]
30329 v_0_1 := v_0.Args[1]
30330 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30331 z1 := v_0_0
30332 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
30333 continue
30334 }
30335 z1_0 := z1.Args[0]
30336 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
30337 continue
30338 }
30339 x := z1_0.Args[0]
30340 z2 := v_0_1
30341 if !(z1 == z2) {
30342 continue
30343 }
30344 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30345 v0.AuxInt = int8ToAuxInt(0)
30346 v0.AddArg(x)
30347 b.resetWithControl(BlockAMD64UGE, v0)
30348 return true
30349 }
30350 break
30351 }
30352
30353
30354
30355 for b.Controls[0].Op == OpAMD64TESTL {
30356 v_0 := b.Controls[0]
30357 _ = v_0.Args[1]
30358 v_0_0 := v_0.Args[0]
30359 v_0_1 := v_0.Args[1]
30360 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30361 z1 := v_0_0
30362 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
30363 continue
30364 }
30365 z1_0 := z1.Args[0]
30366 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
30367 continue
30368 }
30369 x := z1_0.Args[0]
30370 z2 := v_0_1
30371 if !(z1 == z2) {
30372 continue
30373 }
30374 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
30375 v0.AuxInt = int8ToAuxInt(0)
30376 v0.AddArg(x)
30377 b.resetWithControl(BlockAMD64UGE, v0)
30378 return true
30379 }
30380 break
30381 }
30382
30383
30384
30385 for b.Controls[0].Op == OpAMD64TESTQ {
30386 v_0 := b.Controls[0]
30387 _ = v_0.Args[1]
30388 v_0_0 := v_0.Args[0]
30389 v_0_1 := v_0.Args[1]
30390 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30391 z1 := v_0_0
30392 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
30393 continue
30394 }
30395 x := z1.Args[0]
30396 z2 := v_0_1
30397 if !(z1 == z2) {
30398 continue
30399 }
30400 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30401 v0.AuxInt = int8ToAuxInt(63)
30402 v0.AddArg(x)
30403 b.resetWithControl(BlockAMD64UGE, v0)
30404 return true
30405 }
30406 break
30407 }
30408
30409
30410
30411 for b.Controls[0].Op == OpAMD64TESTL {
30412 v_0 := b.Controls[0]
30413 _ = v_0.Args[1]
30414 v_0_0 := v_0.Args[0]
30415 v_0_1 := v_0.Args[1]
30416 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30417 z1 := v_0_0
30418 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
30419 continue
30420 }
30421 x := z1.Args[0]
30422 z2 := v_0_1
30423 if !(z1 == z2) {
30424 continue
30425 }
30426 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
30427 v0.AuxInt = int8ToAuxInt(31)
30428 v0.AddArg(x)
30429 b.resetWithControl(BlockAMD64UGE, v0)
30430 return true
30431 }
30432 break
30433 }
30434
30435
30436 for b.Controls[0].Op == OpAMD64InvertFlags {
30437 v_0 := b.Controls[0]
30438 cmp := v_0.Args[0]
30439 b.resetWithControl(BlockAMD64EQ, cmp)
30440 return true
30441 }
30442
30443
30444 for b.Controls[0].Op == OpAMD64FlagEQ {
30445 b.Reset(BlockFirst)
30446 return true
30447 }
30448
30449
30450 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30451 b.Reset(BlockFirst)
30452 b.swapSuccessors()
30453 return true
30454 }
30455
30456
30457 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30458 b.Reset(BlockFirst)
30459 b.swapSuccessors()
30460 return true
30461 }
30462
30463
30464 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30465 b.Reset(BlockFirst)
30466 b.swapSuccessors()
30467 return true
30468 }
30469
30470
30471 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30472 b.Reset(BlockFirst)
30473 b.swapSuccessors()
30474 return true
30475 }
30476
30477
30478 for b.Controls[0].Op == OpAMD64TESTQ {
30479 v_0 := b.Controls[0]
30480 _ = v_0.Args[1]
30481 v_0_0 := v_0.Args[0]
30482 v_0_1 := v_0.Args[1]
30483 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30484 s := v_0_0
30485 if s.Op != OpSelect0 {
30486 continue
30487 }
30488 blsr := s.Args[0]
30489 if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
30490 continue
30491 }
30492 v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
30493 v0.AddArg(blsr)
30494 b.resetWithControl(BlockAMD64EQ, v0)
30495 return true
30496 }
30497 break
30498 }
30499
30500
30501 for b.Controls[0].Op == OpAMD64TESTL {
30502 v_0 := b.Controls[0]
30503 _ = v_0.Args[1]
30504 v_0_0 := v_0.Args[0]
30505 v_0_1 := v_0.Args[1]
30506 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30507 s := v_0_0
30508 if s.Op != OpSelect0 {
30509 continue
30510 }
30511 blsr := s.Args[0]
30512 if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
30513 continue
30514 }
30515 v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
30516 v0.AddArg(blsr)
30517 b.resetWithControl(BlockAMD64EQ, v0)
30518 return true
30519 }
30520 break
30521 }
30522
30523
30524
30525 for b.Controls[0].Op == OpAMD64TESTQ {
30526 t := b.Controls[0]
30527 _ = t.Args[1]
30528 t_0 := t.Args[0]
30529 t_1 := t.Args[1]
30530 for _i0 := 0; _i0 <= 1; _i0, t_0, t_1 = _i0+1, t_1, t_0 {
30531 a := t_0
30532 if a.Op != OpAMD64ADDQconst {
30533 continue
30534 }
30535 if a != t_1 || !(t.Uses == 1 && flagify(a)) {
30536 continue
30537 }
30538 v0 := b.NewValue0(t.Pos, OpSelect1, types.TypeFlags)
30539 v0.AddArg(a.Args[0])
30540 b.resetWithControl(BlockAMD64EQ, v0)
30541 return true
30542 }
30543 break
30544 }
30545
30546
30547
30548 for b.Controls[0].Op == OpAMD64TESTL {
30549 t := b.Controls[0]
30550 _ = t.Args[1]
30551 t_0 := t.Args[0]
30552 t_1 := t.Args[1]
30553 for _i0 := 0; _i0 <= 1; _i0, t_0, t_1 = _i0+1, t_1, t_0 {
30554 a := t_0
30555 if a.Op != OpAMD64ADDLconst {
30556 continue
30557 }
30558 if a != t_1 || !(t.Uses == 1 && flagify(a)) {
30559 continue
30560 }
30561 v0 := b.NewValue0(t.Pos, OpSelect1, types.TypeFlags)
30562 v0.AddArg(a.Args[0])
30563 b.resetWithControl(BlockAMD64EQ, v0)
30564 return true
30565 }
30566 break
30567 }
30568 case BlockAMD64GE:
30569
30570
30571
30572 for b.Controls[0].Op == OpAMD64CMPQconst {
30573 c := b.Controls[0]
30574 if auxIntToInt32(c.AuxInt) != 128 {
30575 break
30576 }
30577 z := c.Args[0]
30578 if !(c.Uses == 1) {
30579 break
30580 }
30581 v0 := b.NewValue0(c.Pos, OpAMD64CMPQconst, types.TypeFlags)
30582 v0.AuxInt = int32ToAuxInt(127)
30583 v0.AddArg(z)
30584 b.resetWithControl(BlockAMD64GT, v0)
30585 return true
30586 }
30587
30588
30589
30590 for b.Controls[0].Op == OpAMD64CMPLconst {
30591 c := b.Controls[0]
30592 if auxIntToInt32(c.AuxInt) != 128 {
30593 break
30594 }
30595 z := c.Args[0]
30596 if !(c.Uses == 1) {
30597 break
30598 }
30599 v0 := b.NewValue0(c.Pos, OpAMD64CMPLconst, types.TypeFlags)
30600 v0.AuxInt = int32ToAuxInt(127)
30601 v0.AddArg(z)
30602 b.resetWithControl(BlockAMD64GT, v0)
30603 return true
30604 }
30605
30606
30607 for b.Controls[0].Op == OpAMD64InvertFlags {
30608 v_0 := b.Controls[0]
30609 cmp := v_0.Args[0]
30610 b.resetWithControl(BlockAMD64LE, cmp)
30611 return true
30612 }
30613
30614
30615 for b.Controls[0].Op == OpAMD64FlagEQ {
30616 b.Reset(BlockFirst)
30617 return true
30618 }
30619
30620
30621 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30622 b.Reset(BlockFirst)
30623 b.swapSuccessors()
30624 return true
30625 }
30626
30627
30628 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30629 b.Reset(BlockFirst)
30630 b.swapSuccessors()
30631 return true
30632 }
30633
30634
30635 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30636 b.Reset(BlockFirst)
30637 return true
30638 }
30639
30640
30641 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30642 b.Reset(BlockFirst)
30643 return true
30644 }
30645 case BlockAMD64GT:
30646
30647
30648 for b.Controls[0].Op == OpAMD64InvertFlags {
30649 v_0 := b.Controls[0]
30650 cmp := v_0.Args[0]
30651 b.resetWithControl(BlockAMD64LT, cmp)
30652 return true
30653 }
30654
30655
30656 for b.Controls[0].Op == OpAMD64FlagEQ {
30657 b.Reset(BlockFirst)
30658 b.swapSuccessors()
30659 return true
30660 }
30661
30662
30663 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30664 b.Reset(BlockFirst)
30665 b.swapSuccessors()
30666 return true
30667 }
30668
30669
30670 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30671 b.Reset(BlockFirst)
30672 b.swapSuccessors()
30673 return true
30674 }
30675
30676
30677 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30678 b.Reset(BlockFirst)
30679 return true
30680 }
30681
30682
30683 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30684 b.Reset(BlockFirst)
30685 return true
30686 }
30687 case BlockIf:
30688
30689
30690 for b.Controls[0].Op == OpAMD64SETL {
30691 v_0 := b.Controls[0]
30692 cmp := v_0.Args[0]
30693 b.resetWithControl(BlockAMD64LT, cmp)
30694 return true
30695 }
30696
30697
30698 for b.Controls[0].Op == OpAMD64SETLE {
30699 v_0 := b.Controls[0]
30700 cmp := v_0.Args[0]
30701 b.resetWithControl(BlockAMD64LE, cmp)
30702 return true
30703 }
30704
30705
30706 for b.Controls[0].Op == OpAMD64SETG {
30707 v_0 := b.Controls[0]
30708 cmp := v_0.Args[0]
30709 b.resetWithControl(BlockAMD64GT, cmp)
30710 return true
30711 }
30712
30713
30714 for b.Controls[0].Op == OpAMD64SETGE {
30715 v_0 := b.Controls[0]
30716 cmp := v_0.Args[0]
30717 b.resetWithControl(BlockAMD64GE, cmp)
30718 return true
30719 }
30720
30721
30722 for b.Controls[0].Op == OpAMD64SETEQ {
30723 v_0 := b.Controls[0]
30724 cmp := v_0.Args[0]
30725 b.resetWithControl(BlockAMD64EQ, cmp)
30726 return true
30727 }
30728
30729
30730 for b.Controls[0].Op == OpAMD64SETNE {
30731 v_0 := b.Controls[0]
30732 cmp := v_0.Args[0]
30733 b.resetWithControl(BlockAMD64NE, cmp)
30734 return true
30735 }
30736
30737
30738 for b.Controls[0].Op == OpAMD64SETB {
30739 v_0 := b.Controls[0]
30740 cmp := v_0.Args[0]
30741 b.resetWithControl(BlockAMD64ULT, cmp)
30742 return true
30743 }
30744
30745
30746 for b.Controls[0].Op == OpAMD64SETBE {
30747 v_0 := b.Controls[0]
30748 cmp := v_0.Args[0]
30749 b.resetWithControl(BlockAMD64ULE, cmp)
30750 return true
30751 }
30752
30753
30754 for b.Controls[0].Op == OpAMD64SETA {
30755 v_0 := b.Controls[0]
30756 cmp := v_0.Args[0]
30757 b.resetWithControl(BlockAMD64UGT, cmp)
30758 return true
30759 }
30760
30761
30762 for b.Controls[0].Op == OpAMD64SETAE {
30763 v_0 := b.Controls[0]
30764 cmp := v_0.Args[0]
30765 b.resetWithControl(BlockAMD64UGE, cmp)
30766 return true
30767 }
30768
30769
30770 for b.Controls[0].Op == OpAMD64SETO {
30771 v_0 := b.Controls[0]
30772 cmp := v_0.Args[0]
30773 b.resetWithControl(BlockAMD64OS, cmp)
30774 return true
30775 }
30776
30777
30778 for b.Controls[0].Op == OpAMD64SETGF {
30779 v_0 := b.Controls[0]
30780 cmp := v_0.Args[0]
30781 b.resetWithControl(BlockAMD64UGT, cmp)
30782 return true
30783 }
30784
30785
30786 for b.Controls[0].Op == OpAMD64SETGEF {
30787 v_0 := b.Controls[0]
30788 cmp := v_0.Args[0]
30789 b.resetWithControl(BlockAMD64UGE, cmp)
30790 return true
30791 }
30792
30793
30794 for b.Controls[0].Op == OpAMD64SETEQF {
30795 v_0 := b.Controls[0]
30796 cmp := v_0.Args[0]
30797 b.resetWithControl(BlockAMD64EQF, cmp)
30798 return true
30799 }
30800
30801
30802 for b.Controls[0].Op == OpAMD64SETNEF {
30803 v_0 := b.Controls[0]
30804 cmp := v_0.Args[0]
30805 b.resetWithControl(BlockAMD64NEF, cmp)
30806 return true
30807 }
30808
30809
30810 for {
30811 cond := b.Controls[0]
30812 v0 := b.NewValue0(cond.Pos, OpAMD64TESTB, types.TypeFlags)
30813 v0.AddArg2(cond, cond)
30814 b.resetWithControl(BlockAMD64NE, v0)
30815 return true
30816 }
30817 case BlockJumpTable:
30818
30819
30820 for {
30821 idx := b.Controls[0]
30822 v0 := b.NewValue0(b.Pos, OpAMD64LEAQ, typ.Uintptr)
30823 v0.Aux = symToAux(makeJumpTableSym(b))
30824 v1 := b.NewValue0(b.Pos, OpSB, typ.Uintptr)
30825 v0.AddArg(v1)
30826 b.resetWithControl2(BlockAMD64JUMPTABLE, idx, v0)
30827 b.Aux = symToAux(makeJumpTableSym(b))
30828 return true
30829 }
30830 case BlockAMD64LE:
30831
30832
30833 for b.Controls[0].Op == OpAMD64InvertFlags {
30834 v_0 := b.Controls[0]
30835 cmp := v_0.Args[0]
30836 b.resetWithControl(BlockAMD64GE, cmp)
30837 return true
30838 }
30839
30840
30841 for b.Controls[0].Op == OpAMD64FlagEQ {
30842 b.Reset(BlockFirst)
30843 return true
30844 }
30845
30846
30847 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30848 b.Reset(BlockFirst)
30849 return true
30850 }
30851
30852
30853 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30854 b.Reset(BlockFirst)
30855 return true
30856 }
30857
30858
30859 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30860 b.Reset(BlockFirst)
30861 b.swapSuccessors()
30862 return true
30863 }
30864
30865
30866 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30867 b.Reset(BlockFirst)
30868 b.swapSuccessors()
30869 return true
30870 }
30871 case BlockAMD64LT:
30872
30873
30874
30875 for b.Controls[0].Op == OpAMD64CMPQconst {
30876 c := b.Controls[0]
30877 if auxIntToInt32(c.AuxInt) != 128 {
30878 break
30879 }
30880 z := c.Args[0]
30881 if !(c.Uses == 1) {
30882 break
30883 }
30884 v0 := b.NewValue0(c.Pos, OpAMD64CMPQconst, types.TypeFlags)
30885 v0.AuxInt = int32ToAuxInt(127)
30886 v0.AddArg(z)
30887 b.resetWithControl(BlockAMD64LE, v0)
30888 return true
30889 }
30890
30891
30892
30893 for b.Controls[0].Op == OpAMD64CMPLconst {
30894 c := b.Controls[0]
30895 if auxIntToInt32(c.AuxInt) != 128 {
30896 break
30897 }
30898 z := c.Args[0]
30899 if !(c.Uses == 1) {
30900 break
30901 }
30902 v0 := b.NewValue0(c.Pos, OpAMD64CMPLconst, types.TypeFlags)
30903 v0.AuxInt = int32ToAuxInt(127)
30904 v0.AddArg(z)
30905 b.resetWithControl(BlockAMD64LE, v0)
30906 return true
30907 }
30908
30909
30910 for b.Controls[0].Op == OpAMD64InvertFlags {
30911 v_0 := b.Controls[0]
30912 cmp := v_0.Args[0]
30913 b.resetWithControl(BlockAMD64GT, cmp)
30914 return true
30915 }
30916
30917
30918 for b.Controls[0].Op == OpAMD64FlagEQ {
30919 b.Reset(BlockFirst)
30920 b.swapSuccessors()
30921 return true
30922 }
30923
30924
30925 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30926 b.Reset(BlockFirst)
30927 return true
30928 }
30929
30930
30931 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30932 b.Reset(BlockFirst)
30933 return true
30934 }
30935
30936
30937 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30938 b.Reset(BlockFirst)
30939 b.swapSuccessors()
30940 return true
30941 }
30942
30943
30944 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30945 b.Reset(BlockFirst)
30946 b.swapSuccessors()
30947 return true
30948 }
30949 case BlockAMD64NE:
30950
30951
30952 for b.Controls[0].Op == OpAMD64TESTB {
30953 v_0 := b.Controls[0]
30954 _ = v_0.Args[1]
30955 v_0_0 := v_0.Args[0]
30956 if v_0_0.Op != OpAMD64SETL {
30957 break
30958 }
30959 cmp := v_0_0.Args[0]
30960 v_0_1 := v_0.Args[1]
30961 if v_0_1.Op != OpAMD64SETL || cmp != v_0_1.Args[0] {
30962 break
30963 }
30964 b.resetWithControl(BlockAMD64LT, cmp)
30965 return true
30966 }
30967
30968
30969 for b.Controls[0].Op == OpAMD64TESTB {
30970 v_0 := b.Controls[0]
30971 _ = v_0.Args[1]
30972 v_0_0 := v_0.Args[0]
30973 if v_0_0.Op != OpAMD64SETLE {
30974 break
30975 }
30976 cmp := v_0_0.Args[0]
30977 v_0_1 := v_0.Args[1]
30978 if v_0_1.Op != OpAMD64SETLE || cmp != v_0_1.Args[0] {
30979 break
30980 }
30981 b.resetWithControl(BlockAMD64LE, cmp)
30982 return true
30983 }
30984
30985
30986 for b.Controls[0].Op == OpAMD64TESTB {
30987 v_0 := b.Controls[0]
30988 _ = v_0.Args[1]
30989 v_0_0 := v_0.Args[0]
30990 if v_0_0.Op != OpAMD64SETG {
30991 break
30992 }
30993 cmp := v_0_0.Args[0]
30994 v_0_1 := v_0.Args[1]
30995 if v_0_1.Op != OpAMD64SETG || cmp != v_0_1.Args[0] {
30996 break
30997 }
30998 b.resetWithControl(BlockAMD64GT, cmp)
30999 return true
31000 }
31001
31002
31003 for b.Controls[0].Op == OpAMD64TESTB {
31004 v_0 := b.Controls[0]
31005 _ = v_0.Args[1]
31006 v_0_0 := v_0.Args[0]
31007 if v_0_0.Op != OpAMD64SETGE {
31008 break
31009 }
31010 cmp := v_0_0.Args[0]
31011 v_0_1 := v_0.Args[1]
31012 if v_0_1.Op != OpAMD64SETGE || cmp != v_0_1.Args[0] {
31013 break
31014 }
31015 b.resetWithControl(BlockAMD64GE, cmp)
31016 return true
31017 }
31018
31019
31020 for b.Controls[0].Op == OpAMD64TESTB {
31021 v_0 := b.Controls[0]
31022 _ = v_0.Args[1]
31023 v_0_0 := v_0.Args[0]
31024 if v_0_0.Op != OpAMD64SETEQ {
31025 break
31026 }
31027 cmp := v_0_0.Args[0]
31028 v_0_1 := v_0.Args[1]
31029 if v_0_1.Op != OpAMD64SETEQ || cmp != v_0_1.Args[0] {
31030 break
31031 }
31032 b.resetWithControl(BlockAMD64EQ, cmp)
31033 return true
31034 }
31035
31036
31037 for b.Controls[0].Op == OpAMD64TESTB {
31038 v_0 := b.Controls[0]
31039 _ = v_0.Args[1]
31040 v_0_0 := v_0.Args[0]
31041 if v_0_0.Op != OpAMD64SETNE {
31042 break
31043 }
31044 cmp := v_0_0.Args[0]
31045 v_0_1 := v_0.Args[1]
31046 if v_0_1.Op != OpAMD64SETNE || cmp != v_0_1.Args[0] {
31047 break
31048 }
31049 b.resetWithControl(BlockAMD64NE, cmp)
31050 return true
31051 }
31052
31053
31054 for b.Controls[0].Op == OpAMD64TESTB {
31055 v_0 := b.Controls[0]
31056 _ = v_0.Args[1]
31057 v_0_0 := v_0.Args[0]
31058 if v_0_0.Op != OpAMD64SETB {
31059 break
31060 }
31061 cmp := v_0_0.Args[0]
31062 v_0_1 := v_0.Args[1]
31063 if v_0_1.Op != OpAMD64SETB || cmp != v_0_1.Args[0] {
31064 break
31065 }
31066 b.resetWithControl(BlockAMD64ULT, cmp)
31067 return true
31068 }
31069
31070
31071 for b.Controls[0].Op == OpAMD64TESTB {
31072 v_0 := b.Controls[0]
31073 _ = v_0.Args[1]
31074 v_0_0 := v_0.Args[0]
31075 if v_0_0.Op != OpAMD64SETBE {
31076 break
31077 }
31078 cmp := v_0_0.Args[0]
31079 v_0_1 := v_0.Args[1]
31080 if v_0_1.Op != OpAMD64SETBE || cmp != v_0_1.Args[0] {
31081 break
31082 }
31083 b.resetWithControl(BlockAMD64ULE, cmp)
31084 return true
31085 }
31086
31087
31088 for b.Controls[0].Op == OpAMD64TESTB {
31089 v_0 := b.Controls[0]
31090 _ = v_0.Args[1]
31091 v_0_0 := v_0.Args[0]
31092 if v_0_0.Op != OpAMD64SETA {
31093 break
31094 }
31095 cmp := v_0_0.Args[0]
31096 v_0_1 := v_0.Args[1]
31097 if v_0_1.Op != OpAMD64SETA || cmp != v_0_1.Args[0] {
31098 break
31099 }
31100 b.resetWithControl(BlockAMD64UGT, cmp)
31101 return true
31102 }
31103
31104
31105 for b.Controls[0].Op == OpAMD64TESTB {
31106 v_0 := b.Controls[0]
31107 _ = v_0.Args[1]
31108 v_0_0 := v_0.Args[0]
31109 if v_0_0.Op != OpAMD64SETAE {
31110 break
31111 }
31112 cmp := v_0_0.Args[0]
31113 v_0_1 := v_0.Args[1]
31114 if v_0_1.Op != OpAMD64SETAE || cmp != v_0_1.Args[0] {
31115 break
31116 }
31117 b.resetWithControl(BlockAMD64UGE, cmp)
31118 return true
31119 }
31120
31121
31122 for b.Controls[0].Op == OpAMD64TESTB {
31123 v_0 := b.Controls[0]
31124 _ = v_0.Args[1]
31125 v_0_0 := v_0.Args[0]
31126 if v_0_0.Op != OpAMD64SETO {
31127 break
31128 }
31129 cmp := v_0_0.Args[0]
31130 v_0_1 := v_0.Args[1]
31131 if v_0_1.Op != OpAMD64SETO || cmp != v_0_1.Args[0] {
31132 break
31133 }
31134 b.resetWithControl(BlockAMD64OS, cmp)
31135 return true
31136 }
31137
31138
31139 for b.Controls[0].Op == OpAMD64TESTL {
31140 v_0 := b.Controls[0]
31141 _ = v_0.Args[1]
31142 v_0_0 := v_0.Args[0]
31143 v_0_1 := v_0.Args[1]
31144 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31145 if v_0_0.Op != OpAMD64SHLL {
31146 continue
31147 }
31148 x := v_0_0.Args[1]
31149 v_0_0_0 := v_0_0.Args[0]
31150 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
31151 continue
31152 }
31153 y := v_0_1
31154 v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
31155 v0.AddArg2(x, y)
31156 b.resetWithControl(BlockAMD64ULT, v0)
31157 return true
31158 }
31159 break
31160 }
31161
31162
31163 for b.Controls[0].Op == OpAMD64TESTQ {
31164 v_0 := b.Controls[0]
31165 _ = v_0.Args[1]
31166 v_0_0 := v_0.Args[0]
31167 v_0_1 := v_0.Args[1]
31168 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31169 if v_0_0.Op != OpAMD64SHLQ {
31170 continue
31171 }
31172 x := v_0_0.Args[1]
31173 v_0_0_0 := v_0_0.Args[0]
31174 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
31175 continue
31176 }
31177 y := v_0_1
31178 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
31179 v0.AddArg2(x, y)
31180 b.resetWithControl(BlockAMD64ULT, v0)
31181 return true
31182 }
31183 break
31184 }
31185
31186
31187
31188 for b.Controls[0].Op == OpAMD64TESTLconst {
31189 v_0 := b.Controls[0]
31190 c := auxIntToInt32(v_0.AuxInt)
31191 x := v_0.Args[0]
31192 if !(isUint32PowerOfTwo(int64(c))) {
31193 break
31194 }
31195 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
31196 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
31197 v0.AddArg(x)
31198 b.resetWithControl(BlockAMD64ULT, v0)
31199 return true
31200 }
31201
31202
31203
31204 for b.Controls[0].Op == OpAMD64TESTQconst {
31205 v_0 := b.Controls[0]
31206 c := auxIntToInt32(v_0.AuxInt)
31207 x := v_0.Args[0]
31208 if !(isUint64PowerOfTwo(int64(c))) {
31209 break
31210 }
31211 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31212 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
31213 v0.AddArg(x)
31214 b.resetWithControl(BlockAMD64ULT, v0)
31215 return true
31216 }
31217
31218
31219
31220 for b.Controls[0].Op == OpAMD64TESTQ {
31221 v_0 := b.Controls[0]
31222 _ = v_0.Args[1]
31223 v_0_0 := v_0.Args[0]
31224 v_0_1 := v_0.Args[1]
31225 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31226 if v_0_0.Op != OpAMD64MOVQconst {
31227 continue
31228 }
31229 c := auxIntToInt64(v_0_0.AuxInt)
31230 x := v_0_1
31231 if !(isUint64PowerOfTwo(c)) {
31232 continue
31233 }
31234 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31235 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
31236 v0.AddArg(x)
31237 b.resetWithControl(BlockAMD64ULT, v0)
31238 return true
31239 }
31240 break
31241 }
31242
31243
31244
31245 for b.Controls[0].Op == OpAMD64TESTQ {
31246 v_0 := b.Controls[0]
31247 _ = v_0.Args[1]
31248 v_0_0 := v_0.Args[0]
31249 v_0_1 := v_0.Args[1]
31250 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31251 z1 := v_0_0
31252 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
31253 continue
31254 }
31255 z1_0 := z1.Args[0]
31256 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
31257 continue
31258 }
31259 x := z1_0.Args[0]
31260 z2 := v_0_1
31261 if !(z1 == z2) {
31262 continue
31263 }
31264 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31265 v0.AuxInt = int8ToAuxInt(63)
31266 v0.AddArg(x)
31267 b.resetWithControl(BlockAMD64ULT, v0)
31268 return true
31269 }
31270 break
31271 }
31272
31273
31274
31275 for b.Controls[0].Op == OpAMD64TESTL {
31276 v_0 := b.Controls[0]
31277 _ = v_0.Args[1]
31278 v_0_0 := v_0.Args[0]
31279 v_0_1 := v_0.Args[1]
31280 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31281 z1 := v_0_0
31282 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
31283 continue
31284 }
31285 z1_0 := z1.Args[0]
31286 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
31287 continue
31288 }
31289 x := z1_0.Args[0]
31290 z2 := v_0_1
31291 if !(z1 == z2) {
31292 continue
31293 }
31294 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31295 v0.AuxInt = int8ToAuxInt(31)
31296 v0.AddArg(x)
31297 b.resetWithControl(BlockAMD64ULT, v0)
31298 return true
31299 }
31300 break
31301 }
31302
31303
31304
31305 for b.Controls[0].Op == OpAMD64TESTQ {
31306 v_0 := b.Controls[0]
31307 _ = v_0.Args[1]
31308 v_0_0 := v_0.Args[0]
31309 v_0_1 := v_0.Args[1]
31310 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31311 z1 := v_0_0
31312 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
31313 continue
31314 }
31315 z1_0 := z1.Args[0]
31316 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
31317 continue
31318 }
31319 x := z1_0.Args[0]
31320 z2 := v_0_1
31321 if !(z1 == z2) {
31322 continue
31323 }
31324 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31325 v0.AuxInt = int8ToAuxInt(0)
31326 v0.AddArg(x)
31327 b.resetWithControl(BlockAMD64ULT, v0)
31328 return true
31329 }
31330 break
31331 }
31332
31333
31334
31335 for b.Controls[0].Op == OpAMD64TESTL {
31336 v_0 := b.Controls[0]
31337 _ = v_0.Args[1]
31338 v_0_0 := v_0.Args[0]
31339 v_0_1 := v_0.Args[1]
31340 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31341 z1 := v_0_0
31342 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
31343 continue
31344 }
31345 z1_0 := z1.Args[0]
31346 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
31347 continue
31348 }
31349 x := z1_0.Args[0]
31350 z2 := v_0_1
31351 if !(z1 == z2) {
31352 continue
31353 }
31354 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
31355 v0.AuxInt = int8ToAuxInt(0)
31356 v0.AddArg(x)
31357 b.resetWithControl(BlockAMD64ULT, v0)
31358 return true
31359 }
31360 break
31361 }
31362
31363
31364
31365 for b.Controls[0].Op == OpAMD64TESTQ {
31366 v_0 := b.Controls[0]
31367 _ = v_0.Args[1]
31368 v_0_0 := v_0.Args[0]
31369 v_0_1 := v_0.Args[1]
31370 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31371 z1 := v_0_0
31372 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
31373 continue
31374 }
31375 x := z1.Args[0]
31376 z2 := v_0_1
31377 if !(z1 == z2) {
31378 continue
31379 }
31380 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31381 v0.AuxInt = int8ToAuxInt(63)
31382 v0.AddArg(x)
31383 b.resetWithControl(BlockAMD64ULT, v0)
31384 return true
31385 }
31386 break
31387 }
31388
31389
31390
31391 for b.Controls[0].Op == OpAMD64TESTL {
31392 v_0 := b.Controls[0]
31393 _ = v_0.Args[1]
31394 v_0_0 := v_0.Args[0]
31395 v_0_1 := v_0.Args[1]
31396 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31397 z1 := v_0_0
31398 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
31399 continue
31400 }
31401 x := z1.Args[0]
31402 z2 := v_0_1
31403 if !(z1 == z2) {
31404 continue
31405 }
31406 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
31407 v0.AuxInt = int8ToAuxInt(31)
31408 v0.AddArg(x)
31409 b.resetWithControl(BlockAMD64ULT, v0)
31410 return true
31411 }
31412 break
31413 }
31414
31415
31416 for b.Controls[0].Op == OpAMD64TESTB {
31417 v_0 := b.Controls[0]
31418 _ = v_0.Args[1]
31419 v_0_0 := v_0.Args[0]
31420 if v_0_0.Op != OpAMD64SETGF {
31421 break
31422 }
31423 cmp := v_0_0.Args[0]
31424 v_0_1 := v_0.Args[1]
31425 if v_0_1.Op != OpAMD64SETGF || cmp != v_0_1.Args[0] {
31426 break
31427 }
31428 b.resetWithControl(BlockAMD64UGT, cmp)
31429 return true
31430 }
31431
31432
31433 for b.Controls[0].Op == OpAMD64TESTB {
31434 v_0 := b.Controls[0]
31435 _ = v_0.Args[1]
31436 v_0_0 := v_0.Args[0]
31437 if v_0_0.Op != OpAMD64SETGEF {
31438 break
31439 }
31440 cmp := v_0_0.Args[0]
31441 v_0_1 := v_0.Args[1]
31442 if v_0_1.Op != OpAMD64SETGEF || cmp != v_0_1.Args[0] {
31443 break
31444 }
31445 b.resetWithControl(BlockAMD64UGE, cmp)
31446 return true
31447 }
31448
31449
31450 for b.Controls[0].Op == OpAMD64TESTB {
31451 v_0 := b.Controls[0]
31452 _ = v_0.Args[1]
31453 v_0_0 := v_0.Args[0]
31454 if v_0_0.Op != OpAMD64SETEQF {
31455 break
31456 }
31457 cmp := v_0_0.Args[0]
31458 v_0_1 := v_0.Args[1]
31459 if v_0_1.Op != OpAMD64SETEQF || cmp != v_0_1.Args[0] {
31460 break
31461 }
31462 b.resetWithControl(BlockAMD64EQF, cmp)
31463 return true
31464 }
31465
31466
31467 for b.Controls[0].Op == OpAMD64TESTB {
31468 v_0 := b.Controls[0]
31469 _ = v_0.Args[1]
31470 v_0_0 := v_0.Args[0]
31471 if v_0_0.Op != OpAMD64SETNEF {
31472 break
31473 }
31474 cmp := v_0_0.Args[0]
31475 v_0_1 := v_0.Args[1]
31476 if v_0_1.Op != OpAMD64SETNEF || cmp != v_0_1.Args[0] {
31477 break
31478 }
31479 b.resetWithControl(BlockAMD64NEF, cmp)
31480 return true
31481 }
31482
31483
31484 for b.Controls[0].Op == OpAMD64InvertFlags {
31485 v_0 := b.Controls[0]
31486 cmp := v_0.Args[0]
31487 b.resetWithControl(BlockAMD64NE, cmp)
31488 return true
31489 }
31490
31491
31492 for b.Controls[0].Op == OpAMD64FlagEQ {
31493 b.Reset(BlockFirst)
31494 b.swapSuccessors()
31495 return true
31496 }
31497
31498
31499 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31500 b.Reset(BlockFirst)
31501 return true
31502 }
31503
31504
31505 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31506 b.Reset(BlockFirst)
31507 return true
31508 }
31509
31510
31511 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31512 b.Reset(BlockFirst)
31513 return true
31514 }
31515
31516
31517 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31518 b.Reset(BlockFirst)
31519 return true
31520 }
31521
31522
31523 for b.Controls[0].Op == OpAMD64TESTQ {
31524 v_0 := b.Controls[0]
31525 _ = v_0.Args[1]
31526 v_0_0 := v_0.Args[0]
31527 v_0_1 := v_0.Args[1]
31528 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31529 s := v_0_0
31530 if s.Op != OpSelect0 {
31531 continue
31532 }
31533 blsr := s.Args[0]
31534 if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
31535 continue
31536 }
31537 v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
31538 v0.AddArg(blsr)
31539 b.resetWithControl(BlockAMD64NE, v0)
31540 return true
31541 }
31542 break
31543 }
31544
31545
31546 for b.Controls[0].Op == OpAMD64TESTL {
31547 v_0 := b.Controls[0]
31548 _ = v_0.Args[1]
31549 v_0_0 := v_0.Args[0]
31550 v_0_1 := v_0.Args[1]
31551 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31552 s := v_0_0
31553 if s.Op != OpSelect0 {
31554 continue
31555 }
31556 blsr := s.Args[0]
31557 if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
31558 continue
31559 }
31560 v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
31561 v0.AddArg(blsr)
31562 b.resetWithControl(BlockAMD64NE, v0)
31563 return true
31564 }
31565 break
31566 }
31567
31568
31569
31570 for b.Controls[0].Op == OpAMD64TESTQ {
31571 t := b.Controls[0]
31572 _ = t.Args[1]
31573 t_0 := t.Args[0]
31574 t_1 := t.Args[1]
31575 for _i0 := 0; _i0 <= 1; _i0, t_0, t_1 = _i0+1, t_1, t_0 {
31576 a := t_0
31577 if a.Op != OpAMD64ADDQconst {
31578 continue
31579 }
31580 if a != t_1 || !(t.Uses == 1 && flagify(a)) {
31581 continue
31582 }
31583 v0 := b.NewValue0(t.Pos, OpSelect1, types.TypeFlags)
31584 v0.AddArg(a.Args[0])
31585 b.resetWithControl(BlockAMD64NE, v0)
31586 return true
31587 }
31588 break
31589 }
31590
31591
31592
31593 for b.Controls[0].Op == OpAMD64TESTL {
31594 t := b.Controls[0]
31595 _ = t.Args[1]
31596 t_0 := t.Args[0]
31597 t_1 := t.Args[1]
31598 for _i0 := 0; _i0 <= 1; _i0, t_0, t_1 = _i0+1, t_1, t_0 {
31599 a := t_0
31600 if a.Op != OpAMD64ADDLconst {
31601 continue
31602 }
31603 if a != t_1 || !(t.Uses == 1 && flagify(a)) {
31604 continue
31605 }
31606 v0 := b.NewValue0(t.Pos, OpSelect1, types.TypeFlags)
31607 v0.AddArg(a.Args[0])
31608 b.resetWithControl(BlockAMD64NE, v0)
31609 return true
31610 }
31611 break
31612 }
31613 case BlockAMD64UGE:
31614
31615
31616 for b.Controls[0].Op == OpAMD64TESTQ {
31617 v_0 := b.Controls[0]
31618 x := v_0.Args[1]
31619 if x != v_0.Args[0] {
31620 break
31621 }
31622 b.Reset(BlockFirst)
31623 return true
31624 }
31625
31626
31627 for b.Controls[0].Op == OpAMD64TESTL {
31628 v_0 := b.Controls[0]
31629 x := v_0.Args[1]
31630 if x != v_0.Args[0] {
31631 break
31632 }
31633 b.Reset(BlockFirst)
31634 return true
31635 }
31636
31637
31638 for b.Controls[0].Op == OpAMD64TESTW {
31639 v_0 := b.Controls[0]
31640 x := v_0.Args[1]
31641 if x != v_0.Args[0] {
31642 break
31643 }
31644 b.Reset(BlockFirst)
31645 return true
31646 }
31647
31648
31649 for b.Controls[0].Op == OpAMD64TESTB {
31650 v_0 := b.Controls[0]
31651 x := v_0.Args[1]
31652 if x != v_0.Args[0] {
31653 break
31654 }
31655 b.Reset(BlockFirst)
31656 return true
31657 }
31658
31659
31660 for b.Controls[0].Op == OpAMD64InvertFlags {
31661 v_0 := b.Controls[0]
31662 cmp := v_0.Args[0]
31663 b.resetWithControl(BlockAMD64ULE, cmp)
31664 return true
31665 }
31666
31667
31668 for b.Controls[0].Op == OpAMD64FlagEQ {
31669 b.Reset(BlockFirst)
31670 return true
31671 }
31672
31673
31674 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31675 b.Reset(BlockFirst)
31676 b.swapSuccessors()
31677 return true
31678 }
31679
31680
31681 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31682 b.Reset(BlockFirst)
31683 return true
31684 }
31685
31686
31687 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31688 b.Reset(BlockFirst)
31689 b.swapSuccessors()
31690 return true
31691 }
31692
31693
31694 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31695 b.Reset(BlockFirst)
31696 return true
31697 }
31698 case BlockAMD64UGT:
31699
31700
31701 for b.Controls[0].Op == OpAMD64InvertFlags {
31702 v_0 := b.Controls[0]
31703 cmp := v_0.Args[0]
31704 b.resetWithControl(BlockAMD64ULT, cmp)
31705 return true
31706 }
31707
31708
31709 for b.Controls[0].Op == OpAMD64FlagEQ {
31710 b.Reset(BlockFirst)
31711 b.swapSuccessors()
31712 return true
31713 }
31714
31715
31716 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31717 b.Reset(BlockFirst)
31718 b.swapSuccessors()
31719 return true
31720 }
31721
31722
31723 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31724 b.Reset(BlockFirst)
31725 return true
31726 }
31727
31728
31729 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31730 b.Reset(BlockFirst)
31731 b.swapSuccessors()
31732 return true
31733 }
31734
31735
31736 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31737 b.Reset(BlockFirst)
31738 return true
31739 }
31740 case BlockAMD64ULE:
31741
31742
31743 for b.Controls[0].Op == OpAMD64InvertFlags {
31744 v_0 := b.Controls[0]
31745 cmp := v_0.Args[0]
31746 b.resetWithControl(BlockAMD64UGE, cmp)
31747 return true
31748 }
31749
31750
31751 for b.Controls[0].Op == OpAMD64FlagEQ {
31752 b.Reset(BlockFirst)
31753 return true
31754 }
31755
31756
31757 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31758 b.Reset(BlockFirst)
31759 return true
31760 }
31761
31762
31763 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31764 b.Reset(BlockFirst)
31765 b.swapSuccessors()
31766 return true
31767 }
31768
31769
31770 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31771 b.Reset(BlockFirst)
31772 return true
31773 }
31774
31775
31776 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31777 b.Reset(BlockFirst)
31778 b.swapSuccessors()
31779 return true
31780 }
31781 case BlockAMD64ULT:
31782
31783
31784 for b.Controls[0].Op == OpAMD64TESTQ {
31785 v_0 := b.Controls[0]
31786 x := v_0.Args[1]
31787 if x != v_0.Args[0] {
31788 break
31789 }
31790 b.Reset(BlockFirst)
31791 b.swapSuccessors()
31792 return true
31793 }
31794
31795
31796 for b.Controls[0].Op == OpAMD64TESTL {
31797 v_0 := b.Controls[0]
31798 x := v_0.Args[1]
31799 if x != v_0.Args[0] {
31800 break
31801 }
31802 b.Reset(BlockFirst)
31803 b.swapSuccessors()
31804 return true
31805 }
31806
31807
31808 for b.Controls[0].Op == OpAMD64TESTW {
31809 v_0 := b.Controls[0]
31810 x := v_0.Args[1]
31811 if x != v_0.Args[0] {
31812 break
31813 }
31814 b.Reset(BlockFirst)
31815 b.swapSuccessors()
31816 return true
31817 }
31818
31819
31820 for b.Controls[0].Op == OpAMD64TESTB {
31821 v_0 := b.Controls[0]
31822 x := v_0.Args[1]
31823 if x != v_0.Args[0] {
31824 break
31825 }
31826 b.Reset(BlockFirst)
31827 b.swapSuccessors()
31828 return true
31829 }
31830
31831
31832 for b.Controls[0].Op == OpAMD64InvertFlags {
31833 v_0 := b.Controls[0]
31834 cmp := v_0.Args[0]
31835 b.resetWithControl(BlockAMD64UGT, cmp)
31836 return true
31837 }
31838
31839
31840 for b.Controls[0].Op == OpAMD64FlagEQ {
31841 b.Reset(BlockFirst)
31842 b.swapSuccessors()
31843 return true
31844 }
31845
31846
31847 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31848 b.Reset(BlockFirst)
31849 return true
31850 }
31851
31852
31853 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31854 b.Reset(BlockFirst)
31855 b.swapSuccessors()
31856 return true
31857 }
31858
31859
31860 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31861 b.Reset(BlockFirst)
31862 return true
31863 }
31864
31865
31866 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31867 b.Reset(BlockFirst)
31868 b.swapSuccessors()
31869 return true
31870 }
31871 }
31872 return false
31873 }
31874
View as plain text