#ifndef _ASM_IA64_GCC_INTRIN_H
#define _ASM_IA64_GCC_INTRIN_H
/*
 * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
 * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
 */

#include <linux/compiler.h>

/* define this macro to get some asm stmts included in 'c' files */
#define ASM_SUPPORTED

/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define ia64_barrier()	asm volatile ("":::"memory")

#define ia64_stop()	asm volatile (";;"::)

#define ia64_invala_gr(regnum)	asm volatile ("invala.e r%0" :: "i"(regnum))

#define ia64_invala_fr(regnum)	asm volatile ("invala.e f%0" :: "i"(regnum))

#define ia64_flushrs()	asm volatile ("flushrs;;":::"memory")

#define ia64_loadrs()	asm volatile ("loadrs;;":::"memory")

extern void ia64_bad_param_for_setreg (void);
extern void ia64_bad_param_for_getreg (void);

#ifdef __KERNEL__
register unsigned long ia64_r13 asm ("r13") __used;
#endif

#define ia64_native_setreg(regnum, val) \
({ \
	switch (regnum) { \
	case _IA64_REG_PSR_L: \
		asm volatile ("mov psr.l=%0" :: "r"(val) : "memory"); \
		break; \
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
		asm volatile ("mov ar%0=%1" :: \
			      "i" (regnum - _IA64_REG_AR_KR0), \
			      "r"(val) : "memory"); \
		break; \
	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
		asm volatile ("mov cr%0=%1" :: \
			      "i" (regnum - _IA64_REG_CR_DCR), \
			      "r"(val) : "memory"); \
		break; \
	case _IA64_REG_SP: \
		asm volatile ("mov r12=%0" :: \
			      "r"(val) : "memory"); \
		break; \
	case _IA64_REG_GP: \
		asm volatile ("mov gp=%0" :: "r"(val) : "memory"); \
		break; \
	default: \
		ia64_bad_param_for_setreg(); \
		break; \
	} \
})

#define ia64_native_getreg(regnum) \
({ \
	__u64 ia64_intri_res; \
 \
	switch (regnum) { \
	case _IA64_REG_GP: \
		asm volatile ("mov %0=gp" : "=r"(ia64_intri_res)); \
		break; \
	case _IA64_REG_IP: \
		asm volatile ("mov %0=ip" : "=r"(ia64_intri_res)); \
		break; \
	case _IA64_REG_PSR: \
		asm volatile ("mov %0=psr" : "=r"(ia64_intri_res)); \
		break; \
	case _IA64_REG_TP:	/* for current() */ \
		ia64_intri_res = ia64_r13; \
		break; \
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
		asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \
			      : "i"(regnum - _IA64_REG_AR_KR0)); \
		break; \
	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
		asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res) \
			      : "i" (regnum - _IA64_REG_CR_DCR)); \
		break; \
	case _IA64_REG_SP: \
		asm volatile ("mov %0=sp" : "=r" (ia64_intri_res)); \
		break; \
	default: \
		ia64_bad_param_for_getreg(); \
		break; \
	} \
	ia64_intri_res; \
})

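/*
 * Usage sketch (illustrative, not part of this header): regnum must be a
 * compile-time constant so the switch folds away to a single instruction.
 * The _IA64_REG_TP case mirrors how the kernel derives current(), since
 * r13 holds the current task pointer:
 *
 *	struct task_struct *tsk =
 *		(struct task_struct *) ia64_native_getreg(_IA64_REG_TP);
 */
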
#define ia64_hint_pause 0

#define ia64_hint(mode) \
({ \
	switch (mode) { \
	case ia64_hint_pause: \
		asm volatile ("hint @pause" ::: "memory"); \
		break; \
	} \
})

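/*
 * Typical consumer (a minimal sketch; the real definition lives in the
 * processor header): cpu_relax() on ia64 boils down to this pause hint,
 * used in busy-wait loops such as the hypothetical
 *
 *	while (!done)
 *		ia64_hint(ia64_hint_pause);
 *
 * where 'done' is a flag set by another CPU.
 */
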
/* Integer values for mux1 instruction */
#define ia64_mux1_brcst 0
#define ia64_mux1_mix   8
#define ia64_mux1_shuf  9
#define ia64_mux1_alt  10
#define ia64_mux1_rev  11

#define ia64_mux1(x, mode) \
({ \
	__u64 ia64_intri_res; \
 \
	switch (mode) { \
	case ia64_mux1_brcst: \
		asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x)); \
		break; \
	case ia64_mux1_mix: \
		asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x)); \
		break; \
	case ia64_mux1_shuf: \
		asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x)); \
		break; \
	case ia64_mux1_alt: \
		asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x)); \
		break; \
	case ia64_mux1_rev: \
		asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x)); \
		break; \
	} \
	ia64_intri_res; \
})

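/*
 * Illustrative sketch: mode must be a compile-time constant for the switch
 * to collapse.  @rev reverses the byte order of a register, which is how a
 * 64-bit byte swap can be built (hypothetical helper):
 *
 *	static inline __u64 example_swab64(__u64 x)
 *	{
 *		return ia64_mux1(x, ia64_mux1_rev);
 *	}
 */
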
#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
# define ia64_popcnt(x)	__builtin_popcountl(x)
#else
# define ia64_popcnt(x) \
({ \
	__u64 ia64_intri_res; \
	asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
 \
	ia64_intri_res; \
})
#endif

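/*
 * Minimal sketch: both variants count the bits set in a 64-bit value, e.g.
 *
 *	ia64_popcnt(0xf0f0UL)	evaluates to 8
 */
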
#define ia64_getf_exp(x) \
({ \
	long ia64_intri_res; \
 \
	asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \
 \
	ia64_intri_res; \
})

#define ia64_shrp(a, b, count) \
({ \
	__u64 ia64_intri_res; \
	asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count)); \
	ia64_intri_res; \
})

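/*
 * Illustrative use (hypothetical helper): shrp extracts 64 bits from the
 * 128-bit concatenation a:b, so feeding the same value twice yields a
 * rotate right by a constant amount:
 *
 *	static inline __u64 example_rotr13(__u64 x)
 *	{
 *		return ia64_shrp(x, x, 13);
 *	}
 *
 * count is an "i" constraint and must be a compile-time constant in 0..63.
 */
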
#define ia64_ldfs(regnum, x) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("ldfs %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_ldfd(regnum, x) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("ldfd %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_ldfe(regnum, x) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("ldfe %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_ldf8(regnum, x) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("ldf8 %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_ldf_fill(regnum, x) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("ldf.fill %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_st4_rel_nta(m, val) \
({ \
	asm volatile ("st4.rel.nta [%0] = %1\n\t" :: "r"(m), "r"(val)); \
})

#define ia64_stfs(x, regnum) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stfd(x, regnum) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stfe(x, regnum) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stf8(x, regnum) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stf_spill(x, regnum) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_fetchadd4_acq(p, inc) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("fetchadd4.acq %0=[%1],%2" \
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
		      : "memory"); \
 \
	ia64_intri_res; \
})

#define ia64_fetchadd4_rel(p, inc) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("fetchadd4.rel %0=[%1],%2" \
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
		      : "memory"); \
 \
	ia64_intri_res; \
})

#define ia64_fetchadd8_acq(p, inc) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("fetchadd8.acq %0=[%1],%2" \
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
		      : "memory"); \
 \
	ia64_intri_res; \
})

#define ia64_fetchadd8_rel(p, inc) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("fetchadd8.rel %0=[%1],%2" \
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
		      : "memory"); \
 \
	ia64_intri_res; \
})

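/*
 * Usage sketch (illustrative helper, not the kernel's atomic API): the
 * result is the value the word held before the add, and inc must be an
 * immediate the hardware accepts (+/- 1, 4, 8, 16):
 *
 *	static inline int example_inc_return(int *counter)
 *	{
 *		return ia64_fetchadd4_acq(counter, 1) + 1;
 *	}
 */
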
#define ia64_xchg1(ptr, x) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("xchg1 %0=[%1],%2" \
		      : "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory"); \
	ia64_intri_res; \
})

#define ia64_xchg2(ptr, x) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \
		      : "r" (ptr), "r" (x) : "memory"); \
	ia64_intri_res; \
})

#define ia64_xchg4(ptr, x) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \
		      : "r" (ptr), "r" (x) : "memory"); \
	ia64_intri_res; \
})

#define ia64_xchg8(ptr, x) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \
		      : "r" (ptr), "r" (x) : "memory"); \
	ia64_intri_res; \
})

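/*
 * Illustrative sketch: xchg returns the old contents and has acquire
 * semantics on ia64, so a byte-sized test-and-set lock (hypothetical
 * helper, not the kernel's spinlock) would be
 *
 *	static inline void example_lock(unsigned char *lock)
 *	{
 *		while (ia64_xchg1(lock, 1))
 *			ia64_hint(ia64_hint_pause);
 *	}
 */
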
#define ia64_cmpxchg1_acq(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv" : \
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg1_rel(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv" : \
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg2_acq(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv" : \
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg2_rel(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv" : \
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg4_acq(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv" : \
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg4_rel(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv" : \
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg8_acq(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv" : \
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg8_rel(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv" : \
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

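/*
 * Sketch of the usual retry loop (hypothetical helper): ar.ccv is primed
 * with the expected old value, the store happens only if memory still
 * matches it, and the prior contents come back either way:
 *
 *	static inline __u64 example_atomic_or(__u64 *p, __u64 bits)
 *	{
 *		__u64 old, seen;
 *
 *		do {
 *			old = *p;
 *			seen = ia64_cmpxchg8_acq(p, old | bits, old);
 *		} while (seen != old);
 *		return old;
 *	}
 */
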
#define ia64_mf()	asm volatile ("mf" ::: "memory")
#define ia64_mfa()	asm volatile ("mf.a" ::: "memory")

#define ia64_invala()	asm volatile ("invala" ::: "memory")

#define ia64_native_thash(addr) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
	ia64_intri_res; \
})

#define ia64_srlz_i()	asm volatile (";; srlz.i ;;" ::: "memory")
#define ia64_srlz_d()	asm volatile (";; srlz.d" ::: "memory")

#ifdef HAVE_SERIALIZE_DIRECTIVE
# define ia64_dv_serialize_data()		asm volatile (".serialize.data")
# define ia64_dv_serialize_instruction()	asm volatile (".serialize.instruction")
#else
# define ia64_dv_serialize_data()
# define ia64_dv_serialize_instruction()
#endif

#define ia64_nop(x)	asm volatile ("nop %0" :: "i"(x))

#define ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")

#define ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")

#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1" \
					     :: "r"(trnum), "r"(addr) : "memory")

#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1" \
					     :: "r"(trnum), "r"(addr) : "memory")

#define ia64_tpa(addr) \
({ \
	unsigned long ia64_pa; \
	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \
	ia64_pa; \
})

#define __ia64_set_dbr(index, val) \
	asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_ibr(index, val) \
	asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pkr(index, val) \
	asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmc(index, val) \
	asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmd(index, val) \
	asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_native_set_rr(index, val) \
	asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_native_get_cpuid(index) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
	ia64_intri_res; \
})

#define __ia64_get_dbr(index) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res; \
})

#define ia64_get_ibr(index) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res; \
})

#define ia64_get_pkr(index) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res; \
})

#define ia64_get_pmc(index) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res; \
})

#define ia64_native_get_pmd(index) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res; \
})

#define ia64_native_get_rr(index) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \
	ia64_intri_res; \
})

#define ia64_native_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")

#define ia64_sync_i()	asm volatile (";; sync.i" ::: "memory")

#define ia64_native_ssm(mask)	asm volatile ("ssm %0" :: "i"((mask)) : "memory")
#define ia64_native_rsm(mask)	asm volatile ("rsm %0" :: "i"((mask)) : "memory")
#define ia64_sum(mask)	asm volatile ("sum %0" :: "i"((mask)) : "memory")
#define ia64_rum(mask)	asm volatile ("rum %0" :: "i"((mask)) : "memory")

#define ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))

#define ia64_native_ptcga(addr, size) \
do { \
	asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \
	ia64_dv_serialize_data(); \
} while (0)

#define ia64_ptcl(addr, size) \
do { \
	asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory"); \
	ia64_dv_serialize_data(); \
} while (0)

#define ia64_ptri(addr, size) \
	asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")

#define ia64_ptrd(addr, size) \
	asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")

#define ia64_ttag(addr) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
	ia64_intri_res; \
})

/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */

#define ia64_lfhint_none 0
#define ia64_lfhint_nt1  1
#define ia64_lfhint_nt2  2
#define ia64_lfhint_nta  3

#define ia64_lfetch(lfhint, y) \
({ \
	switch (lfhint) { \
	case ia64_lfhint_none: \
		asm volatile ("lfetch [%0]" : : "r"(y)); \
		break; \
	case ia64_lfhint_nt1: \
		asm volatile ("lfetch.nt1 [%0]" : : "r"(y)); \
		break; \
	case ia64_lfhint_nt2: \
		asm volatile ("lfetch.nt2 [%0]" : : "r"(y)); \
		break; \
	case ia64_lfhint_nta: \
		asm volatile ("lfetch.nta [%0]" : : "r"(y)); \
		break; \
	} \
})

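/*
 * Illustrative sketch (the stride and hint choice are workload dependent;
 * i, n, a and sum are hypothetical): prefetch ahead of a streaming loop
 * with the non-temporal hint so the data bypasses caches it would only
 * pollute:
 *
 *	for (i = 0; i < n; i++) {
 *		ia64_lfetch(ia64_lfhint_nta, &a[i + 8]);
 *		sum += a[i];
 *	}
 *
 * lfhint must be a compile-time constant for the switch to fold away.
 */
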
#define ia64_lfetch_excl(lfhint, y) \
({ \
	switch (lfhint) { \
	case ia64_lfhint_none: \
		asm volatile ("lfetch.excl [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nt1: \
		asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nt2: \
		asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nta: \
		asm volatile ("lfetch.excl.nta [%0]" :: "r"(y)); \
		break; \
	} \
})

#define ia64_lfetch_fault(lfhint, y) \
({ \
	switch (lfhint) { \
	case ia64_lfhint_none: \
		asm volatile ("lfetch.fault [%0]" : : "r"(y)); \
		break; \
	case ia64_lfhint_nt1: \
		asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y)); \
		break; \
	case ia64_lfhint_nt2: \
		asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y)); \
		break; \
	case ia64_lfhint_nta: \
		asm volatile ("lfetch.fault.nta [%0]" : : "r"(y)); \
		break; \
	} \
})

#define ia64_lfetch_fault_excl(lfhint, y) \
({ \
	switch (lfhint) { \
	case ia64_lfhint_none: \
		asm volatile ("lfetch.fault.excl [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nt1: \
		asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nt2: \
		asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nta: \
		asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \
		break; \
	} \
})

#define ia64_native_intrin_local_irq_restore(x) \
do { \
	asm volatile (";;   cmp.ne p6,p7=%0,r0;;" \
		      "(p6) ssm psr.i;" \
		      "(p7) rsm psr.i;;" \
		      "(p6) srlz.d" \
		      :: "r"((x)) : "p6", "p7", "memory"); \
} while (0)

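/*
 * Sketch of the pattern this serves (hypothetical, loosely mirroring the
 * irqflags code; IA64_PSR_I is defined elsewhere): x carries the saved
 * psr.i bit, so interrupts are re-enabled only if they were enabled when
 * the flags were saved:
 *
 *	unsigned long flags = ia64_native_getreg(_IA64_REG_PSR);
 *	ia64_native_rsm(IA64_PSR_I);
 *	(critical section)
 *	ia64_native_intrin_local_irq_restore(flags & IA64_PSR_I);
 */
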
#endif /* _ASM_IA64_GCC_INTRIN_H */