/*
 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */

#ifndef __SYSDEP_STUB_H
#define __SYSDEP_STUB_H

#include <sysdep/ptrace_user.h>

/* Location of the syscall return value (%rax) in the ptrace register
 * set — presumably PT_INDEX maps a register to its slot; see
 * sysdep/ptrace_user.h to confirm. */
#define STUB_SYSCALL_RET PT_INDEX(RAX)

/* x86_64 has a single mmap syscall that takes a byte offset directly,
 * so no offset scaling is needed. */
#define STUB_MMAP_NR __NR_mmap
#define MMAP_OFFSET(o) (o)

/* The x86_64 `syscall` instruction destroys %rcx (return RIP) and
 * %r11 (saved RFLAGS); "memory" because the kernel may read/write
 * through pointer arguments. */
#define __syscall_clobber "r11","rcx","memory"
#define __syscall "syscall"
  13. static inline long stub_syscall0(long syscall)
  14. {
  15. long ret;
  16. __asm__ volatile (__syscall
  17. : "=a" (ret)
  18. : "0" (syscall) : __syscall_clobber );
  19. return ret;
  20. }
  21. static inline long stub_syscall2(long syscall, long arg1, long arg2)
  22. {
  23. long ret;
  24. __asm__ volatile (__syscall
  25. : "=a" (ret)
  26. : "0" (syscall), "D" (arg1), "S" (arg2) : __syscall_clobber );
  27. return ret;
  28. }
  29. static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3)
  30. {
  31. long ret;
  32. __asm__ volatile (__syscall
  33. : "=a" (ret)
  34. : "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3)
  35. : __syscall_clobber );
  36. return ret;
  37. }
  38. static inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3,
  39. long arg4)
  40. {
  41. long ret;
  42. __asm__ volatile ("movq %5,%%r10 ; " __syscall
  43. : "=a" (ret)
  44. : "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
  45. "g" (arg4)
  46. : __syscall_clobber, "r10" );
  47. return ret;
  48. }
  49. static inline long stub_syscall5(long syscall, long arg1, long arg2, long arg3,
  50. long arg4, long arg5)
  51. {
  52. long ret;
  53. __asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; " __syscall
  54. : "=a" (ret)
  55. : "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
  56. "g" (arg4), "g" (arg5)
  57. : __syscall_clobber, "r10", "r8" );
  58. return ret;
  59. }
  60. static inline void trap_myself(void)
  61. {
  62. __asm("int3");
  63. }
  64. static inline void remap_stack(long fd, unsigned long offset)
  65. {
  66. __asm__ volatile ("movq %4,%%r10 ; movq %5,%%r8 ; "
  67. "movq %6, %%r9; " __syscall "; movq %7, %%rbx ; "
  68. "movq %%rax, (%%rbx)":
  69. : "a" (STUB_MMAP_NR), "D" (STUB_DATA),
  70. "S" (UM_KERN_PAGE_SIZE),
  71. "d" (PROT_READ | PROT_WRITE),
  72. "g" (MAP_FIXED | MAP_SHARED), "g" (fd),
  73. "g" (offset),
  74. "i" (&((struct stub_data *) STUB_DATA)->err)
  75. : __syscall_clobber, "r10", "r8", "r9" );
  76. }
#endif /* __SYSDEP_STUB_H */