switch_amd64_unix.h

/*
 * this is the internal transfer function.
 *
 * HISTORY
 * 3-May-13   Ralf Schmitt  <ralf@systemexit.de>
 *      Add support for strange GCC caller-save decisions
 *      (ported from switch_aarch64_gcc.h)
 * 18-Aug-11  Alexey Borzenkov  <snaury@gmail.com>
 *      Correctly save rbp, csr and cw
 * 01-Apr-04  Hye-Shik Chang  <perky@FreeBSD.org>
 *      Ported from i386 to amd64.
 * 24-Nov-02  Christian Tismer  <tismer@tismer.com>
 *      needed to add another magic constant to ensure
 *      that f in slp_eval_frame(PyFrameObject *f)
 *      gets included into the saved stack area.
 *      STACK_REFPLUS will probably be 1 in most cases.
 * 17-Sep-02  Christian Tismer  <tismer@tismer.com>
 *      after virtualizing stack save/restore, the
 *      stack size shrunk a bit. Needed to introduce
 *      an adjustment STACK_MAGIC per platform.
 * 15-Sep-02  Gerd Woetzel  <gerd.woetzel@GMD.DE>
 *      slightly changed framework for sparc
 * 31-Apr-02  Armin Rigo  <arigo@ulb.ac.be>
 *      Added ebx, esi and edi register-saves.
 * 01-Mar-02  Samual M. Rushing  <rushing@ironport.com>
 *      Ported from i386.
 */
#define STACK_REFPLUS 1

#ifdef SLP_EVAL

/* #define STACK_MAGIC 3 */
/* the above works fine with gcc 2.96, but 2.95.3 wants this */
#define STACK_MAGIC 0

#define REGS_TO_SAVE "r12", "r13", "r14", "r15"
static int
slp_switch(void)
{
    int err;
    void* rbp;
    void* rbx;
    unsigned int csr;
    unsigned short cw;
    register long *stackref, stsizediff;
    /* empty asm with a clobber list: forces gcc to save and restore
       r12-r15 around the switch instead of keeping values in them */
    __asm__ volatile ("" : : : REGS_TO_SAVE);
    /* save the x87 control word and the SSE control/status register */
    __asm__ volatile ("fstcw %0" : "=m" (cw));
    __asm__ volatile ("stmxcsr %0" : "=m" (csr));
    /* preserve rbp and rbx in memory so they survive the stack switch */
    __asm__ volatile ("movq %%rbp, %0" : "=m" (rbp));
    __asm__ volatile ("movq %%rbx, %0" : "=m" (rbx));
    /* capture the current stack pointer */
    __asm__ ("movq %%rsp, %0" : "=g" (stackref));
    {
        /* save the current stack slice; stsizediff receives the distance
           to the target stack */
        SLP_SAVE_STATE(stackref, stsizediff);
        /* shift rsp and rbp onto the target stack */
        __asm__ volatile (
            "addq %0, %%rsp\n"
            "addq %0, %%rbp\n"
            :
            : "r" (stsizediff)
            );
        /* copy the previously saved stack contents back into place */
        SLP_RESTORE_STATE();
        /* report success: clear rax, which is bound to err */
        __asm__ volatile ("xorq %%rax, %%rax" : "=a" (err));
    }
    /* restore the saved registers and control words */
    __asm__ volatile ("movq %0, %%rbx" : : "m" (rbx));
    __asm__ volatile ("movq %0, %%rbp" : : "m" (rbp));
    __asm__ volatile ("ldmxcsr %0" : : "m" (csr));
    __asm__ volatile ("fldcw %0" : : "m" (cw));
    /* second clobber barrier, mirroring the one above */
    __asm__ volatile ("" : : : REGS_TO_SAVE);
    return err;
}

#endif
/*
 * further self-processing support
 */

/*
 * if you want to add self-inspection tools, place them
 * here. See the x86_msvc for the necessary defines.
 * These features are highly experimental and not
 * essential yet.
 */
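
/*
 * Illustrative sketch only (guarded out with #if 0, not part of the upstream
 * header): this header supplies just slp_switch(); the C file that includes
 * it is expected to define SLP_EVAL together with the SLP_SAVE_STATE and
 * SLP_RESTORE_STATE macros beforehand. The no-op macro bodies and the small
 * driver below are assumptions made purely for demonstration: with
 * stsizediff forced to 0, the function stays on the current stack and
 * simply returns 0.
 */
#if 0
#include <stdio.h>

#define SLP_EVAL
/* assumed contract: set stsizediff to the signed byte offset that moves
   rsp/rbp onto the target stack; 0 keeps execution on the current stack */
#define SLP_SAVE_STATE(stackref, stsizediff) \
    do { (void)(stackref); (stsizediff) = 0; } while (0)
/* assumed contract: copy the previously saved stack slice back into place
   after rsp has been adjusted; nothing to do in this toy version */
#define SLP_RESTORE_STATE() do { } while (0)

#include "switch_amd64_unix.h"

int main(void)
{
    /* expected to print 0: the asm path ran and reported success */
    printf("slp_switch() -> %d\n", slp_switch());
    return 0;
}
#endif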