cpcmd.c

/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2007
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Christian Borntraeger (cborntra@de.ibm.com),
 */

#define KMSG_COMPONENT "cpcmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <asm/io.h>

static DEFINE_SPINLOCK(cpcmd_lock);

/* DIAGNOSE 8 command buffer; CP commands are limited to 240 characters. */
static char cpcmd_buf[241];

/* Issue the CP command in cpcmd_buf without a response buffer. */
static int diag8_noresponse(int cmdlen)
{
	register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
	register unsigned long reg3 asm ("3") = cmdlen;

	asm volatile(
		"	sam31\n"		/* switch to 31-bit addressing mode */
		"	diag	%1,%0,0x8\n"	/* DIAGNOSE 0x8: CP command */
		"	sam64\n"		/* switch back to 64-bit mode */
		: "+d" (reg3) : "d" (reg2) : "cc");
	return reg3;			/* CP response code */
}

/*
 * Issue the CP command in cpcmd_buf with a response buffer. On return
 * *rlen holds the full length of CP's response, which may exceed the
 * buffer size if the response was truncated.
 */
static int diag8_response(int cmdlen, char *response, int *rlen)
{
	register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
	register unsigned long reg3 asm ("3") = (addr_t) response;
	register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L;	/* flag: response buffer provided */
	register unsigned long reg5 asm ("5") = *rlen;

	asm volatile(
		"	sam31\n"
		"	diag	%2,%0,0x8\n"
		"	sam64\n"
		"	brc	8,1f\n"		/* cc 0: response fit into the buffer */
		"	agr	%1,%4\n"	/* else add the buffer size to get the full length */
		"1:\n"
		: "+d" (reg4), "+d" (reg5)
		: "d" (reg2), "d" (reg3), "d" (*rlen) : "cc");
	*rlen = reg5;
	return reg4;			/* CP response code */
}

/*
 * __cpcmd has some restrictions over cpcmd
 *  - the response buffer must reside below 2GB (if any)
 *  - __cpcmd is unlocked and therefore not SMP-safe
 */
int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
	int cmdlen;
	int rc;
	int response_len;

	cmdlen = strlen(cmd);
	BUG_ON(cmdlen > 240);
	memcpy(cpcmd_buf, cmd, cmdlen);
	ASCEBC(cpcmd_buf, cmdlen);		/* CP expects the command in EBCDIC */

	diag_stat_inc(DIAG_STAT_X008);
	if (response) {
		memset(response, 0, rlen);
		response_len = rlen;
		rc = diag8_response(cmdlen, response, &rlen);
		EBCASC(response, response_len);	/* convert the response back to ASCII */
	} else {
		rc = diag8_noresponse(cmdlen);
	}
	if (response_code)
		*response_code = rc;
	return rlen;
}
EXPORT_SYMBOL(__cpcmd);
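
/*
 * Illustrative sketch, not part of the original file: since __cpcmd is
 * unlocked and shares the static cpcmd_buf, a caller has to provide its
 * own serialization. With response == NULL no response buffer is used,
 * so the below-2GB restriction does not apply. The lock, function name
 * and CP command below are assumptions made for the example.
 */
static DEFINE_SPINLOCK(example_lock);		/* hypothetical caller-side lock */

static void example_cp_set_run(void)
{
	unsigned long flags;
	int response_code;

	spin_lock_irqsave(&example_lock, flags);
	__cpcmd("SET RUN ON", NULL, 0, &response_code);
	spin_unlock_irqrestore(&example_lock, flags);
}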

/*
 * cpcmd is the locked, SMP-safe variant: if the caller's response buffer
 * is not usable for the 31-bit diagnose (not identity-mapped or not
 * entirely below 2GB), the response is bounced through a buffer
 * allocated below 2GB (GFP_DMA).
 */
int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
	char *lowbuf;
	int len;
	unsigned long flags;

	if ((virt_to_phys(response) != (unsigned long) response) ||
	    (((unsigned long)response + rlen) >> 31)) {
		lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA);
		if (!lowbuf) {
			pr_warn("The cpcmd kernel function failed to allocate a response buffer\n");
			return -ENOMEM;
		}
		spin_lock_irqsave(&cpcmd_lock, flags);
		len = __cpcmd(cmd, lowbuf, rlen, response_code);
		spin_unlock_irqrestore(&cpcmd_lock, flags);
		memcpy(response, lowbuf, rlen);
		kfree(lowbuf);
	} else {
		spin_lock_irqsave(&cpcmd_lock, flags);
		len = __cpcmd(cmd, response, rlen, response_code);
		spin_unlock_irqrestore(&cpcmd_lock, flags);
	}
	return len;
}
EXPORT_SYMBOL(cpcmd);
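
/*
 * Illustrative sketch, not part of the original file: how a kernel-side
 * caller might issue a CP command through the locked cpcmd() interface.
 * The function name and the "QUERY USERID" command are assumptions made
 * for the example; cpcmd() returns the full response length (which may
 * exceed the buffer size) or -ENOMEM if no low buffer could be allocated.
 */
static int example_query_userid(void)
{
	char response[128];
	int response_code;
	int len;

	len = cpcmd("QUERY USERID", response, sizeof(response), &response_code);
	if (len < 0)
		return len;			/* -ENOMEM */
	response[sizeof(response) - 1] = 0;	/* ensure NUL termination */
	if (len > (int) sizeof(response))
		pr_info("response truncated, %d bytes needed\n", len);
	else
		pr_info("CP response: %s (response code %d)\n", response, response_code);
	return 0;
}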