parport_ip32.c

  1. /* Low-level parallel port routines for built-in port on SGI IP32
  2. *
  3. * Author: Arnaud Giersch <arnaud.giersch@free.fr>
  4. *
  5. * Based on parport_pc.c by
  6. * Phil Blundell, Tim Waugh, Jose Renau, David Campbell,
  7. * Andrea Arcangeli, et al.
  8. *
  9. * Thanks to Ilya A. Volynets-Evenbakh for his help.
  10. *
  11. * Copyright (C) 2005, 2006 Arnaud Giersch.
  12. *
  13. * This program is free software; you can redistribute it and/or modify it
  14. * under the terms of the GNU General Public License as published by the Free
  15. * Software Foundation; either version 2 of the License, or (at your option)
  16. * any later version.
  17. *
  18. * This program is distributed in the hope that it will be useful, but WITHOUT
  19. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  20. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  21. * more details.
  22. *
  23. * You should have received a copy of the GNU General Public License along
  24. * with this program; if not, write to the Free Software Foundation, Inc., 59
  25. * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  26. */
  27. /* Current status:
  28. *
  29. * Basic SPP and PS2 modes are supported.
  30. * Support for parallel port IRQ is present.
  31. * Hardware SPP (a.k.a. compatibility), EPP, and ECP modes are
  32. * supported.
  33. * SPP/ECP FIFO can be driven in PIO or DMA mode. PIO mode can work with
  34. * or without interrupt support.
  35. *
  36. * Hardware ECP mode is not fully implemented (ecp_read_data and
  37. * ecp_write_addr are actually missing).
  38. *
  39. * To do:
  40. *
  41. * Fully implement ECP mode.
  42. * EPP and ECP modes need to be tested. I currently do not own any
  43. * peripheral supporting these extended modes, and cannot test them.
  44. * If DMA mode works well, decide if support for PIO FIFO modes should be
  45. * dropped.
  46. * Use the io{read,write} family of functions when they become available in
  47. * the linux-mips.org tree. Note: the MIPS-specific functions readsb()
  48. * and writesb() are to be replaced with ioread8_rep() and iowrite8_rep()
  49. * respectively.
  50. */
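/* Illustrative sketch only (not part of the driver): assuming the generic
 * iomap helpers with their usual signatures, the planned conversion of the
 * FIFO accesses would look like this:
 *
 *	readsb(priv->regs.fifo, buf, count);		current MIPS-specific call
 *	ioread8_rep(priv->regs.fifo, buf, count);	intended replacement
 *
 * where buf and count are whatever buffer and length the caller uses.
 */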
  51. /* The built-in parallel port on the SGI O2 workstation (a.k.a. IP32) is an
  52. * IEEE 1284 parallel port driven by a Texas Instruments TL16PIR552PH chip[1].
  53. * This chip supports SPP, bidirectional, EPP and ECP modes. It has a 16 byte
  54. * FIFO buffer and supports DMA transfers.
  55. *
  56. * [1] http://focus.ti.com/docs/prod/folders/print/tl16pir552.html
  57. *
  58. * Theoretically, we could simply use the parport_pc module. It is however
  59. * not so simple. The parport_pc code assumes that the parallel port
  60. * registers are port-mapped. On the O2, they are memory-mapped.
  61. * Furthermore, each register is replicated on 256 consecutive addresses (as
  62. * it is for the built-in serial ports on the same chip).
  63. */
  64. /*--- Some configuration defines ---------------------------------------*/
  65. /* DEBUG_PARPORT_IP32
  66. * 0 disable debug
  67. * 1 standard level: pr_debug1 is enabled
  68. * 2 parport_ip32_dump_state is enabled
  69. * >=3 verbose level: pr_debug is enabled
  70. */
  71. #if !defined(DEBUG_PARPORT_IP32)
  72. # define DEBUG_PARPORT_IP32 0 /* 0 (disabled) for production */
  73. #endif
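/* Illustrative only: one way to select a debug level at build time, without
 * editing this file, is a per-object CFLAGS line in the Makefile (a standard
 * kbuild mechanism; the exact line below is an example, not taken from the
 * real Makefile):
 *
 *	CFLAGS_parport_ip32.o := -DDEBUG_PARPORT_IP32=2
 */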
  74. /*----------------------------------------------------------------------*/
  75. /* Setup DEBUG macros. This is done before any includes, just in case we
  76. * activate pr_debug() with DEBUG_PARPORT_IP32 >= 3.
  77. */
  78. #if DEBUG_PARPORT_IP32 == 1
  79. # warning DEBUG_PARPORT_IP32 == 1
  80. #elif DEBUG_PARPORT_IP32 == 2
  81. # warning DEBUG_PARPORT_IP32 == 2
  82. #elif DEBUG_PARPORT_IP32 >= 3
  83. # warning DEBUG_PARPORT_IP32 >= 3
  84. # if !defined(DEBUG)
  85. # define DEBUG /* enable pr_debug() in kernel.h */
  86. # endif
  87. #endif
  88. #include <linux/completion.h>
  89. #include <linux/delay.h>
  90. #include <linux/dma-mapping.h>
  91. #include <linux/err.h>
  92. #include <linux/init.h>
  93. #include <linux/interrupt.h>
  94. #include <linux/jiffies.h>
  95. #include <linux/kernel.h>
  96. #include <linux/module.h>
  97. #include <linux/parport.h>
  98. #include <linux/sched.h>
  99. #include <linux/slab.h>
  100. #include <linux/spinlock.h>
  101. #include <linux/stddef.h>
  102. #include <linux/types.h>
  103. #include <asm/io.h>
  104. #include <asm/ip32/ip32_ints.h>
  105. #include <asm/ip32/mace.h>
  106. /*--- Global variables -------------------------------------------------*/
  107. /* Verbose probing on by default for debugging. */
  108. #if DEBUG_PARPORT_IP32 >= 1
  109. # define DEFAULT_VERBOSE_PROBING 1
  110. #else
  111. # define DEFAULT_VERBOSE_PROBING 0
  112. #endif
  113. /* Default prefix for printk */
  114. #define PPIP32 "parport_ip32: "
  115. /*
  116. * These are the module parameters:
  117. * @features: bit mask of features to enable/disable
  118. * (all enabled by default)
  119. * @verbose_probing: log chit-chat during initialization
  120. */
  121. #define PARPORT_IP32_ENABLE_IRQ (1U << 0)
  122. #define PARPORT_IP32_ENABLE_DMA (1U << 1)
  123. #define PARPORT_IP32_ENABLE_SPP (1U << 2)
  124. #define PARPORT_IP32_ENABLE_EPP (1U << 3)
  125. #define PARPORT_IP32_ENABLE_ECP (1U << 4)
  126. static unsigned int features = ~0U;
  127. static int verbose_probing = DEFAULT_VERBOSE_PROBING;
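/* Illustrative only: assuming the module_param() declarations that normally
 * accompany these variables, both parameters could be set at load time, e.g.:
 *
 *	modprobe parport_ip32 verbose_probing=1 features=0x5
 *
 * where 0x5 = PARPORT_IP32_ENABLE_IRQ | PARPORT_IP32_ENABLE_SPP, i.e. IRQ and
 * SPP support enabled, DMA/EPP/ECP disabled.
 */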
  128. /* We do not support more than one port. */
  129. static struct parport *this_port = NULL;
  130. /* Timing constants for FIFO modes. */
  131. #define FIFO_NFAULT_TIMEOUT 100 /* milliseconds */
  132. #define FIFO_POLLING_INTERVAL 50 /* microseconds */
  133. /*--- I/O register definitions -----------------------------------------*/
  134. /**
  135. * struct parport_ip32_regs - virtual addresses of parallel port registers
  136. * @data: Data Register
  137. * @dsr: Device Status Register
  138. * @dcr: Device Control Register
  139. * @eppAddr: EPP Address Register
  140. * @eppData0: EPP Data Register 0
  141. * @eppData1: EPP Data Register 1
  142. * @eppData2: EPP Data Register 2
  143. * @eppData3: EPP Data Register 3
  144. * @ecpAFifo: ECP Address FIFO
  145. * @fifo: General FIFO register. The same address is used for:
  146. * - cFifo, the Parallel Port DATA FIFO
  147. * - ecpDFifo, the ECP Data FIFO
  148. * - tFifo, the ECP Test FIFO
  149. * @cnfgA: Configuration Register A
  150. * @cnfgB: Configuration Register B
  151. * @ecr: Extended Control Register
  152. */
  153. struct parport_ip32_regs {
  154. void __iomem *data;
  155. void __iomem *dsr;
  156. void __iomem *dcr;
  157. void __iomem *eppAddr;
  158. void __iomem *eppData0;
  159. void __iomem *eppData1;
  160. void __iomem *eppData2;
  161. void __iomem *eppData3;
  162. void __iomem *ecpAFifo;
  163. void __iomem *fifo;
  164. void __iomem *cnfgA;
  165. void __iomem *cnfgB;
  166. void __iomem *ecr;
  167. };
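/* A minimal sketch (not the driver's probe code) of how this structure could
 * be filled in, given the memory-mapped layout described at the top of the
 * file, where each register is replicated on 256 consecutive addresses.  The
 * base pointer and the 0x100 spacing between registers are illustrative
 * assumptions, not values taken from the TL16PIR552 or MACE documentation.
 */
static void parport_ip32_regs_sketch(struct parport_ip32_regs *regs,
				     void __iomem *base)
{
	/* hypothetical offsets: one replicated 256-byte block per register */
	regs->data = base + 0 * 0x100;
	regs->dsr  = base + 1 * 0x100;
	regs->dcr  = base + 2 * 0x100;
	regs->ecr  = base + 3 * 0x100;
	/* ...remaining registers would be set up the same way... */
}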
  168. /* Device Status Register */
  169. #define DSR_nBUSY (1U << 7) /* PARPORT_STATUS_BUSY */
  170. #define DSR_nACK (1U << 6) /* PARPORT_STATUS_ACK */
  171. #define DSR_PERROR (1U << 5) /* PARPORT_STATUS_PAPEROUT */
  172. #define DSR_SELECT (1U << 4) /* PARPORT_STATUS_SELECT */
  173. #define DSR_nFAULT (1U << 3) /* PARPORT_STATUS_ERROR */
  174. #define DSR_nPRINT (1U << 2) /* specific to TL16PIR552 */
  175. /* #define DSR_reserved (1U << 1) */
  176. #define DSR_TIMEOUT (1U << 0) /* EPP timeout */
  177. /* Device Control Register */
  178. /* #define DCR_reserved (1U << 7) | (1U << 6) */
  179. #define DCR_DIR (1U << 5) /* direction */
  180. #define DCR_IRQ (1U << 4) /* interrupt on nAck */
  181. #define DCR_SELECT (1U << 3) /* PARPORT_CONTROL_SELECT */
  182. #define DCR_nINIT (1U << 2) /* PARPORT_CONTROL_INIT */
  183. #define DCR_AUTOFD (1U << 1) /* PARPORT_CONTROL_AUTOFD */
  184. #define DCR_STROBE (1U << 0) /* PARPORT_CONTROL_STROBE */
  185. /* ECP Configuration Register A */
  186. #define CNFGA_IRQ (1U << 7)
  187. #define CNFGA_ID_MASK ((1U << 6) | (1U << 5) | (1U << 4))
  188. #define CNFGA_ID_SHIFT 4
  189. #define CNFGA_ID_16 (00U << CNFGA_ID_SHIFT)
  190. #define CNFGA_ID_8 (01U << CNFGA_ID_SHIFT)
  191. #define CNFGA_ID_32 (02U << CNFGA_ID_SHIFT)
  192. /* #define CNFGA_reserved (1U << 3) */
  193. #define CNFGA_nBYTEINTRANS (1U << 2)
  194. #define CNFGA_PWORDLEFT ((1U << 1) | (1U << 0))
  195. /* ECP Configuration Register B */
  196. #define CNFGB_COMPRESS (1U << 7)
  197. #define CNFGB_INTRVAL (1U << 6)
  198. #define CNFGB_IRQ_MASK ((1U << 5) | (1U << 4) | (1U << 3))
  199. #define CNFGB_IRQ_SHIFT 3
  200. #define CNFGB_DMA_MASK ((1U << 2) | (1U << 1) | (1U << 0))
  201. #define CNFGB_DMA_SHIFT 0
  202. /* Extended Control Register */
  203. #define ECR_MODE_MASK ((1U << 7) | (1U << 6) | (1U << 5))
  204. #define ECR_MODE_SHIFT 5
  205. #define ECR_MODE_SPP (00U << ECR_MODE_SHIFT)
  206. #define ECR_MODE_PS2 (01U << ECR_MODE_SHIFT)
  207. #define ECR_MODE_PPF (02U << ECR_MODE_SHIFT)
  208. #define ECR_MODE_ECP (03U << ECR_MODE_SHIFT)
  209. #define ECR_MODE_EPP (04U << ECR_MODE_SHIFT)
  210. /* #define ECR_MODE_reserved (05U << ECR_MODE_SHIFT) */
  211. #define ECR_MODE_TST (06U << ECR_MODE_SHIFT)
  212. #define ECR_MODE_CFG (07U << ECR_MODE_SHIFT)
  213. #define ECR_nERRINTR (1U << 4)
  214. #define ECR_DMAEN (1U << 3)
  215. #define ECR_SERVINTR (1U << 2)
  216. #define ECR_F_FULL (1U << 1)
  217. #define ECR_F_EMPTY (1U << 0)
  218. /*--- Private data -----------------------------------------------------*/
  219. /**
  220. * enum parport_ip32_irq_mode - operation mode of interrupt handler
  221. * @PARPORT_IP32_IRQ_FWD: forward interrupt to the upper parport layer
  222. * @PARPORT_IP32_IRQ_HERE: interrupt is handled locally
  223. */
  224. enum parport_ip32_irq_mode { PARPORT_IP32_IRQ_FWD, PARPORT_IP32_IRQ_HERE };
  225. /**
  226. * struct parport_ip32_private - private stuff for &struct parport
  227. * @regs: register addresses
  228. * @dcr_cache: cached contents of DCR
  229. * @dcr_writable: bit mask of writable DCR bits
  230. * @pword: number of bytes per PWord
  231. * @fifo_depth: number of PWords that FIFO will hold
  232. * @readIntrThreshold: minimum number of PWords we can read
  233. * if we get an interrupt
  234. * @writeIntrThreshold: minimum number of PWords we can write
  235. * if we get an interrupt
  236. * @irq_mode: operation mode of interrupt handler for this port
  237. * @irq_complete: completion used to wait for an interrupt to occur
  238. */
  239. struct parport_ip32_private {
  240. struct parport_ip32_regs regs;
  241. unsigned int dcr_cache;
  242. unsigned int dcr_writable;
  243. unsigned int pword;
  244. unsigned int fifo_depth;
  245. unsigned int readIntrThreshold;
  246. unsigned int writeIntrThreshold;
  247. enum parport_ip32_irq_mode irq_mode;
  248. struct completion irq_complete;
  249. };
  250. /*--- Debug code -------------------------------------------------------*/
  251. /*
  252. * pr_debug1 - print debug messages
  253. *
  254. * This is like pr_debug(), but is defined for %DEBUG_PARPORT_IP32 >= 1
  255. */
  256. #if DEBUG_PARPORT_IP32 >= 1
  257. # define pr_debug1(...) printk(KERN_DEBUG __VA_ARGS__)
  258. #else /* DEBUG_PARPORT_IP32 < 1 */
  259. # define pr_debug1(...) do { } while (0)
  260. #endif
  261. /*
  262. * pr_trace, pr_trace1 - trace function calls
  263. * @p: pointer to &struct parport
  264. * @fmt: printk format string
  265. * @...: parameters for format string
  266. *
  267. * Macros used to trace function calls. The given string is formatted after
  268. * the function name. pr_trace() uses pr_debug(), and pr_trace1() uses
  269. * pr_debug1(). __pr_trace() is the low-level macro and is not to be used
  270. * directly.
  271. */
  272. #define __pr_trace(pr, p, fmt, ...) \
  273. pr("%s: %s" fmt "\n", \
  274. ({ const struct parport *__p = (p); \
  275. __p ? __p->name : "parport_ip32"; }), \
  276. __func__ , ##__VA_ARGS__)
  277. #define pr_trace(p, fmt, ...) __pr_trace(pr_debug, p, fmt , ##__VA_ARGS__)
  278. #define pr_trace1(p, fmt, ...) __pr_trace(pr_debug1, p, fmt , ##__VA_ARGS__)
  279. /*
  280. * __pr_probe, pr_probe - print message if @verbose_probing is true
  281. * @p: pointer to &struct parport
  282. * @fmt: printk format string
  283. * @...: parameters for format string
  284. *
  285. * For new lines, use pr_probe(). Use __pr_probe() for continued lines.
  286. */
  287. #define __pr_probe(...) \
  288. do { if (verbose_probing) printk(__VA_ARGS__); } while (0)
  289. #define pr_probe(p, fmt, ...) \
  290. __pr_probe(KERN_INFO PPIP32 "0x%lx: " fmt, (p)->base , ##__VA_ARGS__)
  291. /*
  292. * parport_ip32_dump_state - print register status of parport
  293. * @p: pointer to &struct parport
  294. * @str: string to add in message
  295. * @show_ecp_config: shall we dump ECP configuration registers too?
  296. *
  297. * This function is only here for debugging purposes, and should be used with
  298. * care. Reading the parallel port registers may have undesired side effects.
  299. * Especially if @show_ecp_config is true, the parallel port is reset.
  300. * This function is only defined if %DEBUG_PARPORT_IP32 >= 2.
  301. */
  302. #if DEBUG_PARPORT_IP32 >= 2
  303. static void parport_ip32_dump_state(struct parport *p, char *str,
  304. unsigned int show_ecp_config)
  305. {
  306. struct parport_ip32_private * const priv = p->physport->private_data;
  307. unsigned int i;
  308. printk(KERN_DEBUG PPIP32 "%s: state (%s):\n", p->name, str);
  309. {
  310. static const char ecr_modes[8][4] = {"SPP", "PS2", "PPF",
  311. "ECP", "EPP", "???",
  312. "TST", "CFG"};
  313. unsigned int ecr = readb(priv->regs.ecr);
  314. printk(KERN_DEBUG PPIP32 " ecr=0x%02x", ecr);
  315. printk(" %s",
  316. ecr_modes[(ecr & ECR_MODE_MASK) >> ECR_MODE_SHIFT]);
  317. if (ecr & ECR_nERRINTR)
  318. printk(",nErrIntrEn");
  319. if (ecr & ECR_DMAEN)
  320. printk(",dmaEn");
  321. if (ecr & ECR_SERVINTR)
  322. printk(",serviceIntr");
  323. if (ecr & ECR_F_FULL)
  324. printk(",f_full");
  325. if (ecr & ECR_F_EMPTY)
  326. printk(",f_empty");
  327. printk("\n");
  328. }
  329. if (show_ecp_config) {
  330. unsigned int oecr, cnfgA, cnfgB;
  331. oecr = readb(priv->regs.ecr);
  332. writeb(ECR_MODE_PS2, priv->regs.ecr);
  333. writeb(ECR_MODE_CFG, priv->regs.ecr);
  334. cnfgA = readb(priv->regs.cnfgA);
  335. cnfgB = readb(priv->regs.cnfgB);
  336. writeb(ECR_MODE_PS2, priv->regs.ecr);
  337. writeb(oecr, priv->regs.ecr);
  338. printk(KERN_DEBUG PPIP32 " cnfgA=0x%02x", cnfgA);
  339. printk(" ISA-%s", (cnfgA & CNFGA_IRQ) ? "Level" : "Pulses");
  340. switch (cnfgA & CNFGA_ID_MASK) {
  341. case CNFGA_ID_8:
  342. printk(",8 bits");
  343. break;
  344. case CNFGA_ID_16:
  345. printk(",16 bits");
  346. break;
  347. case CNFGA_ID_32:
  348. printk(",32 bits");
  349. break;
  350. default:
  351. printk(",unknown ID");
  352. break;
  353. }
  354. if (!(cnfgA & CNFGA_nBYTEINTRANS))
  355. printk(",ByteInTrans");
  356. if ((cnfgA & CNFGA_ID_MASK) != CNFGA_ID_8)
  357. printk(",%d byte%s left", cnfgA & CNFGA_PWORDLEFT,
  358. ((cnfgA & CNFGA_PWORDLEFT) > 1) ? "s" : "");
  359. printk("\n");
  360. printk(KERN_DEBUG PPIP32 " cnfgB=0x%02x", cnfgB);
  361. printk(" irq=%u,dma=%u",
  362. (cnfgB & CNFGB_IRQ_MASK) >> CNFGB_IRQ_SHIFT,
  363. (cnfgB & CNFGB_DMA_MASK) >> CNFGB_DMA_SHIFT);
  364. printk(",intrValue=%d", !!(cnfgB & CNFGB_INTRVAL));
  365. if (cnfgB & CNFGB_COMPRESS)
  366. printk(",compress");
  367. printk("\n");
  368. }
  369. for (i = 0; i < 2; i++) {
  370. unsigned int dcr = i ? priv->dcr_cache : readb(priv->regs.dcr);
  371. printk(KERN_DEBUG PPIP32 " dcr(%s)=0x%02x",
  372. i ? "soft" : "hard", dcr);
  373. printk(" %s", (dcr & DCR_DIR) ? "rev" : "fwd");
  374. if (dcr & DCR_IRQ)
  375. printk(",ackIntEn");
  376. if (!(dcr & DCR_SELECT))
  377. printk(",nSelectIn");
  378. if (dcr & DCR_nINIT)
  379. printk(",nInit");
  380. if (!(dcr & DCR_AUTOFD))
  381. printk(",nAutoFD");
  382. if (!(dcr & DCR_STROBE))
  383. printk(",nStrobe");
  384. printk("\n");
  385. }
  386. #define sep (f++ ? ',' : ' ')
  387. {
  388. unsigned int f = 0;
  389. unsigned int dsr = readb(priv->regs.dsr);
  390. printk(KERN_DEBUG PPIP32 " dsr=0x%02x", dsr);
  391. if (!(dsr & DSR_nBUSY))
  392. printk("%cBusy", sep);
  393. if (dsr & DSR_nACK)
  394. printk("%cnAck", sep);
  395. if (dsr & DSR_PERROR)
  396. printk("%cPError", sep);
  397. if (dsr & DSR_SELECT)
  398. printk("%cSelect", sep);
  399. if (dsr & DSR_nFAULT)
  400. printk("%cnFault", sep);
  401. if (!(dsr & DSR_nPRINT))
  402. printk("%c(Print)", sep);
  403. if (dsr & DSR_TIMEOUT)
  404. printk("%cTimeout", sep);
  405. printk("\n");
  406. }
  407. #undef sep
  408. }
  409. #else /* DEBUG_PARPORT_IP32 < 2 */
  410. #define parport_ip32_dump_state(...) do { } while (0)
  411. #endif
  412. /*
  413. * CHECK_EXTRA_BITS - track and log extra bits
  414. * @p: pointer to &struct parport
  415. * @b: byte to inspect
  416. * @m: bit mask of authorized bits
  417. *
  418. * This is used to track and log extra bits that should not be there in
  419. * parport_ip32_write_control() and parport_ip32_frob_control(). It is only
  420. * defined if %DEBUG_PARPORT_IP32 >= 1.
  421. */
  422. #if DEBUG_PARPORT_IP32 >= 1
  423. #define CHECK_EXTRA_BITS(p, b, m) \
  424. do { \
  425. unsigned int __b = (b), __m = (m); \
  426. if (__b & ~__m) \
  427. pr_debug1(PPIP32 "%s: extra bits in %s(%s): " \
  428. "0x%02x/0x%02x\n", \
  429. (p)->name, __func__, #b, __b, __m); \
  430. } while (0)
  431. #else /* DEBUG_PARPORT_IP32 < 1 */
  432. #define CHECK_EXTRA_BITS(...) do { } while (0)
  433. #endif
  434. /*--- IP32 parallel port DMA operations --------------------------------*/
  435. /**
  436. * struct parport_ip32_dma_data - private data needed for DMA operation
  437. * @dir: DMA direction (from or to device)
  438. * @buf: buffer physical address
  439. * @len: buffer length
  440. * @next: address of next bytes to DMA transfer
  441. * @left: number of bytes remaining
  442. * @ctx: next context to write (0: context_a; 1: context_b)
  443. * @irq_on: are the DMA IRQs currently enabled?
  444. * @lock: spinlock to protect access to the structure
  445. */
  446. struct parport_ip32_dma_data {
  447. enum dma_data_direction dir;
  448. dma_addr_t buf;
  449. dma_addr_t next;
  450. size_t len;
  451. size_t left;
  452. unsigned int ctx;
  453. unsigned int irq_on;
  454. spinlock_t lock;
  455. };
  456. static struct parport_ip32_dma_data parport_ip32_dma;
  457. /**
  458. * parport_ip32_dma_setup_context - setup next DMA context
  459. * @limit: maximum data size for the context
  460. *
  461. * The alignment constraints must be verified by the calling function, and the
  462. * parameter @limit must be set accordingly.
  463. */
  464. static void parport_ip32_dma_setup_context(unsigned int limit)
  465. {
  466. unsigned long flags;
  467. spin_lock_irqsave(&parport_ip32_dma.lock, flags);
  468. if (parport_ip32_dma.left > 0) {
  469. /* Note: ctxreg is "volatile" here only because
  470. * mace->perif.ctrl.parport.context_a and context_b are
  471. * "volatile". */
  472. volatile u64 __iomem *ctxreg = (parport_ip32_dma.ctx == 0) ?
  473. &mace->perif.ctrl.parport.context_a :
  474. &mace->perif.ctrl.parport.context_b;
  475. u64 count;
  476. u64 ctxval;
  477. if (parport_ip32_dma.left <= limit) {
  478. count = parport_ip32_dma.left;
  479. ctxval = MACEPAR_CONTEXT_LASTFLAG;
  480. } else {
  481. count = limit;
  482. ctxval = 0;
  483. }
  484. pr_trace(NULL,
  485. "(%u): 0x%04x:0x%04x, %u -> %u%s",
  486. limit,
  487. (unsigned int)parport_ip32_dma.buf,
  488. (unsigned int)parport_ip32_dma.next,
  489. (unsigned int)count,
  490. parport_ip32_dma.ctx, ctxval ? "*" : "");
  491. ctxval |= parport_ip32_dma.next &
  492. MACEPAR_CONTEXT_BASEADDR_MASK;
  493. ctxval |= ((count - 1) << MACEPAR_CONTEXT_DATALEN_SHIFT) &
  494. MACEPAR_CONTEXT_DATALEN_MASK;
  495. writeq(ctxval, ctxreg);
  496. parport_ip32_dma.next += count;
  497. parport_ip32_dma.left -= count;
  498. parport_ip32_dma.ctx ^= 1U;
  499. }
  500. /* If there is nothing more to send, disable IRQs to avoid facing
  501. * an IRQ storm which can lock up the machine. Disable them
  502. * only once. */
  503. if (parport_ip32_dma.left == 0 && parport_ip32_dma.irq_on) {
  504. pr_debug(PPIP32 "IRQ off (ctx)\n");
  505. disable_irq_nosync(MACEISA_PAR_CTXA_IRQ);
  506. disable_irq_nosync(MACEISA_PAR_CTXB_IRQ);
  507. parport_ip32_dma.irq_on = 0;
  508. }
  509. spin_unlock_irqrestore(&parport_ip32_dma.lock, flags);
  510. }
  511. /**
  512. * parport_ip32_dma_interrupt - DMA interrupt handler
  513. * @irq: interrupt number
  514. * @dev_id: unused
  515. */
  516. static irqreturn_t parport_ip32_dma_interrupt(int irq, void *dev_id)
  517. {
  518. if (parport_ip32_dma.left)
  519. pr_trace(NULL, "(%d): ctx=%d", irq, parport_ip32_dma.ctx);
  520. parport_ip32_dma_setup_context(MACEPAR_CONTEXT_DATA_BOUND);
  521. return IRQ_HANDLED;
  522. }
  523. #if DEBUG_PARPORT_IP32
  524. static irqreturn_t parport_ip32_merr_interrupt(int irq, void *dev_id)
  525. {
  526. pr_trace1(NULL, "(%d)", irq);
  527. return IRQ_HANDLED;
  528. }
  529. #endif
  530. /**
  531. * parport_ip32_dma_start - begins a DMA transfer
  532. * @dir: DMA direction: DMA_TO_DEVICE or DMA_FROM_DEVICE
  533. * @addr: pointer to data buffer
  534. * @count: buffer size
  535. *
  536. * Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be
  537. * correctly balanced.
  538. */
  539. static int parport_ip32_dma_start(enum dma_data_direction dir,
  540. void *addr, size_t count)
  541. {
  542. unsigned int limit;
  543. u64 ctrl;
  544. pr_trace(NULL, "(%d, %lu)", dir, (unsigned long)count);
  545. /* FIXME - add support for DMA_FROM_DEVICE. In this case, the buffer
  546. * must be 64-byte aligned. */
  547. BUG_ON(dir != DMA_TO_DEVICE);
  548. /* Reset DMA controller */
  549. ctrl = MACEPAR_CTLSTAT_RESET;
  550. writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
  551. /* DMA IRQs should normally be enabled */
  552. if (!parport_ip32_dma.irq_on) {
  553. WARN_ON(1);
  554. enable_irq(MACEISA_PAR_CTXA_IRQ);
  555. enable_irq(MACEISA_PAR_CTXB_IRQ);
  556. parport_ip32_dma.irq_on = 1;
  557. }
  558. /* Prepare DMA pointers */
  559. parport_ip32_dma.dir = dir;
  560. parport_ip32_dma.buf = dma_map_single(NULL, addr, count, dir);
  561. parport_ip32_dma.len = count;
  562. parport_ip32_dma.next = parport_ip32_dma.buf;
  563. parport_ip32_dma.left = parport_ip32_dma.len;
  564. parport_ip32_dma.ctx = 0;
  565. /* Setup DMA direction and first two contexts */
  566. ctrl = (dir == DMA_TO_DEVICE) ? 0 : MACEPAR_CTLSTAT_DIRECTION;
  567. writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
  568. /* Single transfer should not cross a 4K page boundary */
  569. limit = MACEPAR_CONTEXT_DATA_BOUND -
  570. (parport_ip32_dma.next & (MACEPAR_CONTEXT_DATA_BOUND - 1));
  571. parport_ip32_dma_setup_context(limit);
  572. parport_ip32_dma_setup_context(MACEPAR_CONTEXT_DATA_BOUND);
  573. /* Real start of DMA transfer */
  574. ctrl |= MACEPAR_CTLSTAT_ENABLE;
  575. writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
  576. return 0;
  577. }
  578. /**
  579. * parport_ip32_dma_stop - ends a running DMA transfer
  580. *
  581. * Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be
  582. * correctly balanced.
  583. */
  584. static void parport_ip32_dma_stop(void)
  585. {
  586. u64 ctx_a;
  587. u64 ctx_b;
  588. u64 ctrl;
  589. u64 diag;
  590. size_t res[2]; /* {[0] = res_a, [1] = res_b} */
  591. pr_trace(NULL, "()");
  592. /* Disable IRQs */
  593. spin_lock_irq(&parport_ip32_dma.lock);
  594. if (parport_ip32_dma.irq_on) {
  595. pr_debug(PPIP32 "IRQ off (stop)\n");
  596. disable_irq_nosync(MACEISA_PAR_CTXA_IRQ);
  597. disable_irq_nosync(MACEISA_PAR_CTXB_IRQ);
  598. parport_ip32_dma.irq_on = 0;
  599. }
  600. spin_unlock_irq(&parport_ip32_dma.lock);
  601. /* Force IRQ synchronization, even if the IRQs were disabled
  602. * elsewhere. */
  603. synchronize_irq(MACEISA_PAR_CTXA_IRQ);
  604. synchronize_irq(MACEISA_PAR_CTXB_IRQ);
  605. /* Stop DMA transfer */
  606. ctrl = readq(&mace->perif.ctrl.parport.cntlstat);
  607. ctrl &= ~MACEPAR_CTLSTAT_ENABLE;
  608. writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
  609. /* Adjust residue (parport_ip32_dma.left) */
  610. ctx_a = readq(&mace->perif.ctrl.parport.context_a);
  611. ctx_b = readq(&mace->perif.ctrl.parport.context_b);
  612. ctrl = readq(&mace->perif.ctrl.parport.cntlstat);
  613. diag = readq(&mace->perif.ctrl.parport.diagnostic);
  614. res[0] = (ctrl & MACEPAR_CTLSTAT_CTXA_VALID) ?
  615. 1 + ((ctx_a & MACEPAR_CONTEXT_DATALEN_MASK) >>
  616. MACEPAR_CONTEXT_DATALEN_SHIFT) :
  617. 0;
  618. res[1] = (ctrl & MACEPAR_CTLSTAT_CTXB_VALID) ?
  619. 1 + ((ctx_b & MACEPAR_CONTEXT_DATALEN_MASK) >>
  620. MACEPAR_CONTEXT_DATALEN_SHIFT) :
  621. 0;
  622. if (diag & MACEPAR_DIAG_DMACTIVE)
  623. res[(diag & MACEPAR_DIAG_CTXINUSE) != 0] =
  624. 1 + ((diag & MACEPAR_DIAG_CTRMASK) >>
  625. MACEPAR_DIAG_CTRSHIFT);
  626. parport_ip32_dma.left += res[0] + res[1];
  627. /* Reset DMA controller, and re-enable IRQs */
  628. ctrl = MACEPAR_CTLSTAT_RESET;
  629. writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
  630. pr_debug(PPIP32 "IRQ on (stop)\n");
  631. enable_irq(MACEISA_PAR_CTXA_IRQ);
  632. enable_irq(MACEISA_PAR_CTXB_IRQ);
  633. parport_ip32_dma.irq_on = 1;
  634. dma_unmap_single(NULL, parport_ip32_dma.buf, parport_ip32_dma.len,
  635. parport_ip32_dma.dir);
  636. }
  637. /**
  638. * parport_ip32_dma_get_residue - get residue from last DMA transfer
  639. *
  640. * Returns the number of bytes remaining from last DMA transfer.
  641. */
  642. static inline size_t parport_ip32_dma_get_residue(void)
  643. {
  644. return parport_ip32_dma.left;
  645. }
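/* A minimal usage sketch, not taken from the driver itself: calls to
 * parport_ip32_dma_start() and parport_ip32_dma_stop() must stay balanced,
 * and the residue is only meaningful once the transfer has been stopped.
 * The real write path, parport_ip32_fifo_write_block_dma() below, wraps this
 * skeleton with ECR programming and interrupt handling.
 */
static size_t parport_ip32_dma_usage_sketch(void *buf, size_t len)
{
	if (parport_ip32_dma_start(DMA_TO_DEVICE, buf, len))
		return 0;
	/* ... wait here for the transfer to complete or time out ... */
	parport_ip32_dma_stop();
	return len - parport_ip32_dma_get_residue();
}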
  646. /**
  647. * parport_ip32_dma_register - initialize DMA engine
  648. *
  649. * Returns zero for success.
  650. */
  651. static int parport_ip32_dma_register(void)
  652. {
  653. int err;
  654. spin_lock_init(&parport_ip32_dma.lock);
  655. parport_ip32_dma.irq_on = 1;
  656. /* Reset DMA controller */
  657. writeq(MACEPAR_CTLSTAT_RESET, &mace->perif.ctrl.parport.cntlstat);
  658. /* Request IRQs */
  659. err = request_irq(MACEISA_PAR_CTXA_IRQ, parport_ip32_dma_interrupt,
  660. 0, "parport_ip32", NULL);
  661. if (err)
  662. goto fail_a;
  663. err = request_irq(MACEISA_PAR_CTXB_IRQ, parport_ip32_dma_interrupt,
  664. 0, "parport_ip32", NULL);
  665. if (err)
  666. goto fail_b;
  667. #if DEBUG_PARPORT_IP32
  668. /* FIXME - what is this IRQ for? */
  669. err = request_irq(MACEISA_PAR_MERR_IRQ, parport_ip32_merr_interrupt,
  670. 0, "parport_ip32", NULL);
  671. if (err)
  672. goto fail_merr;
  673. #endif
  674. return 0;
  675. #if DEBUG_PARPORT_IP32
  676. fail_merr:
  677. free_irq(MACEISA_PAR_CTXB_IRQ, NULL);
  678. #endif
  679. fail_b:
  680. free_irq(MACEISA_PAR_CTXA_IRQ, NULL);
  681. fail_a:
  682. return err;
  683. }
  684. /**
  685. * parport_ip32_dma_unregister - release and free resources for DMA engine
  686. */
  687. static void parport_ip32_dma_unregister(void)
  688. {
  689. #if DEBUG_PARPORT_IP32
  690. free_irq(MACEISA_PAR_MERR_IRQ, NULL);
  691. #endif
  692. free_irq(MACEISA_PAR_CTXB_IRQ, NULL);
  693. free_irq(MACEISA_PAR_CTXA_IRQ, NULL);
  694. }
  695. /*--- Interrupt handlers and associates --------------------------------*/
  696. /**
  697. * parport_ip32_wakeup - wakes up code waiting for an interrupt
  698. * @p: pointer to &struct parport
  699. */
  700. static inline void parport_ip32_wakeup(struct parport *p)
  701. {
  702. struct parport_ip32_private * const priv = p->physport->private_data;
  703. complete(&priv->irq_complete);
  704. }
  705. /**
  706. * parport_ip32_interrupt - interrupt handler
  707. * @irq: interrupt number
  708. * @dev_id: pointer to &struct parport
  709. *
  710. * Caught interrupts are forwarded to the upper parport layer if @irq_mode is
  711. * %PARPORT_IP32_IRQ_FWD.
  712. */
  713. static irqreturn_t parport_ip32_interrupt(int irq, void *dev_id)
  714. {
  715. struct parport * const p = dev_id;
  716. struct parport_ip32_private * const priv = p->physport->private_data;
  717. enum parport_ip32_irq_mode irq_mode = priv->irq_mode;
  718. switch (irq_mode) {
  719. case PARPORT_IP32_IRQ_FWD:
  720. return parport_irq_handler(irq, dev_id);
  721. case PARPORT_IP32_IRQ_HERE:
  722. parport_ip32_wakeup(p);
  723. break;
  724. }
  725. return IRQ_HANDLED;
  726. }
  727. /*--- Some utility function to manipulate ECR register -----------------*/
  728. /**
  729. * parport_ip32_read_econtrol - read contents of the ECR register
  730. * @p: pointer to &struct parport
  731. */
  732. static inline unsigned int parport_ip32_read_econtrol(struct parport *p)
  733. {
  734. struct parport_ip32_private * const priv = p->physport->private_data;
  735. return readb(priv->regs.ecr);
  736. }
  737. /**
  738. * parport_ip32_write_econtrol - write new contents to the ECR register
  739. * @p: pointer to &struct parport
  740. * @c: new value to write
  741. */
  742. static inline void parport_ip32_write_econtrol(struct parport *p,
  743. unsigned int c)
  744. {
  745. struct parport_ip32_private * const priv = p->physport->private_data;
  746. writeb(c, priv->regs.ecr);
  747. }
  748. /**
  749. * parport_ip32_frob_econtrol - change bits from the ECR register
  750. * @p: pointer to &struct parport
  751. * @mask: bit mask of bits to change
  752. * @val: new value for changed bits
  753. *
  754. * Read from the ECR, mask out the bits in @mask, exclusive-or with the bits
  755. * in @val, and write the result to the ECR.
  756. */
  757. static inline void parport_ip32_frob_econtrol(struct parport *p,
  758. unsigned int mask,
  759. unsigned int val)
  760. {
  761. unsigned int c;
  762. c = (parport_ip32_read_econtrol(p) & ~mask) ^ val;
  763. parport_ip32_write_econtrol(p, c);
  764. }
  765. /**
  766. * parport_ip32_set_mode - change mode of ECP port
  767. * @p: pointer to &struct parport
  768. * @mode: new mode to write in ECR
  769. *
  770. * ECR is reset to a sane state (interrupts and DMA disabled), and placed in
  771. * mode @mode. Go through PS2 mode if needed.
  772. */
  773. static void parport_ip32_set_mode(struct parport *p, unsigned int mode)
  774. {
  775. unsigned int omode;
  776. mode &= ECR_MODE_MASK;
  777. omode = parport_ip32_read_econtrol(p) & ECR_MODE_MASK;
  778. if (!(mode == ECR_MODE_SPP || mode == ECR_MODE_PS2
  779. || omode == ECR_MODE_SPP || omode == ECR_MODE_PS2)) {
  780. /* We have to go through PS2 mode */
  781. unsigned int ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
  782. parport_ip32_write_econtrol(p, ecr);
  783. }
  784. parport_ip32_write_econtrol(p, mode | ECR_nERRINTR | ECR_SERVINTR);
  785. }
  786. /*--- Basic functions needed for parport -------------------------------*/
  787. /**
  788. * parport_ip32_read_data - return current contents of the DATA register
  789. * @p: pointer to &struct parport
  790. */
  791. static inline unsigned char parport_ip32_read_data(struct parport *p)
  792. {
  793. struct parport_ip32_private * const priv = p->physport->private_data;
  794. return readb(priv->regs.data);
  795. }
  796. /**
  797. * parport_ip32_write_data - set new contents for the DATA register
  798. * @p: pointer to &struct parport
  799. * @d: new value to write
  800. */
  801. static inline void parport_ip32_write_data(struct parport *p, unsigned char d)
  802. {
  803. struct parport_ip32_private * const priv = p->physport->private_data;
  804. writeb(d, priv->regs.data);
  805. }
  806. /**
  807. * parport_ip32_read_status - return current contents of the DSR register
  808. * @p: pointer to &struct parport
  809. */
  810. static inline unsigned char parport_ip32_read_status(struct parport *p)
  811. {
  812. struct parport_ip32_private * const priv = p->physport->private_data;
  813. return readb(priv->regs.dsr);
  814. }
  815. /**
  816. * __parport_ip32_read_control - return cached contents of the DCR register
  817. * @p: pointer to &struct parport
  818. */
  819. static inline unsigned int __parport_ip32_read_control(struct parport *p)
  820. {
  821. struct parport_ip32_private * const priv = p->physport->private_data;
  822. return priv->dcr_cache; /* use soft copy */
  823. }
  824. /**
  825. * __parport_ip32_write_control - set new contents for the DCR register
  826. * @p: pointer to &struct parport
  827. * @c: new value to write
  828. */
  829. static inline void __parport_ip32_write_control(struct parport *p,
  830. unsigned int c)
  831. {
  832. struct parport_ip32_private * const priv = p->physport->private_data;
  833. CHECK_EXTRA_BITS(p, c, priv->dcr_writable);
  834. c &= priv->dcr_writable; /* only writable bits */
  835. writeb(c, priv->regs.dcr);
  836. priv->dcr_cache = c; /* update soft copy */
  837. }
  838. /**
  839. * __parport_ip32_frob_control - change bits from the DCR register
  840. * @p: pointer to &struct parport
  841. * @mask: bit mask of bits to change
  842. * @val: new value for changed bits
  843. *
  844. * This is equivalent to reading from the DCR, masking out the bits in @mask,
  845. * exclusive-oring with the bits in @val, and writing the result to the DCR.
  846. * In practice, the cached copy of the DCR is used rather than the register.
  847. */
  848. static inline void __parport_ip32_frob_control(struct parport *p,
  849. unsigned int mask,
  850. unsigned int val)
  851. {
  852. unsigned int c;
  853. c = (__parport_ip32_read_control(p) & ~mask) ^ val;
  854. __parport_ip32_write_control(p, c);
  855. }
  856. /**
  857. * parport_ip32_read_control - return cached contents of the DCR register
  858. * @p: pointer to &struct parport
  859. *
  860. * The return value is masked so as to only return the value of %DCR_STROBE,
  861. * %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
  862. */
  863. static inline unsigned char parport_ip32_read_control(struct parport *p)
  864. {
  865. const unsigned int rm =
  866. DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
  867. return __parport_ip32_read_control(p) & rm;
  868. }
  869. /**
  870. * parport_ip32_write_control - set new contents for the DCR register
  871. * @p: pointer to &struct parport
  872. * @c: new value to write
  873. *
  874. * The value is masked so as to only change the value of %DCR_STROBE,
  875. * %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
  876. */
  877. static inline void parport_ip32_write_control(struct parport *p,
  878. unsigned char c)
  879. {
  880. const unsigned int wm =
  881. DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
  882. CHECK_EXTRA_BITS(p, c, wm);
  883. __parport_ip32_frob_control(p, wm, c & wm);
  884. }
  885. /**
  886. * parport_ip32_frob_control - change bits from the DCR register
  887. * @p: pointer to &struct parport
  888. * @mask: bit mask of bits to change
  889. * @val: new value for changed bits
  890. *
  891. * This differs from __parport_ip32_frob_control() in that it only allows
  892. * changing the value of %DCR_STROBE, %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
  893. */
  894. static inline unsigned char parport_ip32_frob_control(struct parport *p,
  895. unsigned char mask,
  896. unsigned char val)
  897. {
  898. const unsigned int wm =
  899. DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
  900. CHECK_EXTRA_BITS(p, mask, wm);
  901. CHECK_EXTRA_BITS(p, val, wm);
  902. __parport_ip32_frob_control(p, mask & wm, val & wm);
  903. return parport_ip32_read_control(p);
  904. }
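/* Usage sketch (illustrative, not driver code): toggling a single control
 * line through the masked interface looks like
 *
 *	parport_ip32_frob_control(p, DCR_STROBE, DCR_STROBE);	set strobe
 *	parport_ip32_frob_control(p, DCR_STROBE, 0);		clear strobe
 *
 * Bits outside DCR_STROBE/DCR_AUTOFD/DCR_nINIT/DCR_SELECT are masked out,
 * and logged by CHECK_EXTRA_BITS() when DEBUG_PARPORT_IP32 >= 1.
 */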
  905. /**
  906. * parport_ip32_disable_irq - disable interrupts on the rising edge of nACK
  907. * @p: pointer to &struct parport
  908. */
  909. static inline void parport_ip32_disable_irq(struct parport *p)
  910. {
  911. __parport_ip32_frob_control(p, DCR_IRQ, 0);
  912. }
  913. /**
  914. * parport_ip32_enable_irq - enable interrupts on the rising edge of nACK
  915. * @p: pointer to &struct parport
  916. */
  917. static inline void parport_ip32_enable_irq(struct parport *p)
  918. {
  919. __parport_ip32_frob_control(p, DCR_IRQ, DCR_IRQ);
  920. }
  921. /**
  922. * parport_ip32_data_forward - enable host-to-peripheral communications
  923. * @p: pointer to &struct parport
  924. *
  925. * Enable the data line drivers, for 8-bit host-to-peripheral communications.
  926. */
  927. static inline void parport_ip32_data_forward(struct parport *p)
  928. {
  929. __parport_ip32_frob_control(p, DCR_DIR, 0);
  930. }
  931. /**
  932. * parport_ip32_data_reverse - enable peripheral-to-host communications
  933. * @p: pointer to &struct parport
  934. *
  935. * Place the data bus in a high impedance state, if @p->modes has the
  936. * PARPORT_MODE_TRISTATE bit set.
  937. */
  938. static inline void parport_ip32_data_reverse(struct parport *p)
  939. {
  940. __parport_ip32_frob_control(p, DCR_DIR, DCR_DIR);
  941. }
  942. /**
  943. * parport_ip32_init_state - for core parport code
  944. * @dev: pointer to &struct pardevice
  945. * @s: pointer to &struct parport_state to initialize
  946. */
  947. static void parport_ip32_init_state(struct pardevice *dev,
  948. struct parport_state *s)
  949. {
  950. s->u.ip32.dcr = DCR_SELECT | DCR_nINIT;
  951. s->u.ip32.ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
  952. }
  953. /**
  954. * parport_ip32_save_state - for core parport code
  955. * @p: pointer to &struct parport
  956. * @s: pointer to &struct parport_state to save state to
  957. */
  958. static void parport_ip32_save_state(struct parport *p,
  959. struct parport_state *s)
  960. {
  961. s->u.ip32.dcr = __parport_ip32_read_control(p);
  962. s->u.ip32.ecr = parport_ip32_read_econtrol(p);
  963. }
  964. /**
  965. * parport_ip32_restore_state - for core parport code
  966. * @p: pointer to &struct parport
  967. * @s: pointer to &struct parport_state to restore state from
  968. */
  969. static void parport_ip32_restore_state(struct parport *p,
  970. struct parport_state *s)
  971. {
  972. parport_ip32_set_mode(p, s->u.ip32.ecr & ECR_MODE_MASK);
  973. parport_ip32_write_econtrol(p, s->u.ip32.ecr);
  974. __parport_ip32_write_control(p, s->u.ip32.dcr);
  975. }
  976. /*--- EPP mode functions -----------------------------------------------*/
  977. /**
  978. * parport_ip32_clear_epp_timeout - clear Timeout bit in EPP mode
  979. * @p: pointer to &struct parport
  980. *
  981. * Returns 1 if the Timeout bit is clear, and 0 otherwise.
  982. */
  983. static unsigned int parport_ip32_clear_epp_timeout(struct parport *p)
  984. {
  985. struct parport_ip32_private * const priv = p->physport->private_data;
  986. unsigned int cleared;
  987. if (!(parport_ip32_read_status(p) & DSR_TIMEOUT))
  988. cleared = 1;
  989. else {
  990. unsigned int r;
  991. /* To clear timeout some chips require double read */
  992. parport_ip32_read_status(p);
  993. r = parport_ip32_read_status(p);
  994. /* Some reset by writing 1 */
  995. writeb(r | DSR_TIMEOUT, priv->regs.dsr);
  996. /* Others by writing 0 */
  997. writeb(r & ~DSR_TIMEOUT, priv->regs.dsr);
  998. r = parport_ip32_read_status(p);
  999. cleared = !(r & DSR_TIMEOUT);
  1000. }
  1001. pr_trace(p, "(): %s", cleared ? "cleared" : "failed");
  1002. return cleared;
  1003. }
  1004. /**
  1005. * parport_ip32_epp_read - generic EPP read function
  1006. * @eppreg: I/O register to read from
  1007. * @p: pointer to &struct parport
  1008. * @buf: buffer to store read data
  1009. * @len: length of buffer @buf
  1010. * @flags: may be PARPORT_EPP_FAST
  1011. */
  1012. static size_t parport_ip32_epp_read(void __iomem *eppreg,
  1013. struct parport *p, void *buf,
  1014. size_t len, int flags)
  1015. {
  1016. struct parport_ip32_private * const priv = p->physport->private_data;
  1017. size_t got;
  1018. parport_ip32_set_mode(p, ECR_MODE_EPP);
  1019. parport_ip32_data_reverse(p);
  1020. parport_ip32_write_control(p, DCR_nINIT);
  1021. if ((flags & PARPORT_EPP_FAST) && (len > 1)) {
  1022. readsb(eppreg, buf, len);
  1023. if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
  1024. parport_ip32_clear_epp_timeout(p);
  1025. return -EIO;
  1026. }
  1027. got = len;
  1028. } else {
  1029. u8 *bufp = buf;
  1030. for (got = 0; got < len; got++) {
  1031. *bufp++ = readb(eppreg);
  1032. if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
  1033. parport_ip32_clear_epp_timeout(p);
  1034. break;
  1035. }
  1036. }
  1037. }
  1038. parport_ip32_data_forward(p);
  1039. parport_ip32_set_mode(p, ECR_MODE_PS2);
  1040. return got;
  1041. }
  1042. /**
  1043. * parport_ip32_epp_write - generic EPP write function
  1044. * @eppreg: I/O register to write to
  1045. * @p: pointer to &struct parport
  1046. * @buf: buffer of data to write
  1047. * @len: length of buffer @buf
  1048. * @flags: may be PARPORT_EPP_FAST
  1049. */
  1050. static size_t parport_ip32_epp_write(void __iomem *eppreg,
  1051. struct parport *p, const void *buf,
  1052. size_t len, int flags)
  1053. {
  1054. struct parport_ip32_private * const priv = p->physport->private_data;
  1055. size_t written;
  1056. parport_ip32_set_mode(p, ECR_MODE_EPP);
  1057. parport_ip32_data_forward(p);
  1058. parport_ip32_write_control(p, DCR_nINIT);
  1059. if ((flags & PARPORT_EPP_FAST) && (len > 1)) {
  1060. writesb(eppreg, buf, len);
  1061. if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
  1062. parport_ip32_clear_epp_timeout(p);
  1063. return -EIO;
  1064. }
  1065. written = len;
  1066. } else {
  1067. const u8 *bufp = buf;
  1068. for (written = 0; written < len; written++) {
  1069. writeb(*bufp++, eppreg);
  1070. if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
  1071. parport_ip32_clear_epp_timeout(p);
  1072. break;
  1073. }
  1074. }
  1075. }
  1076. parport_ip32_set_mode(p, ECR_MODE_PS2);
  1077. return written;
  1078. }
  1079. /**
  1080. * parport_ip32_epp_read_data - read a block of data in EPP mode
  1081. * @p: pointer to &struct parport
  1082. * @buf: buffer to store read data
  1083. * @len: length of buffer @buf
  1084. * @flags: may be PARPORT_EPP_FAST
  1085. */
  1086. static size_t parport_ip32_epp_read_data(struct parport *p, void *buf,
  1087. size_t len, int flags)
  1088. {
  1089. struct parport_ip32_private * const priv = p->physport->private_data;
  1090. return parport_ip32_epp_read(priv->regs.eppData0, p, buf, len, flags);
  1091. }
  1092. /**
  1093. * parport_ip32_epp_write_data - write a block of data in EPP mode
  1094. * @p: pointer to &struct parport
  1095. * @buf: buffer of data to write
  1096. * @len: length of buffer @buf
  1097. * @flags: may be PARPORT_EPP_FAST
  1098. */
  1099. static size_t parport_ip32_epp_write_data(struct parport *p, const void *buf,
  1100. size_t len, int flags)
  1101. {
  1102. struct parport_ip32_private * const priv = p->physport->private_data;
  1103. return parport_ip32_epp_write(priv->regs.eppData0, p, buf, len, flags);
  1104. }
  1105. /**
  1106. * parport_ip32_epp_read_addr - read a block of addresses in EPP mode
  1107. * @p: pointer to &struct parport
  1108. * @buf: buffer to store read data
  1109. * @len: length of buffer @buf
  1110. * @flags: may be PARPORT_EPP_FAST
  1111. */
  1112. static size_t parport_ip32_epp_read_addr(struct parport *p, void *buf,
  1113. size_t len, int flags)
  1114. {
  1115. struct parport_ip32_private * const priv = p->physport->private_data;
  1116. return parport_ip32_epp_read(priv->regs.eppAddr, p, buf, len, flags);
  1117. }
  1118. /**
  1119. * parport_ip32_epp_write_addr - write a block of addresses in EPP mode
  1120. * @p: pointer to &struct parport
  1121. * @buf: buffer of data to write
  1122. * @len: length of buffer @buf
  1123. * @flags: may be PARPORT_EPP_FAST
  1124. */
  1125. static size_t parport_ip32_epp_write_addr(struct parport *p, const void *buf,
  1126. size_t len, int flags)
  1127. {
  1128. struct parport_ip32_private * const priv = p->physport->private_data;
  1129. return parport_ip32_epp_write(priv->regs.eppAddr, p, buf, len, flags);
  1130. }
  1131. /*--- ECP mode functions (FIFO) ----------------------------------------*/
  1132. /**
  1133. * parport_ip32_fifo_wait_break - check if the waiting function should return
  1134. * @p: pointer to &struct parport
  1135. * @expire: timeout expiring date, in jiffies
  1136. *
  1137. * parport_ip32_fifo_wait_break() checks if the waiting function should return
  1138. * immediately or not. The break conditions are:
  1139. * - expired timeout;
  1140. * - a pending signal;
  1141. * - nFault asserted low.
  1142. * This function also calls cond_resched().
  1143. */
  1144. static unsigned int parport_ip32_fifo_wait_break(struct parport *p,
  1145. unsigned long expire)
  1146. {
  1147. cond_resched();
  1148. if (time_after(jiffies, expire)) {
  1149. pr_debug1(PPIP32 "%s: FIFO write timed out\n", p->name);
  1150. return 1;
  1151. }
  1152. if (signal_pending(current)) {
  1153. pr_debug1(PPIP32 "%s: Signal pending\n", p->name);
  1154. return 1;
  1155. }
  1156. if (!(parport_ip32_read_status(p) & DSR_nFAULT)) {
  1157. pr_debug1(PPIP32 "%s: nFault asserted low\n", p->name);
  1158. return 1;
  1159. }
  1160. return 0;
  1161. }
  1162. /**
  1163. * parport_ip32_fwp_wait_polling - wait for FIFO to empty (polling)
  1164. * @p: pointer to &struct parport
  1165. *
  1166. * Returns the number of bytes that can safely be written in the FIFO. A
  1167. * return value of zero means that the calling function should terminate as
  1168. * fast as possible.
  1169. */
  1170. static unsigned int parport_ip32_fwp_wait_polling(struct parport *p)
  1171. {
  1172. struct parport_ip32_private * const priv = p->physport->private_data;
  1173. struct parport * const physport = p->physport;
  1174. unsigned long expire;
  1175. unsigned int count;
  1176. unsigned int ecr;
  1177. expire = jiffies + physport->cad->timeout;
  1178. count = 0;
  1179. while (1) {
  1180. if (parport_ip32_fifo_wait_break(p, expire))
  1181. break;
  1182. /* Check FIFO state. We do nothing when the FIFO is neither full
  1183. * nor empty. It appears that the FIFO full bit is not always
  1184. * reliable: the FIFO state is sometimes wrongly reported, and
  1185. * the chip gets confused if we give it another byte. */
  1186. ecr = parport_ip32_read_econtrol(p);
  1187. if (ecr & ECR_F_EMPTY) {
  1188. /* FIFO is empty, fill it up */
  1189. count = priv->fifo_depth;
  1190. break;
  1191. }
  1192. /* Wait a moment... */
  1193. udelay(FIFO_POLLING_INTERVAL);
  1194. } /* while (1) */
  1195. return count;
  1196. }
  1197. /**
  1198. * parport_ip32_fwp_wait_interrupt - wait for FIFO to empty (interrupt-driven)
  1199. * @p: pointer to &struct parport
  1200. *
  1201. * Returns the number of bytes that can safely be written in the FIFO. A
  1202. * return value of zero means that the calling function should terminate as
  1203. * fast as possible.
  1204. */
  1205. static unsigned int parport_ip32_fwp_wait_interrupt(struct parport *p)
  1206. {
  1207. static unsigned int lost_interrupt = 0;
  1208. struct parport_ip32_private * const priv = p->physport->private_data;
  1209. struct parport * const physport = p->physport;
  1210. unsigned long nfault_timeout;
  1211. unsigned long expire;
  1212. unsigned int count;
  1213. unsigned int ecr;
  1214. nfault_timeout = min((unsigned long)physport->cad->timeout,
  1215. msecs_to_jiffies(FIFO_NFAULT_TIMEOUT));
  1216. expire = jiffies + physport->cad->timeout;
  1217. count = 0;
  1218. while (1) {
  1219. if (parport_ip32_fifo_wait_break(p, expire))
  1220. break;
  1221. /* Initialize the completion used to take interrupts into account */
  1222. INIT_COMPLETION(priv->irq_complete);
  1223. /* Enable serviceIntr */
  1224. parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
  1225. /* Enabling serviceIntr while the FIFO is empty does not
  1226. * always generate an interrupt, so check for emptiness
  1227. * now. */
  1228. ecr = parport_ip32_read_econtrol(p);
  1229. if (!(ecr & ECR_F_EMPTY)) {
  1230. /* FIFO is not empty: wait for an interrupt or a
  1231. * timeout to occur */
  1232. wait_for_completion_interruptible_timeout(
  1233. &priv->irq_complete, nfault_timeout);
  1234. ecr = parport_ip32_read_econtrol(p);
  1235. if ((ecr & ECR_F_EMPTY) && !(ecr & ECR_SERVINTR)
  1236. && !lost_interrupt) {
  1237. printk(KERN_WARNING PPIP32
  1238. "%s: lost interrupt in %s\n",
  1239. p->name, __func__);
  1240. lost_interrupt = 1;
  1241. }
  1242. }
  1243. /* Disable serviceIntr */
  1244. parport_ip32_frob_econtrol(p, ECR_SERVINTR, ECR_SERVINTR);
  1245. /* Check FIFO state */
  1246. if (ecr & ECR_F_EMPTY) {
  1247. /* FIFO is empty, fill it up */
  1248. count = priv->fifo_depth;
  1249. break;
  1250. } else if (ecr & ECR_SERVINTR) {
  1251. /* FIFO is not empty, but we know that we can safely push
  1252. * writeIntrThreshold bytes into it */
  1253. count = priv->writeIntrThreshold;
  1254. break;
  1255. }
  1256. /* FIFO is not empty, and we did not get any interrupt.
  1257. * Either it's time to check for nFault, or a signal is
  1258. * pending. This is verified in
  1259. * parport_ip32_fifo_wait_break(), so we continue the loop. */
  1260. } /* while (1) */
  1261. return count;
  1262. }
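/*
 * Note: parport_ip32_fwp_wait_polling() and parport_ip32_fwp_wait_interrupt()
 * share the same contract: both return the number of bytes that can safely be
 * pushed into the FIFO, or zero when the transfer should be aborted.
 * parport_ip32_fifo_write_block_pio() below selects one or the other
 * depending on whether an IRQ was successfully requested for the port
 * (p->irq).
 */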
/**
 * parport_ip32_fifo_write_block_pio - write a block of data (PIO mode)
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 *
 * Uses PIO to write the contents of the buffer @buf into the parallel port
 * FIFO. Returns the number of bytes that were actually written. It can work
 * with or without the help of interrupts. The parallel port must be
 * correctly initialized before calling parport_ip32_fifo_write_block_pio().
 */
static size_t parport_ip32_fifo_write_block_pio(struct parport *p,
						const void *buf, size_t len)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	const u8 *bufp = buf;
	size_t left = len;

	priv->irq_mode = PARPORT_IP32_IRQ_HERE;

	while (left > 0) {
		unsigned int count;

		count = (p->irq == PARPORT_IRQ_NONE) ?
			parport_ip32_fwp_wait_polling(p) :
			parport_ip32_fwp_wait_interrupt(p);
		if (count == 0)
			break;	/* Transmission should be stopped */
		if (count > left)
			count = left;
		if (count == 1) {
			writeb(*bufp, priv->regs.fifo);
			bufp++, left--;
		} else {
			writesb(priv->regs.fifo, bufp, count);
			bufp += count, left -= count;
		}
	}

	priv->irq_mode = PARPORT_IP32_IRQ_FWD;

	return len - left;
}
/**
 * parport_ip32_fifo_write_block_dma - write a block of data (DMA mode)
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 *
 * Uses DMA to write the contents of the buffer @buf into the parallel port
 * FIFO. Returns the number of bytes that were actually written. The
 * parallel port must be correctly initialized before calling
 * parport_ip32_fifo_write_block_dma().
 */
static size_t parport_ip32_fifo_write_block_dma(struct parport *p,
						const void *buf, size_t len)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	struct parport * const physport = p->physport;
	unsigned long nfault_timeout;
	unsigned long expire;
	size_t written;
	unsigned int ecr;

	priv->irq_mode = PARPORT_IP32_IRQ_HERE;

	parport_ip32_dma_start(DMA_TO_DEVICE, (void *)buf, len);
	INIT_COMPLETION(priv->irq_complete);
	parport_ip32_frob_econtrol(p, ECR_DMAEN | ECR_SERVINTR, ECR_DMAEN);

	nfault_timeout = min((unsigned long)physport->cad->timeout,
			     msecs_to_jiffies(FIFO_NFAULT_TIMEOUT));
	expire = jiffies + physport->cad->timeout;
	while (1) {
		if (parport_ip32_fifo_wait_break(p, expire))
			break;
		wait_for_completion_interruptible_timeout(&priv->irq_complete,
							  nfault_timeout);
		ecr = parport_ip32_read_econtrol(p);
		if (ecr & ECR_SERVINTR)
			break;	/* DMA transfer just finished */
	}
	parport_ip32_dma_stop();
	written = len - parport_ip32_dma_get_residue();

	priv->irq_mode = PARPORT_IP32_IRQ_FWD;

	return written;
}
/**
 * parport_ip32_fifo_write_block - write a block of data
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 *
 * Uses PIO or DMA to write the contents of the buffer @buf into the parallel
 * port FIFO. Returns the number of bytes that were actually written.
 */
static size_t parport_ip32_fifo_write_block(struct parport *p,
					    const void *buf, size_t len)
{
	size_t written = 0;

	if (len)
		/* FIXME - Maybe some threshold value should be set for @len
		 * under which we revert to PIO mode? */
		written = (p->modes & PARPORT_MODE_DMA) ?
			parport_ip32_fifo_write_block_dma(p, buf, len) :
			parport_ip32_fifo_write_block_pio(p, buf, len);

	return written;
}
/**
 * parport_ip32_drain_fifo - wait for FIFO to empty
 * @p: pointer to &struct parport
 * @timeout: timeout, in jiffies
 *
 * This function waits for the FIFO to empty. It returns 1 when the FIFO is
 * empty, or 0 if the timeout @timeout is reached first, or if a signal is
 * pending.
 */
static unsigned int parport_ip32_drain_fifo(struct parport *p,
					    unsigned long timeout)
{
	unsigned long expire = jiffies + timeout;
	unsigned int polling_interval;
	unsigned int counter;

	/* Busy wait for approx. 200us */
	for (counter = 0; counter < 40; counter++) {
		if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY)
			break;
		if (time_after(jiffies, expire))
			break;
		if (signal_pending(current))
			break;
		udelay(5);
	}
	/* Poll slowly.  The polling interval starts at 1 millisecond and is
	 * doubled until it reaches 128 ms. */
	polling_interval = 1;	/* msecs */
	while (!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY)) {
		if (time_after_eq(jiffies, expire))
			break;
		msleep_interruptible(polling_interval);
		if (signal_pending(current))
			break;
		if (polling_interval < 128)
			polling_interval *= 2;
	}

	return !!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY);
}
/**
 * parport_ip32_get_fifo_residue - reset FIFO
 * @p: pointer to &struct parport
 * @mode: current operation mode (ECR_MODE_PPF or ECR_MODE_ECP)
 *
 * This function resets the FIFO, and returns the number of bytes remaining
 * in it.
 */
static unsigned int parport_ip32_get_fifo_residue(struct parport *p,
						  unsigned int mode)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	unsigned int residue;
	unsigned int cnfga;

	/* FIXME - We are missing one byte if the printer is off-line.  I
	 * don't know how to detect this.  It looks like the full bit is not
	 * always reliable.  For the moment, the problem is avoided in most
	 * cases by testing for BUSY in parport_ip32_compat_write_data().
	 */
	if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY)
		residue = 0;
	else {
		pr_debug1(PPIP32 "%s: FIFO is stuck\n", p->name);

		/* Stop all transfers.
		 *
		 * Microsoft's document says to drive DCR_STROBE to 0, but it
		 * doesn't work (at least in Compatibility mode, not tested in
		 * ECP mode).  Switching directly to Test mode (as in
		 * parport_pc) is not an option: it does confuse the port, and
		 * ECP service interrupts no longer work after that.  A hard
		 * reset is then needed to revert to a sane state.
		 *
		 * Let's hope that the FIFO is really stuck and that the
		 * peripheral doesn't wake up now.
		 */
		parport_ip32_frob_control(p, DCR_STROBE, 0);

		/* Fill up FIFO */
		for (residue = priv->fifo_depth; residue > 0; residue--) {
			if (parport_ip32_read_econtrol(p) & ECR_F_FULL)
				break;
			writeb(0x00, priv->regs.fifo);
		}
	}
	if (residue)
		pr_debug1(PPIP32 "%s: %d PWord%s left in FIFO\n",
			  p->name, residue,
			  (residue == 1) ? " was" : "s were");

	/* Now reset the FIFO */
	parport_ip32_set_mode(p, ECR_MODE_PS2);

	/* Host recovery for ECP mode */
	if (mode == ECR_MODE_ECP) {
		parport_ip32_data_reverse(p);
		parport_ip32_frob_control(p, DCR_nINIT, 0);
		if (parport_wait_peripheral(p, DSR_PERROR, 0))
			pr_debug1(PPIP32 "%s: PError timeout 1 in %s\n",
				  p->name, __func__);
		parport_ip32_frob_control(p, DCR_STROBE, DCR_STROBE);
		parport_ip32_frob_control(p, DCR_nINIT, DCR_nINIT);
		if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR))
			pr_debug1(PPIP32 "%s: PError timeout 2 in %s\n",
				  p->name, __func__);
	}

	/* Adjust residue if needed */
	parport_ip32_set_mode(p, ECR_MODE_CFG);
	cnfga = readb(priv->regs.cnfgA);
	if (!(cnfga & CNFGA_nBYTEINTRANS)) {
		pr_debug1(PPIP32 "%s: cnfgA contains 0x%02x\n",
			  p->name, cnfga);
		pr_debug1(PPIP32 "%s: Accounting for extra byte\n",
			  p->name);
		residue++;
	}

	/* Don't care about partial PWords since we do not support
	 * PWord != 1 byte. */

	/* Back to forward PS2 mode. */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	parport_ip32_data_forward(p);

	return residue;
}
/**
 * parport_ip32_compat_write_data - write a block of data in SPP mode
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 * @flags: ignored
 */
static size_t parport_ip32_compat_write_data(struct parport *p,
					     const void *buf, size_t len,
					     int flags)
{
	static unsigned int ready_before = 1;
	struct parport_ip32_private * const priv = p->physport->private_data;
	struct parport * const physport = p->physport;
	size_t written = 0;

	/* Special case: a timeout of zero means we cannot call schedule().
	 * Also if O_NONBLOCK is set then use the default implementation. */
	if (physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
		return parport_ieee1284_write_compat(p, buf, len, flags);

	/* Reset FIFO, go in forward mode, and disable ackIntEn */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
	parport_ip32_data_forward(p);
	parport_ip32_disable_irq(p);
	parport_ip32_set_mode(p, ECR_MODE_PPF);
	physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;

	/* Wait for peripheral to become ready */
	if (parport_wait_peripheral(p, DSR_nBUSY | DSR_nFAULT,
				    DSR_nBUSY | DSR_nFAULT)) {
		/* Avoid flooding the logs */
		if (ready_before)
			printk(KERN_INFO PPIP32 "%s: not ready in %s\n",
			       p->name, __func__);
		ready_before = 0;
		goto stop;
	}
	ready_before = 1;

	written = parport_ip32_fifo_write_block(p, buf, len);

	/* Wait for the FIFO to empty.  Timeout is proportional to
	 * FIFO_depth. */
	parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth);

	/* Check for a potential residue */
	written -= parport_ip32_get_fifo_residue(p, ECR_MODE_PPF);

	/* Then, wait for BUSY to get low. */
	if (parport_wait_peripheral(p, DSR_nBUSY, DSR_nBUSY))
		printk(KERN_DEBUG PPIP32 "%s: BUSY timeout in %s\n",
		       p->name, __func__);

stop:
	/* Reset FIFO */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;

	return written;
}
/*
 * FIXME - Insert here parport_ip32_ecp_read_data().
 */

/**
 * parport_ip32_ecp_write_data - write a block of data in ECP mode
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 * @flags: ignored
 */
static size_t parport_ip32_ecp_write_data(struct parport *p,
					  const void *buf, size_t len,
					  int flags)
{
	static unsigned int ready_before = 1;
	struct parport_ip32_private * const priv = p->physport->private_data;
	struct parport * const physport = p->physport;
	size_t written = 0;

	/* Special case: a timeout of zero means we cannot call schedule().
	 * Also if O_NONBLOCK is set then use the default implementation. */
	if (physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
		return parport_ieee1284_ecp_write_data(p, buf, len, flags);

	/* Negotiate to forward mode if necessary. */
	if (physport->ieee1284.phase != IEEE1284_PH_FWD_IDLE) {
		/* Event 47: Set nInit high. */
		parport_ip32_frob_control(p, DCR_nINIT | DCR_AUTOFD,
					  DCR_nINIT | DCR_AUTOFD);

		/* Event 49: PError goes high. */
		if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR)) {
			printk(KERN_DEBUG PPIP32 "%s: PError timeout in %s\n",
			       p->name, __func__);
			physport->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
			return 0;
		}
	}

	/* Reset FIFO, go in forward mode, and disable ackIntEn */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
	parport_ip32_data_forward(p);
	parport_ip32_disable_irq(p);
	parport_ip32_set_mode(p, ECR_MODE_ECP);
	physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;

	/* Wait for peripheral to become ready */
	if (parport_wait_peripheral(p, DSR_nBUSY | DSR_nFAULT,
				    DSR_nBUSY | DSR_nFAULT)) {
		/* Avoid flooding the logs */
		if (ready_before)
			printk(KERN_INFO PPIP32 "%s: not ready in %s\n",
			       p->name, __func__);
		ready_before = 0;
		goto stop;
	}
	ready_before = 1;

	written = parport_ip32_fifo_write_block(p, buf, len);

	/* Wait for the FIFO to empty.  Timeout is proportional to
	 * FIFO_depth. */
	parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth);

	/* Check for a potential residue */
	written -= parport_ip32_get_fifo_residue(p, ECR_MODE_ECP);

	/* Then, wait for BUSY to get low. */
	if (parport_wait_peripheral(p, DSR_nBUSY, DSR_nBUSY))
		printk(KERN_DEBUG PPIP32 "%s: BUSY timeout in %s\n",
		       p->name, __func__);

stop:
	/* Reset FIFO */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;

	return written;
}
/*
 * FIXME - Insert here parport_ip32_ecp_write_addr().
 */

/*--- Default parport operations ---------------------------------------*/

static __initdata struct parport_operations parport_ip32_ops = {
	.write_data = parport_ip32_write_data,
	.read_data = parport_ip32_read_data,

	.write_control = parport_ip32_write_control,
	.read_control = parport_ip32_read_control,
	.frob_control = parport_ip32_frob_control,

	.read_status = parport_ip32_read_status,

	.enable_irq = parport_ip32_enable_irq,
	.disable_irq = parport_ip32_disable_irq,

	.data_forward = parport_ip32_data_forward,
	.data_reverse = parport_ip32_data_reverse,

	.init_state = parport_ip32_init_state,
	.save_state = parport_ip32_save_state,
	.restore_state = parport_ip32_restore_state,

	.epp_write_data = parport_ieee1284_epp_write_data,
	.epp_read_data = parport_ieee1284_epp_read_data,
	.epp_write_addr = parport_ieee1284_epp_write_addr,
	.epp_read_addr = parport_ieee1284_epp_read_addr,

	.ecp_write_data = parport_ieee1284_ecp_write_data,
	.ecp_read_data = parport_ieee1284_ecp_read_data,
	.ecp_write_addr = parport_ieee1284_ecp_write_addr,

	.compat_write_data = parport_ieee1284_write_compat,
	.nibble_read_data = parport_ieee1284_read_nibble,
	.byte_read_data = parport_ieee1284_read_byte,

	.owner = THIS_MODULE,
};
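/*
 * Note: the table above starts out with the generic, software-emulated
 * IEEE 1284 transfer routines for every mode.  parport_ip32_probe_port()
 * later replaces individual entries (compat_write_data, epp_*,
 * ecp_write_data) with the hardware-assisted implementations defined above,
 * but only for the modes enabled in the "features" module parameter and
 * actually supported by the chip.
 */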
/*--- Device detection -------------------------------------------------*/

/**
 * parport_ip32_ecp_supported - check for an ECP port
 * @p: pointer to the &parport structure
 *
 * Returns 1 if an ECP port is found, and 0 otherwise. This function actually
 * checks if an Extended Control Register seems to be present. On successful
 * return, the port is placed in SPP mode.
 */
static __init unsigned int parport_ip32_ecp_supported(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	unsigned int ecr;

	ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
	writeb(ecr, priv->regs.ecr);
	if (readb(priv->regs.ecr) != (ecr | ECR_F_EMPTY))
		goto fail;

	pr_probe(p, "Found working ECR register\n");
	parport_ip32_set_mode(p, ECR_MODE_SPP);
	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
	return 1;

fail:
	pr_probe(p, "ECR register not found\n");
	return 0;
}
/**
 * parport_ip32_fifo_supported - check for FIFO parameters
 * @p: pointer to the &parport structure
 *
 * Check for FIFO parameters of an Extended Capabilities Port. Returns 1 on
 * success, and 0 otherwise. Adjust FIFO parameters in the parport structure.
 * On return, the port is placed in SPP mode.
 */
static __init unsigned int parport_ip32_fifo_supported(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	unsigned int configa, configb;
	unsigned int pword;
	unsigned int i;

	/* Configuration mode */
	parport_ip32_set_mode(p, ECR_MODE_CFG);
	configa = readb(priv->regs.cnfgA);
	configb = readb(priv->regs.cnfgB);

	/* Find out PWord size */
	switch (configa & CNFGA_ID_MASK) {
	case CNFGA_ID_8:
		pword = 1;
		break;
	case CNFGA_ID_16:
		pword = 2;
		break;
	case CNFGA_ID_32:
		pword = 4;
		break;
	default:
		pr_probe(p, "Unknown implementation ID: 0x%0x\n",
			 (configa & CNFGA_ID_MASK) >> CNFGA_ID_SHIFT);
		goto fail;
		break;
	}
	if (pword != 1) {
		pr_probe(p, "Unsupported PWord size: %u\n", pword);
		goto fail;
	}
	priv->pword = pword;
	pr_probe(p, "PWord is %u bits\n", 8 * priv->pword);

	/* Check for compression support */
	writeb(configb | CNFGB_COMPRESS, priv->regs.cnfgB);
	if (readb(priv->regs.cnfgB) & CNFGB_COMPRESS)
		pr_probe(p, "Hardware compression detected (unsupported)\n");
	writeb(configb & ~CNFGB_COMPRESS, priv->regs.cnfgB);

	/* Reset FIFO and go in test mode (no interrupt, no DMA) */
	parport_ip32_set_mode(p, ECR_MODE_TST);

	/* FIFO must be empty now */
	if (!(readb(priv->regs.ecr) & ECR_F_EMPTY)) {
		pr_probe(p, "FIFO not reset\n");
		goto fail;
	}

	/* Find out FIFO depth. */
	priv->fifo_depth = 0;
	for (i = 0; i < 1024; i++) {
		if (readb(priv->regs.ecr) & ECR_F_FULL) {
			/* FIFO full */
			priv->fifo_depth = i;
			break;
		}
		writeb((u8)i, priv->regs.fifo);
	}
	if (i >= 1024) {
		pr_probe(p, "Can't fill FIFO\n");
		goto fail;
	}
	if (!priv->fifo_depth) {
		pr_probe(p, "Can't get FIFO depth\n");
		goto fail;
	}
	pr_probe(p, "FIFO is %u PWords deep\n", priv->fifo_depth);

	/* Enable interrupts */
	parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);

	/* Find out writeIntrThreshold: number of PWords we know we can write
	 * if we get an interrupt. */
	priv->writeIntrThreshold = 0;
	for (i = 0; i < priv->fifo_depth; i++) {
		if (readb(priv->regs.fifo) != (u8)i) {
			pr_probe(p, "Invalid data in FIFO\n");
			goto fail;
		}
		if (!priv->writeIntrThreshold
		    && readb(priv->regs.ecr) & ECR_SERVINTR)
			/* writeIntrThreshold reached */
			priv->writeIntrThreshold = i + 1;
		if (i + 1 < priv->fifo_depth
		    && readb(priv->regs.ecr) & ECR_F_EMPTY) {
			/* FIFO empty before the last byte? */
			pr_probe(p, "Data lost in FIFO\n");
			goto fail;
		}
	}
	if (!priv->writeIntrThreshold) {
		pr_probe(p, "Can't get writeIntrThreshold\n");
		goto fail;
	}
	pr_probe(p, "writeIntrThreshold is %u\n", priv->writeIntrThreshold);

	/* FIFO must be empty now */
	if (!(readb(priv->regs.ecr) & ECR_F_EMPTY)) {
		pr_probe(p, "Can't empty FIFO\n");
		goto fail;
	}

	/* Reset FIFO */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	/* Set reverse direction (must be in PS2 mode) */
	parport_ip32_data_reverse(p);
	/* Test FIFO, no interrupt, no DMA */
	parport_ip32_set_mode(p, ECR_MODE_TST);
	/* Enable interrupts */
	parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);

	/* Find out readIntrThreshold: number of PWords we can read if we get
	 * an interrupt. */
	priv->readIntrThreshold = 0;
	for (i = 0; i < priv->fifo_depth; i++) {
		writeb(0xaa, priv->regs.fifo);
		if (readb(priv->regs.ecr) & ECR_SERVINTR) {
			/* readIntrThreshold reached */
			priv->readIntrThreshold = i + 1;
			break;
		}
	}
	if (!priv->readIntrThreshold) {
		pr_probe(p, "Can't get readIntrThreshold\n");
		goto fail;
	}
	pr_probe(p, "readIntrThreshold is %u\n", priv->readIntrThreshold);

	/* Reset ECR */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	parport_ip32_data_forward(p);
	parport_ip32_set_mode(p, ECR_MODE_SPP);
	return 1;

fail:
	priv->fifo_depth = 0;
	parport_ip32_set_mode(p, ECR_MODE_SPP);
	return 0;
}
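/*
 * Note: on success, parport_ip32_fifo_supported() leaves priv->pword,
 * priv->fifo_depth, priv->writeIntrThreshold and priv->readIntrThreshold
 * initialized; these values drive the FIFO write paths implemented earlier
 * in this file.  On failure, priv->fifo_depth is reset to zero and the
 * caller, parport_ip32_probe_port(), disables the SPP, ECP and DMA features.
 */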
/*--- Initialization code ----------------------------------------------*/

/**
 * parport_ip32_make_isa_registers - compute (ISA) register addresses
 * @regs: pointer to &struct parport_ip32_regs to fill
 * @base: base address of standard and EPP registers
 * @base_hi: base address of ECP registers
 * @regshift: how much to shift register offset by
 *
 * Compute register addresses, according to the ISA standard. The addresses
 * of the standard and EPP registers are computed from address @base. The
 * addresses of the ECP registers are computed from address @base_hi.
 */
static void __init
parport_ip32_make_isa_registers(struct parport_ip32_regs *regs,
				void __iomem *base, void __iomem *base_hi,
				unsigned int regshift)
{
#define r_base(offset)    ((u8 __iomem *)base + ((offset) << regshift))
#define r_base_hi(offset) ((u8 __iomem *)base_hi + ((offset) << regshift))
	*regs = (struct parport_ip32_regs){
		.data = r_base(0),
		.dsr = r_base(1),
		.dcr = r_base(2),
		.eppAddr = r_base(3),
		.eppData0 = r_base(4),
		.eppData1 = r_base(5),
		.eppData2 = r_base(6),
		.eppData3 = r_base(7),
		.ecpAFifo = r_base(0),
		.fifo = r_base_hi(0),
		.cnfgA = r_base_hi(0),
		.cnfgB = r_base_hi(1),
		.ecr = r_base_hi(2)
	};
#undef r_base_hi
#undef r_base
}
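/*
 * For instance, with regshift == 8 (the value passed in by
 * parport_ip32_probe_port() below), consecutive registers end up 256 bytes
 * apart: r_base(2) yields base + 0x200 for the DCR, and r_base_hi(2) yields
 * base_hi + 0x200 for the ECR.
 */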
/**
 * parport_ip32_probe_port - probe and register IP32 built-in parallel port
 *
 * Returns the newly allocated &parport structure. On error, an error code
 * is encoded in the return value with the ERR_PTR function.
 */
static __init struct parport *parport_ip32_probe_port(void)
{
	struct parport_ip32_regs regs;
	struct parport_ip32_private *priv = NULL;
	struct parport_operations *ops = NULL;
	struct parport *p = NULL;
	int err;

	parport_ip32_make_isa_registers(&regs, &mace->isa.parallel,
					&mace->isa.ecp1284, 8 /* regshift */);

	ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
	priv = kmalloc(sizeof(struct parport_ip32_private), GFP_KERNEL);
	p = parport_register_port(0, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, ops);
	if (ops == NULL || priv == NULL || p == NULL) {
		err = -ENOMEM;
		goto fail;
	}
	p->base = MACE_BASE + offsetof(struct sgi_mace, isa.parallel);
	p->base_hi = MACE_BASE + offsetof(struct sgi_mace, isa.ecp1284);
	p->private_data = priv;

	*ops = parport_ip32_ops;
	*priv = (struct parport_ip32_private){
		.regs = regs,
		.dcr_writable = DCR_DIR | DCR_SELECT | DCR_nINIT |
				DCR_AUTOFD | DCR_STROBE,
		.irq_mode = PARPORT_IP32_IRQ_FWD,
	};
	init_completion(&priv->irq_complete);

	/* Probe port. */
	if (!parport_ip32_ecp_supported(p)) {
		err = -ENODEV;
		goto fail;
	}
	parport_ip32_dump_state(p, "begin init", 0);

	/* We found what looks like a working ECR register.  Simply assume
	 * that all modes are correctly supported.  Enable basic modes. */
	p->modes = PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT;
	p->modes |= PARPORT_MODE_TRISTATE;

	if (!parport_ip32_fifo_supported(p)) {
		printk(KERN_WARNING PPIP32
		       "%s: error: FIFO disabled\n", p->name);
		/* Disable hardware modes depending on a working FIFO. */
		features &= ~PARPORT_IP32_ENABLE_SPP;
		features &= ~PARPORT_IP32_ENABLE_ECP;
		/* DMA is not needed if FIFO is not supported. */
		features &= ~PARPORT_IP32_ENABLE_DMA;
	}

	/* Request IRQ */
	if (features & PARPORT_IP32_ENABLE_IRQ) {
		int irq = MACEISA_PARALLEL_IRQ;
		if (request_irq(irq, parport_ip32_interrupt, 0, p->name, p)) {
			printk(KERN_WARNING PPIP32
			       "%s: error: IRQ disabled\n", p->name);
			/* DMA cannot work without interrupts. */
			features &= ~PARPORT_IP32_ENABLE_DMA;
		} else {
			pr_probe(p, "Interrupt support enabled\n");
			p->irq = irq;
			priv->dcr_writable |= DCR_IRQ;
		}
	}
	/* Allocate DMA resources */
	if (features & PARPORT_IP32_ENABLE_DMA) {
		if (parport_ip32_dma_register())
			printk(KERN_WARNING PPIP32
			       "%s: error: DMA disabled\n", p->name);
		else {
			pr_probe(p, "DMA support enabled\n");
			p->dma = 0; /* arbitrary value != PARPORT_DMA_NONE */
			p->modes |= PARPORT_MODE_DMA;
		}
	}

	if (features & PARPORT_IP32_ENABLE_SPP) {
		/* Enable compatibility FIFO mode */
		p->ops->compat_write_data = parport_ip32_compat_write_data;
		p->modes |= PARPORT_MODE_COMPAT;
		pr_probe(p, "Hardware support for SPP mode enabled\n");
	}
	if (features & PARPORT_IP32_ENABLE_EPP) {
		/* Set up access functions to use EPP hardware. */
		p->ops->epp_read_data = parport_ip32_epp_read_data;
		p->ops->epp_write_data = parport_ip32_epp_write_data;
		p->ops->epp_read_addr = parport_ip32_epp_read_addr;
		p->ops->epp_write_addr = parport_ip32_epp_write_addr;
		p->modes |= PARPORT_MODE_EPP;
		pr_probe(p, "Hardware support for EPP mode enabled\n");
	}
	if (features & PARPORT_IP32_ENABLE_ECP) {
		/* Enable ECP FIFO mode */
		p->ops->ecp_write_data = parport_ip32_ecp_write_data;
		/* FIXME - not implemented */
		/* p->ops->ecp_read_data = parport_ip32_ecp_read_data; */
		/* p->ops->ecp_write_addr = parport_ip32_ecp_write_addr; */
		p->modes |= PARPORT_MODE_ECP;
		pr_probe(p, "Hardware support for ECP mode enabled\n");
	}

	/* Initialize the port with sensible values */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
	parport_ip32_data_forward(p);
	parport_ip32_disable_irq(p);
	parport_ip32_write_data(p, 0x00);
	parport_ip32_dump_state(p, "end init", 0);

	/* Print out what we found */
	printk(KERN_INFO "%s: SGI IP32 at 0x%lx (0x%lx)",
	       p->name, p->base, p->base_hi);
	if (p->irq != PARPORT_IRQ_NONE)
		printk(", irq %d", p->irq);
	printk(" [");
#define printmode(x)	if (p->modes & PARPORT_MODE_##x)	\
				printk("%s%s", f++ ? "," : "", #x)
	{
		unsigned int f = 0;
		printmode(PCSPP);
		printmode(TRISTATE);
		printmode(COMPAT);
		printmode(EPP);
		printmode(ECP);
		printmode(DMA);
	}
#undef printmode
	printk("]\n");

	parport_announce_port(p);
	return p;

fail:
	if (p)
		parport_put_port(p);
	kfree(priv);
	kfree(ops);
	return ERR_PTR(err);
}
/**
 * parport_ip32_unregister_port - unregister a parallel port
 * @p: pointer to the &struct parport
 *
 * Unregisters a parallel port and frees the previously allocated resources
 * (memory, IRQ, ...).
 */
static __exit void parport_ip32_unregister_port(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	struct parport_operations *ops = p->ops;

	parport_remove_port(p);
	if (p->modes & PARPORT_MODE_DMA)
		parport_ip32_dma_unregister();
	if (p->irq != PARPORT_IRQ_NONE)
		free_irq(p->irq, p);
	parport_put_port(p);
	kfree(priv);
	kfree(ops);
}
/**
 * parport_ip32_init - module initialization function
 */
static int __init parport_ip32_init(void)
{
	pr_info(PPIP32 "SGI IP32 built-in parallel port driver v0.6\n");
	this_port = parport_ip32_probe_port();
	return IS_ERR(this_port) ? PTR_ERR(this_port) : 0;
}

/**
 * parport_ip32_exit - module termination function
 */
static void __exit parport_ip32_exit(void)
{
	parport_ip32_unregister_port(this_port);
}

/*--- Module stuff -----------------------------------------------------*/

MODULE_AUTHOR("Arnaud Giersch <arnaud.giersch@free.fr>");
MODULE_DESCRIPTION("SGI IP32 built-in parallel port driver");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.6");	/* update in parport_ip32_init() too */

module_init(parport_ip32_init);
module_exit(parport_ip32_exit);

module_param(verbose_probing, bool, S_IRUGO);
MODULE_PARM_DESC(verbose_probing, "Log chit-chat during initialization");

module_param(features, uint, S_IRUGO);
MODULE_PARM_DESC(features,
		 "Bit mask of features to enable"
		 ", bit 0: IRQ support"
		 ", bit 1: DMA support"
		 ", bit 2: hardware SPP mode"
		 ", bit 3: hardware EPP mode"
		 ", bit 4: hardware ECP mode");
/*--- Inform (X)Emacs about preferred coding style ---------------------*/

/*
 * Local Variables:
 * mode: c
 * c-file-style: "linux"
 * indent-tabs-mode: t
 * tab-width: 8
 * fill-column: 78
 * ispell-local-dictionary: "american"
 * End:
 */