seq_clientmgr.c

/*
 *  ALSA sequencer Client Manager
 *  Copyright (c) 1998-2001 by Frank van de Pol <fvdpol@coil.demon.nl>
 *                             Jaroslav Kysela <perex@perex.cz>
 *                             Takashi Iwai <tiwai@suse.de>
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/minors.h>
#include <linux/kmod.h>

#include <sound/seq_kernel.h>
#include "seq_clientmgr.h"
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_timer.h"
#include "seq_info.h"
#include "seq_system.h"
#include <sound/seq_device.h>
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif

/* Client Manager
 * this module handles the connections of userland and kernel clients
 *
 */

/*
 * There are four ranges of client numbers (last two shared):
 * 0..15: global clients
 * 16..127: statically allocated client numbers for cards 0..27
 * 128..191: dynamically allocated client numbers for cards 28..31
 * 128..191: dynamically allocated client numbers for applications
 */

/* number of kernel non-card clients */
#define SNDRV_SEQ_GLOBAL_CLIENTS	16
/* clients per cards, for static clients */
#define SNDRV_SEQ_CLIENTS_PER_CARD	4
/* dynamically allocated client numbers (both kernel drivers and user space) */
#define SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN	128

#define SNDRV_SEQ_LFLG_INPUT	0x0001
#define SNDRV_SEQ_LFLG_OUTPUT	0x0002
#define SNDRV_SEQ_LFLG_OPEN	(SNDRV_SEQ_LFLG_INPUT|SNDRV_SEQ_LFLG_OUTPUT)

static DEFINE_SPINLOCK(clients_lock);
static DEFINE_MUTEX(register_mutex);

/*
 * client table
 */
static char clienttablock[SNDRV_SEQ_MAX_CLIENTS];
static struct snd_seq_client *clienttab[SNDRV_SEQ_MAX_CLIENTS];
static struct snd_seq_usage client_usage;

/*
 * prototypes
 */
static int bounce_error_event(struct snd_seq_client *client,
			      struct snd_seq_event *event,
			      int err, int atomic, int hop);
static int snd_seq_deliver_single_event(struct snd_seq_client *client,
					struct snd_seq_event *event,
					int filter, int atomic, int hop);

/*
 */

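/*
 * snd_enter_user()/snd_leave_user(): temporarily widen the address limit
 * via set_fs(get_ds()) so that kernel buffers can be handed to helpers
 * expecting __user pointers, and restore the saved limit afterwards.
 */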
static inline mm_segment_t snd_enter_user(void)
{
	mm_segment_t fs = get_fs();
	set_fs(get_ds());
	return fs;
}

static inline void snd_leave_user(mm_segment_t fs)
{
	set_fs(fs);
}

/*
 */
static inline unsigned short snd_seq_file_flags(struct file *file)
{
	switch (file->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_WRITE:
		return SNDRV_SEQ_LFLG_OUTPUT;
	case FMODE_READ:
		return SNDRV_SEQ_LFLG_INPUT;
	default:
		return SNDRV_SEQ_LFLG_OPEN;
	}
}

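/* return non-zero once the client's output (write) pool has cells allocated */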
static inline int snd_seq_write_pool_allocated(struct snd_seq_client *client)
{
	return snd_seq_total_cells(client->pool) > 0;
}

/* return pointer to client structure for specified id */
static struct snd_seq_client *clientptr(int clientid)
{
	if (clientid < 0 || clientid >= SNDRV_SEQ_MAX_CLIENTS) {
		snd_printd("Seq: oops. Trying to get pointer to client %d\n",
			   clientid);
		return NULL;
	}
	return clienttab[clientid];
}

struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
{
	unsigned long flags;
	struct snd_seq_client *client;
	if (clientid < 0 || clientid >= SNDRV_SEQ_MAX_CLIENTS) {
		snd_printd("Seq: oops. Trying to get pointer to client %d\n",
			   clientid);
		return NULL;
	}
	spin_lock_irqsave(&clients_lock, flags);
	client = clientptr(clientid);
	if (client)
		goto __lock;
	if (clienttablock[clientid]) {
		spin_unlock_irqrestore(&clients_lock, flags);
		return NULL;
	}
	spin_unlock_irqrestore(&clients_lock, flags);
#ifdef CONFIG_MODULES
	if (!in_interrupt()) {
		static char client_requested[SNDRV_SEQ_GLOBAL_CLIENTS];
		static char card_requested[SNDRV_CARDS];
		if (clientid < SNDRV_SEQ_GLOBAL_CLIENTS) {
			int idx;
			if (!client_requested[clientid]) {
				client_requested[clientid] = 1;
				for (idx = 0; idx < 15; idx++) {
					if (seq_client_load[idx] < 0)
						break;
					if (seq_client_load[idx] == clientid) {
						request_module("snd-seq-client-%i",
							       clientid);
						break;
					}
				}
			}
		} else if (clientid < SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN) {
			int card = (clientid - SNDRV_SEQ_GLOBAL_CLIENTS) /
				SNDRV_SEQ_CLIENTS_PER_CARD;
			if (card < snd_ecards_limit) {
				if (! card_requested[card]) {
					card_requested[card] = 1;
					snd_request_card(card);
				}
				snd_seq_device_load_drivers();
			}
		}
		spin_lock_irqsave(&clients_lock, flags);
		client = clientptr(clientid);
		if (client)
			goto __lock;
		spin_unlock_irqrestore(&clients_lock, flags);
	}
#endif
	return NULL;

__lock:
	snd_use_lock_use(&client->use_lock);
	spin_unlock_irqrestore(&clients_lock, flags);
	return client;
}

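/* usage statistics: bump/drop the current client count and track the peak value */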
static void usage_alloc(struct snd_seq_usage *res, int num)
{
	res->cur += num;
	if (res->cur > res->peak)
		res->peak = res->cur;
}

static void usage_free(struct snd_seq_usage *res, int num)
{
	res->cur -= num;
}

/* initialise data structures */
int __init client_init_data(void)
{
	/* zap out the client table */
	memset(&clienttablock, 0, sizeof(clienttablock));
	memset(&clienttab, 0, sizeof(clienttab));
	return 0;
}

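/*
 * create a new client record and hook it into the client table;
 * a negative client_index requests a dynamic slot (starting at
 * SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN), otherwise the given index is
 * claimed if it is still free and not locked.
 */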
static struct snd_seq_client *seq_create_client1(int client_index, int poolsize)
{
	unsigned long flags;
	int c;
	struct snd_seq_client *client;
	/* init client data */
	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL)
		return NULL;
	client->pool = snd_seq_pool_new(poolsize);
	if (client->pool == NULL) {
		kfree(client);
		return NULL;
	}
	client->type = NO_CLIENT;
	snd_use_lock_init(&client->use_lock);
	rwlock_init(&client->ports_lock);
	mutex_init(&client->ports_mutex);
	INIT_LIST_HEAD(&client->ports_list_head);
	/* find free slot in the client table */
	spin_lock_irqsave(&clients_lock, flags);
	if (client_index < 0) {
		for (c = SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN;
		     c < SNDRV_SEQ_MAX_CLIENTS;
		     c++) {
			if (clienttab[c] || clienttablock[c])
				continue;
			clienttab[client->number = c] = client;
			spin_unlock_irqrestore(&clients_lock, flags);
			return client;
		}
	} else {
		if (clienttab[client_index] == NULL && !clienttablock[client_index]) {
			clienttab[client->number = client_index] = client;
			spin_unlock_irqrestore(&clients_lock, flags);
			return client;
		}
	}
	spin_unlock_irqrestore(&clients_lock, flags);
	snd_seq_pool_delete(&client->pool);
	kfree(client);
	return NULL;	/* no free slot found or busy, return failure code */
}

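/*
 * tear down a client: delete its ports, detach it from queues, mark the
 * table slot busy while waiting for pending users (use_lock) to drain,
 * then release the pool and finally clear the slot lock again.
 */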
static int seq_free_client1(struct snd_seq_client *client)
{
	unsigned long flags;
	if (!client)
		return 0;
	snd_seq_delete_all_ports(client);
	snd_seq_queue_client_leave(client->number);
	spin_lock_irqsave(&clients_lock, flags);
	clienttablock[client->number] = 1;
	clienttab[client->number] = NULL;
	spin_unlock_irqrestore(&clients_lock, flags);
	snd_use_lock_sync(&client->use_lock);
	snd_seq_queue_client_termination(client->number);
	if (client->pool)
		snd_seq_pool_delete(&client->pool);
	spin_lock_irqsave(&clients_lock, flags);
	clienttablock[client->number] = 0;
	spin_unlock_irqrestore(&clients_lock, flags);
	return 0;
}

static void seq_free_client(struct snd_seq_client * client)
{
	mutex_lock(&register_mutex);
	switch (client->type) {
	case NO_CLIENT:
		snd_printk(KERN_WARNING "Seq: Trying to free unused client %d\n",
			   client->number);
		break;
	case USER_CLIENT:
	case KERNEL_CLIENT:
		seq_free_client1(client);
		usage_free(&client_usage, 1);
		break;
	default:
		snd_printk(KERN_ERR "Seq: Trying to free client %d with undefined type = %d\n",
			   client->number, client->type);
	}
	mutex_unlock(&register_mutex);
	snd_seq_system_client_ev_client_exit(client->number);
}

/* -------------------------------------------------------- */

/* create a user client */
static int snd_seq_open(struct inode *inode, struct file *file)
{
	int c, mode;			/* client id */
	struct snd_seq_client *client;
	struct snd_seq_user_client *user;
	int err;
	err = nonseekable_open(inode, file);
	if (err < 0)
		return err;
	if (mutex_lock_interruptible(&register_mutex))
		return -ERESTARTSYS;
	client = seq_create_client1(-1, SNDRV_SEQ_DEFAULT_EVENTS);
	if (client == NULL) {
		mutex_unlock(&register_mutex);
		return -ENOMEM;	/* failure code */
	}
	mode = snd_seq_file_flags(file);
	if (mode & SNDRV_SEQ_LFLG_INPUT)
		client->accept_input = 1;
	if (mode & SNDRV_SEQ_LFLG_OUTPUT)
		client->accept_output = 1;
	user = &client->data.user;
	user->fifo = NULL;
	user->fifo_pool_size = 0;
	if (mode & SNDRV_SEQ_LFLG_INPUT) {
		user->fifo_pool_size = SNDRV_SEQ_DEFAULT_CLIENT_EVENTS;
		user->fifo = snd_seq_fifo_new(user->fifo_pool_size);
		if (user->fifo == NULL) {
			seq_free_client1(client);
			kfree(client);
			mutex_unlock(&register_mutex);
			return -ENOMEM;
		}
	}
	usage_alloc(&client_usage, 1);
	client->type = USER_CLIENT;
	mutex_unlock(&register_mutex);
	c = client->number;
	file->private_data = client;
	/* fill client data */
	user->file = file;
	sprintf(client->name, "Client-%d", c);
	/* make others aware of this new client */
	snd_seq_system_client_ev_client_start(c);
	return 0;
}

/* delete a user client */
static int snd_seq_release(struct inode *inode, struct file *file)
{
	struct snd_seq_client *client = file->private_data;
	if (client) {
		seq_free_client(client);
		if (client->data.user.fifo)
			snd_seq_fifo_delete(&client->data.user.fifo);
		kfree(client);
	}
	return 0;
}

/* handle client read() */
/* possible error values:
 *	-ENXIO	invalid client or file open mode
 *	-ENOSPC	FIFO overflow (the flag is cleared after this error report)
 *	-EINVAL	not enough user-space buffer to write the whole event
 *	-EFAULT	seg. fault during copy to user space
 */
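/*
 * Each event is returned to user space as one fixed-size struct
 * snd_seq_event record; for variable-length events the external data is
 * appended right after the record, so the caller must supply a buffer
 * large enough for both parts.
 */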
static ssize_t snd_seq_read(struct file *file, char __user *buf, size_t count,
			    loff_t *offset)
{
	struct snd_seq_client *client = file->private_data;
	struct snd_seq_fifo *fifo;
	int err;
	long result = 0;
	struct snd_seq_event_cell *cell;
	if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT))
		return -ENXIO;
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	/* check client structures are in place */
	if (snd_BUG_ON(!client))
		return -ENXIO;
	if (!client->accept_input || (fifo = client->data.user.fifo) == NULL)
		return -ENXIO;
	if (atomic_read(&fifo->overflow) > 0) {
		/* buffer overflow is detected */
		snd_seq_fifo_clear(fifo);
		/* return error code */
		return -ENOSPC;
	}
	cell = NULL;
	err = 0;
	snd_seq_fifo_lock(fifo);
	/* while data available in queue */
	while (count >= sizeof(struct snd_seq_event)) {
		int nonblock;
		nonblock = (file->f_flags & O_NONBLOCK) || result > 0;
		if ((err = snd_seq_fifo_cell_out(fifo, &cell, nonblock)) < 0) {
			break;
		}
		if (snd_seq_ev_is_variable(&cell->event)) {
			struct snd_seq_event tmpev;
			tmpev = cell->event;
			tmpev.data.ext.len &= ~SNDRV_SEQ_EXT_MASK;
			if (copy_to_user(buf, &tmpev, sizeof(struct snd_seq_event))) {
				err = -EFAULT;
				break;
			}
			count -= sizeof(struct snd_seq_event);
			buf += sizeof(struct snd_seq_event);
			err = snd_seq_expand_var_event(&cell->event, count,
						       (char __force *)buf, 0,
						       sizeof(struct snd_seq_event));
			if (err < 0)
				break;
			result += err;
			count -= err;
			buf += err;
		} else {
			if (copy_to_user(buf, &cell->event, sizeof(struct snd_seq_event))) {
				err = -EFAULT;
				break;
			}
			count -= sizeof(struct snd_seq_event);
			buf += sizeof(struct snd_seq_event);
		}
		snd_seq_cell_free(cell);
		cell = NULL; /* to be sure */
		result += sizeof(struct snd_seq_event);
	}
	if (err < 0) {
		if (cell)
			snd_seq_fifo_cell_putback(fifo, cell);
		if (err == -EAGAIN && result > 0)
			err = 0;
	}
	snd_seq_fifo_unlock(fifo);
	return (err < 0) ? err : result;
}

/*
 * check access permission to the port
 */
static int check_port_perm(struct snd_seq_client_port *port, unsigned int flags)
{
	if ((port->capability & flags) != flags)
		return 0;
	return flags;
}

/*
 * check if the destination client is available, and return the pointer
 * if filter is non-zero, client filter bitmap is tested.
 */
static struct snd_seq_client *get_event_dest_client(struct snd_seq_event *event,
						    int filter)
{
	struct snd_seq_client *dest;
	dest = snd_seq_client_use_ptr(event->dest.client);
	if (dest == NULL)
		return NULL;
	if (! dest->accept_input)
		goto __not_avail;
	if ((dest->filter & SNDRV_SEQ_FILTER_USE_EVENT) &&
	    ! test_bit(event->type, dest->event_filter))
		goto __not_avail;
	if (filter && !(dest->filter & filter))
		goto __not_avail;
	return dest; /* ok - accessible */

__not_avail:
	snd_seq_client_unlock(dest);
	return NULL;
}

/*
 * Return the error event.
 *
 * If the receiver client is a user client, the original event is
 * encapsulated in SNDRV_SEQ_EVENT_BOUNCE as variable length event.  If
 * the original event is also variable length, the external data is
 * copied after the event record.
 * If the receiver client is a kernel client, the original event is
 * quoted in SNDRV_SEQ_EVENT_KERNEL_ERROR, since this requires no extra
 * kmalloc.
 */
static int bounce_error_event(struct snd_seq_client *client,
			      struct snd_seq_event *event,
			      int err, int atomic, int hop)
{
	struct snd_seq_event bounce_ev;
	int result;
	if (client == NULL ||
	    ! (client->filter & SNDRV_SEQ_FILTER_BOUNCE) ||
	    ! client->accept_input)
		return 0; /* ignored */
	/* set up quoted error */
	memset(&bounce_ev, 0, sizeof(bounce_ev));
	bounce_ev.type = SNDRV_SEQ_EVENT_KERNEL_ERROR;
	bounce_ev.flags = SNDRV_SEQ_EVENT_LENGTH_FIXED;
	bounce_ev.queue = SNDRV_SEQ_QUEUE_DIRECT;
	bounce_ev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
	bounce_ev.source.port = SNDRV_SEQ_PORT_SYSTEM_ANNOUNCE;
	bounce_ev.dest.client = client->number;
	bounce_ev.dest.port = event->source.port;
	bounce_ev.data.quote.origin = event->dest;
	bounce_ev.data.quote.event = event;
	bounce_ev.data.quote.value = -err; /* use positive value */
	result = snd_seq_deliver_single_event(NULL, &bounce_ev, 0, atomic, hop + 1);
	if (result < 0) {
		client->event_lost++;
		return result;
	}
	return result;
}

/*
 * rewrite the time-stamp of the event record with the current time
 * of the given queue.
 * return non-zero if updated.
 */
static int update_timestamp_of_queue(struct snd_seq_event *event,
				     int queue, int real_time)
{
	struct snd_seq_queue *q;
	q = queueptr(queue);
	if (! q)
		return 0;
	event->queue = queue;
	event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK;
	if (real_time) {
		event->time.time = snd_seq_timer_get_cur_time(q->timer);
		event->flags |= SNDRV_SEQ_TIME_STAMP_REAL;
	} else {
		event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
		event->flags |= SNDRV_SEQ_TIME_STAMP_TICK;
	}
	queuefree(q);
	return 1;
}

/*
 * deliver an event to the specified destination.
 * if filter is non-zero, client filter bitmap is tested.
 *
 *  RETURN VALUE: 0 : if succeeded
 *		 <0 : error
 */
static int snd_seq_deliver_single_event(struct snd_seq_client *client,
					struct snd_seq_event *event,
					int filter, int atomic, int hop)
{
	struct snd_seq_client *dest = NULL;
	struct snd_seq_client_port *dest_port = NULL;
	int result = -ENOENT;
	int direct;
	direct = snd_seq_ev_is_direct(event);
	dest = get_event_dest_client(event, filter);
	if (dest == NULL)
		goto __skip;
	dest_port = snd_seq_port_use_ptr(dest, event->dest.port);
	if (dest_port == NULL)
		goto __skip;
	/* check permission */
	if (! check_port_perm(dest_port, SNDRV_SEQ_PORT_CAP_WRITE)) {
		result = -EPERM;
		goto __skip;
	}
	if (dest_port->timestamping)
		update_timestamp_of_queue(event, dest_port->time_queue,
					  dest_port->time_real);
	switch (dest->type) {
	case USER_CLIENT:
		if (dest->data.user.fifo)
			result = snd_seq_fifo_event_in(dest->data.user.fifo, event);
		break;
	case KERNEL_CLIENT:
		if (dest_port->event_input == NULL)
			break;
		result = dest_port->event_input(event, direct,
						dest_port->private_data,
						atomic, hop);
		break;
	default:
		break;
	}

__skip:
	if (dest_port)
		snd_seq_port_unlock(dest_port);
	if (dest)
		snd_seq_client_unlock(dest);
	if (result < 0 && !direct) {
		result = bounce_error_event(client, event, result, atomic, hop);
	}
	return result;
}

/*
 * send the event to all subscribers:
 */
static int deliver_to_subscribers(struct snd_seq_client *client,
				  struct snd_seq_event *event,
				  int atomic, int hop)
{
	struct snd_seq_subscribers *subs;
	int err = 0, num_ev = 0;
	struct snd_seq_event event_saved;
	struct snd_seq_client_port *src_port;
	struct snd_seq_port_subs_info *grp;
	src_port = snd_seq_port_use_ptr(client, event->source.port);
	if (src_port == NULL)
		return -EINVAL; /* invalid source port */
	/* save original event record */
	event_saved = *event;
	grp = &src_port->c_src;
	/* lock list */
	if (atomic)
		read_lock(&grp->list_lock);
	else
		down_read(&grp->list_mutex);
	list_for_each_entry(subs, &grp->list_head, src_list) {
		event->dest = subs->info.dest;
		if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
			/* convert the time stamp according to the subscription's flags */
			update_timestamp_of_queue(event, subs->info.queue,
						  subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL);
		err = snd_seq_deliver_single_event(client, event,
						   0, atomic, hop);
		if (err < 0)
			break;
		num_ev++;
		/* restore original event record */
		*event = event_saved;
	}
	if (atomic)
		read_unlock(&grp->list_lock);
	else
		up_read(&grp->list_mutex);
	*event = event_saved; /* restore */
	snd_seq_port_unlock(src_port);
	return (err < 0) ? err : num_ev;
}

#ifdef SUPPORT_BROADCAST
/*
 * broadcast to all ports:
 */
static int port_broadcast_event(struct snd_seq_client *client,
				struct snd_seq_event *event,
				int atomic, int hop)
{
	int num_ev = 0, err = 0;
	struct snd_seq_client *dest_client;
	struct snd_seq_client_port *port;
	dest_client = get_event_dest_client(event, SNDRV_SEQ_FILTER_BROADCAST);
	if (dest_client == NULL)
		return 0; /* no matching destination */
	read_lock(&dest_client->ports_lock);
	list_for_each_entry(port, &dest_client->ports_list_head, list) {
		event->dest.port = port->addr.port;
		/* pass NULL as source client to avoid error bounce */
		err = snd_seq_deliver_single_event(NULL, event,
						   SNDRV_SEQ_FILTER_BROADCAST,
						   atomic, hop);
		if (err < 0)
			break;
		num_ev++;
	}
	read_unlock(&dest_client->ports_lock);
	snd_seq_client_unlock(dest_client);
	event->dest.port = SNDRV_SEQ_ADDRESS_BROADCAST; /* restore */
	return (err < 0) ? err : num_ev;
}

/*
 * send the event to all clients:
 * if destination port is also ADDRESS_BROADCAST, deliver to all ports.
 */
static int broadcast_event(struct snd_seq_client *client,
			   struct snd_seq_event *event, int atomic, int hop)
{
	int err = 0, num_ev = 0;
	int dest;
	struct snd_seq_addr addr;
	addr = event->dest; /* save */
	for (dest = 0; dest < SNDRV_SEQ_MAX_CLIENTS; dest++) {
		/* don't send to itself */
		if (dest == client->number)
			continue;
		event->dest.client = dest;
		event->dest.port = addr.port;
		if (addr.port == SNDRV_SEQ_ADDRESS_BROADCAST)
			err = port_broadcast_event(client, event, atomic, hop);
		else
			/* pass NULL as source client to avoid error bounce */
			err = snd_seq_deliver_single_event(NULL, event,
							   SNDRV_SEQ_FILTER_BROADCAST,
							   atomic, hop);
		if (err < 0)
			break;
		num_ev += err;
	}
	event->dest = addr; /* restore */
	return (err < 0) ? err : num_ev;
}

/* multicast - not supported yet */
static int multicast_event(struct snd_seq_client *client, struct snd_seq_event *event,
			   int atomic, int hop)
{
	snd_printd("seq: multicast not supported yet.\n");
	return 0; /* ignored */
}
#endif /* SUPPORT_BROADCAST */

/* deliver an event to the destination port(s).
 * if the event is to subscribers or broadcast, the event is dispatched
 * to multiple targets.
 *
 * RETURN VALUE: n > 0  : the number of delivered events.
 *               n == 0 : the event was not passed to any client.
 *               n < 0  : error - event was not processed.
 */
static int snd_seq_deliver_event(struct snd_seq_client *client, struct snd_seq_event *event,
				 int atomic, int hop)
{
	int result;
	hop++;
	if (hop >= SNDRV_SEQ_MAX_HOPS) {
		snd_printd("too long delivery path (%d:%d->%d:%d)\n",
			   event->source.client, event->source.port,
			   event->dest.client, event->dest.port);
		return -EMLINK;
	}
	if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS ||
	    event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS)
		result = deliver_to_subscribers(client, event, atomic, hop);
#ifdef SUPPORT_BROADCAST
	else if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST ||
		 event->dest.client == SNDRV_SEQ_ADDRESS_BROADCAST)
		result = broadcast_event(client, event, atomic, hop);
	else if (event->dest.client >= SNDRV_SEQ_MAX_CLIENTS)
		result = multicast_event(client, event, atomic, hop);
	else if (event->dest.port == SNDRV_SEQ_ADDRESS_BROADCAST)
		result = port_broadcast_event(client, event, atomic, hop);
#endif
	else
		result = snd_seq_deliver_single_event(client, event, 0, atomic, hop);
	return result;
}

/*
 * dispatch an event cell:
 * This function is called only from queue check routines in timer
 * interrupts or right after an event has been enqueued.
 * The event cell shall be released or re-queued in this function.
 *
 * RETURN VALUE: n > 0  : the number of delivered events.
 *               n == 0 : the event was not passed to any client.
 *               n < 0  : error - event was not processed.
 */
int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
{
	struct snd_seq_client *client;
	int result;
	if (snd_BUG_ON(!cell))
		return -EINVAL;
	client = snd_seq_client_use_ptr(cell->event.source.client);
	if (client == NULL) {
		snd_seq_cell_free(cell); /* release this cell */
		return -EINVAL;
	}
	if (cell->event.type == SNDRV_SEQ_EVENT_NOTE) {
		/* NOTE event:
		 * the event cell is re-used as a NOTE-OFF event and
		 * enqueued again.
		 */
		struct snd_seq_event tmpev, *ev;
		/* reserve this event to enqueue note-off later */
		tmpev = cell->event;
		tmpev.type = SNDRV_SEQ_EVENT_NOTEON;
		result = snd_seq_deliver_event(client, &tmpev, atomic, hop);
		/*
		 * This was originally a note event.  We now re-use the
		 * cell for the note-off event.
		 */
		ev = &cell->event;
		ev->type = SNDRV_SEQ_EVENT_NOTEOFF;
		ev->flags |= SNDRV_SEQ_PRIORITY_HIGH;
		/* add the duration time */
		switch (ev->flags & SNDRV_SEQ_TIME_STAMP_MASK) {
		case SNDRV_SEQ_TIME_STAMP_TICK:
			ev->time.tick += ev->data.note.duration;
			break;
		case SNDRV_SEQ_TIME_STAMP_REAL:
			/* unit for duration is ms */
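			/* e.g. duration = 1500 adds 1 to tv_sec and 500000000
			 * to tv_nsec; the carry is normalized just below */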
			ev->time.time.tv_nsec += 1000000 * (ev->data.note.duration % 1000);
			ev->time.time.tv_sec += ev->data.note.duration / 1000 +
						ev->time.time.tv_nsec / 1000000000;
			ev->time.time.tv_nsec %= 1000000000;
			break;
		}
		ev->data.note.velocity = ev->data.note.off_velocity;
		/* Now queue this cell as the note off event */
		if (snd_seq_enqueue_event(cell, atomic, hop) < 0)
			snd_seq_cell_free(cell); /* release this cell */
	} else {
		/* Normal events:
		 * event cell is freed after processing the event
		 */
		result = snd_seq_deliver_event(client, &cell->event, atomic, hop);
		snd_seq_cell_free(cell);
	}
	snd_seq_client_unlock(client);
	return result;
}

/* Allocate a cell from client pool and enqueue it to queue:
 * if pool is empty and blocking is TRUE, sleep until a new cell is
 * available.
 */
static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
					struct snd_seq_event *event,
					struct file *file, int blocking,
					int atomic, int hop)
{
	struct snd_seq_event_cell *cell;
	int err;
	/* special queue values - force direct passing */
	if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) {
		event->dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
		event->queue = SNDRV_SEQ_QUEUE_DIRECT;
	} else
#ifdef SUPPORT_BROADCAST
		if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST) {
			event->dest.client = SNDRV_SEQ_ADDRESS_BROADCAST;
			event->queue = SNDRV_SEQ_QUEUE_DIRECT;
		}
#endif
	if (event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) {
		/* check presence of source port */
		struct snd_seq_client_port *src_port = snd_seq_port_use_ptr(client, event->source.port);
		if (src_port == NULL)
			return -EINVAL;
		snd_seq_port_unlock(src_port);
	}
	/* direct event processing without enqueueing */
	if (snd_seq_ev_is_direct(event)) {
		if (event->type == SNDRV_SEQ_EVENT_NOTE)
			return -EINVAL; /* this event must be enqueued! */
		return snd_seq_deliver_event(client, event, atomic, hop);
	}
	/* Not direct, normal queuing */
	if (snd_seq_queue_is_used(event->queue, client->number) <= 0)
		return -EINVAL;  /* invalid queue */
	if (! snd_seq_write_pool_allocated(client))
		return -ENXIO; /* queue is not allocated */
	/* allocate an event cell */
	err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file);
	if (err < 0)
		return err;
	/* we got a cell. enqueue it. */
	if ((err = snd_seq_enqueue_event(cell, atomic, hop)) < 0) {
		snd_seq_cell_free(cell);
		return err;
	}
	return 0;
}

/*
 * check validity of event type and data length.
 * return non-zero if invalid.
 */
static int check_event_type_and_length(struct snd_seq_event *ev)
{
	switch (snd_seq_ev_length_type(ev)) {
	case SNDRV_SEQ_EVENT_LENGTH_FIXED:
		if (snd_seq_ev_is_variable_type(ev))
			return -EINVAL;
		break;
	case SNDRV_SEQ_EVENT_LENGTH_VARIABLE:
		if (! snd_seq_ev_is_variable_type(ev) ||
		    (ev->data.ext.len & ~SNDRV_SEQ_EXT_MASK) >= SNDRV_SEQ_MAX_EVENT_LEN)
			return -EINVAL;
		break;
	case SNDRV_SEQ_EVENT_LENGTH_VARUSR:
		if (! snd_seq_ev_is_direct(ev))
			return -EINVAL;
		break;
	}
	return 0;
}

/* handle write() */
/* possible error values:
 *	-ENXIO	invalid client or file open mode
 *	-ENOMEM	malloc failed
 *	-EFAULT	seg. fault during copy from user space
 *	-EINVAL	invalid event
 *	-EAGAIN	no space in output pool
 *	-EINTR	interrupted while sleeping
 *	-EMLINK	too many hops
 *	others	depends on return value from driver callback
 */
static ssize_t snd_seq_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *offset)
{
	struct snd_seq_client *client = file->private_data;
	int written = 0, len;
	int err = -EINVAL;
	struct snd_seq_event event;
	if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
		return -ENXIO;
	/* check client structures are in place */
	if (snd_BUG_ON(!client))
		return -ENXIO;
	if (!client->accept_output || client->pool == NULL)
		return -ENXIO;
	/* allocate the pool now if the pool is not allocated yet */
	if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
		if (snd_seq_pool_init(client->pool) < 0)
			return -ENOMEM;
	}
	/* only process whole events */
	while (count >= sizeof(struct snd_seq_event)) {
		/* Read in the event header from the user */
		len = sizeof(event);
		if (copy_from_user(&event, buf, len)) {
			err = -EFAULT;
			break;
		}
		event.source.client = client->number; /* fill in client number */
		/* Check for extension data length */
		if (check_event_type_and_length(&event)) {
			err = -EINVAL;
			break;
		}
		/* check for special events */
		if (event.type == SNDRV_SEQ_EVENT_NONE)
			goto __skip_event;
		else if (snd_seq_ev_is_reserved(&event)) {
			err = -EINVAL;
			break;
		}
		if (snd_seq_ev_is_variable(&event)) {
			int extlen = event.data.ext.len & ~SNDRV_SEQ_EXT_MASK;
			if ((size_t)(extlen + len) > count) {
				/* back out, will get an error this time or next */
				err = -EINVAL;
				break;
			}
			/* set user space pointer */
			event.data.ext.len = extlen | SNDRV_SEQ_EXT_USRPTR;
			event.data.ext.ptr = (char __force *)buf
						+ sizeof(struct snd_seq_event);
			len += extlen; /* increment data length */
		} else {
#ifdef CONFIG_COMPAT
			if (client->convert32 && snd_seq_ev_is_varusr(&event)) {
				void *ptr = (void __force *)compat_ptr(event.data.raw32.d[1]);
				event.data.ext.ptr = ptr;
			}
#endif
		}
		/* ok, enqueue it */
		err = snd_seq_client_enqueue_event(client, &event, file,
						   !(file->f_flags & O_NONBLOCK),
						   0, 0);
		if (err < 0)
			break;

	__skip_event:
		/* Update pointers and counts */
		count -= len;
		buf += len;
		written += len;
	}
	return written ? written : err;
}

/*
 * handle polling
 */
static unsigned int snd_seq_poll(struct file *file, poll_table * wait)
{
	struct snd_seq_client *client = file->private_data;
	unsigned int mask = 0;
	/* check client structures are in place */
	if (snd_BUG_ON(!client))
		return -ENXIO;
	if ((snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT) &&
	    client->data.user.fifo) {
		/* check if data is available in the outqueue */
		if (snd_seq_fifo_poll_wait(client->data.user.fifo, file, wait))
			mask |= POLLIN | POLLRDNORM;
	}
	if (snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT) {
		/* check if data is available in the pool */
		if (!snd_seq_write_pool_allocated(client) ||
		    snd_seq_pool_poll_wait(client->pool, file, wait))
			mask |= POLLOUT | POLLWRNORM;
	}
	return mask;
}

/*-----------------------------------------------------*/

/* SYSTEM_INFO ioctl() */
static int snd_seq_ioctl_system_info(struct snd_seq_client *client, void __user *arg)
{
	struct snd_seq_system_info info;
	memset(&info, 0, sizeof(info));
	/* fill the info fields */
	info.queues = SNDRV_SEQ_MAX_QUEUES;
	info.clients = SNDRV_SEQ_MAX_CLIENTS;
	info.ports = 256;	/* fixed limit */
	info.channels = 256;	/* fixed limit */
	info.cur_clients = client_usage.cur;
	info.cur_queues = snd_seq_queue_get_cur_queues();
	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/* RUNNING_MODE ioctl() */
static int snd_seq_ioctl_running_mode(struct snd_seq_client *client, void __user *arg)
{
	struct snd_seq_running_info info;
	struct snd_seq_client *cptr;
	int err = 0;
	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;
	/* requested client number */
	cptr = snd_seq_client_use_ptr(info.client);
	if (cptr == NULL)
		return -ENOENT;		/* don't change !!! */
#ifdef SNDRV_BIG_ENDIAN
	if (! info.big_endian) {
		err = -EINVAL;
		goto __err;
	}
#else
	if (info.big_endian) {
		err = -EINVAL;
		goto __err;
	}
#endif
	if (info.cpu_mode > sizeof(long)) {
		err = -EINVAL;
		goto __err;
	}
	cptr->convert32 = (info.cpu_mode < sizeof(long));

__err:
	snd_seq_client_unlock(cptr);
	return err;
}

/* CLIENT_INFO ioctl() */
static void get_client_info(struct snd_seq_client *cptr,
			    struct snd_seq_client_info *info)
{
	info->client = cptr->number;
	/* fill the info fields */
	info->type = cptr->type;
	strcpy(info->name, cptr->name);
	info->filter = cptr->filter;
	info->event_lost = cptr->event_lost;
	memcpy(info->event_filter, cptr->event_filter, 32);
	info->num_ports = cptr->num_ports;
	memset(info->reserved, 0, sizeof(info->reserved));
}

static int snd_seq_ioctl_get_client_info(struct snd_seq_client *client,
					  void __user *arg)
{
	struct snd_seq_client *cptr;
	struct snd_seq_client_info client_info;
	if (copy_from_user(&client_info, arg, sizeof(client_info)))
		return -EFAULT;
	/* requested client number */
	cptr = snd_seq_client_use_ptr(client_info.client);
	if (cptr == NULL)
		return -ENOENT;		/* don't change !!! */
	get_client_info(cptr, &client_info);
	snd_seq_client_unlock(cptr);
	if (copy_to_user(arg, &client_info, sizeof(client_info)))
		return -EFAULT;
	return 0;
}

/* CLIENT_INFO ioctl() */
static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
					  void __user *arg)
{
	struct snd_seq_client_info client_info;
	if (copy_from_user(&client_info, arg, sizeof(client_info)))
		return -EFAULT;
	/* it is not allowed to set the info fields for another client */
	if (client->number != client_info.client)
		return -EPERM;
	/* also client type must be set now */
	if (client->type != client_info.type)
		return -EINVAL;
	/* fill the info fields */
	if (client_info.name[0])
		strlcpy(client->name, client_info.name, sizeof(client->name));
	client->filter = client_info.filter;
	client->event_lost = client_info.event_lost;
	memcpy(client->event_filter, client_info.event_filter, 32);
	return 0;
}

/*
 * CREATE PORT ioctl()
 */
static int snd_seq_ioctl_create_port(struct snd_seq_client *client,
				     void __user *arg)
{
	struct snd_seq_client_port *port;
	struct snd_seq_port_info info;
	struct snd_seq_port_callback *callback;
	int port_idx;
	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;
	/* it is not allowed to create the port for another client */
	if (info.addr.client != client->number)
		return -EPERM;
	port = snd_seq_create_port(client, (info.flags & SNDRV_SEQ_PORT_FLG_GIVEN_PORT) ? info.addr.port : -1);
	if (port == NULL)
		return -ENOMEM;
	if (client->type == USER_CLIENT && info.kernel) {
		port_idx = port->addr.port;
		snd_seq_port_unlock(port);
		snd_seq_delete_port(client, port_idx);
		return -EINVAL;
	}
	if (client->type == KERNEL_CLIENT) {
		if ((callback = info.kernel) != NULL) {
			if (callback->owner)
				port->owner = callback->owner;
			port->private_data = callback->private_data;
			port->private_free = callback->private_free;
			port->callback_all = callback->callback_all;
			port->event_input = callback->event_input;
			port->c_src.open = callback->subscribe;
			port->c_src.close = callback->unsubscribe;
			port->c_dest.open = callback->use;
			port->c_dest.close = callback->unuse;
		}
	}
	info.addr = port->addr;
	snd_seq_set_port_info(port, &info);
	snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);
	snd_seq_port_unlock(port);
	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/*
 * DELETE PORT ioctl()
 */
static int snd_seq_ioctl_delete_port(struct snd_seq_client *client,
				     void __user *arg)
{
	struct snd_seq_port_info info;
	int err;
	/* set passed parameters */
	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;
	/* it is not allowed to remove the port for another client */
	if (info.addr.client != client->number)
		return -EPERM;
	err = snd_seq_delete_port(client, info.addr.port);
	if (err >= 0)
		snd_seq_system_client_ev_port_exit(client->number, info.addr.port);
	return err;
}

/*
 * GET_PORT_INFO ioctl() (on any client)
 */
static int snd_seq_ioctl_get_port_info(struct snd_seq_client *client,
				       void __user *arg)
{
	struct snd_seq_client *cptr;
	struct snd_seq_client_port *port;
	struct snd_seq_port_info info;
	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;
	cptr = snd_seq_client_use_ptr(info.addr.client);
	if (cptr == NULL)
		return -ENXIO;
	port = snd_seq_port_use_ptr(cptr, info.addr.port);
	if (port == NULL) {
		snd_seq_client_unlock(cptr);
		return -ENOENT;			/* don't change */
	}
	/* get port info */
	snd_seq_get_port_info(port, &info);
	snd_seq_port_unlock(port);
	snd_seq_client_unlock(cptr);
	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/*
 * SET_PORT_INFO ioctl() (only ports on this/own client)
 */
static int snd_seq_ioctl_set_port_info(struct snd_seq_client *client,
				       void __user *arg)
{
	struct snd_seq_client_port *port;
	struct snd_seq_port_info info;
	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;
	if (info.addr.client != client->number) /* only set our own ports ! */
		return -EPERM;
	port = snd_seq_port_use_ptr(client, info.addr.port);
	if (port) {
		snd_seq_set_port_info(port, &info);
		snd_seq_port_unlock(port);
	}
	return 0;
}

/*
 * port subscription (connection)
 */
#define PERM_RD		(SNDRV_SEQ_PORT_CAP_READ|SNDRV_SEQ_PORT_CAP_SUBS_READ)
#define PERM_WR		(SNDRV_SEQ_PORT_CAP_WRITE|SNDRV_SEQ_PORT_CAP_SUBS_WRITE)

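/*
 * Subscription permission rules, as implemented below: a third-party
 * client may connect two other clients only if neither port is marked
 * NO_EXPORT; in addition, the sender port must be readable and the
 * destination port writable unless the respective port belongs to the
 * requesting client itself.
 */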
static int check_subscription_permission(struct snd_seq_client *client,
					 struct snd_seq_client_port *sport,
					 struct snd_seq_client_port *dport,
					 struct snd_seq_port_subscribe *subs)
{
	if (client->number != subs->sender.client &&
	    client->number != subs->dest.client) {
		/* connection by third client - check export permission */
		if (check_port_perm(sport, SNDRV_SEQ_PORT_CAP_NO_EXPORT))
			return -EPERM;
		if (check_port_perm(dport, SNDRV_SEQ_PORT_CAP_NO_EXPORT))
			return -EPERM;
	}
	/* check read permission */
	/* if sender or receiver is the subscribing client itself,
	 * no permission check is necessary
	 */
	if (client->number != subs->sender.client) {
		if (! check_port_perm(sport, PERM_RD))
			return -EPERM;
	}
	/* check write permission */
	if (client->number != subs->dest.client) {
		if (! check_port_perm(dport, PERM_WR))
			return -EPERM;
	}
	return 0;
}

/*
 * send a subscription notify event to a user client:
  1211. * client must be user client.
  1212. */
  1213. int snd_seq_client_notify_subscription(int client, int port,
  1214. struct snd_seq_port_subscribe *info,
  1215. int evtype)
  1216. {
  1217. struct snd_seq_event event;
  1218. memset(&event, 0, sizeof(event));
  1219. event.type = evtype;
  1220. event.data.connect.dest = info->dest;
  1221. event.data.connect.sender = info->sender;
  1222. return snd_seq_system_notify(client, port, &event); /* non-atomic */
  1223. }
  1224. /*
  1225. * add to port's subscription list IOCTL interface
  1226. */
  1227. static int snd_seq_ioctl_subscribe_port(struct snd_seq_client *client,
  1228. void __user *arg)
  1229. {
  1230. int result = -EINVAL;
  1231. struct snd_seq_client *receiver = NULL, *sender = NULL;
  1232. struct snd_seq_client_port *sport = NULL, *dport = NULL;
  1233. struct snd_seq_port_subscribe subs;
  1234. if (copy_from_user(&subs, arg, sizeof(subs)))
  1235. return -EFAULT;
  1236. if ((receiver = snd_seq_client_use_ptr(subs.dest.client)) == NULL)
  1237. goto __end;
  1238. if ((sender = snd_seq_client_use_ptr(subs.sender.client)) == NULL)
  1239. goto __end;
  1240. if ((sport = snd_seq_port_use_ptr(sender, subs.sender.port)) == NULL)
  1241. goto __end;
  1242. if ((dport = snd_seq_port_use_ptr(receiver, subs.dest.port)) == NULL)
  1243. goto __end;
  1244. result = check_subscription_permission(client, sport, dport, &subs);
  1245. if (result < 0)
  1246. goto __end;
  1247. /* connect them */
  1248. result = snd_seq_port_connect(client, sender, sport, receiver, dport, &subs);
  1249. if (! result) /* broadcast announce */
  1250. snd_seq_client_notify_subscription(SNDRV_SEQ_ADDRESS_SUBSCRIBERS, 0,
  1251. &subs, SNDRV_SEQ_EVENT_PORT_SUBSCRIBED);
  1252. __end:
  1253. if (sport)
  1254. snd_seq_port_unlock(sport);
  1255. if (dport)
  1256. snd_seq_port_unlock(dport);
  1257. if (sender)
  1258. snd_seq_client_unlock(sender);
  1259. if (receiver)
  1260. snd_seq_client_unlock(receiver);
  1261. return result;
  1262. }
  1263. /*
  1264. * remove from port's subscription list
  1265. */
  1266. static int snd_seq_ioctl_unsubscribe_port(struct snd_seq_client *client,
  1267. void __user *arg)
  1268. {
  1269. int result = -ENXIO;
  1270. struct snd_seq_client *receiver = NULL, *sender = NULL;
  1271. struct snd_seq_client_port *sport = NULL, *dport = NULL;
  1272. struct snd_seq_port_subscribe subs;
  1273. if (copy_from_user(&subs, arg, sizeof(subs)))
  1274. return -EFAULT;
  1275. if ((receiver = snd_seq_client_use_ptr(subs.dest.client)) == NULL)
  1276. goto __end;
  1277. if ((sender = snd_seq_client_use_ptr(subs.sender.client)) == NULL)
  1278. goto __end;
  1279. if ((sport = snd_seq_port_use_ptr(sender, subs.sender.port)) == NULL)
  1280. goto __end;
  1281. if ((dport = snd_seq_port_use_ptr(receiver, subs.dest.port)) == NULL)
  1282. goto __end;
  1283. result = check_subscription_permission(client, sport, dport, &subs);
  1284. if (result < 0)
  1285. goto __end;
  1286. result = snd_seq_port_disconnect(client, sender, sport, receiver, dport, &subs);
  1287. if (! result) /* broadcast announce */
  1288. snd_seq_client_notify_subscription(SNDRV_SEQ_ADDRESS_SUBSCRIBERS, 0,
  1289. &subs, SNDRV_SEQ_EVENT_PORT_UNSUBSCRIBED);
  1290. __end:
  1291. if (sport)
  1292. snd_seq_port_unlock(sport);
  1293. if (dport)
  1294. snd_seq_port_unlock(dport);
  1295. if (sender)
  1296. snd_seq_client_unlock(sender);
  1297. if (receiver)
  1298. snd_seq_client_unlock(receiver);
  1299. return result;
  1300. }

/* CREATE_QUEUE ioctl() */
static int snd_seq_ioctl_create_queue(struct snd_seq_client *client,
				      void __user *arg)
{
	struct snd_seq_queue_info info;
	int result;
	struct snd_seq_queue *q;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	result = snd_seq_queue_alloc(client->number, info.locked, info.flags);
	if (result < 0)
		return result;

	q = queueptr(result);
	if (q == NULL)
		return -EINVAL;

	info.queue = q->queue;
	info.locked = q->locked;
	info.owner = q->owner;

	/* set queue name */
	if (! info.name[0])
		snprintf(info.name, sizeof(info.name), "Queue-%d", q->queue);
	strlcpy(q->name, info.name, sizeof(q->name));
	queuefree(q);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}
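
/*
 * Illustrative sketch (not part of the original source): user-space creation
 * of a named queue via the ioctl above.  "fd" is a hypothetical descriptor
 * for the sequencer device; the returned info.queue carries the allocated
 * queue number, and an empty name would have been replaced by "Queue-%d".
 *
 *	struct snd_seq_queue_info qinfo;
 *
 *	memset(&qinfo, 0, sizeof(qinfo));
 *	strcpy(qinfo.name, "my-queue");
 *	if (ioctl(fd, SNDRV_SEQ_IOCTL_CREATE_QUEUE, &qinfo) == 0)
 *		printf("got queue %d\n", qinfo.queue);
 */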

/* DELETE_QUEUE ioctl() */
static int snd_seq_ioctl_delete_queue(struct snd_seq_client *client,
				      void __user *arg)
{
	struct snd_seq_queue_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;
	return snd_seq_queue_delete(client->number, info.queue);
}

/* GET_QUEUE_INFO ioctl() */
static int snd_seq_ioctl_get_queue_info(struct snd_seq_client *client,
					void __user *arg)
{
	struct snd_seq_queue_info info;
	struct snd_seq_queue *q;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	q = queueptr(info.queue);
	if (q == NULL)
		return -EINVAL;

	memset(&info, 0, sizeof(info));
	info.queue = q->queue;
	info.owner = q->owner;
	info.locked = q->locked;
	strlcpy(info.name, q->name, sizeof(info.name));
	queuefree(q);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/* SET_QUEUE_INFO ioctl() */
static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
					void __user *arg)
{
	struct snd_seq_queue_info info;
	struct snd_seq_queue *q;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	if (info.owner != client->number)
		return -EINVAL;

	/* change owner/locked permission */
	if (snd_seq_queue_check_access(info.queue, client->number)) {
		if (snd_seq_queue_set_owner(info.queue, client->number, info.locked) < 0)
			return -EPERM;
		if (info.locked)
			snd_seq_queue_use(info.queue, client->number, 1);
	} else {
		return -EPERM;
	}

	q = queueptr(info.queue);
	if (! q)
		return -EINVAL;
	if (q->owner != client->number) {
		queuefree(q);
		return -EPERM;
	}
	strlcpy(q->name, info.name, sizeof(q->name));
	queuefree(q);

	return 0;
}

/* GET_NAMED_QUEUE ioctl() */
static int snd_seq_ioctl_get_named_queue(struct snd_seq_client *client, void __user *arg)
{
	struct snd_seq_queue_info info;
	struct snd_seq_queue *q;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	q = snd_seq_queue_find_name(info.name);
	if (q == NULL)
		return -EINVAL;
	info.queue = q->queue;
	info.owner = q->owner;
	info.locked = q->locked;
	queuefree(q);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/* GET_QUEUE_STATUS ioctl() */
static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client,
					  void __user *arg)
{
	struct snd_seq_queue_status status;
	struct snd_seq_queue *queue;
	struct snd_seq_timer *tmr;

	if (copy_from_user(&status, arg, sizeof(status)))
		return -EFAULT;

	queue = queueptr(status.queue);
	if (queue == NULL)
		return -EINVAL;
	memset(&status, 0, sizeof(status));
	status.queue = queue->queue;

	tmr = queue->timer;
	status.events = queue->tickq->cells + queue->timeq->cells;

	status.time = snd_seq_timer_get_cur_time(tmr);
	status.tick = snd_seq_timer_get_cur_tick(tmr);
	status.running = tmr->running;

	status.flags = queue->flags;
	queuefree(queue);

	if (copy_to_user(arg, &status, sizeof(status)))
		return -EFAULT;
	return 0;
}

/* GET_QUEUE_TEMPO ioctl() */
static int snd_seq_ioctl_get_queue_tempo(struct snd_seq_client *client,
					 void __user *arg)
{
	struct snd_seq_queue_tempo tempo;
	struct snd_seq_queue *queue;
	struct snd_seq_timer *tmr;

	if (copy_from_user(&tempo, arg, sizeof(tempo)))
		return -EFAULT;

	queue = queueptr(tempo.queue);
	if (queue == NULL)
		return -EINVAL;
	memset(&tempo, 0, sizeof(tempo));
	tempo.queue = queue->queue;

	tmr = queue->timer;
	tempo.tempo = tmr->tempo;
	tempo.ppq = tmr->ppq;
	tempo.skew_value = tmr->skew;
	tempo.skew_base = tmr->skew_base;
	queuefree(queue);

	if (copy_to_user(arg, &tempo, sizeof(tempo)))
		return -EFAULT;
	return 0;
}

/* SET_QUEUE_TEMPO ioctl() */
int snd_seq_set_queue_tempo(int client, struct snd_seq_queue_tempo *tempo)
{
	if (!snd_seq_queue_check_access(tempo->queue, client))
		return -EPERM;
	return snd_seq_queue_timer_set_tempo(tempo->queue, client, tempo);
}

EXPORT_SYMBOL(snd_seq_set_queue_tempo);
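
/*
 * Illustrative sketch (not part of the original source): a kernel caller
 * changing the tempo of a queue it owns through the exported helper above.
 * "my_client" and "my_queue" are hypothetical values obtained elsewhere
 * (e.g. from snd_seq_create_kernel_client() and a CREATE_QUEUE call).
 *
 *	struct snd_seq_queue_tempo tempo;
 *
 *	memset(&tempo, 0, sizeof(tempo));
 *	tempo.queue = my_queue;
 *	tempo.tempo = 500000;		500000 us per quarter note = 120 BPM
 *	tempo.ppq = 96;
 *	err = snd_seq_set_queue_tempo(my_client, &tempo);
 */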

static int snd_seq_ioctl_set_queue_tempo(struct snd_seq_client *client,
					 void __user *arg)
{
	int result;
	struct snd_seq_queue_tempo tempo;

	if (copy_from_user(&tempo, arg, sizeof(tempo)))
		return -EFAULT;

	result = snd_seq_set_queue_tempo(client->number, &tempo);
	return result < 0 ? result : 0;
}

/* GET_QUEUE_TIMER ioctl() */
static int snd_seq_ioctl_get_queue_timer(struct snd_seq_client *client,
					 void __user *arg)
{
	struct snd_seq_queue_timer timer;
	struct snd_seq_queue *queue;
	struct snd_seq_timer *tmr;

	if (copy_from_user(&timer, arg, sizeof(timer)))
		return -EFAULT;

	queue = queueptr(timer.queue);
	if (queue == NULL)
		return -EINVAL;

	if (mutex_lock_interruptible(&queue->timer_mutex)) {
		queuefree(queue);
		return -ERESTARTSYS;
	}
	tmr = queue->timer;
	memset(&timer, 0, sizeof(timer));
	timer.queue = queue->queue;

	timer.type = tmr->type;
	if (tmr->type == SNDRV_SEQ_TIMER_ALSA) {
		timer.u.alsa.id = tmr->alsa_id;
		timer.u.alsa.resolution = tmr->preferred_resolution;
	}
	mutex_unlock(&queue->timer_mutex);
	queuefree(queue);

	if (copy_to_user(arg, &timer, sizeof(timer)))
		return -EFAULT;
	return 0;
}

/* SET_QUEUE_TIMER ioctl() */
static int snd_seq_ioctl_set_queue_timer(struct snd_seq_client *client,
					 void __user *arg)
{
	int result = 0;
	struct snd_seq_queue_timer timer;

	if (copy_from_user(&timer, arg, sizeof(timer)))
		return -EFAULT;

	if (timer.type != SNDRV_SEQ_TIMER_ALSA)
		return -EINVAL;

	if (snd_seq_queue_check_access(timer.queue, client->number)) {
		struct snd_seq_queue *q;
		struct snd_seq_timer *tmr;

		q = queueptr(timer.queue);
		if (q == NULL)
			return -ENXIO;
		if (mutex_lock_interruptible(&q->timer_mutex)) {
			queuefree(q);
			return -ERESTARTSYS;
		}
		tmr = q->timer;
		snd_seq_queue_timer_close(timer.queue);
		tmr->type = timer.type;
		if (tmr->type == SNDRV_SEQ_TIMER_ALSA) {
			tmr->alsa_id = timer.u.alsa.id;
			tmr->preferred_resolution = timer.u.alsa.resolution;
		}
		result = snd_seq_queue_timer_open(timer.queue);
		mutex_unlock(&q->timer_mutex);
		queuefree(q);
	} else {
		return -EPERM;
	}

	return result;
}

/* GET_QUEUE_CLIENT ioctl() */
static int snd_seq_ioctl_get_queue_client(struct snd_seq_client *client,
					  void __user *arg)
{
	struct snd_seq_queue_client info;
	int used;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	used = snd_seq_queue_is_used(info.queue, client->number);
	if (used < 0)
		return -EINVAL;
	info.used = used;
	info.client = client->number;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/* SET_QUEUE_CLIENT ioctl() */
static int snd_seq_ioctl_set_queue_client(struct snd_seq_client *client,
					  void __user *arg)
{
	int err;
	struct snd_seq_queue_client info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	if (info.used >= 0) {
		err = snd_seq_queue_use(info.queue, client->number, info.used);
		if (err < 0)
			return err;
	}

	return snd_seq_ioctl_get_queue_client(client, arg);
}

/* GET_CLIENT_POOL ioctl() */
static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
					 void __user *arg)
{
	struct snd_seq_client_pool info;
	struct snd_seq_client *cptr;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	cptr = snd_seq_client_use_ptr(info.client);
	if (cptr == NULL)
		return -ENOENT;
	memset(&info, 0, sizeof(info));
	info.output_pool = cptr->pool->size;
	info.output_room = cptr->pool->room;
	info.output_free = info.output_pool;
	info.output_free = snd_seq_unused_cells(cptr->pool);
	if (cptr->type == USER_CLIENT) {
		info.input_pool = cptr->data.user.fifo_pool_size;
		info.input_free = info.input_pool;
		if (cptr->data.user.fifo)
			info.input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
	} else {
		info.input_pool = 0;
		info.input_free = 0;
	}
	snd_seq_client_unlock(cptr);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/* SET_CLIENT_POOL ioctl() */
static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
					 void __user *arg)
{
	struct snd_seq_client_pool info;
	int rc;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	if (client->number != info.client)
		return -EINVAL; /* can't change other clients */

	if (info.output_pool >= 1 && info.output_pool <= SNDRV_SEQ_MAX_EVENTS &&
	    (! snd_seq_write_pool_allocated(client) ||
	     info.output_pool != client->pool->size)) {
		if (snd_seq_write_pool_allocated(client)) {
			/* remove all existing cells */
			snd_seq_queue_client_leave_cells(client->number);
			snd_seq_pool_done(client->pool);
		}
		client->pool->size = info.output_pool;
		rc = snd_seq_pool_init(client->pool);
		if (rc < 0)
			return rc;
	}
	if (client->type == USER_CLIENT && client->data.user.fifo != NULL &&
	    info.input_pool >= 1 &&
	    info.input_pool <= SNDRV_SEQ_MAX_CLIENT_EVENTS &&
	    info.input_pool != client->data.user.fifo_pool_size) {
		/* change pool size */
		rc = snd_seq_fifo_resize(client->data.user.fifo, info.input_pool);
		if (rc < 0)
			return rc;
		client->data.user.fifo_pool_size = info.input_pool;
	}
	if (info.output_room >= 1 &&
	    info.output_room <= client->pool->size) {
		client->pool->room = info.output_room;
	}

	return snd_seq_ioctl_get_client_pool(client, arg);
}
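
/*
 * Illustrative sketch (not part of the original source): a user-space client
 * enlarging its own output pool before sending a burst of events.  "fd" and
 * "my_client_id" are hypothetical; the handler re-reads the pool afterwards,
 * so the same structure comes back updated.
 *
 *	struct snd_seq_client_pool pool;
 *
 *	memset(&pool, 0, sizeof(pool));
 *	pool.client = my_client_id;
 *	pool.output_pool = 1000;	number of output cells
 *	pool.output_room = 500;		wake-up threshold for writers
 *	ioctl(fd, SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, &pool);
 */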

/* REMOVE_EVENTS ioctl() */
static int snd_seq_ioctl_remove_events(struct snd_seq_client *client,
				       void __user *arg)
{
	struct snd_seq_remove_events info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	/*
	 * Input mostly not implemented XXX.
	 */
	if (info.remove_mode & SNDRV_SEQ_REMOVE_INPUT) {
		/*
		 * No restrictions, so for a user client we can clear
		 * the whole fifo.
		 */
		if (client->type == USER_CLIENT && client->data.user.fifo)
			snd_seq_fifo_clear(client->data.user.fifo);
	}

	if (info.remove_mode & SNDRV_SEQ_REMOVE_OUTPUT)
		snd_seq_queue_remove_cells(client->number, &info);

	return 0;
}

/*
 * get subscription info
 */
static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client,
					  void __user *arg)
{
	int result;
	struct snd_seq_client *sender = NULL;
	struct snd_seq_client_port *sport = NULL;
	struct snd_seq_port_subscribe subs;
	struct snd_seq_subscribers *p;

	if (copy_from_user(&subs, arg, sizeof(subs)))
		return -EFAULT;

	result = -EINVAL;
	if ((sender = snd_seq_client_use_ptr(subs.sender.client)) == NULL)
		goto __end;
	if ((sport = snd_seq_port_use_ptr(sender, subs.sender.port)) == NULL)
		goto __end;
	p = snd_seq_port_get_subscription(&sport->c_src, &subs.dest);
	if (p) {
		result = 0;
		subs = p->info;
	} else
		result = -ENOENT;

      __end:
	if (sport)
		snd_seq_port_unlock(sport);
	if (sender)
		snd_seq_client_unlock(sender);
	if (result >= 0) {
		if (copy_to_user(arg, &subs, sizeof(subs)))
			return -EFAULT;
	}
	return result;
}

/*
 * get subscription info - check only its presence
 */
static int snd_seq_ioctl_query_subs(struct snd_seq_client *client,
				    void __user *arg)
{
	int result = -ENXIO;
	struct snd_seq_client *cptr = NULL;
	struct snd_seq_client_port *port = NULL;
	struct snd_seq_query_subs subs;
	struct snd_seq_port_subs_info *group;
	struct list_head *p;
	int i;

	if (copy_from_user(&subs, arg, sizeof(subs)))
		return -EFAULT;

	if ((cptr = snd_seq_client_use_ptr(subs.root.client)) == NULL)
		goto __end;
	if ((port = snd_seq_port_use_ptr(cptr, subs.root.port)) == NULL)
		goto __end;

	switch (subs.type) {
	case SNDRV_SEQ_QUERY_SUBS_READ:
		group = &port->c_src;
		break;
	case SNDRV_SEQ_QUERY_SUBS_WRITE:
		group = &port->c_dest;
		break;
	default:
		goto __end;
	}

	down_read(&group->list_mutex);
	/* search for the subscriber */
	subs.num_subs = group->count;
	i = 0;
	result = -ENOENT;
	list_for_each(p, &group->list_head) {
		if (i++ == subs.index) {
			/* found! */
			struct snd_seq_subscribers *s;
			if (subs.type == SNDRV_SEQ_QUERY_SUBS_READ) {
				s = list_entry(p, struct snd_seq_subscribers, src_list);
				subs.addr = s->info.dest;
			} else {
				s = list_entry(p, struct snd_seq_subscribers, dest_list);
				subs.addr = s->info.sender;
			}
			subs.flags = s->info.flags;
			subs.queue = s->info.queue;
			result = 0;
			break;
		}
	}
	up_read(&group->list_mutex);

      __end:
	if (port)
		snd_seq_port_unlock(port);
	if (cptr)
		snd_seq_client_unlock(cptr);
	if (result >= 0) {
		if (copy_to_user(arg, &subs, sizeof(subs)))
			return -EFAULT;
	}
	return result;
}

/*
 * query next client
 */
static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client,
					   void __user *arg)
{
	struct snd_seq_client *cptr = NULL;
	struct snd_seq_client_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	/* search for the next client */
	info.client++;
	if (info.client < 0)
		info.client = 0;
	for (; info.client < SNDRV_SEQ_MAX_CLIENTS; info.client++) {
		cptr = snd_seq_client_use_ptr(info.client);
		if (cptr)
			break; /* found */
	}
	if (cptr == NULL)
		return -ENOENT;

	get_client_info(cptr, &info);
	snd_seq_client_unlock(cptr);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}
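
/*
 * Illustrative sketch (not part of the original source): the usual way user
 * space walks all clients with the QUERY_NEXT_CLIENT ioctl.  Start below
 * client 0 and let the kernel advance to the next existing client until the
 * call fails with ENOENT.  "fd" is a hypothetical sequencer descriptor.
 *
 *	struct snd_seq_client_info cinfo;
 *
 *	memset(&cinfo, 0, sizeof(cinfo));
 *	cinfo.client = -1;
 *	while (ioctl(fd, SNDRV_SEQ_IOCTL_QUERY_NEXT_CLIENT, &cinfo) == 0)
 *		printf("client %d: %s\n", cinfo.client, cinfo.name);
 */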

/*
 * query next port
 */
static int snd_seq_ioctl_query_next_port(struct snd_seq_client *client,
					 void __user *arg)
{
	struct snd_seq_client *cptr;
	struct snd_seq_client_port *port = NULL;
	struct snd_seq_port_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;
	cptr = snd_seq_client_use_ptr(info.addr.client);
	if (cptr == NULL)
		return -ENXIO;

	/* search for next port */
	info.addr.port++;
	port = snd_seq_port_query_nearest(cptr, &info);
	if (port == NULL) {
		snd_seq_client_unlock(cptr);
		return -ENOENT;
	}

	/* get port info */
	info.addr = port->addr;
	snd_seq_get_port_info(port, &info);
	snd_seq_port_unlock(port);
	snd_seq_client_unlock(cptr);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/* -------------------------------------------------------- */

static struct seq_ioctl_table {
	unsigned int cmd;
	int (*func)(struct snd_seq_client *client, void __user * arg);
} ioctl_tables[] = {
	{ SNDRV_SEQ_IOCTL_SYSTEM_INFO, snd_seq_ioctl_system_info },
	{ SNDRV_SEQ_IOCTL_RUNNING_MODE, snd_seq_ioctl_running_mode },
	{ SNDRV_SEQ_IOCTL_GET_CLIENT_INFO, snd_seq_ioctl_get_client_info },
	{ SNDRV_SEQ_IOCTL_SET_CLIENT_INFO, snd_seq_ioctl_set_client_info },
	{ SNDRV_SEQ_IOCTL_CREATE_PORT, snd_seq_ioctl_create_port },
	{ SNDRV_SEQ_IOCTL_DELETE_PORT, snd_seq_ioctl_delete_port },
	{ SNDRV_SEQ_IOCTL_GET_PORT_INFO, snd_seq_ioctl_get_port_info },
	{ SNDRV_SEQ_IOCTL_SET_PORT_INFO, snd_seq_ioctl_set_port_info },
	{ SNDRV_SEQ_IOCTL_SUBSCRIBE_PORT, snd_seq_ioctl_subscribe_port },
	{ SNDRV_SEQ_IOCTL_UNSUBSCRIBE_PORT, snd_seq_ioctl_unsubscribe_port },
	{ SNDRV_SEQ_IOCTL_CREATE_QUEUE, snd_seq_ioctl_create_queue },
	{ SNDRV_SEQ_IOCTL_DELETE_QUEUE, snd_seq_ioctl_delete_queue },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_INFO, snd_seq_ioctl_get_queue_info },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_INFO, snd_seq_ioctl_set_queue_info },
	{ SNDRV_SEQ_IOCTL_GET_NAMED_QUEUE, snd_seq_ioctl_get_named_queue },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_STATUS, snd_seq_ioctl_get_queue_status },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_TEMPO, snd_seq_ioctl_get_queue_tempo },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_TEMPO, snd_seq_ioctl_set_queue_tempo },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_TIMER, snd_seq_ioctl_get_queue_timer },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_TIMER, snd_seq_ioctl_set_queue_timer },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_CLIENT, snd_seq_ioctl_get_queue_client },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT, snd_seq_ioctl_set_queue_client },
	{ SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, snd_seq_ioctl_get_client_pool },
	{ SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, snd_seq_ioctl_set_client_pool },
	{ SNDRV_SEQ_IOCTL_GET_SUBSCRIPTION, snd_seq_ioctl_get_subscription },
	{ SNDRV_SEQ_IOCTL_QUERY_NEXT_CLIENT, snd_seq_ioctl_query_next_client },
	{ SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT, snd_seq_ioctl_query_next_port },
	{ SNDRV_SEQ_IOCTL_REMOVE_EVENTS, snd_seq_ioctl_remove_events },
	{ SNDRV_SEQ_IOCTL_QUERY_SUBS, snd_seq_ioctl_query_subs },
	{ 0, NULL },
};

static int snd_seq_do_ioctl(struct snd_seq_client *client, unsigned int cmd,
			    void __user *arg)
{
	struct seq_ioctl_table *p;

	switch (cmd) {
	case SNDRV_SEQ_IOCTL_PVERSION:
		/* return sequencer version number */
		return put_user(SNDRV_SEQ_VERSION, (int __user *)arg) ? -EFAULT : 0;
	case SNDRV_SEQ_IOCTL_CLIENT_ID:
		/* return the id of this client */
		return put_user(client->number, (int __user *)arg) ? -EFAULT : 0;
	}

	if (! arg)
		return -EFAULT;
	for (p = ioctl_tables; p->cmd; p++) {
		if (p->cmd == cmd)
			return p->func(client, arg);
	}
	snd_printd("seq unknown ioctl() 0x%x (type='%c', number=0x%02x)\n",
		   cmd, _IOC_TYPE(cmd), _IOC_NR(cmd));
	return -ENOTTY;
}


static long snd_seq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct snd_seq_client *client = file->private_data;

	if (snd_BUG_ON(!client))
		return -ENXIO;

	return snd_seq_do_ioctl(client, cmd, (void __user *) arg);
}

#ifdef CONFIG_COMPAT
#include "seq_compat.c"
#else
#define snd_seq_ioctl_compat	NULL
#endif

/* -------------------------------------------------------- */


/* exported to kernel modules */
int snd_seq_create_kernel_client(struct snd_card *card, int client_index,
				 const char *name_fmt, ...)
{
	struct snd_seq_client *client;
	va_list args;

	if (snd_BUG_ON(in_interrupt()))
		return -EBUSY;

	if (card && client_index >= SNDRV_SEQ_CLIENTS_PER_CARD)
		return -EINVAL;
	if (card == NULL && client_index >= SNDRV_SEQ_GLOBAL_CLIENTS)
		return -EINVAL;

	if (mutex_lock_interruptible(&register_mutex))
		return -ERESTARTSYS;

	if (card) {
		client_index += SNDRV_SEQ_GLOBAL_CLIENTS
			+ card->number * SNDRV_SEQ_CLIENTS_PER_CARD;
		if (client_index >= SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN)
			client_index = -1;
	}

	/* empty write queue as default */
	client = seq_create_client1(client_index, 0);
	if (client == NULL) {
		mutex_unlock(&register_mutex);
		return -EBUSY;	/* failure code */
	}
	usage_alloc(&client_usage, 1);

	client->accept_input = 1;
	client->accept_output = 1;

	va_start(args, name_fmt);
	vsnprintf(client->name, sizeof(client->name), name_fmt, args);
	va_end(args);

	client->type = KERNEL_CLIENT;
	mutex_unlock(&register_mutex);

	/* make others aware of this new client */
	snd_seq_system_client_ev_client_start(client->number);

	/* return client number to caller */
	return client->number;
}

EXPORT_SYMBOL(snd_seq_create_kernel_client);
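
/*
 * Illustrative sketch (not part of the original source): how a driver module
 * might register itself as a kernel sequencer client.  The card pointer,
 * index and name are hypothetical placeholders.
 *
 *	int my_client;
 *
 *	my_client = snd_seq_create_kernel_client(card, 0, "My MIDI Device");
 *	if (my_client < 0)
 *		return my_client;			negative value = error code
 *	...
 *	snd_seq_delete_kernel_client(my_client);	on teardown
 */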

/* exported to kernel modules */
int snd_seq_delete_kernel_client(int client)
{
	struct snd_seq_client *ptr;

	if (snd_BUG_ON(in_interrupt()))
		return -EBUSY;

	ptr = clientptr(client);
	if (ptr == NULL)
		return -EINVAL;

	seq_free_client(ptr);
	kfree(ptr);
	return 0;
}

EXPORT_SYMBOL(snd_seq_delete_kernel_client);

/* skeleton to enqueue event, called from snd_seq_kernel_client_enqueue
 * and snd_seq_kernel_client_enqueue_blocking
 */
static int kernel_client_enqueue(int client, struct snd_seq_event *ev,
				 struct file *file, int blocking,
				 int atomic, int hop)
{
	struct snd_seq_client *cptr;
	int result;

	if (snd_BUG_ON(!ev))
		return -EINVAL;

	if (ev->type == SNDRV_SEQ_EVENT_NONE)
		return 0; /* ignore this */
	if (ev->type == SNDRV_SEQ_EVENT_KERNEL_ERROR)
		return -EINVAL; /* quoted events can't be enqueued */

	/* fill in client number */
	ev->source.client = client;

	if (check_event_type_and_length(ev))
		return -EINVAL;

	cptr = snd_seq_client_use_ptr(client);
	if (cptr == NULL)
		return -EINVAL;

	if (! cptr->accept_output)
		result = -EPERM;
	else /* send it */
		result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, atomic, hop);

	snd_seq_client_unlock(cptr);
	return result;
}

/*
 * exported, called by kernel clients to enqueue events (w/o blocking)
 *
 * RETURN VALUE: zero on success, negative error code on failure
 */
int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event * ev,
				  int atomic, int hop)
{
	return kernel_client_enqueue(client, ev, NULL, 0, atomic, hop);
}

EXPORT_SYMBOL(snd_seq_kernel_client_enqueue);
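
/*
 * Illustrative sketch (not part of the original source): a kernel client
 * scheduling a note-on event on a queue.  "my_client", "my_port" and
 * "my_queue" are hypothetical; the source client number is filled in by the
 * enqueue helper itself.
 *
 *	struct snd_seq_event ev;
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.type = SNDRV_SEQ_EVENT_NOTEON;
 *	ev.flags = SNDRV_SEQ_TIME_STAMP_TICK;
 *	ev.queue = my_queue;
 *	ev.time.tick = 96;			one quarter note in, at 96 ppq
 *	ev.source.port = my_port;
 *	ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
 *	ev.data.note.channel = 0;
 *	ev.data.note.note = 60;
 *	ev.data.note.velocity = 100;
 *	err = snd_seq_kernel_client_enqueue(my_client, &ev, 0, 0);
 */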

/*
 * exported, called by kernel clients to enqueue events (with blocking)
 *
 * RETURN VALUE: zero on success, negative error code on failure
 */
int snd_seq_kernel_client_enqueue_blocking(int client, struct snd_seq_event * ev,
					   struct file *file,
					   int atomic, int hop)
{
	return kernel_client_enqueue(client, ev, file, 1, atomic, hop);
}

EXPORT_SYMBOL(snd_seq_kernel_client_enqueue_blocking);

/*
 * exported, called by kernel clients to dispatch events directly to other
 * clients, bypassing the queues.  Event time-stamp will be updated.
 *
 * RETURN VALUE: negative = delivery failed,
 *		 zero, or positive: the number of delivered events
 */
int snd_seq_kernel_client_dispatch(int client, struct snd_seq_event * ev,
				   int atomic, int hop)
{
	struct snd_seq_client *cptr;
	int result;

	if (snd_BUG_ON(!ev))
		return -EINVAL;

	/* fill in client number */
	ev->queue = SNDRV_SEQ_QUEUE_DIRECT;
	ev->source.client = client;

	if (check_event_type_and_length(ev))
		return -EINVAL;

	cptr = snd_seq_client_use_ptr(client);
	if (cptr == NULL)
		return -EINVAL;

	if (!cptr->accept_output)
		result = -EPERM;
	else
		result = snd_seq_deliver_event(cptr, ev, atomic, hop);

	snd_seq_client_unlock(cptr);
	return result;
}

EXPORT_SYMBOL(snd_seq_kernel_client_dispatch);

/*
 * exported, called by kernel clients to perform the same functions as the
 * userland ioctl()s
 */
int snd_seq_kernel_client_ctl(int clientid, unsigned int cmd, void *arg)
{
	struct snd_seq_client *client;
	mm_segment_t fs;
	int result;

	client = clientptr(clientid);
	if (client == NULL)
		return -ENXIO;
	fs = snd_enter_user();
	result = snd_seq_do_ioctl(client, cmd, (void __force __user *)arg);
	snd_leave_user(fs);
	return result;
}

EXPORT_SYMBOL(snd_seq_kernel_client_ctl);
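
/*
 * Illustrative sketch (not part of the original source): using the ctl helper
 * above to create a port from a kernel client, mirroring what user space
 * would do with the CREATE_PORT ioctl.  "my_client" and the capability choice
 * are hypothetical.
 *
 *	struct snd_seq_port_info pinfo;
 *
 *	memset(&pinfo, 0, sizeof(pinfo));
 *	pinfo.addr.client = my_client;
 *	strcpy(pinfo.name, "My Port");
 *	pinfo.capability = SNDRV_SEQ_PORT_CAP_READ | SNDRV_SEQ_PORT_CAP_SUBS_READ;
 *	err = snd_seq_kernel_client_ctl(my_client, SNDRV_SEQ_IOCTL_CREATE_PORT, &pinfo);
 */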

/* exported (for OSS emulator) */
int snd_seq_kernel_client_write_poll(int clientid, struct file *file, poll_table *wait)
{
	struct snd_seq_client *client;

	client = clientptr(clientid);
	if (client == NULL)
		return -ENXIO;

	if (! snd_seq_write_pool_allocated(client))
		return 1;
	if (snd_seq_pool_poll_wait(client->pool, file, wait))
		return 1;
	return 0;
}

EXPORT_SYMBOL(snd_seq_kernel_client_write_poll);

/*---------------------------------------------------------------------------*/

#ifdef CONFIG_PROC_FS
/*
 *  /proc interface
 */
static void snd_seq_info_dump_subscribers(struct snd_info_buffer *buffer,
					  struct snd_seq_port_subs_info *group,
					  int is_src, char *msg)
{
	struct list_head *p;
	struct snd_seq_subscribers *s;
	int count = 0;

	down_read(&group->list_mutex);
	if (list_empty(&group->list_head)) {
		up_read(&group->list_mutex);
		return;
	}
	snd_iprintf(buffer, msg);
	list_for_each(p, &group->list_head) {
		if (is_src)
			s = list_entry(p, struct snd_seq_subscribers, src_list);
		else
			s = list_entry(p, struct snd_seq_subscribers, dest_list);
		if (count++)
			snd_iprintf(buffer, ", ");
		snd_iprintf(buffer, "%d:%d",
			    is_src ? s->info.dest.client : s->info.sender.client,
			    is_src ? s->info.dest.port : s->info.sender.port);
		if (s->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
			snd_iprintf(buffer, "[%c:%d]", ((s->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL) ? 'r' : 't'), s->info.queue);
		if (group->exclusive)
			snd_iprintf(buffer, "[ex]");
	}
	up_read(&group->list_mutex);
	snd_iprintf(buffer, "\n");
}

#define FLAG_PERM_RD(perm) ((perm) & SNDRV_SEQ_PORT_CAP_READ ? ((perm) & SNDRV_SEQ_PORT_CAP_SUBS_READ ? 'R' : 'r') : '-')
#define FLAG_PERM_WR(perm) ((perm) & SNDRV_SEQ_PORT_CAP_WRITE ? ((perm) & SNDRV_SEQ_PORT_CAP_SUBS_WRITE ? 'W' : 'w') : '-')
#define FLAG_PERM_EX(perm) ((perm) & SNDRV_SEQ_PORT_CAP_NO_EXPORT ? '-' : 'e')

#define FLAG_PERM_DUPLEX(perm) ((perm) & SNDRV_SEQ_PORT_CAP_DUPLEX ? 'X' : '-')

static void snd_seq_info_dump_ports(struct snd_info_buffer *buffer,
				    struct snd_seq_client *client)
{
	struct snd_seq_client_port *p;

	mutex_lock(&client->ports_mutex);
	list_for_each_entry(p, &client->ports_list_head, list) {
		snd_iprintf(buffer, "  Port %3d : \"%s\" (%c%c%c%c)\n",
			    p->addr.port, p->name,
			    FLAG_PERM_RD(p->capability),
			    FLAG_PERM_WR(p->capability),
			    FLAG_PERM_EX(p->capability),
			    FLAG_PERM_DUPLEX(p->capability));
		snd_seq_info_dump_subscribers(buffer, &p->c_src, 1, "    Connecting To: ");
		snd_seq_info_dump_subscribers(buffer, &p->c_dest, 0, "    Connected From: ");
	}
	mutex_unlock(&client->ports_mutex);
}


/* exported to seq_info.c */
void snd_seq_info_clients_read(struct snd_info_entry *entry,
			       struct snd_info_buffer *buffer)
{
	int c;
	struct snd_seq_client *client;

	snd_iprintf(buffer, "Client info\n");
	snd_iprintf(buffer, "  cur  clients : %d\n", client_usage.cur);
	snd_iprintf(buffer, "  peak clients : %d\n", client_usage.peak);
	snd_iprintf(buffer, "  max  clients : %d\n", SNDRV_SEQ_MAX_CLIENTS);
	snd_iprintf(buffer, "\n");

	/* list the client table */
	for (c = 0; c < SNDRV_SEQ_MAX_CLIENTS; c++) {
		client = snd_seq_client_use_ptr(c);
		if (client == NULL)
			continue;
		if (client->type == NO_CLIENT) {
			snd_seq_client_unlock(client);
			continue;
		}

		snd_iprintf(buffer, "Client %3d : \"%s\" [%s]\n",
			    c, client->name,
			    client->type == USER_CLIENT ? "User" : "Kernel");
		snd_seq_info_dump_ports(buffer, client);
		if (snd_seq_write_pool_allocated(client)) {
			snd_iprintf(buffer, "  Output pool :\n");
			snd_seq_info_pool(buffer, client->pool, "    ");
		}
		if (client->type == USER_CLIENT && client->data.user.fifo &&
		    client->data.user.fifo->pool) {
			snd_iprintf(buffer, "  Input pool :\n");
			snd_seq_info_pool(buffer, client->data.user.fifo->pool, "    ");
		}
		snd_seq_client_unlock(client);
	}
}
#endif /* CONFIG_PROC_FS */

/*---------------------------------------------------------------------------*/


/*
 *  REGISTRATION PART
 */

static const struct file_operations snd_seq_f_ops =
{
	.owner =	THIS_MODULE,
	.read =		snd_seq_read,
	.write =	snd_seq_write,
	.open =		snd_seq_open,
	.release =	snd_seq_release,
	.llseek =	no_llseek,
	.poll =		snd_seq_poll,
	.unlocked_ioctl =	snd_seq_ioctl,
	.compat_ioctl =	snd_seq_ioctl_compat,
};

/*
 * register sequencer device
 */
int __init snd_sequencer_device_init(void)
{
	int err;

	if (mutex_lock_interruptible(&register_mutex))
		return -ERESTARTSYS;

	if ((err = snd_register_device(SNDRV_DEVICE_TYPE_SEQUENCER, NULL, 0,
				       &snd_seq_f_ops, NULL, "seq")) < 0) {
		mutex_unlock(&register_mutex);
		return err;
	}

	mutex_unlock(&register_mutex);

	return 0;
}



/*
 * unregister sequencer device
 */
void __exit snd_sequencer_device_done(void)
{
	snd_unregister_device(SNDRV_DEVICE_TYPE_SEQUENCER, NULL, 0);
}