/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *              Changes to use preallocated sigqueue structures
 *              to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"      /* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */
static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
        return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
        /* Is it explicitly or implicitly ignored? */
        return handler == SIG_IGN ||
                (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
        void __user *handler;

        handler = sig_handler(t, sig);

        if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
            handler == SIG_DFL && !(force && sig_kernel_only(sig)))
                return 1;

        return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, bool force)
{
        /*
         * Blocked signals are never ignored, since the
         * signal handler may change by the time it is
         * unblocked.
         */
        if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
                return 0;
        /*
         * Tracers may want to know about even ignored signals, except for
         * SIGKILL, which can't be reported anyway but which a
         * SIGNAL_UNKILLABLE task can still ignore.
         */
        if (t->ptrace && sig != SIGKILL)
                return 0;

        return sig_task_ignored(t, sig, force);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
        unsigned long ready;
        long i;

        switch (_NSIG_WORDS) {
        default:
                for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
                        ready |= signal->sig[i] &~ blocked->sig[i];
                break;

        case 4: ready  = signal->sig[3] &~ blocked->sig[3];
                ready |= signal->sig[2] &~ blocked->sig[2];
                ready |= signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 2: ready  = signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 1: ready  = signal->sig[0] &~ blocked->sig[0];
        }
        return ready != 0;
}
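
/* Any signal in the sigpending set @p that is not blocked by mask @b? */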
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
        if ((t->jobctl & JOBCTL_PENDING_MASK) ||
            PENDING(&t->pending, &t->blocked) ||
            PENDING(&t->signal->shared_pending, &t->blocked)) {
                set_tsk_thread_flag(t, TIF_SIGPENDING);
                return 1;
        }
        /*
         * We must never clear the flag in another thread, or in current
         * when it's possible the current syscall is returning -ERESTART*.
         * So we don't clear it here; only callers that know it is safe
         * may clear it.
         */
        return 0;
}
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
        if (recalc_sigpending_tsk(t))
                signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
        if (!recalc_sigpending_tsk(current) && !freezing(current))
                clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
        (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
         sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
        unsigned long i, *s, *m, x;
        int sig = 0;

        s = pending->signal.sig;
        m = mask->sig;

        /*
         * Handle the first word specially: it contains the
         * synchronous signals that need to be dequeued first.
         */
        x = *s &~ *m;
        if (x) {
                if (x & SYNCHRONOUS_MASK)
                        x &= SYNCHRONOUS_MASK;
                sig = ffz(~x) + 1;
                return sig;
        }

        switch (_NSIG_WORDS) {
        default:
                for (i = 1; i < _NSIG_WORDS; ++i) {
                        x = *++s &~ *++m;
                        if (!x)
                                continue;
                        sig = ffz(~x) + i*_NSIG_BPW + 1;
                        break;
                }
                break;

        case 2:
                x = s[1] &~ m[1];
                if (!x)
                        break;
                sig = ffz(~x) + _NSIG_BPW + 1;
                break;

        case 1:
                /* Nothing to do */
                break;
        }

        return sig;
}

static inline void print_dropped_signal(int sig)
{
        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

        if (!print_fatal_signals)
                return;

        if (!__ratelimit(&ratelimit_state))
                return;

        pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
                current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl. @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING. If a stop signo is being set, the existing signo is
 * cleared. If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if it was a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
        BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
                        JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
        BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

        if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
                return false;

        if (mask & JOBCTL_STOP_SIGMASK)
                task->jobctl &= ~JOBCTL_STOP_SIGMASK;
        task->jobctl |= mask;
        return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
        if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
                task->jobctl &= ~JOBCTL_TRAPPING;
                smp_mb();       /* advised by wake_up_bit() */
                wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
        }
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
        BUG_ON(mask & ~JOBCTL_PENDING_MASK);

        if (mask & JOBCTL_STOP_PENDING)
                mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

        task->jobctl &= ~mask;

        if (!(task->jobctl & JOBCTL_PENDING_MASK))
                task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
        struct signal_struct *sig = task->signal;
        bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

        WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

        task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

        if (!consume)
                return false;

        if (!WARN_ON_ONCE(sig->group_stop_count == 0))
                sig->group_stop_count--;

        /*
         * Tell the caller to notify completion iff we are entering into a
         * fresh group stop.  Read comment in do_signal_stop() for details.
         */
        if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
                signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
                return true;
        }
        return false;
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
        struct sigqueue *q = NULL;
        struct user_struct *user;

        /*
         * Protect access to @t credentials. This can go away when all
         * callers hold rcu read lock.
         */
        rcu_read_lock();
        user = get_uid(__task_cred(t)->user);
        atomic_inc(&user->sigpending);
        rcu_read_unlock();

        if (override_rlimit ||
            atomic_read(&user->sigpending) <=
                        task_rlimit(t, RLIMIT_SIGPENDING)) {
                q = kmem_cache_alloc(sigqueue_cachep, flags);
        } else {
                print_dropped_signal(sig);
        }

        if (unlikely(q == NULL)) {
                atomic_dec(&user->sigpending);
                free_uid(user);
        } else {
                INIT_LIST_HEAD(&q->list);
                q->flags = 0;
                q->user = user;
        }

        return q;
}
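
/*
 * Preallocated entries are owned by a posix timer and are released
 * through sigqueue_free(), so skip them here.
 */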
static void __sigqueue_free(struct sigqueue *q)
{
        if (q->flags & SIGQUEUE_PREALLOC)
                return;
        atomic_dec(&q->user->sigpending);
        free_uid(q->user);
        kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
        struct sigqueue *q;

        sigemptyset(&queue->signal);
        while (!list_empty(&queue->list)) {
                q = list_entry(queue->list.next, struct sigqueue, list);
                list_del_init(&q->list);
                __sigqueue_free(q);
        }
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
        unsigned long flags;

        spin_lock_irqsave(&t->sighand->siglock, flags);
        clear_tsk_thread_flag(t, TIF_SIGPENDING);
        flush_sigqueue(&t->pending);
        flush_sigqueue(&t->signal->shared_pending);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
        sigset_t signal, retain;
        struct sigqueue *q, *n;

        signal = pending->signal;
        sigemptyset(&retain);

        list_for_each_entry_safe(q, n, &pending->list, list) {
                int sig = q->info.si_signo;

                if (likely(q->info.si_code != SI_TIMER)) {
                        sigaddset(&retain, sig);
                } else {
                        sigdelset(&signal, sig);
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }

        sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
        struct task_struct *tsk = current;
        unsigned long flags;

        spin_lock_irqsave(&tsk->sighand->siglock, flags);
        __flush_itimer_signals(&tsk->pending);
        __flush_itimer_signals(&tsk->signal->shared_pending);
        spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
        int i;

        for (i = 0; i < _NSIG; ++i)
                t->sighand->action[i].sa.sa_handler = SIG_IGN;

        flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
        int i;
        struct k_sigaction *ka = &t->sighand->action[0];

        for (i = _NSIG ; i != 0 ; i--) {
                if (force_default || ka->sa.sa_handler != SIG_IGN)
                        ka->sa.sa_handler = SIG_DFL;
                ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
                ka->sa.sa_restorer = NULL;
#endif
                sigemptyset(&ka->sa.sa_mask);
                ka++;
        }
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
        void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

        if (is_global_init(tsk))
                return 1;

        if (handler != SIG_IGN && handler != SIG_DFL)
                return 0;

        /* if ptraced, let the tracer determine */
        return !tsk->ptrace;
}
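
/*
 * Remove one queued instance of @sig from @list and copy its siginfo into
 * @info.  If no queued entry carried the signal (it was fast-pathed or the
 * queue was full), synthesize a bare SI_USER record instead.  @resched_timer
 * is set when a preallocated timer signal needs its timer rearmed.
 */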
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
                           bool *resched_timer)
{
        struct sigqueue *q, *first = NULL;

        /*
         * Collect the siginfo appropriate to this signal.  Check if
         * there is another siginfo for the same signal.
         */
        list_for_each_entry(q, &list->list, list) {
                if (q->info.si_signo == sig) {
                        if (first)
                                goto still_pending;
                        first = q;
                }
        }

        sigdelset(&list->signal, sig);

        if (first) {
still_pending:
                list_del_init(&first->list);
                copy_siginfo(info, &first->info);

                *resched_timer =
                        (first->flags & SIGQUEUE_PREALLOC) &&
                        (info->si_code == SI_TIMER) &&
                        (info->si_sys_private);

                __sigqueue_free(first);
        } else {
                /*
                 * Ok, it wasn't in the queue.  This must be
                 * a fast-pathed signal or we must have been
                 * out of queue space.  So zero out the info.
                 */
                info->si_signo = sig;
                info->si_errno = 0;
                info->si_code = SI_USER;
                info->si_pid = 0;
                info->si_uid = 0;
        }
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
                            siginfo_t *info, bool *resched_timer)
{
        int sig = next_signal(pending, mask);

        if (sig)
                collect_signal(sig, pending, info, resched_timer);
        return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
        bool resched_timer = false;
        int signr;

        /* We only dequeue private signals from ourselves, we don't let
         * signalfd steal them
         */
        signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
        if (!signr) {
                signr = __dequeue_signal(&tsk->signal->shared_pending,
                                         mask, info, &resched_timer);
                /*
                 * itimer signal?
                 *
                 * itimers are process shared and we restart periodic
                 * itimers in the signal delivery path to prevent DoS
                 * attacks in the high resolution timer case.  This is
                 * compliant with the old way of self-restarting
                 * itimers, as the SIGALRM is a legacy signal and only
                 * queued once.  Changing the restart behaviour to
                 * restart the timer in the signal dequeue path also
                 * reduces the timer noise on heavily loaded !highres
                 * systems.
                 */
                if (unlikely(signr == SIGALRM)) {
                        struct hrtimer *tmr = &tsk->signal->real_timer;

                        if (!hrtimer_is_queued(tmr) &&
                            tsk->signal->it_real_incr.tv64 != 0) {
                                hrtimer_forward(tmr, tmr->base->get_time(),
                                                tsk->signal->it_real_incr);
                                hrtimer_restart(tmr);
                        }
                }
        }

        recalc_sigpending();
        if (!signr)
                return 0;

        if (unlikely(sig_kernel_stop(signr))) {
                /*
                 * Set a marker that we have dequeued a stop signal.  Our
                 * caller might release the siglock and then the pending
                 * stop signal it is about to process is no longer in the
                 * pending bitmasks, but must still be cleared by a SIGCONT
                 * (and overruled by a SIGKILL).  So those cases clear this
                 * shared flag after we've set it.  Note that this flag may
                 * remain set after the signal we return is ignored or
                 * handled.  That doesn't matter because its only purpose
                 * is to alert stop-signal processing code when another
                 * processor has come along and cleared the flag.
                 */
                current->jobctl |= JOBCTL_STOP_DEQUEUED;
        }

        if (resched_timer) {
                /*
                 * Release the siglock to ensure proper locking order
                 * of timer locks outside of siglocks.  Note, we leave
                 * irqs disabled here, since the posix-timers code is
                 * about to disable them again anyway.
                 */
                spin_unlock(&tsk->sighand->siglock);
                do_schedule_next_timer(info);
                spin_lock(&tsk->sighand->siglock);
        }
        return signr;
}
/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
        set_tsk_thread_flag(t, TIF_SIGPENDING);
        /*
         * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
         * case.  We don't check t->state here because there is a race with it
         * executing on another processor and just now entering stopped state.
         * By using wake_up_state, we ensure the process will wake up and
         * handle its death signal.
         */
        if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
                kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
        struct sigqueue *q, *n;
        sigset_t m;

        sigandsets(&m, mask, &s->signal);
        if (sigisemptyset(&m))
                return 0;

        sigandnsets(&s->signal, &s->signal, mask);
        list_for_each_entry_safe(q, n, &s->list, list) {
                if (sigismember(mask, q->info.si_signo)) {
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }
        return 1;
}

static inline int is_si_special(const struct siginfo *info)
{
        return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
        return info == SEND_SIG_NOINFO ||
                (!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
        const struct cred *cred = current_cred();
        const struct cred *tcred = __task_cred(t);

        if (uid_eq(cred->euid, tcred->suid) ||
            uid_eq(cred->euid, tcred->uid)  ||
            uid_eq(cred->uid,  tcred->suid) ||
            uid_eq(cred->uid,  tcred->uid))
                return 1;

        if (ns_capable(tcred->user_ns, CAP_KILL))
                return 1;

        return 0;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
                                 struct task_struct *t)
{
        struct pid *sid;
        int error;

        if (!valid_signal(sig))
                return -EINVAL;

        if (!si_fromuser(info))
                return 0;

        error = audit_signal_info(sig, t); /* Let audit system see the signal */
        if (error)
                return error;

        if (!same_thread_group(current, t) &&
            !kill_ok_by_cred(t)) {
                switch (sig) {
                case SIGCONT:
                        sid = task_session(t);
                        /*
                         * We don't return the error if sid == NULL. The
                         * task was unhashed, the caller must notice this.
                         */
                        if (!sid || sid == task_session(current))
                                break;
                default:
                        return -EPERM;
                }
        }

        return security_task_kill(t, info, sig, 0);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap, which is cleared on the
 * next TRAP_STOP, to notify the ptracer of an event.  @t must have been
 * seized by the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it
 * can re-trap for the new event.  If trapped otherwise, the STOP trap
 * will eventually be taken without returning to userland after the
 * existing traps are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
        WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
        assert_spin_locked(&t->sighand->siglock);

        task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
        ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
        struct signal_struct *signal = p->signal;
        struct task_struct *t;
        sigset_t flush;

        if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
                if (!(signal->flags & SIGNAL_GROUP_EXIT))
                        return sig == SIGKILL;
                /*
                 * The process is in the middle of dying, nothing to do.
                 */
        } else if (sig_kernel_stop(sig)) {
                /*
                 * This is a stop signal.  Remove SIGCONT from all queues.
                 */
                siginitset(&flush, sigmask(SIGCONT));
                flush_sigqueue_mask(&flush, &signal->shared_pending);
                for_each_thread(p, t)
                        flush_sigqueue_mask(&flush, &t->pending);
        } else if (sig == SIGCONT) {
                unsigned int why;
                /*
                 * Remove all stop signals from all queues, wake all threads.
                 */
                siginitset(&flush, SIG_KERNEL_STOP_MASK);
                flush_sigqueue_mask(&flush, &signal->shared_pending);
                for_each_thread(p, t) {
                        flush_sigqueue_mask(&flush, &t->pending);
                        task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
                        if (likely(!(t->ptrace & PT_SEIZED)))
                                wake_up_state(t, __TASK_STOPPED);
                        else
                                ptrace_trap_notify(t);
                }

                /*
                 * Notify the parent with CLD_CONTINUED if we were stopped.
                 *
                 * If we were in the middle of a group stop, we pretend it
                 * was already finished, and then continued. Since SIGCHLD
                 * doesn't queue we report only CLD_STOPPED, as if the next
                 * CLD_CONTINUED was dropped.
                 */
                why = 0;
                if (signal->flags & SIGNAL_STOP_STOPPED)
                        why |= SIGNAL_CLD_CONTINUED;
                else if (signal->group_stop_count)
                        why |= SIGNAL_CLD_STOPPED;

                if (why) {
                        /*
                         * The first thread which returns from do_signal_stop()
                         * will take ->siglock, notice SIGNAL_CLD_MASK, and
                         * notify its parent. See get_signal_to_deliver().
                         */
                        signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
                        signal->group_stop_count = 0;
                        signal->group_exit_code = 0;
                }
        }

        return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
        if (sigismember(&p->blocked, sig))
                return 0;
        if (p->flags & PF_EXITING)
                return 0;
        if (sig == SIGKILL)
                return 1;
        if (task_is_stopped_or_traced(p))
                return 0;
        return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
        struct signal_struct *signal = p->signal;
        struct task_struct *t;

        /*
         * Now find a thread we can wake up to take the signal off the queue.
         *
         * If the main thread wants the signal, it gets first crack.
         * Probably the least surprising to the average bear.
         */
        if (wants_signal(sig, p))
                t = p;
        else if (!group || thread_group_empty(p))
                /*
                 * There is just one thread and it does not need to be woken.
                 * It will dequeue unblocked signals before it runs again.
                 */
                return;
        else {
                /*
                 * Otherwise try to find a suitable thread.
                 */
                t = signal->curr_target;
                while (!wants_signal(sig, t)) {
                        t = next_thread(t);
                        if (t == signal->curr_target)
                                /*
                                 * No thread needs to be woken.
                                 * Any eligible threads will see
                                 * the signal in the queue soon.
                                 */
                                return;
                }
                signal->curr_target = t;
        }

        /*
         * Found a killable thread.  If the signal will be fatal,
         * then start taking the whole group down immediately.
         */
        if (sig_fatal(p, sig) &&
            !(signal->flags & SIGNAL_GROUP_EXIT) &&
            !sigismember(&t->real_blocked, sig) &&
            (sig == SIGKILL || !p->ptrace)) {
                /*
                 * This signal will be fatal to the whole group.
                 */
                if (!sig_kernel_coredump(sig)) {
                        /*
                         * Start a group exit and wake everybody up.
                         * This way we don't have other threads
                         * running and doing things after a slower
                         * thread has the fatal signal pending.
                         */
                        signal->flags = SIGNAL_GROUP_EXIT;
                        signal->group_exit_code = sig;
                        signal->group_stop_count = 0;
                        t = p;
                        do {
                                task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                                sigaddset(&t->pending.signal, SIGKILL);
                                signal_wake_up(t, 1);
                        } while_each_thread(p, t);
                        return;
                }
        }

        /*
         * The signal is already in the shared-pending queue.
         * Tell the chosen thread to wake up and dequeue it.
         */
        signal_wake_up(t, sig == SIGKILL);
        return;
}
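
/*
 * Legacy (non-realtime) signals do not queue: a second instance of a signal
 * that is already pending is simply dropped.
 */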
static inline int legacy_queue(struct sigpending *signals, int sig)
{
        return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
        if (current_user_ns() == task_cred_xxx(t, user_ns))
                return;

        if (SI_FROMKERNEL(info))
                return;

        rcu_read_lock();
        info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
                                        make_kuid(current_user_ns(), info->si_uid));
        rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
        return;
}
#endif

static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
                         int group, int from_ancestor_ns)
{
        struct sigpending *pending;
        struct sigqueue *q;
        int override_rlimit;
        int ret = 0, result;

        assert_spin_locked(&t->sighand->siglock);

        result = TRACE_SIGNAL_IGNORED;
        if (!prepare_signal(sig, t,
                        from_ancestor_ns || (info == SEND_SIG_FORCED)))
                goto ret;

        pending = group ? &t->signal->shared_pending : &t->pending;
        /*
         * Short-circuit ignored signals and support queuing
         * exactly one non-rt signal, so that we can get more
         * detailed information about the cause of the signal.
         */
        result = TRACE_SIGNAL_ALREADY_PENDING;
        if (legacy_queue(pending, sig))
                goto ret;

        result = TRACE_SIGNAL_DELIVERED;
        /*
         * fast-pathed signals for kernel-internal things like SIGSTOP
         * or SIGKILL.
         */
        if (info == SEND_SIG_FORCED)
                goto out_set;

        /*
         * Real-time signals must be queued if sent by sigqueue, or
         * some other real-time mechanism.  It is implementation
         * defined whether kill() does so.  We attempt to do so, on
         * the principle of least surprise, but since kill is not
         * allowed to fail with EAGAIN when low on memory we just
         * make sure at least one signal gets delivered and don't
         * pass on the info struct.
         */
        if (sig < SIGRTMIN)
                override_rlimit = (is_si_special(info) || info->si_code >= 0);
        else
                override_rlimit = 0;

        q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
                override_rlimit);
        if (q) {
                list_add_tail(&q->list, &pending->list);
                switch ((unsigned long) info) {
                case (unsigned long) SEND_SIG_NOINFO:
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_USER;
                        q->info.si_pid = task_tgid_nr_ns(current,
                                                        task_active_pid_ns(t));
                        q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
                        break;
                case (unsigned long) SEND_SIG_PRIV:
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_KERNEL;
                        q->info.si_pid = 0;
                        q->info.si_uid = 0;
                        break;
                default:
                        copy_siginfo(&q->info, info);
                        if (from_ancestor_ns)
                                q->info.si_pid = 0;
                        break;
                }

                userns_fixup_signal_uid(&q->info, t);

        } else if (!is_si_special(info)) {
                if (sig >= SIGRTMIN && info->si_code != SI_USER) {
                        /*
                         * Queue overflow, abort.  We may abort if the
                         * signal was rt and sent by user using something
                         * other than kill().
                         */
                        result = TRACE_SIGNAL_OVERFLOW_FAIL;
                        ret = -EAGAIN;
                        goto ret;
                } else {
                        /*
                         * This is a silent loss of information.  We still
                         * send the signal, but the *info bits are lost.
                         */
                        result = TRACE_SIGNAL_LOSE_INFO;
                }
        }

out_set:
        signalfd_notify(t, sig);
        sigaddset(&pending->signal, sig);
        complete_signal(sig, t, group);
ret:
        trace_signal_generate(sig, info, t, group, result);
        return ret;
}
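
/*
 * Like __send_signal(), but works out whether the signal was sent by a user
 * from an ancestor pid namespace, in which case __send_signal() clears
 * si_pid, which would be meaningless in the receiver's namespace.
 */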
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
                        int group)
{
        int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
        from_ancestor_ns = si_fromuser(info) &&
                           !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

        return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(int signr)
{
        struct pt_regs *regs = signal_pt_regs();

        pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
        pr_info("code at %08lx: ", regs->ip);
        {
                int i;
                for (i = 0; i < 16; i++) {
                        unsigned char insn;

                        if (get_user(insn, (unsigned char *)(regs->ip + i)))
                                break;
                        pr_cont("%02x ", insn);
                }
        }
        pr_cont("\n");
#endif
        preempt_disable();
        show_regs(regs);
        preempt_enable();
}
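
/* Enabled with the "print-fatal-signals=1" boot parameter. */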
static int __init setup_print_fatal_signals(char *str)
{
        get_option(&str, &print_fatal_signals);

        return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
        return send_signal(sig, info, t, 0);
}
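
/*
 * Common helper: take the target's sighand lock, send, unlock.  Fails with
 * -ESRCH if the task is already being released.
 */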
int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
                        bool group)
{
        unsigned long flags;
        int ret = -ESRCH;

        if (lock_task_sighand(p, &flags)) {
                ret = send_signal(sig, info, p, group);
                unlock_task_sighand(p, &flags);
        }

        return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
        unsigned long int flags;
        int ret, blocked, ignored;
        struct k_sigaction *action;

        spin_lock_irqsave(&t->sighand->siglock, flags);
        action = &t->sighand->action[sig-1];
        ignored = action->sa.sa_handler == SIG_IGN;
        blocked = sigismember(&t->blocked, sig);
        if (blocked || ignored) {
                action->sa.sa_handler = SIG_DFL;
                if (blocked) {
                        sigdelset(&t->blocked, sig);
                        recalc_sigpending_and_wake(t);
                }
        }
        if (action->sa.sa_handler == SIG_DFL)
                t->signal->flags &= ~SIGNAL_UNKILLABLE;
        ret = specific_send_sig_info(sig, info, t);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);

        return ret;
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
        struct task_struct *t = p;
        int count = 0;

        p->signal->group_stop_count = 0;

        while_each_thread(p, t) {
                task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                count++;

                /* Don't bother with already dead threads */
                if (t->exit_state)
                        continue;
                sigaddset(&t->pending.signal, SIGKILL);
                signal_wake_up(t, 1);
        }

        return count;
}
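
/*
 * Pin down and lock @tsk->sighand.  Returns the locked sighand, or NULL if
 * the task is exiting and its sighand has already been detached.
 */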
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
                                           unsigned long *flags)
{
        struct sighand_struct *sighand;

        for (;;) {
                /*
                 * Disable interrupts early to avoid deadlocks.
                 * See rcu_read_unlock() comment header for details.
                 */
                local_irq_save(*flags);
                rcu_read_lock();
                sighand = rcu_dereference(tsk->sighand);
                if (unlikely(sighand == NULL)) {
                        rcu_read_unlock();
                        local_irq_restore(*flags);
                        break;
                }
                /*
                 * This sighand can be already freed and even reused, but
                 * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
                 * initializes ->siglock: this slab can't go away, it has
                 * the same object type, ->siglock can't be reinitialized.
                 *
                 * We need to ensure that tsk->sighand is still the same
                 * after we take the lock, we can race with de_thread() or
                 * __exit_signal().  In the latter case the next iteration
                 * must see ->sighand == NULL.
                 */
                spin_lock(&sighand->siglock);
                if (likely(sighand == tsk->sighand)) {
                        rcu_read_unlock();
                        break;
                }
                spin_unlock(&sighand->siglock);
                rcu_read_unlock();
                local_irq_restore(*flags);
        }

        return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        int ret;

        rcu_read_lock();
        ret = check_kill_permission(sig, info, p);
        rcu_read_unlock();

        if (!ret && sig)
                ret = do_send_sig_info(sig, info, p, true);

        return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
        struct task_struct *p = NULL;
        int retval, success;

        success = 0;
        retval = -ESRCH;
        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                int err = group_send_sig_info(sig, info, p);
                success |= !err;
                retval = err;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
        return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
        int error = -ESRCH;
        struct task_struct *p;

        for (;;) {
                rcu_read_lock();
                p = pid_task(pid, PIDTYPE_PID);
                if (p)
                        error = group_send_sig_info(sig, info, p);
                rcu_read_unlock();
                if (likely(!p || error != -ESRCH))
                        return error;

                /*
                 * The task was unhashed in between, try again.  If it
                 * is dead, pid_task() will return NULL, if we race with
                 * de_thread() it will find the new leader.
                 */
        }
}

int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
        int error;

        rcu_read_lock();
        error = kill_pid_info(sig, info, find_vpid(pid));
        rcu_read_unlock();
        return error;
}

static int kill_as_cred_perm(const struct cred *cred,
                             struct task_struct *target)
{
        const struct cred *pcred = __task_cred(target);

        if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
            !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
                return 0;
        return 1;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
                          const struct cred *cred, u32 secid)
{
        int ret = -EINVAL;
        struct task_struct *p;
        unsigned long flags;

        if (!valid_signal(sig))
                return ret;

        rcu_read_lock();
        p = pid_task(pid, PIDTYPE_PID);
        if (!p) {
                ret = -ESRCH;
                goto out_unlock;
        }
        if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
                ret = -EPERM;
                goto out_unlock;
        }
        ret = security_task_kill(p, info, sig, secid);
        if (ret)
                goto out_unlock;

        if (sig) {
                if (lock_task_sighand(p, &flags)) {
                        ret = __send_signal(sig, info, p, 1, 0);
                        unlock_task_sighand(p, &flags);
                } else
                        ret = -ESRCH;
        }
out_unlock:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
        int ret;

        if (pid > 0) {
                rcu_read_lock();
                ret = kill_pid_info(sig, info, find_vpid(pid));
                rcu_read_unlock();
                return ret;
        }

        /* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
        if (pid == INT_MIN)
                return -ESRCH;

        read_lock(&tasklist_lock);
        if (pid != -1) {
                ret = __kill_pgrp_info(sig, info,
                                pid ? find_vpid(-pid) : task_pgrp(current));
        } else {
                int retval = 0, count = 0;
                struct task_struct *p;

                for_each_process(p) {
                        if (task_pid_vnr(p) > 1 &&
                                        !same_thread_group(p, current)) {
                                int err = group_send_sig_info(sig, info, p);
                                ++count;
                                if (err != -EPERM)
                                        retval = err;
                        }
                }
                ret = count ? retval : -ESRCH;
        }
        read_unlock(&tasklist_lock);

        return ret;
}
  1244. /*
  1245. * These are for backward compatibility with the rest of the kernel source.
  1246. */
  1247. int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
  1248. {
  1249. /*
  1250. * Make sure legacy kernel users don't send in bad values
  1251. * (normal paths check this in check_kill_permission).
  1252. */
  1253. if (!valid_signal(sig))
  1254. return -EINVAL;
  1255. return do_send_sig_info(sig, info, p, false);
  1256. }
  1257. #define __si_special(priv) \
  1258. ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
  1259. int
  1260. send_sig(int sig, struct task_struct *p, int priv)
  1261. {
  1262. return send_sig_info(sig, __si_special(priv), p);
  1263. }
  1264. void
  1265. force_sig(int sig, struct task_struct *p)
  1266. {
  1267. force_sig_info(sig, SEND_SIG_PRIV, p);
  1268. }
  1269. /*
  1270. * When things go south during signal handling, we
  1271. * will force a SIGSEGV. And if the signal that caused
  1272. * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
  1274. */
  1275. int
  1276. force_sigsegv(int sig, struct task_struct *p)
  1277. {
  1278. if (sig == SIGSEGV) {
  1279. unsigned long flags;
  1280. spin_lock_irqsave(&p->sighand->siglock, flags);
  1281. p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
  1282. spin_unlock_irqrestore(&p->sighand->siglock, flags);
  1283. }
  1284. force_sig(SIGSEGV, p);
  1285. return 0;
  1286. }
  1287. int kill_pgrp(struct pid *pid, int sig, int priv)
  1288. {
  1289. int ret;
  1290. read_lock(&tasklist_lock);
  1291. ret = __kill_pgrp_info(sig, __si_special(priv), pid);
  1292. read_unlock(&tasklist_lock);
  1293. return ret;
  1294. }
  1295. EXPORT_SYMBOL(kill_pgrp);
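/*
 * Usage sketch (illustrative): a tty-style caller that holds a counted
 * struct pid reference for the group. tty_get_pgrp() is from the tty
 * layer and is used here only as an example source of such a reference:
 *
 *	struct pid *pgrp = tty_get_pgrp(tty);
 *	if (pgrp) {
 *		kill_pgrp(pgrp, SIGHUP, 1);	// priv=1 -> SEND_SIG_PRIV
 *		put_pid(pgrp);
 *	}
 */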
  1296. int kill_pid(struct pid *pid, int sig, int priv)
  1297. {
  1298. return kill_pid_info(sig, __si_special(priv), pid);
  1299. }
  1300. EXPORT_SYMBOL(kill_pid);
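/*
 * Usage sketch (illustrative): a driver holding a counted struct pid
 * reference, e.g. taken with get_task_pid() at registration time, can
 * signal the task later without worrying about PID reuse:
 *
 *	struct pid *owner = get_task_pid(current, PIDTYPE_PID);
 *	...
 *	kill_pid(owner, SIGIO, 1);
 *	put_pid(owner);
 */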
  1301. /*
  1302. * These functions support sending signals using preallocated sigqueue
  1303. * structures. This is needed "because realtime applications cannot
  1304. * afford to lose notifications of asynchronous events, like timer
  1305. * expirations or I/O completions". In the case of POSIX Timers
  1306. * we allocate the sigqueue structure from the timer_create. If this
  1307. * allocation fails we are able to report the failure to the application
  1308. * with an EAGAIN error.
  1309. */
  1310. struct sigqueue *sigqueue_alloc(void)
  1311. {
  1312. struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
  1313. if (q)
  1314. q->flags |= SIGQUEUE_PREALLOC;
  1315. return q;
  1316. }
  1317. void sigqueue_free(struct sigqueue *q)
  1318. {
  1319. unsigned long flags;
  1320. spinlock_t *lock = &current->sighand->siglock;
  1321. BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
  1322. /*
  1323. * We must hold ->siglock while testing q->list
  1324. * to serialize with collect_signal() or with
  1325. * __exit_signal()->flush_sigqueue().
  1326. */
  1327. spin_lock_irqsave(lock, flags);
  1328. q->flags &= ~SIGQUEUE_PREALLOC;
  1329. /*
  1330. * If it is queued it will be freed when dequeued,
  1331. * like the "regular" sigqueue.
  1332. */
  1333. if (!list_empty(&q->list))
  1334. q = NULL;
  1335. spin_unlock_irqrestore(lock, flags);
  1336. if (q)
  1337. __sigqueue_free(q);
  1338. }
  1339. int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
  1340. {
  1341. int sig = q->info.si_signo;
  1342. struct sigpending *pending;
  1343. unsigned long flags;
  1344. int ret, result;
  1345. BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
  1346. ret = -1;
  1347. if (!likely(lock_task_sighand(t, &flags)))
  1348. goto ret;
  1349. ret = 1; /* the signal is ignored */
  1350. result = TRACE_SIGNAL_IGNORED;
  1351. if (!prepare_signal(sig, t, false))
  1352. goto out;
  1353. ret = 0;
  1354. if (unlikely(!list_empty(&q->list))) {
/*
 * If an SI_TIMER entry is already queued, just increment
 * the overrun count.
 */
  1359. BUG_ON(q->info.si_code != SI_TIMER);
  1360. q->info.si_overrun++;
  1361. result = TRACE_SIGNAL_ALREADY_PENDING;
  1362. goto out;
  1363. }
  1364. q->info.si_overrun = 0;
  1365. signalfd_notify(t, sig);
  1366. pending = group ? &t->signal->shared_pending : &t->pending;
  1367. list_add_tail(&q->list, &pending->list);
  1368. sigaddset(&pending->signal, sig);
  1369. complete_signal(sig, t, group);
  1370. result = TRACE_SIGNAL_DELIVERED;
  1371. out:
  1372. trace_signal_generate(sig, &q->info, t, group, result);
  1373. unlock_task_sighand(t, &flags);
  1374. ret:
  1375. return ret;
  1376. }
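/*
 * Lifecycle sketch (illustrative): this is roughly how the POSIX timer
 * code uses the preallocated sigqueue helpers above:
 *
 *	q = sigqueue_alloc();		// at timer_create(); NULL on failure,
 *					// reported to userspace as EAGAIN
 *	...
 *	send_sigqueue(q, task, group);	// at each expiry; if the entry is
 *					// still queued, only si_overrun bumps
 *	...
 *	sigqueue_free(q);		// at timer_delete()
 */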
  1377. /*
  1378. * Let a parent know about the death of a child.
  1379. * For a stopped/continued status change, use do_notify_parent_cldstop instead.
  1380. *
  1381. * Returns true if our parent ignored us and so we've switched to
  1382. * self-reaping.
  1383. */
  1384. bool do_notify_parent(struct task_struct *tsk, int sig)
  1385. {
  1386. struct siginfo info;
  1387. unsigned long flags;
  1388. struct sighand_struct *psig;
  1389. bool autoreap = false;
  1390. cputime_t utime, stime;
  1391. BUG_ON(sig == -1);
  1392. /* do_notify_parent_cldstop should have been called instead. */
  1393. BUG_ON(task_is_stopped_or_traced(tsk));
  1394. BUG_ON(!tsk->ptrace &&
  1395. (tsk->group_leader != tsk || !thread_group_empty(tsk)));
  1396. if (sig != SIGCHLD) {
  1397. /*
  1398. * This is only possible if parent == real_parent.
  1399. * Check if it has changed security domain.
  1400. */
  1401. if (tsk->parent_exec_id != tsk->parent->self_exec_id)
  1402. sig = SIGCHLD;
  1403. }
  1404. info.si_signo = sig;
  1405. info.si_errno = 0;
/*
 * We are under tasklist_lock here so our parent is tied to
 * us and cannot change.
 *
 * task_active_pid_ns will always return the same pid namespace
 * until a task passes through release_task.
 *
 * write_lock() currently calls preempt_disable(), which is the
 * same as rcu_read_lock(), but according to Oleg it is not
 * correct to rely on this.
 */
  1417. rcu_read_lock();
  1418. info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
  1419. info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
  1420. task_uid(tsk));
  1421. rcu_read_unlock();
  1422. task_cputime(tsk, &utime, &stime);
  1423. info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
  1424. info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);
  1425. info.si_status = tsk->exit_code & 0x7f;
  1426. if (tsk->exit_code & 0x80)
  1427. info.si_code = CLD_DUMPED;
  1428. else if (tsk->exit_code & 0x7f)
  1429. info.si_code = CLD_KILLED;
  1430. else {
  1431. info.si_code = CLD_EXITED;
  1432. info.si_status = tsk->exit_code >> 8;
  1433. }
  1434. psig = tsk->parent->sighand;
  1435. spin_lock_irqsave(&psig->siglock, flags);
  1436. if (!tsk->ptrace && sig == SIGCHLD &&
  1437. (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
  1438. (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
  1439. /*
  1440. * We are exiting and our parent doesn't care. POSIX.1
  1441. * defines special semantics for setting SIGCHLD to SIG_IGN
  1442. * or setting the SA_NOCLDWAIT flag: we should be reaped
  1443. * automatically and not left for our parent's wait4 call.
  1444. * Rather than having the parent do it as a magic kind of
  1445. * signal handler, we just set this to tell do_exit that we
  1446. * can be cleaned up without becoming a zombie. Note that
  1447. * we still call __wake_up_parent in this case, because a
  1448. * blocked sys_wait4 might now return -ECHILD.
  1449. *
  1450. * Whether we send SIGCHLD or not for SA_NOCLDWAIT
  1451. * is implementation-defined: we do (if you don't want
  1452. * it, just use SIG_IGN instead).
  1453. */
  1454. autoreap = true;
  1455. if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
  1456. sig = 0;
  1457. }
  1458. if (valid_signal(sig) && sig)
  1459. __group_send_sig_info(sig, &info, tsk->parent);
  1460. __wake_up_parent(tsk, tsk->parent);
  1461. spin_unlock_irqrestore(&psig->siglock, flags);
  1462. return autoreap;
  1463. }
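/*
 * Worked example of the exit_code decoding above (illustrative):
 *
 *	exit_code 0x0100  ->  CLD_EXITED, si_status = 1   (exit(1))
 *	exit_code 0x000b  ->  CLD_KILLED, si_status = 11  (killed by SIGSEGV)
 *	exit_code 0x008b  ->  CLD_DUMPED, si_status = 11  (SIGSEGV + core dump)
 */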
  1464. /**
  1465. * do_notify_parent_cldstop - notify parent of stopped/continued state change
  1466. * @tsk: task reporting the state change
  1467. * @for_ptracer: the notification is for ptracer
  1468. * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
  1469. *
  1470. * Notify @tsk's parent that the stopped/continued state has changed. If
  1471. * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
  1472. * If %true, @tsk reports to @tsk->parent which should be the ptracer.
  1473. *
  1474. * CONTEXT:
  1475. * Must be called with tasklist_lock at least read locked.
  1476. */
  1477. static void do_notify_parent_cldstop(struct task_struct *tsk,
  1478. bool for_ptracer, int why)
  1479. {
  1480. struct siginfo info;
  1481. unsigned long flags;
  1482. struct task_struct *parent;
  1483. struct sighand_struct *sighand;
  1484. cputime_t utime, stime;
  1485. if (for_ptracer) {
  1486. parent = tsk->parent;
  1487. } else {
  1488. tsk = tsk->group_leader;
  1489. parent = tsk->real_parent;
  1490. }
  1491. info.si_signo = SIGCHLD;
  1492. info.si_errno = 0;
  1493. /*
  1494. * see comment in do_notify_parent() about the following 4 lines
  1495. */
  1496. rcu_read_lock();
  1497. info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
  1498. info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
  1499. rcu_read_unlock();
  1500. task_cputime(tsk, &utime, &stime);
  1501. info.si_utime = cputime_to_clock_t(utime);
  1502. info.si_stime = cputime_to_clock_t(stime);
  1503. info.si_code = why;
  1504. switch (why) {
  1505. case CLD_CONTINUED:
  1506. info.si_status = SIGCONT;
  1507. break;
  1508. case CLD_STOPPED:
  1509. info.si_status = tsk->signal->group_exit_code & 0x7f;
  1510. break;
  1511. case CLD_TRAPPED:
  1512. info.si_status = tsk->exit_code & 0x7f;
  1513. break;
  1514. default:
  1515. BUG();
  1516. }
  1517. sighand = parent->sighand;
  1518. spin_lock_irqsave(&sighand->siglock, flags);
  1519. if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
  1520. !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
  1521. __group_send_sig_info(SIGCHLD, &info, parent);
  1522. /*
  1523. * Even if SIGCHLD is not generated, we must wake up wait4 calls.
  1524. */
  1525. __wake_up_parent(tsk, parent);
  1526. spin_unlock_irqrestore(&sighand->siglock, flags);
  1527. }
  1528. static inline int may_ptrace_stop(void)
  1529. {
  1530. if (!likely(current->ptrace))
  1531. return 0;
  1532. /*
  1533. * Are we in the middle of do_coredump?
  1534. * If so and our tracer is also part of the coredump stopping
  1535. * is a deadlock situation, and pointless because our tracer
  1536. * is dead so don't allow us to stop.
  1537. * If SIGKILL was already sent before the caller unlocked
  1538. * ->siglock we must see ->core_state != NULL. Otherwise it
  1539. * is safe to enter schedule().
  1540. *
 * This is almost outdated: a task with a pending SIGKILL can't
 * block in TASK_TRACED, but PTRACE_EVENT_EXIT can be reported
 * after SIGKILL was already dequeued.
  1544. */
  1545. if (unlikely(current->mm->core_state) &&
  1546. unlikely(current->mm == current->parent->mm))
  1547. return 0;
  1548. return 1;
  1549. }
  1550. /*
  1551. * Return non-zero if there is a SIGKILL that should be waking us up.
  1552. * Called with the siglock held.
  1553. */
  1554. static int sigkill_pending(struct task_struct *tsk)
  1555. {
  1556. return sigismember(&tsk->pending.signal, SIGKILL) ||
  1557. sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
  1558. }
  1559. /*
  1560. * This must be called with current->sighand->siglock held.
  1561. *
  1562. * This should be the path for all ptrace stops.
  1563. * We always set current->last_siginfo while stopped here.
  1564. * That makes it a way to test a stopped process for
  1565. * being ptrace-stopped vs being job-control-stopped.
  1566. *
  1567. * If we actually decide not to stop at all because the tracer
  1568. * is gone, we keep current->exit_code unless clear_code.
  1569. */
  1570. static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
  1571. __releases(&current->sighand->siglock)
  1572. __acquires(&current->sighand->siglock)
  1573. {
  1574. bool gstop_done = false;
  1575. if (arch_ptrace_stop_needed(exit_code, info)) {
  1576. /*
  1577. * The arch code has something special to do before a
  1578. * ptrace stop. This is allowed to block, e.g. for faults
  1579. * on user stack pages. We can't keep the siglock while
  1580. * calling arch_ptrace_stop, so we must release it now.
  1581. * To preserve proper semantics, we must do this before
  1582. * any signal bookkeeping like checking group_stop_count.
  1583. * Meanwhile, a SIGKILL could come in before we retake the
  1584. * siglock. That must prevent us from sleeping in TASK_TRACED.
  1585. * So after regaining the lock, we must check for SIGKILL.
  1586. */
  1587. spin_unlock_irq(&current->sighand->siglock);
  1588. arch_ptrace_stop(exit_code, info);
  1589. spin_lock_irq(&current->sighand->siglock);
  1590. if (sigkill_pending(current))
  1591. return;
  1592. }
  1593. /*
  1594. * We're committing to trapping. TRACED should be visible before
  1595. * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
  1596. * Also, transition to TRACED and updates to ->jobctl should be
  1597. * atomic with respect to siglock and should be done after the arch
  1598. * hook as siglock is released and regrabbed across it.
  1599. */
  1600. set_current_state(TASK_TRACED);
  1601. current->last_siginfo = info;
  1602. current->exit_code = exit_code;
/*
 * If @why is CLD_STOPPED, we're trapping to participate in a group
 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
 * across siglock relocks since INTERRUPT was scheduled, PENDING
 * could be clear now. We act as if SIGCONT is received after
 * TASK_TRACED is entered - ignore it.
 */
  1610. if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
  1611. gstop_done = task_participate_group_stop(current);
  1612. /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
  1613. task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
  1614. if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
  1615. task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
  1616. /* entering a trap, clear TRAPPING */
  1617. task_clear_jobctl_trapping(current);
  1618. spin_unlock_irq(&current->sighand->siglock);
  1619. read_lock(&tasklist_lock);
  1620. if (may_ptrace_stop()) {
  1621. /*
  1622. * Notify parents of the stop.
  1623. *
  1624. * While ptraced, there are two parents - the ptracer and
  1625. * the real_parent of the group_leader. The ptracer should
  1626. * know about every stop while the real parent is only
  1627. * interested in the completion of group stop. The states
  1628. * for the two don't interact with each other. Notify
  1629. * separately unless they're gonna be duplicates.
  1630. */
  1631. do_notify_parent_cldstop(current, true, why);
  1632. if (gstop_done && ptrace_reparented(current))
  1633. do_notify_parent_cldstop(current, false, why);
  1634. /*
  1635. * Don't want to allow preemption here, because
  1636. * sys_ptrace() needs this task to be inactive.
  1637. *
  1638. * XXX: implement read_unlock_no_resched().
  1639. */
  1640. preempt_disable();
  1641. read_unlock(&tasklist_lock);
  1642. preempt_enable_no_resched();
  1643. freezable_schedule();
  1644. } else {
  1645. /*
  1646. * By the time we got the lock, our tracer went away.
  1647. * Don't drop the lock yet, another tracer may come.
  1648. *
  1649. * If @gstop_done, the ptracer went away between group stop
  1650. * completion and here. During detach, it would have set
  1651. * JOBCTL_STOP_PENDING on us and we'll re-enter
  1652. * TASK_STOPPED in do_signal_stop() on return, so notifying
  1653. * the real parent of the group stop completion is enough.
  1654. */
  1655. if (gstop_done)
  1656. do_notify_parent_cldstop(current, false, why);
  1657. /* tasklist protects us from ptrace_freeze_traced() */
  1658. __set_current_state(TASK_RUNNING);
  1659. if (clear_code)
  1660. current->exit_code = 0;
  1661. read_unlock(&tasklist_lock);
  1662. }
  1663. /*
  1664. * We are back. Now reacquire the siglock before touching
  1665. * last_siginfo, so that we are sure to have synchronized with
  1666. * any signal-sending on another CPU that wants to examine it.
  1667. */
  1668. spin_lock_irq(&current->sighand->siglock);
  1669. current->last_siginfo = NULL;
  1670. /* LISTENING can be set only during STOP traps, clear it */
  1671. current->jobctl &= ~JOBCTL_LISTENING;
  1672. /*
  1673. * Queued signals ignored us while we were stopped for tracing.
  1674. * So check for any that we should take before resuming user mode.
  1675. * This sets TIF_SIGPENDING, but never clears it.
  1676. */
  1677. recalc_sigpending_tsk(current);
  1678. }
  1679. static void ptrace_do_notify(int signr, int exit_code, int why)
  1680. {
  1681. siginfo_t info;
  1682. memset(&info, 0, sizeof info);
  1683. info.si_signo = signr;
  1684. info.si_code = exit_code;
  1685. info.si_pid = task_pid_vnr(current);
  1686. info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
  1687. /* Let the debugger run. */
  1688. ptrace_stop(exit_code, why, 1, &info);
  1689. }
  1690. void ptrace_notify(int exit_code)
  1691. {
  1692. BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
  1693. if (unlikely(current->task_works))
  1694. task_work_run();
  1695. spin_lock_irq(&current->sighand->siglock);
  1696. ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
  1697. spin_unlock_irq(&current->sighand->siglock);
  1698. }
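/*
 * Usage sketch (illustrative): callers encode a ptrace event in the high
 * bits and keep SIGTRAP in the low seven bits, which is what the BUG_ON
 * above enforces; e.g. ptrace_event() does roughly:
 *
 *	ptrace_notify((PTRACE_EVENT_FORK << 8) | SIGTRAP);
 */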
  1699. /**
  1700. * do_signal_stop - handle group stop for SIGSTOP and other stop signals
  1701. * @signr: signr causing group stop if initiating
  1702. *
  1703. * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
  1704. * and participate in it. If already set, participate in the existing
  1705. * group stop. If participated in a group stop (and thus slept), %true is
  1706. * returned with siglock released.
  1707. *
 * If ptraced, this function doesn't handle stop itself. Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched. The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
  1712. *
  1713. * CONTEXT:
  1714. * Must be called with @current->sighand->siglock held, which is released
  1715. * on %true return.
  1716. *
  1717. * RETURNS:
  1718. * %false if group stop is already cancelled or ptrace trap is scheduled.
  1719. * %true if participated in group stop.
  1720. */
  1721. static bool do_signal_stop(int signr)
  1722. __releases(&current->sighand->siglock)
  1723. {
  1724. struct signal_struct *sig = current->signal;
  1725. if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
  1726. unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
  1727. struct task_struct *t;
  1728. /* signr will be recorded in task->jobctl for retries */
  1729. WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
  1730. if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
  1731. unlikely(signal_group_exit(sig)))
  1732. return false;
  1733. /*
  1734. * There is no group stop already in progress. We must
  1735. * initiate one now.
  1736. *
  1737. * While ptraced, a task may be resumed while group stop is
  1738. * still in effect and then receive a stop signal and
  1739. * initiate another group stop. This deviates from the
  1740. * usual behavior as two consecutive stop signals can't
  1741. * cause two group stops when !ptraced. That is why we
  1742. * also check !task_is_stopped(t) below.
  1743. *
  1744. * The condition can be distinguished by testing whether
  1745. * SIGNAL_STOP_STOPPED is already set. Don't generate
  1746. * group_exit_code in such case.
  1747. *
  1748. * This is not necessary for SIGNAL_STOP_CONTINUED because
  1749. * an intervening stop signal is required to cause two
  1750. * continued events regardless of ptrace.
  1751. */
  1752. if (!(sig->flags & SIGNAL_STOP_STOPPED))
  1753. sig->group_exit_code = signr;
  1754. sig->group_stop_count = 0;
  1755. if (task_set_jobctl_pending(current, signr | gstop))
  1756. sig->group_stop_count++;
  1757. t = current;
  1758. while_each_thread(current, t) {
  1759. /*
  1760. * Setting state to TASK_STOPPED for a group
  1761. * stop is always done with the siglock held,
  1762. * so this check has no races.
  1763. */
  1764. if (!task_is_stopped(t) &&
  1765. task_set_jobctl_pending(t, signr | gstop)) {
  1766. sig->group_stop_count++;
  1767. if (likely(!(t->ptrace & PT_SEIZED)))
  1768. signal_wake_up(t, 0);
  1769. else
  1770. ptrace_trap_notify(t);
  1771. }
  1772. }
  1773. }
  1774. if (likely(!current->ptrace)) {
  1775. int notify = 0;
  1776. /*
  1777. * If there are no other threads in the group, or if there
  1778. * is a group stop in progress and we are the last to stop,
  1779. * report to the parent.
  1780. */
  1781. if (task_participate_group_stop(current))
  1782. notify = CLD_STOPPED;
  1783. __set_current_state(TASK_STOPPED);
  1784. spin_unlock_irq(&current->sighand->siglock);
/*
 * Notify the parent of the group stop completion. Because
 * we're not holding either the siglock or tasklist_lock
 * here, the ptracer may attach in between; however, this is for
 * group stop and should always be delivered to the real
 * parent of the group leader. The new ptracer will get
 * its notification when this task transitions into
 * TASK_TRACED.
 */
  1794. if (notify) {
  1795. read_lock(&tasklist_lock);
  1796. do_notify_parent_cldstop(current, false, notify);
  1797. read_unlock(&tasklist_lock);
  1798. }
  1799. /* Now we don't run again until woken by SIGCONT or SIGKILL */
  1800. freezable_schedule();
  1801. return true;
  1802. } else {
  1803. /*
  1804. * While ptraced, group stop is handled by STOP trap.
  1805. * Schedule it and let the caller deal with it.
  1806. */
  1807. task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
  1808. return false;
  1809. }
  1810. }
  1811. /**
  1812. * do_jobctl_trap - take care of ptrace jobctl traps
  1813. *
  1814. * When PT_SEIZED, it's used for both group stop and explicit
  1815. * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
  1816. * accompanying siginfo. If stopped, lower eight bits of exit_code contain
  1817. * the stop signal; otherwise, %SIGTRAP.
  1818. *
  1819. * When !PT_SEIZED, it's used only for group stop trap with stop signal
  1820. * number as exit_code and no siginfo.
  1821. *
  1822. * CONTEXT:
  1823. * Must be called with @current->sighand->siglock held, which may be
  1824. * released and re-acquired before returning with intervening sleep.
  1825. */
  1826. static void do_jobctl_trap(void)
  1827. {
  1828. struct signal_struct *signal = current->signal;
  1829. int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
  1830. if (current->ptrace & PT_SEIZED) {
  1831. if (!signal->group_stop_count &&
  1832. !(signal->flags & SIGNAL_STOP_STOPPED))
  1833. signr = SIGTRAP;
  1834. WARN_ON_ONCE(!signr);
  1835. ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
  1836. CLD_STOPPED);
  1837. } else {
  1838. WARN_ON_ONCE(!signr);
  1839. ptrace_stop(signr, CLD_STOPPED, 0, NULL);
  1840. current->exit_code = 0;
  1841. }
  1842. }
  1843. static int ptrace_signal(int signr, siginfo_t *info)
  1844. {
  1845. ptrace_signal_deliver();
  1846. /*
  1847. * We do not check sig_kernel_stop(signr) but set this marker
  1848. * unconditionally because we do not know whether debugger will
  1849. * change signr. This flag has no meaning unless we are going
  1850. * to stop after return from ptrace_stop(). In this case it will
  1851. * be checked in do_signal_stop(), we should only stop if it was
  1852. * not cleared by SIGCONT while we were sleeping. See also the
  1853. * comment in dequeue_signal().
  1854. */
  1855. current->jobctl |= JOBCTL_STOP_DEQUEUED;
  1856. ptrace_stop(signr, CLD_TRAPPED, 0, info);
  1857. /* We're back. Did the debugger cancel the sig? */
  1858. signr = current->exit_code;
  1859. if (signr == 0)
  1860. return signr;
  1861. current->exit_code = 0;
  1862. /*
  1863. * Update the siginfo structure if the signal has
  1864. * changed. If the debugger wanted something
  1865. * specific in the siginfo structure then it should
  1866. * have updated *info via PTRACE_SETSIGINFO.
  1867. */
  1868. if (signr != info->si_signo) {
  1869. info->si_signo = signr;
  1870. info->si_errno = 0;
  1871. info->si_code = SI_USER;
  1872. rcu_read_lock();
  1873. info->si_pid = task_pid_vnr(current->parent);
  1874. info->si_uid = from_kuid_munged(current_user_ns(),
  1875. task_uid(current->parent));
  1876. rcu_read_unlock();
  1877. }
  1878. /* If the (new) signal is now blocked, requeue it. */
  1879. if (sigismember(&current->blocked, signr)) {
  1880. specific_send_sig_info(signr, info, current);
  1881. signr = 0;
  1882. }
  1883. return signr;
  1884. }
  1885. int get_signal(struct ksignal *ksig)
  1886. {
  1887. struct sighand_struct *sighand = current->sighand;
  1888. struct signal_struct *signal = current->signal;
  1889. int signr;
  1890. if (unlikely(current->task_works))
  1891. task_work_run();
  1892. if (unlikely(uprobe_deny_signal()))
  1893. return 0;
/*
 * Do this once; we can't return to user mode if freezing() == T.
 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
 * thus do not need another check after return.
 */
  1899. try_to_freeze();
  1900. relock:
  1901. spin_lock_irq(&sighand->siglock);
  1902. /*
  1903. * Every stopped thread goes here after wakeup. Check to see if
  1904. * we should notify the parent, prepare_signal(SIGCONT) encodes
  1905. * the CLD_ si_code into SIGNAL_CLD_MASK bits.
  1906. */
  1907. if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
  1908. int why;
  1909. if (signal->flags & SIGNAL_CLD_CONTINUED)
  1910. why = CLD_CONTINUED;
  1911. else
  1912. why = CLD_STOPPED;
  1913. signal->flags &= ~SIGNAL_CLD_MASK;
  1914. spin_unlock_irq(&sighand->siglock);
/*
 * Notify the parent that we're continuing. This event is
 * always per-process and doesn't make a whole lot of sense
 * for ptracers, who shouldn't consume the state via
 * wait(2) either, but, for backward compatibility, notify
 * the ptracer of the group leader too unless it's gonna be
 * a duplicate.
 */
  1923. read_lock(&tasklist_lock);
  1924. do_notify_parent_cldstop(current, false, why);
  1925. if (ptrace_reparented(current->group_leader))
  1926. do_notify_parent_cldstop(current->group_leader,
  1927. true, why);
  1928. read_unlock(&tasklist_lock);
  1929. goto relock;
  1930. }
  1931. for (;;) {
  1932. struct k_sigaction *ka;
  1933. if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
  1934. do_signal_stop(0))
  1935. goto relock;
  1936. if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
  1937. do_jobctl_trap();
  1938. spin_unlock_irq(&sighand->siglock);
  1939. goto relock;
  1940. }
  1941. signr = dequeue_signal(current, &current->blocked, &ksig->info);
  1942. if (!signr)
  1943. break; /* will return 0 */
  1944. if (unlikely(current->ptrace) && signr != SIGKILL) {
  1945. signr = ptrace_signal(signr, &ksig->info);
  1946. if (!signr)
  1947. continue;
  1948. }
  1949. ka = &sighand->action[signr-1];
  1950. /* Trace actually delivered signals. */
  1951. trace_signal_deliver(signr, &ksig->info, ka);
  1952. if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
  1953. continue;
  1954. if (ka->sa.sa_handler != SIG_DFL) {
  1955. /* Run the handler. */
  1956. ksig->ka = *ka;
  1957. if (ka->sa.sa_flags & SA_ONESHOT)
  1958. ka->sa.sa_handler = SIG_DFL;
  1959. break; /* will return non-zero "signr" value */
  1960. }
  1961. /*
  1962. * Now we are doing the default action for this signal.
  1963. */
  1964. if (sig_kernel_ignore(signr)) /* Default is nothing. */
  1965. continue;
  1966. /*
  1967. * Global init gets no signals it doesn't want.
  1968. * Container-init gets no signals it doesn't want from same
  1969. * container.
  1970. *
  1971. * Note that if global/container-init sees a sig_kernel_only()
  1972. * signal here, the signal must have been generated internally
  1973. * or must have come from an ancestor namespace. In either
  1974. * case, the signal cannot be dropped.
  1975. */
  1976. if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
  1977. !sig_kernel_only(signr))
  1978. continue;
  1979. if (sig_kernel_stop(signr)) {
  1980. /*
  1981. * The default action is to stop all threads in
  1982. * the thread group. The job control signals
  1983. * do nothing in an orphaned pgrp, but SIGSTOP
  1984. * always works. Note that siglock needs to be
  1985. * dropped during the call to is_orphaned_pgrp()
  1986. * because of lock ordering with tasklist_lock.
  1987. * This allows an intervening SIGCONT to be posted.
  1988. * We need to check for that and bail out if necessary.
  1989. */
  1990. if (signr != SIGSTOP) {
  1991. spin_unlock_irq(&sighand->siglock);
  1992. /* signals can be posted during this window */
  1993. if (is_current_pgrp_orphaned())
  1994. goto relock;
  1995. spin_lock_irq(&sighand->siglock);
  1996. }
  1997. if (likely(do_signal_stop(ksig->info.si_signo))) {
  1998. /* It released the siglock. */
  1999. goto relock;
  2000. }
  2001. /*
  2002. * We didn't actually stop, due to a race
  2003. * with SIGCONT or something like that.
  2004. */
  2005. continue;
  2006. }
  2007. spin_unlock_irq(&sighand->siglock);
  2008. /*
  2009. * Anything else is fatal, maybe with a core dump.
  2010. */
  2011. current->flags |= PF_SIGNALED;
  2012. if (sig_kernel_coredump(signr)) {
  2013. if (print_fatal_signals)
  2014. print_fatal_signal(ksig->info.si_signo);
  2015. proc_coredump_connector(current);
  2016. /*
  2017. * If it was able to dump core, this kills all
  2018. * other threads in the group and synchronizes with
  2019. * their demise. If we lost the race with another
  2020. * thread getting here, it set group_exit_code
  2021. * first and our do_group_exit call below will use
  2022. * that value and ignore the one we pass it.
  2023. */
  2024. do_coredump(&ksig->info);
  2025. }
  2026. /*
  2027. * Death signals, no core dump.
  2028. */
  2029. do_group_exit(ksig->info.si_signo);
  2030. /* NOTREACHED */
  2031. }
  2032. spin_unlock_irq(&sighand->siglock);
  2033. ksig->sig = signr;
  2034. return ksig->sig > 0;
  2035. }
/**
 * signal_delivered - finish bookkeeping after a signal was delivered
 * @ksig: kernel signal struct
 * @stepping: nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
 */
  2046. static void signal_delivered(struct ksignal *ksig, int stepping)
  2047. {
  2048. sigset_t blocked;
/*
 * A signal was successfully delivered, and the saved sigmask was
 * stored on the signal frame and will be restored by sigreturn.
 * So we can simply clear the restore sigmask flag.
 */
  2053. clear_restore_sigmask();
  2054. sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
  2055. if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
  2056. sigaddset(&blocked, ksig->sig);
  2057. set_current_blocked(&blocked);
  2058. tracehook_signal_handler(stepping);
  2059. }
  2060. void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
  2061. {
  2062. if (failed)
  2063. force_sigsegv(ksig->sig, current);
  2064. else
  2065. signal_delivered(ksig, stepping);
  2066. }
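/*
 * Sketch of the arch glue (illustrative; setup_rt_frame() is a
 * hypothetical stand-in for the arch-specific signal frame builder):
 *
 *	void do_signal(struct pt_regs *regs)
 *	{
 *		struct ksignal ksig;
 *
 *		if (get_signal(&ksig)) {
 *			int failed = setup_rt_frame(&ksig, regs);
 *			signal_setup_done(failed, &ksig, 0); // 0: no single-step
 *			return;
 *		}
 *		restore_saved_sigmask();
 *	}
 */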
  2067. /*
  2068. * It could be that complete_signal() picked us to notify about the
  2069. * group-wide signal. Other threads should be notified now to take
  2070. * the shared signals in @which since we will not.
  2071. */
  2072. static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
  2073. {
  2074. sigset_t retarget;
  2075. struct task_struct *t;
  2076. sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
  2077. if (sigisemptyset(&retarget))
  2078. return;
  2079. t = tsk;
  2080. while_each_thread(tsk, t) {
  2081. if (t->flags & PF_EXITING)
  2082. continue;
  2083. if (!has_pending_signals(&retarget, &t->blocked))
  2084. continue;
  2085. /* Remove the signals this thread can handle. */
  2086. sigandsets(&retarget, &retarget, &t->blocked);
  2087. if (!signal_pending(t))
  2088. signal_wake_up(t, 0);
  2089. if (sigisemptyset(&retarget))
  2090. break;
  2091. }
  2092. }
  2093. void exit_signals(struct task_struct *tsk)
  2094. {
  2095. int group_stop = 0;
  2096. sigset_t unblocked;
  2097. /*
  2098. * @tsk is about to have PF_EXITING set - lock out users which
  2099. * expect stable threadgroup.
  2100. */
  2101. threadgroup_change_begin(tsk);
  2102. if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
  2103. tsk->flags |= PF_EXITING;
  2104. threadgroup_change_end(tsk);
  2105. return;
  2106. }
  2107. spin_lock_irq(&tsk->sighand->siglock);
  2108. /*
  2109. * From now this task is not visible for group-wide signals,
  2110. * see wants_signal(), do_signal_stop().
  2111. */
  2112. tsk->flags |= PF_EXITING;
  2113. threadgroup_change_end(tsk);
  2114. if (!signal_pending(tsk))
  2115. goto out;
  2116. unblocked = tsk->blocked;
  2117. signotset(&unblocked);
  2118. retarget_shared_pending(tsk, &unblocked);
  2119. if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
  2120. task_participate_group_stop(tsk))
  2121. group_stop = CLD_STOPPED;
  2122. out:
  2123. spin_unlock_irq(&tsk->sighand->siglock);
  2124. /*
  2125. * If group stop has completed, deliver the notification. This
  2126. * should always go to the real parent of the group leader.
  2127. */
  2128. if (unlikely(group_stop)) {
  2129. read_lock(&tasklist_lock);
  2130. do_notify_parent_cldstop(tsk, false, group_stop);
  2131. read_unlock(&tasklist_lock);
  2132. }
  2133. }
  2134. EXPORT_SYMBOL(recalc_sigpending);
  2135. EXPORT_SYMBOL_GPL(dequeue_signal);
  2136. EXPORT_SYMBOL(flush_signals);
  2137. EXPORT_SYMBOL(force_sig);
  2138. EXPORT_SYMBOL(send_sig);
  2139. EXPORT_SYMBOL(send_sig_info);
  2140. EXPORT_SYMBOL(sigprocmask);
  2141. /*
  2142. * System call entry points.
  2143. */
  2144. /**
  2145. * sys_restart_syscall - restart a system call
  2146. */
  2147. SYSCALL_DEFINE0(restart_syscall)
  2148. {
  2149. struct restart_block *restart = &current->restart_block;
  2150. return restart->fn(restart);
  2151. }
  2152. long do_no_restart_syscall(struct restart_block *param)
  2153. {
  2154. return -EINTR;
  2155. }
  2156. static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
  2157. {
  2158. if (signal_pending(tsk) && !thread_group_empty(tsk)) {
  2159. sigset_t newblocked;
  2160. /* A set of now blocked but previously unblocked signals. */
  2161. sigandnsets(&newblocked, newset, &current->blocked);
  2162. retarget_shared_pending(tsk, &newblocked);
  2163. }
  2164. tsk->blocked = *newset;
  2165. recalc_sigpending();
  2166. }
  2167. /**
  2168. * set_current_blocked - change current->blocked mask
  2169. * @newset: new mask
  2170. *
  2171. * It is wrong to change ->blocked directly, this helper should be used
  2172. * to ensure the process can't miss a shared signal we are going to block.
  2173. */
  2174. void set_current_blocked(sigset_t *newset)
  2175. {
  2176. sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
  2177. __set_current_blocked(newset);
  2178. }
  2179. void __set_current_blocked(const sigset_t *newset)
  2180. {
  2181. struct task_struct *tsk = current;
  2182. /*
  2183. * In case the signal mask hasn't changed, there is nothing we need
  2184. * to do. The current->blocked shouldn't be modified by other task.
  2185. */
  2186. if (sigequalsets(&tsk->blocked, newset))
  2187. return;
  2188. spin_lock_irq(&tsk->sighand->siglock);
  2189. __set_task_blocked(tsk, newset);
  2190. spin_unlock_irq(&tsk->sighand->siglock);
  2191. }
  2192. /*
  2193. * This is also useful for kernel threads that want to temporarily
  2194. * (or permanently) block certain signals.
  2195. *
  2196. * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
  2197. * interface happily blocks "unblockable" signals like SIGKILL
  2198. * and friends.
  2199. */
  2200. int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
  2201. {
  2202. struct task_struct *tsk = current;
  2203. sigset_t newset;
  2204. /* Lockless, only current can change ->blocked, never from irq */
  2205. if (oldset)
  2206. *oldset = tsk->blocked;
  2207. switch (how) {
  2208. case SIG_BLOCK:
  2209. sigorsets(&newset, &tsk->blocked, set);
  2210. break;
  2211. case SIG_UNBLOCK:
  2212. sigandnsets(&newset, &tsk->blocked, set);
  2213. break;
  2214. case SIG_SETMASK:
  2215. newset = *set;
  2216. break;
  2217. default:
  2218. return -EINVAL;
  2219. }
  2220. __set_current_blocked(&newset);
  2221. return 0;
  2222. }
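/*
 * Usage sketch (illustrative): in-kernel code temporarily blocking all
 * signals around a critical region. Per the NOTE above, this interface
 * would happily block SIGKILL too:
 *
 *	sigset_t all, old;
 *
 *	sigfillset(&all);
 *	sigprocmask(SIG_SETMASK, &all, &old);
 *	...
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */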
  2223. /**
  2224. * sys_rt_sigprocmask - change the list of currently blocked signals
  2225. * @how: whether to add, remove, or set signals
 * @nset: the new signal set to apply according to @how, if non-null
  2227. * @oset: previous value of signal mask if non-null
  2228. * @sigsetsize: size of sigset_t type
  2229. */
  2230. SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
  2231. sigset_t __user *, oset, size_t, sigsetsize)
  2232. {
  2233. sigset_t old_set, new_set;
  2234. int error;
  2235. /* XXX: Don't preclude handling different sized sigset_t's. */
  2236. if (sigsetsize != sizeof(sigset_t))
  2237. return -EINVAL;
  2238. old_set = current->blocked;
  2239. if (nset) {
  2240. if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
  2241. return -EFAULT;
  2242. sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
  2243. error = sigprocmask(how, &new_set, NULL);
  2244. if (error)
  2245. return error;
  2246. }
  2247. if (oset) {
  2248. if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
  2249. return -EFAULT;
  2250. }
  2251. return 0;
  2252. }
  2253. #ifdef CONFIG_COMPAT
  2254. COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
  2255. compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
  2256. {
  2257. #ifdef __BIG_ENDIAN
  2258. sigset_t old_set = current->blocked;
  2259. /* XXX: Don't preclude handling different sized sigset_t's. */
  2260. if (sigsetsize != sizeof(sigset_t))
  2261. return -EINVAL;
  2262. if (nset) {
  2263. compat_sigset_t new32;
  2264. sigset_t new_set;
  2265. int error;
  2266. if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
  2267. return -EFAULT;
  2268. sigset_from_compat(&new_set, &new32);
  2269. sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
  2270. error = sigprocmask(how, &new_set, NULL);
  2271. if (error)
  2272. return error;
  2273. }
  2274. if (oset) {
  2275. compat_sigset_t old32;
  2276. sigset_to_compat(&old32, &old_set);
  2277. if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
  2278. return -EFAULT;
  2279. }
  2280. return 0;
  2281. #else
  2282. return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
  2283. (sigset_t __user *)oset, sigsetsize);
  2284. #endif
  2285. }
  2286. #endif
  2287. static int do_sigpending(void *set, unsigned long sigsetsize)
  2288. {
  2289. if (sigsetsize > sizeof(sigset_t))
  2290. return -EINVAL;
  2291. spin_lock_irq(&current->sighand->siglock);
  2292. sigorsets(set, &current->pending.signal,
  2293. &current->signal->shared_pending.signal);
  2294. spin_unlock_irq(&current->sighand->siglock);
  2295. /* Outside the lock because only this thread touches it. */
  2296. sigandsets(set, &current->blocked, set);
  2297. return 0;
  2298. }
  2299. /**
 * sys_rt_sigpending - examine pending signals that have been raised
 * while blocked
 * @uset: stores pending signals
 * @sigsetsize: size of sigset_t type or smaller
  2304. */
  2305. SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
  2306. {
  2307. sigset_t set;
  2308. int err = do_sigpending(&set, sigsetsize);
  2309. if (!err && copy_to_user(uset, &set, sigsetsize))
  2310. err = -EFAULT;
  2311. return err;
  2312. }
  2313. #ifdef CONFIG_COMPAT
  2314. COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
  2315. compat_size_t, sigsetsize)
  2316. {
  2317. #ifdef __BIG_ENDIAN
  2318. sigset_t set;
  2319. int err = do_sigpending(&set, sigsetsize);
  2320. if (!err) {
  2321. compat_sigset_t set32;
  2322. sigset_to_compat(&set32, &set);
  2323. /* we can get here only if sigsetsize <= sizeof(set) */
  2324. if (copy_to_user(uset, &set32, sigsetsize))
  2325. err = -EFAULT;
  2326. }
  2327. return err;
  2328. #else
  2329. return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
  2330. #endif
  2331. }
  2332. #endif
  2333. #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
  2334. int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
  2335. {
  2336. int err;
  2337. if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
  2338. return -EFAULT;
  2339. if (from->si_code < 0)
  2340. return __copy_to_user(to, from, sizeof(siginfo_t))
  2341. ? -EFAULT : 0;
  2342. /*
  2343. * If you change siginfo_t structure, please be sure
  2344. * this code is fixed accordingly.
  2345. * Please remember to update the signalfd_copyinfo() function
  2346. * inside fs/signalfd.c too, in case siginfo_t changes.
  2347. * It should never copy any pad contained in the structure
  2348. * to avoid security leaks, but must copy the generic
  2349. * 3 ints plus the relevant union member.
  2350. */
  2351. err = __put_user(from->si_signo, &to->si_signo);
  2352. err |= __put_user(from->si_errno, &to->si_errno);
  2353. err |= __put_user((short)from->si_code, &to->si_code);
  2354. switch (from->si_code & __SI_MASK) {
  2355. case __SI_KILL:
  2356. err |= __put_user(from->si_pid, &to->si_pid);
  2357. err |= __put_user(from->si_uid, &to->si_uid);
  2358. break;
  2359. case __SI_TIMER:
  2360. err |= __put_user(from->si_tid, &to->si_tid);
  2361. err |= __put_user(from->si_overrun, &to->si_overrun);
  2362. err |= __put_user(from->si_ptr, &to->si_ptr);
  2363. break;
  2364. case __SI_POLL:
  2365. err |= __put_user(from->si_band, &to->si_band);
  2366. err |= __put_user(from->si_fd, &to->si_fd);
  2367. break;
  2368. case __SI_FAULT:
  2369. err |= __put_user(from->si_addr, &to->si_addr);
  2370. #ifdef __ARCH_SI_TRAPNO
  2371. err |= __put_user(from->si_trapno, &to->si_trapno);
  2372. #endif
  2373. #ifdef BUS_MCEERR_AO
  2374. /*
  2375. * Other callers might not initialize the si_lsb field,
  2376. * so check explicitly for the right codes here.
  2377. */
  2378. if (from->si_signo == SIGBUS &&
  2379. (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
  2380. err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
  2381. #endif
  2382. #ifdef SEGV_BNDERR
  2383. if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
  2384. err |= __put_user(from->si_lower, &to->si_lower);
  2385. err |= __put_user(from->si_upper, &to->si_upper);
  2386. }
  2387. #endif
  2388. #ifdef SEGV_PKUERR
  2389. if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR)
  2390. err |= __put_user(from->si_pkey, &to->si_pkey);
  2391. #endif
  2392. break;
  2393. case __SI_CHLD:
  2394. err |= __put_user(from->si_pid, &to->si_pid);
  2395. err |= __put_user(from->si_uid, &to->si_uid);
  2396. err |= __put_user(from->si_status, &to->si_status);
  2397. err |= __put_user(from->si_utime, &to->si_utime);
  2398. err |= __put_user(from->si_stime, &to->si_stime);
  2399. break;
  2400. case __SI_RT: /* This is not generated by the kernel as of now. */
  2401. case __SI_MESGQ: /* But this is */
  2402. err |= __put_user(from->si_pid, &to->si_pid);
  2403. err |= __put_user(from->si_uid, &to->si_uid);
  2404. err |= __put_user(from->si_ptr, &to->si_ptr);
  2405. break;
  2406. #ifdef __ARCH_SIGSYS
  2407. case __SI_SYS:
  2408. err |= __put_user(from->si_call_addr, &to->si_call_addr);
  2409. err |= __put_user(from->si_syscall, &to->si_syscall);
  2410. err |= __put_user(from->si_arch, &to->si_arch);
  2411. break;
  2412. #endif
  2413. default: /* this is just in case for now ... */
  2414. err |= __put_user(from->si_pid, &to->si_pid);
  2415. err |= __put_user(from->si_uid, &to->si_uid);
  2416. break;
  2417. }
  2418. return err;
  2419. }
  2420. #endif
  2421. /**
  2422. * do_sigtimedwait - wait for queued signals specified in @which
  2423. * @which: queued signals to wait for
  2424. * @info: if non-null, the signal's siginfo is returned here
  2425. * @ts: upper bound on process time suspension
  2426. */
  2427. int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
  2428. const struct timespec *ts)
  2429. {
  2430. ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX };
  2431. struct task_struct *tsk = current;
  2432. sigset_t mask = *which;
  2433. int sig, ret = 0;
  2434. if (ts) {
  2435. if (!timespec_valid(ts))
  2436. return -EINVAL;
  2437. timeout = timespec_to_ktime(*ts);
  2438. to = &timeout;
  2439. }
  2440. /*
  2441. * Invert the set of allowed signals to get those we want to block.
  2442. */
  2443. sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
  2444. signotset(&mask);
  2445. spin_lock_irq(&tsk->sighand->siglock);
  2446. sig = dequeue_signal(tsk, &mask, info);
  2447. if (!sig && timeout.tv64) {
/*
 * None ready; temporarily unblock the signals we're interested in
 * while we sleep, so that we'll be awakened when they arrive.
 * Unblocking is always fine; we can avoid set_current_blocked().
 */
  2454. tsk->real_blocked = tsk->blocked;
  2455. sigandsets(&tsk->blocked, &tsk->blocked, &mask);
  2456. recalc_sigpending();
  2457. spin_unlock_irq(&tsk->sighand->siglock);
  2458. __set_current_state(TASK_INTERRUPTIBLE);
  2459. ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
  2460. HRTIMER_MODE_REL);
  2461. spin_lock_irq(&tsk->sighand->siglock);
  2462. __set_task_blocked(tsk, &tsk->real_blocked);
  2463. sigemptyset(&tsk->real_blocked);
  2464. sig = dequeue_signal(tsk, &mask, info);
  2465. }
  2466. spin_unlock_irq(&tsk->sighand->siglock);
  2467. if (sig)
  2468. return sig;
  2469. return ret ? -EINTR : -EAGAIN;
  2470. }
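/*
 * Return convention (as implemented above): a positive signal number if
 * one was dequeued, -EINTR if the sleep was interrupted by another
 * signal, -EAGAIN if the timeout expired with nothing pending. A caller
 * can poll by passing a zero timeout (illustrative sketch):
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec zero = {};
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sig = do_sigtimedwait(&set, &info, &zero);	// -EAGAIN if none pending
 */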
  2471. /**
  2472. * sys_rt_sigtimedwait - synchronously wait for queued signals specified
  2473. * in @uthese
  2474. * @uthese: queued signals to wait for
  2475. * @uinfo: if non-null, the signal's siginfo is returned here
  2476. * @uts: upper bound on process time suspension
  2477. * @sigsetsize: size of sigset_t type
  2478. */
  2479. SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
  2480. siginfo_t __user *, uinfo, const struct timespec __user *, uts,
  2481. size_t, sigsetsize)
  2482. {
  2483. sigset_t these;
  2484. struct timespec ts;
  2485. siginfo_t info;
  2486. int ret;
  2487. /* XXX: Don't preclude handling different sized sigset_t's. */
  2488. if (sigsetsize != sizeof(sigset_t))
  2489. return -EINVAL;
  2490. if (copy_from_user(&these, uthese, sizeof(these)))
  2491. return -EFAULT;
  2492. if (uts) {
  2493. if (copy_from_user(&ts, uts, sizeof(ts)))
  2494. return -EFAULT;
  2495. }
  2496. ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
  2497. if (ret > 0 && uinfo) {
  2498. if (copy_siginfo_to_user(uinfo, &info))
  2499. ret = -EFAULT;
  2500. }
  2501. return ret;
  2502. }
  2503. /**
  2504. * sys_kill - send a signal to a process
  2505. * @pid: the PID of the process
  2506. * @sig: signal to be sent
  2507. */
  2508. SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
  2509. {
  2510. struct siginfo info;
  2511. info.si_signo = sig;
  2512. info.si_errno = 0;
  2513. info.si_code = SI_USER;
  2514. info.si_pid = task_tgid_vnr(current);
  2515. info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
  2516. return kill_something_info(sig, &info, pid);
  2517. }
  2518. static int
  2519. do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
  2520. {
  2521. struct task_struct *p;
  2522. int error = -ESRCH;
  2523. rcu_read_lock();
  2524. p = find_task_by_vpid(pid);
  2525. if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
  2526. error = check_kill_permission(sig, info, p);
  2527. /*
  2528. * The null signal is a permissions and process existence
  2529. * probe. No signal is actually delivered.
  2530. */
  2531. if (!error && sig) {
  2532. error = do_send_sig_info(sig, info, p, false);
  2533. /*
  2534. * If lock_task_sighand() failed we pretend the task
  2535. * dies after receiving the signal. The window is tiny,
  2536. * and the signal is private anyway.
  2537. */
  2538. if (unlikely(error == -ESRCH))
  2539. error = 0;
  2540. }
  2541. }
  2542. rcu_read_unlock();
  2543. return error;
  2544. }
  2545. static int do_tkill(pid_t tgid, pid_t pid, int sig)
  2546. {
  2547. struct siginfo info = {};
  2548. info.si_signo = sig;
  2549. info.si_errno = 0;
  2550. info.si_code = SI_TKILL;
  2551. info.si_pid = task_tgid_vnr(current);
  2552. info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
  2553. return do_send_specific(tgid, pid, sig, &info);
  2554. }
  2555. /**
  2556. * sys_tgkill - send signal to one specific thread
  2557. * @tgid: the thread group ID of the thread
  2558. * @pid: the PID of the thread
  2559. * @sig: signal to be sent
  2560. *
 * This syscall also checks @tgid and returns -ESRCH even if the PID
 * exists but no longer belongs to the target process. This
 * method solves the problem of threads exiting and PIDs getting reused.
  2564. */
  2565. SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
  2566. {
  2567. /* This is only valid for single tasks */
  2568. if (pid <= 0 || tgid <= 0)
  2569. return -EINVAL;
  2570. return do_tkill(tgid, pid, sig);
  2571. }
  2572. /**
  2573. * sys_tkill - send signal to one specific task
  2574. * @pid: the PID of the task
  2575. * @sig: signal to be sent
  2576. *
  2577. * Send a signal to only one task, even if it's a CLONE_THREAD task.
  2578. */
  2579. SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
  2580. {
  2581. /* This is only valid for single tasks */
  2582. if (pid <= 0)
  2583. return -EINVAL;
  2584. return do_tkill(0, pid, sig);
  2585. }
  2586. static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
  2587. {
  2588. /* Not even root can pretend to send signals from the kernel.
  2589. * Nor can they impersonate a kill()/tgkill(), which adds source info.
  2590. */
  2591. if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
  2592. (task_pid_vnr(current) != pid))
  2593. return -EPERM;
  2594. info->si_signo = sig;
  2595. /* POSIX.1b doesn't mention process groups. */
  2596. return kill_proc_info(sig, info, pid);
  2597. }
  2598. /**
 * sys_rt_sigqueueinfo - send signal information to a process
 * @pid: the PID of the process
  2601. * @sig: signal to be sent
  2602. * @uinfo: signal info to be sent
  2603. */
  2604. SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
  2605. siginfo_t __user *, uinfo)
  2606. {
  2607. siginfo_t info;
  2608. if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
  2609. return -EFAULT;
  2610. return do_rt_sigqueueinfo(pid, sig, &info);
  2611. }
  2612. #ifdef CONFIG_COMPAT
  2613. COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
  2614. compat_pid_t, pid,
  2615. int, sig,
  2616. struct compat_siginfo __user *, uinfo)
  2617. {
  2618. siginfo_t info = {};
  2619. int ret = copy_siginfo_from_user32(&info, uinfo);
  2620. if (unlikely(ret))
  2621. return ret;
  2622. return do_rt_sigqueueinfo(pid, sig, &info);
  2623. }
  2624. #endif
  2625. static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
  2626. {
  2627. /* This is only valid for single tasks */
  2628. if (pid <= 0 || tgid <= 0)
  2629. return -EINVAL;
  2630. /* Not even root can pretend to send signals from the kernel.
  2631. * Nor can they impersonate a kill()/tgkill(), which adds source info.
  2632. */
  2633. if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
  2634. (task_pid_vnr(current) != pid))
  2635. return -EPERM;
  2636. info->si_signo = sig;
  2637. return do_send_specific(tgid, pid, sig, info);
  2638. }
  2639. SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
  2640. siginfo_t __user *, uinfo)
  2641. {
  2642. siginfo_t info;
  2643. if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
  2644. return -EFAULT;
  2645. return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
  2646. }
  2647. #ifdef CONFIG_COMPAT
  2648. COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
  2649. compat_pid_t, tgid,
  2650. compat_pid_t, pid,
  2651. int, sig,
  2652. struct compat_siginfo __user *, uinfo)
  2653. {
  2654. siginfo_t info = {};
  2655. if (copy_siginfo_from_user32(&info, uinfo))
  2656. return -EFAULT;
  2657. return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
  2658. }
  2659. #endif
  2660. /*
  2661. * For kthreads only, must not be used if cloned with CLONE_SIGHAND
  2662. */
  2663. void kernel_sigaction(int sig, __sighandler_t action)
  2664. {
  2665. spin_lock_irq(&current->sighand->siglock);
  2666. current->sighand->action[sig - 1].sa.sa_handler = action;
  2667. if (action == SIG_IGN) {
  2668. sigset_t mask;
  2669. sigemptyset(&mask);
  2670. sigaddset(&mask, sig);
  2671. flush_sigqueue_mask(&mask, &current->signal->shared_pending);
  2672. flush_sigqueue_mask(&mask, &current->pending);
  2673. recalc_sigpending();
  2674. }
  2675. spin_unlock_irq(&current->sighand->siglock);
  2676. }
  2677. EXPORT_SYMBOL(kernel_sigaction);
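/*
 * Usage sketch (illustrative): the allow_signal()/disallow_signal()
 * helpers wrap this for kthreads, so a typical consumer looks roughly
 * like:
 *
 *	allow_signal(SIGTERM);			// let SIGTERM be queued to us
 *	while (!kthread_should_stop()) {
 *		...
 *		if (signal_pending(current))
 *			break;			// someone sent us SIGTERM
 *	}
 */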
  2678. void __weak sigaction_compat_abi(struct k_sigaction *act,
  2679. struct k_sigaction *oact)
  2680. {
  2681. }
  2682. int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
  2683. {
  2684. struct task_struct *p = current, *t;
  2685. struct k_sigaction *k;
  2686. sigset_t mask;
  2687. if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
  2688. return -EINVAL;
  2689. k = &p->sighand->action[sig-1];
  2690. spin_lock_irq(&p->sighand->siglock);
  2691. if (oact)
  2692. *oact = *k;
  2693. sigaction_compat_abi(act, oact);
  2694. if (act) {
  2695. sigdelsetmask(&act->sa.sa_mask,
  2696. sigmask(SIGKILL) | sigmask(SIGSTOP));
  2697. *k = *act;
  2698. /*
  2699. * POSIX 3.3.1.3:
  2700. * "Setting a signal action to SIG_IGN for a signal that is
  2701. * pending shall cause the pending signal to be discarded,
  2702. * whether or not it is blocked."
  2703. *
  2704. * "Setting a signal action to SIG_DFL for a signal that is
  2705. * pending and whose default action is to ignore the signal
  2706. * (for example, SIGCHLD), shall cause the pending signal to
  2707. * be discarded, whether or not it is blocked"
  2708. */
  2709. if (sig_handler_ignored(sig_handler(p, sig), sig)) {
  2710. sigemptyset(&mask);
  2711. sigaddset(&mask, sig);
  2712. flush_sigqueue_mask(&mask, &p->signal->shared_pending);
  2713. for_each_thread(p, t)
  2714. flush_sigqueue_mask(&mask, &t->pending);
  2715. }
  2716. }
  2717. spin_unlock_irq(&p->sighand->siglock);
  2718. return 0;
  2719. }
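/*
 * Caller sketch (illustrative): the rt_sigaction(2) entry point copies
 * the user's struct sigaction in and out around this call, roughly:
 *
 *	struct k_sigaction new_ka, old_ka;
 *
 *	if (act && copy_from_user(&new_ka.sa, act, sizeof(new_ka.sa)))
 *		return -EFAULT;
 *	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
 *	if (!ret && oact && copy_to_user(oact, &old_ka.sa, sizeof(old_ka.sa)))
 *		return -EFAULT;
 */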

static int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
        stack_t oss;
        int error;

        oss.ss_sp = (void __user *) current->sas_ss_sp;
        oss.ss_size = current->sas_ss_size;
        oss.ss_flags = sas_ss_flags(sp) |
                (current->sas_ss_flags & SS_FLAG_BITS);

        if (uss) {
                void __user *ss_sp;
                size_t ss_size;
                unsigned ss_flags;
                int ss_mode;

                error = -EFAULT;
                if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
                        goto out;
                error = __get_user(ss_sp, &uss->ss_sp) |
                        __get_user(ss_flags, &uss->ss_flags) |
                        __get_user(ss_size, &uss->ss_size);
                if (error)
                        goto out;

                error = -EPERM;
                if (on_sig_stack(sp))
                        goto out;

                ss_mode = ss_flags & ~SS_FLAG_BITS;
                error = -EINVAL;
                if (ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
                                ss_mode != 0)
                        goto out;

                if (ss_mode == SS_DISABLE) {
                        ss_size = 0;
                        ss_sp = NULL;
                } else {
                        error = -ENOMEM;
                        if (ss_size < MINSIGSTKSZ)
                                goto out;
                }

                current->sas_ss_sp = (unsigned long) ss_sp;
                current->sas_ss_size = ss_size;
                current->sas_ss_flags = ss_flags;
        }

        error = 0;
        if (uoss) {
                error = -EFAULT;
                if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
                        goto out;
                error = __put_user(oss.ss_sp, &uoss->ss_sp) |
                        __put_user(oss.ss_size, &uoss->ss_size) |
                        __put_user(oss.ss_flags, &uoss->ss_flags);
        }

out:
        return error;
}

SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
{
        return do_sigaltstack(uss, uoss, current_user_stack_pointer());
}
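
/*
 * Userspace illustration (a minimal sketch, not part of the kernel): the
 * usual pairing of sigaltstack() with SA_ONSTACK, e.g. so a SIGSEGV handler
 * can still run after the main stack has overflowed.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	static void handler(int sig)
 *	{
 *		// runs on the alternate stack
 *	}
 *
 *	int main(void)
 *	{
 *		stack_t ss = {
 *			.ss_sp = malloc(SIGSTKSZ),	// must be >= MINSIGSTKSZ
 *			.ss_size = SIGSTKSZ,
 *			.ss_flags = 0,			// ss_mode 0 is accepted above
 *		};
 *		struct sigaction sa = {
 *			.sa_handler = handler,
 *			.sa_flags = SA_ONSTACK,
 *		};
 *
 *		sigaltstack(&ss, NULL);		// lands in do_sigaltstack()
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGSEGV, &sa, NULL);
 *		return 0;
 *	}
 */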

int restore_altstack(const stack_t __user *uss)
{
        int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
        /* squash all but EFAULT for now */
        return err == -EFAULT ? err : 0;
}

int __save_altstack(stack_t __user *uss, unsigned long sp)
{
        struct task_struct *t = current;
        int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
                __put_user(t->sas_ss_flags, &uss->ss_flags) |
                __put_user(t->sas_ss_size, &uss->ss_size);
        if (err)
                return err;
        if (t->sas_ss_flags & SS_AUTODISARM)
                sas_ss_reset(t);
        return 0;
}
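
/*
 * Note on SS_AUTODISARM (hedged, illustrative): with the flag set,
 * __save_altstack() stashes the settings in the signal frame and disarms
 * the alternate stack via sas_ss_reset(); restore_altstack() re-arms it on
 * sigreturn. That makes it safe to swapcontext() away from a handler
 * without a later signal reusing a stack still marked in use. Userspace
 * opts in with something like:
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = SS_AUTODISARM,	// ss_mode 0 plus the flag bit
 *	};
 *	sigaltstack(&ss, NULL);
 */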

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(sigaltstack,
                        const compat_stack_t __user *, uss_ptr,
                        compat_stack_t __user *, uoss_ptr)
{
        stack_t uss, uoss;
        int ret;
        mm_segment_t seg;

        if (uss_ptr) {
                compat_stack_t uss32;

                memset(&uss, 0, sizeof(stack_t));
                if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
                        return -EFAULT;
                uss.ss_sp = compat_ptr(uss32.ss_sp);
                uss.ss_flags = uss32.ss_flags;
                uss.ss_size = uss32.ss_size;
        }
        seg = get_fs();
        set_fs(KERNEL_DS);
        ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
                             (stack_t __force __user *) &uoss,
                             compat_user_stack_pointer());
        set_fs(seg);
        if (ret >= 0 && uoss_ptr)  {
                if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
                    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
                    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
                    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
                        ret = -EFAULT;
        }
        return ret;
}

int compat_restore_altstack(const compat_stack_t __user *uss)
{
        int err = compat_sys_sigaltstack(uss, NULL);
        /* squash all but -EFAULT for now */
        return err == -EFAULT ? err : 0;
}

int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
        int err;
        struct task_struct *t = current;
        err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
                         &uss->ss_sp) |
                __put_user(t->sas_ss_flags, &uss->ss_flags) |
                __put_user(t->sas_ss_size, &uss->ss_size);
        if (err)
                return err;
        if (t->sas_ss_flags & SS_AUTODISARM)
                sas_ss_reset(t);
        return 0;
}
#endif

#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 *  sys_sigpending - examine pending signals
 *  @set: where mask of pending signal is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
        return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
}

#endif
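
/*
 * Explanatory note (hedged): the cast in sys_sigpending() is safe because
 * rt_sigpending copies back only @sigsetsize bytes; passing
 * sizeof(old_sigset_t) makes it fill just the first word of the mask,
 * which is all the legacy ABI can represent.
 */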

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */
SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
                old_sigset_t __user *, oset)
{
        old_sigset_t old_set, new_set;
        sigset_t new_blocked;

        old_set = current->blocked.sig[0];

        if (nset) {
                if (copy_from_user(&new_set, nset, sizeof(*nset)))
                        return -EFAULT;

                new_blocked = current->blocked;

                switch (how) {
                case SIG_BLOCK:
                        sigaddsetmask(&new_blocked, new_set);
                        break;
                case SIG_UNBLOCK:
                        sigdelsetmask(&new_blocked, new_set);
                        break;
                case SIG_SETMASK:
                        new_blocked.sig[0] = new_set;
                        break;
                default:
                        return -EINVAL;
                }

                set_current_blocked(&new_blocked);
        }

        if (oset) {
                if (copy_to_user(oset, &old_set, sizeof(*oset)))
                        return -EFAULT;
        }

        return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
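
/*
 * Userspace illustration (a minimal sketch, not part of the kernel): the
 * classic block/restore pattern served by sys_sigprocmask() and its rt
 * equivalent.
 *
 *	#include <signal.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set, old;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGINT);
 *		sigprocmask(SIG_BLOCK, &set, &old);	// SIGINT now stays pending
 *		// critical section: delivery deferred, not lost
 *		sigprocmask(SIG_SETMASK, &old, NULL);	// restore the old mask
 *		return 0;
 *	}
 */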

#ifndef CONFIG_ODD_RT_SIGACTION
/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal whose action is to be changed
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
                const struct sigaction __user *, act,
                struct sigaction __user *, oact,
                size_t, sigsetsize)
{
        struct k_sigaction new_sa, old_sa;
        int ret = -EINVAL;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                goto out;

        if (act) {
                if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
                        return -EFAULT;
        }

        ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

        if (!ret && oact) {
                if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
                        return -EFAULT;
        }
out:
        return ret;
}
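
/*
 * ABI note with a hedged sketch (not part of this file): the @sigsetsize
 * check above means raw callers must pass the kernel's _NSIG / 8 (8 on
 * x86-64) and use the kernel's sigaction layout, which differs from
 * glibc's. The struct below is the assumed x86-64 layout; querying with
 * act == NULL sidesteps the sa_restorer complications of installing one.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct kernel_sigaction {	// assumed x86-64 layout
 *		void (*handler)(int);
 *		unsigned long flags;
 *		void (*restorer)(void);
 *		unsigned long mask;	// kernel sigset_t: _NSIG / 8 == 8 bytes
 *	};
 *
 *	int main(void)
 *	{
 *		struct kernel_sigaction old;
 *		long ret = syscall(SYS_rt_sigaction, SIGINT, NULL, &old,
 *				   sizeof(old.mask));
 *
 *		printf("ret=%ld handler=%p\n", ret, (void *)old.handler);
 *		return 0;
 *	}
 */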

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
                const struct compat_sigaction __user *, act,
                struct compat_sigaction __user *, oact,
                compat_size_t, sigsetsize)
{
        struct k_sigaction new_ka, old_ka;
        compat_sigset_t mask;
#ifdef __ARCH_HAS_SA_RESTORER
        compat_uptr_t restorer;
#endif
        int ret;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(compat_sigset_t))
                return -EINVAL;

        if (act) {
                compat_uptr_t handler;
                ret = get_user(handler, &act->sa_handler);
                new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
                ret |= get_user(restorer, &act->sa_restorer);
                new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
                ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
                ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
                if (ret)
                        return -EFAULT;
                sigset_from_compat(&new_ka.sa.sa_mask, &mask);
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
        if (!ret && oact) {
                sigset_to_compat(&mask, &old_ka.sa.sa_mask);
                ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
                               &oact->sa_handler);
                ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
                ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
                ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
                                &oact->sa_restorer);
#endif
        }
        return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */

#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
                const struct old_sigaction __user *, act,
                struct old_sigaction __user *, oact)
{
        struct k_sigaction new_ka, old_ka;
        int ret;

        if (act) {
                old_sigset_t mask;

                if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
                    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
                    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
                    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
                    __get_user(mask, &act->sa_mask))
                        return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
                new_ka.ka_restorer = NULL;
#endif
                siginitset(&new_ka.sa.sa_mask, mask);
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

        if (!ret && oact) {
                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
                    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
                    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
                    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
                    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
                        return -EFAULT;
        }

        return ret;
}
#endif

#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
                const struct compat_old_sigaction __user *, act,
                struct compat_old_sigaction __user *, oact)
{
        struct k_sigaction new_ka, old_ka;
        int ret;
        compat_old_sigset_t mask;
        compat_uptr_t handler, restorer;

        if (act) {
                if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
                    __get_user(handler, &act->sa_handler) ||
                    __get_user(restorer, &act->sa_restorer) ||
                    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
                    __get_user(mask, &act->sa_mask))
                        return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
                new_ka.ka_restorer = NULL;
#endif
                new_ka.sa.sa_handler = compat_ptr(handler);
                new_ka.sa.sa_restorer = compat_ptr(restorer);
                siginitset(&new_ka.sa.sa_mask, mask);
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

        if (!ret && oact) {
                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
                    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
                               &oact->sa_handler) ||
                    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
                               &oact->sa_restorer) ||
                    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
                    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
                        return -EFAULT;
        }
        return ret;
}
#endif

#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
        /* SMP safe */
        return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
        int old = current->blocked.sig[0];
        sigset_t newset;

        siginitset(&newset, newmask);
        set_current_blocked(&newset);

        return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
        struct k_sigaction new_sa, old_sa;
        int ret;

        new_sa.sa.sa_handler = handler;
        new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
        sigemptyset(&new_sa.sa.sa_mask);

        ret = do_sigaction(sig, &new_sa, &old_sa);

        return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
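
/*
 * Behavioural note (hedged sketch, not part of this file): SA_ONESHOT
 * means the disposition resets to SIG_DFL once the handler fires, so
 * classic SysV code re-arms from inside the handler:
 *
 *	static void on_int(int sig)
 *	{
 *		signal(SIGINT, on_int);		// re-arm: one-shot semantics
 *	}
 *
 * Note that glibc's signal() wrapper calls rt_sigaction() with BSD
 * semantics rather than this syscall, so portable code should use
 * sigaction() directly.
 */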

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
        while (!signal_pending(current)) {
                __set_current_state(TASK_INTERRUPTIBLE);
                schedule();
        }
        return -ERESTARTNOHAND;
}

#endif
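
/*
 * Usage note (hedged, not part of this file): pause() returns only after a
 * handled signal interrupts it, but checking a flag and then calling
 * pause() is racy; a signal landing between the check and the sleep is
 * missed:
 *
 *	while (!done)
 *		pause();	// racy window before the call
 *
 * sigsuspend() below exists precisely to close that window.
 */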

static int sigsuspend(sigset_t *set)
{
        current->saved_sigmask = current->blocked;
        set_current_blocked(set);

        while (!signal_pending(current)) {
                __set_current_state(TASK_INTERRUPTIBLE);
                schedule();
        }
        set_restore_sigmask();
        return -ERESTARTNOHAND;
}

/**
 *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
 *	until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
        sigset_t newset;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&newset, unewset, sizeof(newset)))
                return -EFAULT;
        return sigsuspend(&newset);
}
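
/*
 * Userspace illustration (a minimal sketch, not part of the kernel): the
 * race-free wait that sigsuspend() enables by swapping the mask and
 * sleeping atomically.
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t done;
 *
 *	static void on_usr1(int sig)
 *	{
 *		done = 1;
 *	}
 *
 *	int main(void)
 *	{
 *		sigset_t block, old;
 *
 *		signal(SIGUSR1, on_usr1);
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &old);	// close the race window
 *		while (!done)
 *			sigsuspend(&old);	// unblock + sleep, atomically
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *		return 0;
 *	}
 */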

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
        sigset_t newset;
        compat_sigset_t newset32;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
                return -EFAULT;
        sigset_from_compat(&newset, &newset32);
        return sigsuspend(&newset);
#else
        /* on little-endian bitmaps don't care about granularity */
        return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
#endif
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
        sigset_t blocked;

        siginitset(&blocked, mask);
        return sigsuspend(&blocked);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
        sigset_t blocked;

        siginitset(&blocked, mask);
        return sigsuspend(&blocked);
}
#endif

__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
        return NULL;
}

void __init signals_init(void)
{
        /* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
        BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
                != offsetof(struct siginfo, _sifields._pad));

        sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
        static struct task_struct *kdb_prev_t;
        int sig, new_t;
        if (!spin_trylock(&t->sighand->siglock)) {
                kdb_printf("Can't do kill command now.\n"
                           "The sigmask lock is held somewhere else in "
                           "the kernel; try again later\n");
                return;
        }
        spin_unlock(&t->sighand->siglock);
        new_t = kdb_prev_t != t;
        kdb_prev_t = t;
        if (t->state != TASK_RUNNING && new_t) {
                kdb_printf("Process is not RUNNING, sending a signal from "
                           "kdb risks deadlock\n"
                           "on the run queue locks. "
                           "The signal has _not_ been sent.\n"
                           "Reissue the kill command if you want to risk "
                           "the deadlock.\n");
                return;
        }
        sig = info->si_signo;
        if (send_sig_info(sig, info, t))
                kdb_printf("Failed to deliver signal %d to process %d.\n",
                           sig, t->pid);
        else
                kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
}
#endif  /* CONFIG_KGDB_KDB */