- /*
- * linux/fs/pipe.c
- *
- * Copyright (C) 1991, 1992, 1999 Linus Torvalds
- */
- #include <linux/mm.h>
- #include <linux/file.h>
- #include <linux/poll.h>
- #include <linux/slab.h>
- #include <linux/module.h>
- #include <linux/init.h>
- #include <linux/fs.h>
- #include <linux/log2.h>
- #include <linux/mount.h>
- #include <linux/magic.h>
- #include <linux/pipe_fs_i.h>
- #include <linux/uio.h>
- #include <linux/highmem.h>
- #include <linux/pagemap.h>
- #include <linux/audit.h>
- #include <linux/syscalls.h>
- #include <linux/fcntl.h>
- #include <asm/uaccess.h>
- #include <asm/ioctls.h>
- /*
- * The max size that a non-root user is allowed to grow the pipe. Can
- * be set by root in /proc/sys/fs/pipe-max-size
- */
- unsigned int pipe_max_size = 1048576;
- /*
- * Minimum pipe size, as required by POSIX
- */
- unsigned int pipe_min_size = PAGE_SIZE;
- /* Maximum allocatable pages per user. Hard limit is unset by default, soft
- * matches default values.
- */
- unsigned long pipe_user_pages_hard;
- unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
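- /*
- * Illustrative arithmetic (added note, not in the original source): with the
- * usual values of PIPE_DEF_BUFFERS == 16 and INR_OPEN_CUR == 1024, the
- * default soft limit is
- *
- *     16 * 1024 = 16384 pages, i.e. 64 MiB with 4 KiB pages,
- *
- * per user across all of that user's pipes, while a hard limit of 0 means
- * "no hard limit" (see too_many_pipe_buffers_hard() below).
- */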
- /*
- * We use a start+len construction, which provides full use of the
- * allocated memory.
- * -- Florian Coosmann (FGC)
- *
- * Reads with count = 0 should always return 0.
- * -- Julian Bradfield 1999-06-07.
- *
- * FIFOs and Pipes now generate SIGIO for both readers and writers.
- * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
- *
- * pipe_read & write cleanup
- * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
- */
- static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
- {
- if (pipe->inode)
- mutex_lock_nested(&pipe->inode->i_mutex, subclass);
- }
- void pipe_lock(struct pipe_inode_info *pipe)
- {
- /*
- * pipe_lock() nests non-pipe inode locks (for writing to a file)
- */
- pipe_lock_nested(pipe, I_MUTEX_PARENT);
- }
- EXPORT_SYMBOL(pipe_lock);
- void pipe_unlock(struct pipe_inode_info *pipe)
- {
- if (pipe->inode)
- mutex_unlock(&pipe->inode->i_mutex);
- }
- EXPORT_SYMBOL(pipe_unlock);
- void pipe_double_lock(struct pipe_inode_info *pipe1,
- struct pipe_inode_info *pipe2)
- {
- BUG_ON(pipe1 == pipe2);
- if (pipe1 < pipe2) {
- pipe_lock_nested(pipe1, I_MUTEX_PARENT);
- pipe_lock_nested(pipe2, I_MUTEX_CHILD);
- } else {
- pipe_lock_nested(pipe2, I_MUTEX_PARENT);
- pipe_lock_nested(pipe1, I_MUTEX_CHILD);
- }
- }
- /* Drop the inode semaphore and wait for a pipe event, atomically */
- void pipe_wait(struct pipe_inode_info *pipe)
- {
- DEFINE_WAIT(wait);
- /*
- * Pipes are system-local resources, so sleeping on them
- * is considered a noninteractive wait:
- */
- prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
- pipe_unlock(pipe);
- schedule();
- finish_wait(&pipe->wait, &wait);
- pipe_lock(pipe);
- }
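- /*
- * Illustrative caller pattern (added note, not in the original source),
- * assuming the caller already holds the pipe lock; because pipe_wait()
- * drops and re-takes that lock, the awaited condition must be re-checked
- * after it returns:
- *
- *     pipe_lock(pipe);
- *     while (!condition_is_met(pipe)) {      // condition_is_met() is a placeholder
- *             if (signal_pending(current))
- *                     break;
- *             pipe_wait(pipe);               // sleeps with the lock released
- *     }
- *     pipe_unlock(pipe);
- */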
- static int
- pipe_iov_copy_from_user(void *addr, int *offset, struct iovec *iov,
- size_t *remaining, int atomic)
- {
- unsigned long copy;
- while (*remaining > 0) {
- while (!iov->iov_len)
- iov++;
- copy = min_t(unsigned long, *remaining, iov->iov_len);
- if (atomic) {
- if (__copy_from_user_inatomic(addr + *offset,
- iov->iov_base, copy))
- return -EFAULT;
- } else {
- if (copy_from_user(addr + *offset,
- iov->iov_base, copy))
- return -EFAULT;
- }
- *offset += copy;
- *remaining -= copy;
- iov->iov_base += copy;
- iov->iov_len -= copy;
- }
- return 0;
- }
- static int
- pipe_iov_copy_to_user(struct iovec *iov, void *addr, int *offset,
- size_t *remaining, int atomic)
- {
- unsigned long copy;
- while (*remaining > 0) {
- while (!iov->iov_len)
- iov++;
- copy = min_t(unsigned long, *remaining, iov->iov_len);
- if (atomic) {
- if (__copy_to_user_inatomic(iov->iov_base,
- addr + *offset, copy))
- return -EFAULT;
- } else {
- if (copy_to_user(iov->iov_base,
- addr + *offset, copy))
- return -EFAULT;
- }
- *offset += copy;
- *remaining -= copy;
- iov->iov_base += copy;
- iov->iov_len -= copy;
- }
- return 0;
- }
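- /*
- * Clarifying note (added, not in the original source): @offset, @remaining
- * and the iovec itself are all advanced in place, so when an atomic copy
- * fails part-way through, the caller can retry with atomic == 0 and resume
- * from the last fully copied chunk instead of copying everything again.
- * pipe_read() and pipe_write() below rely on exactly that in their
- * "redo" paths.
- */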
- /*
- * Attempt to pre-fault in the user memory, so we can use atomic copies.
- * Returns the number of bytes not faulted in.
- */
- static int iov_fault_in_pages_write(struct iovec *iov, unsigned long len)
- {
- while (!iov->iov_len)
- iov++;
- while (len > 0) {
- unsigned long this_len;
- this_len = min_t(unsigned long, len, iov->iov_len);
- if (fault_in_pages_writeable(iov->iov_base, this_len))
- break;
- len -= this_len;
- iov++;
- }
- return len;
- }
- /*
- * Pre-fault in the user memory, so we can use atomic copies.
- */
- static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len)
- {
- while (!iov->iov_len)
- iov++;
- while (len > 0) {
- unsigned long this_len;
- this_len = min_t(unsigned long, len, iov->iov_len);
- fault_in_pages_readable(iov->iov_base, this_len);
- len -= this_len;
- iov++;
- }
- }
- static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
- {
- struct page *page = buf->page;
- /*
- * If nobody else uses this page, and we don't already have a
- * temporary page, let's keep track of it as a one-deep
- * allocation cache. (Otherwise just release our reference to it)
- */
- if (page_count(page) == 1 && !pipe->tmp_page)
- pipe->tmp_page = page;
- else
- page_cache_release(page);
- }
- /**
- * generic_pipe_buf_map - virtually map a pipe buffer
- * @pipe: the pipe that the buffer belongs to
- * @buf: the buffer that should be mapped
- * @atomic: whether to use an atomic map
- *
- * Description:
- * This function returns a kernel virtual address mapping for the
- * pipe_buffer passed in @buf. If @atomic is set, an atomic map is provided
- * and the caller has to be careful not to fault before calling
- * the unmap function.
- *
- * Note that this function occupies KM_USER0 if @atomic != 0.
- */
- void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf, int atomic)
- {
- if (atomic) {
- buf->flags |= PIPE_BUF_FLAG_ATOMIC;
- return kmap_atomic(buf->page);
- }
- return kmap(buf->page);
- }
- EXPORT_SYMBOL(generic_pipe_buf_map);
- /**
- * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer
- * @pipe: the pipe that the buffer belongs to
- * @buf: the buffer that should be unmapped
- * @map_data: the data that the mapping function returned
- *
- * Description:
- * This function undoes the mapping that ->map() provided.
- */
- void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf, void *map_data)
- {
- if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
- buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
- kunmap_atomic(map_data);
- } else
- kunmap(buf->page);
- }
- EXPORT_SYMBOL(generic_pipe_buf_unmap);
- /**
- * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
- * @pipe: the pipe that the buffer belongs to
- * @buf: the buffer to attempt to steal
- *
- * Description:
- * This function attempts to steal the &struct page attached to
- * @buf. If successful, this function returns 0 and returns with
- * the page locked. The caller may then reuse the page for whatever
- * he wishes; the typical use is insertion into a different file
- * page cache.
- */
- int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
- {
- struct page *page = buf->page;
- /*
- * A reference of one is golden, that means that the owner of this
- * page is the only one holding a reference to it. lock the page
- * and return OK.
- */
- if (page_count(page) == 1) {
- lock_page(page);
- return 0;
- }
- return 1;
- }
- EXPORT_SYMBOL(generic_pipe_buf_steal);
- /**
- * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
- * @pipe: the pipe that the buffer belongs to
- * @buf: the buffer to get a reference to
- *
- * Description:
- * This function grabs an extra reference to @buf. It's used
- * in the tee() system call, when we duplicate the buffers in one
- * pipe into another.
- */
- void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
- {
- page_cache_get(buf->page);
- }
- EXPORT_SYMBOL(generic_pipe_buf_get);
- /**
- * generic_pipe_buf_confirm - verify contents of the pipe buffer
- * @info: the pipe that the buffer belongs to
- * @buf: the buffer to confirm
- *
- * Description:
- * This function does nothing, because the generic pipe code uses
- * pages that are always good when inserted into the pipe.
- */
- int generic_pipe_buf_confirm(struct pipe_inode_info *info,
- struct pipe_buffer *buf)
- {
- return 0;
- }
- EXPORT_SYMBOL(generic_pipe_buf_confirm);
- /**
- * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
- * @pipe: the pipe that the buffer belongs to
- * @buf: the buffer to put a reference to
- *
- * Description:
- * This function releases a reference to @buf.
- */
- void generic_pipe_buf_release(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
- {
- page_cache_release(buf->page);
- }
- EXPORT_SYMBOL(generic_pipe_buf_release);
- static const struct pipe_buf_operations anon_pipe_buf_ops = {
- .can_merge = 1,
- .map = generic_pipe_buf_map,
- .unmap = generic_pipe_buf_unmap,
- .confirm = generic_pipe_buf_confirm,
- .release = anon_pipe_buf_release,
- .steal = generic_pipe_buf_steal,
- .get = generic_pipe_buf_get,
- };
- static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = {
- .can_merge = 0,
- .map = generic_pipe_buf_map,
- .unmap = generic_pipe_buf_unmap,
- .confirm = generic_pipe_buf_confirm,
- .release = anon_pipe_buf_release,
- .steal = generic_pipe_buf_steal,
- .get = generic_pipe_buf_get,
- };
- static const struct pipe_buf_operations packet_pipe_buf_ops = {
- .can_merge = 0,
- .map = generic_pipe_buf_map,
- .unmap = generic_pipe_buf_unmap,
- .confirm = generic_pipe_buf_confirm,
- .release = anon_pipe_buf_release,
- .steal = generic_pipe_buf_steal,
- .get = generic_pipe_buf_get,
- };
- void pipe_buf_mark_unmergeable(struct pipe_buffer *buf)
- {
- if (buf->ops == &anon_pipe_buf_ops)
- buf->ops = &anon_pipe_buf_nomerge_ops;
- }
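- /*
- * Clarifying note (added, not in the original source): callers outside this
- * file (the splice/tee code, for instance) mark a buffer unmergeable when
- * they do not exclusively own its page, so that a later pipe_write() cannot
- * take the ops->can_merge fast path and append new bytes into a page that is
- * still visible elsewhere, such as in the page cache.
- */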
- static ssize_t
- pipe_read(struct kiocb *iocb, const struct iovec *_iov,
- unsigned long nr_segs, loff_t pos)
- {
- struct file *filp = iocb->ki_filp;
- struct inode *inode = filp->f_path.dentry->d_inode;
- struct pipe_inode_info *pipe;
- int do_wakeup;
- ssize_t ret;
- struct iovec *iov = (struct iovec *)_iov;
- size_t total_len;
- total_len = iov_length(iov, nr_segs);
- /* Null read succeeds. */
- if (unlikely(total_len == 0))
- return 0;
- do_wakeup = 0;
- ret = 0;
- mutex_lock(&inode->i_mutex);
- pipe = inode->i_pipe;
- for (;;) {
- int bufs = pipe->nrbufs;
- if (bufs) {
- int curbuf = pipe->curbuf;
- struct pipe_buffer *buf = pipe->bufs + curbuf;
- const struct pipe_buf_operations *ops = buf->ops;
- void *addr;
- size_t chars = buf->len, remaining;
- int error, atomic;
- int offset;
- if (chars > total_len)
- chars = total_len;
- error = ops->confirm(pipe, buf);
- if (error) {
- if (!ret)
- ret = error;
- break;
- }
- atomic = !iov_fault_in_pages_write(iov, chars);
- remaining = chars;
- offset = buf->offset;
- redo:
- addr = ops->map(pipe, buf, atomic);
- error = pipe_iov_copy_to_user(iov, addr, &offset,
- &remaining, atomic);
- ops->unmap(pipe, buf, addr);
- if (unlikely(error)) {
- /*
- * Just retry with the slow path if we failed.
- */
- if (atomic) {
- atomic = 0;
- goto redo;
- }
- if (!ret)
- ret = error;
- break;
- }
- ret += chars;
- buf->offset += chars;
- buf->len -= chars;
- /* Was it a packet buffer? Clean up and exit */
- if (buf->flags & PIPE_BUF_FLAG_PACKET) {
- total_len = chars;
- buf->len = 0;
- }
- if (!buf->len) {
- buf->ops = NULL;
- ops->release(pipe, buf);
- curbuf = (curbuf + 1) & (pipe->buffers - 1);
- pipe->curbuf = curbuf;
- pipe->nrbufs = --bufs;
- do_wakeup = 1;
- }
- total_len -= chars;
- if (!total_len)
- break; /* common path: read succeeded */
- }
- if (bufs) /* More to do? */
- continue;
- if (!pipe->writers)
- break;
- if (!pipe->waiting_writers) {
- /* syscall merging: Usually we must not sleep
- * if O_NONBLOCK is set, or if we got some data.
- * But if a writer sleeps in kernel space, then
- * we can wait for that data without violating POSIX.
- */
- if (ret)
- break;
- if (filp->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- break;
- }
- }
- if (signal_pending(current)) {
- if (!ret)
- ret = -ERESTARTSYS;
- break;
- }
- if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
- kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
- }
- pipe_wait(pipe);
- }
- mutex_unlock(&inode->i_mutex);
- /* Signal writers asynchronously that there is more room. */
- if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
- kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
- }
- if (ret > 0)
- file_accessed(filp);
- return ret;
- }
- static inline int is_packetized(struct file *file)
- {
- return (file->f_flags & O_DIRECT) != 0;
- }
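- /*
- * Illustrative user-space sketch (added, not in the original source): packet
- * mode is selected by opening the pipe with O_DIRECT, and pipe_read() then
- * returns at most one packet per read, discarding any unread tail of that
- * packet:
- *
- *     int fd[2];
- *     pipe2(fd, O_DIRECT);
- *     write(fd[1], "hello", 5);              // first packet
- *     write(fd[1], "world", 5);              // second packet
- *     char buf[64];
- *     ssize_t n = read(fd[0], buf, sizeof(buf));   // n == 5, only "hello"
- */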
- static ssize_t
- pipe_write(struct kiocb *iocb, const struct iovec *_iov,
- unsigned long nr_segs, loff_t ppos)
- {
- struct file *filp = iocb->ki_filp;
- struct inode *inode = filp->f_path.dentry->d_inode;
- struct pipe_inode_info *pipe;
- ssize_t ret;
- int do_wakeup;
- struct iovec *iov = (struct iovec *)_iov;
- size_t total_len;
- ssize_t chars;
- total_len = iov_length(iov, nr_segs);
- /* Null write succeeds. */
- if (unlikely(total_len == 0))
- return 0;
- do_wakeup = 0;
- ret = 0;
- mutex_lock(&inode->i_mutex);
- pipe = inode->i_pipe;
- if (!pipe->readers) {
- send_sig(SIGPIPE, current, 0);
- ret = -EPIPE;
- goto out;
- }
- /* We try to merge small writes */
- chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
- if (pipe->nrbufs && chars != 0) {
- int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
- (pipe->buffers - 1);
- struct pipe_buffer *buf = pipe->bufs + lastbuf;
- const struct pipe_buf_operations *ops = buf->ops;
- int offset = buf->offset + buf->len;
- if (ops->can_merge && offset + chars <= PAGE_SIZE) {
- int error, atomic = 1;
- void *addr;
- size_t remaining = chars;
- error = ops->confirm(pipe, buf);
- if (error)
- goto out;
- iov_fault_in_pages_read(iov, chars);
- redo1:
- addr = ops->map(pipe, buf, atomic);
- error = pipe_iov_copy_from_user(addr, &offset, iov,
- &remaining, atomic);
- ops->unmap(pipe, buf, addr);
- ret = error;
- do_wakeup = 1;
- if (error) {
- if (atomic) {
- atomic = 0;
- goto redo1;
- }
- goto out;
- }
- buf->len += chars;
- total_len -= chars;
- ret = chars;
- if (!total_len)
- goto out;
- }
- }
- for (;;) {
- int bufs;
- if (!pipe->readers) {
- send_sig(SIGPIPE, current, 0);
- if (!ret)
- ret = -EPIPE;
- break;
- }
- bufs = pipe->nrbufs;
- if (bufs < pipe->buffers) {
- int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
- struct pipe_buffer *buf = pipe->bufs + newbuf;
- struct page *page = pipe->tmp_page;
- char *src;
- int error, atomic = 1;
- int offset = 0;
- size_t remaining;
- if (!page) {
- page = alloc_page(GFP_HIGHUSER);
- if (unlikely(!page)) {
- ret = ret ? : -ENOMEM;
- break;
- }
- pipe->tmp_page = page;
- }
- /* Always wake up, even if the copy fails. Otherwise
- * we lock up (O_NONBLOCK-)readers that sleep due to
- * syscall merging.
- * FIXME! Is this really true?
- */
- do_wakeup = 1;
- chars = PAGE_SIZE;
- if (chars > total_len)
- chars = total_len;
- iov_fault_in_pages_read(iov, chars);
- remaining = chars;
- redo2:
- if (atomic)
- src = kmap_atomic(page);
- else
- src = kmap(page);
- error = pipe_iov_copy_from_user(src, &offset, iov,
- &remaining, atomic);
- if (atomic)
- kunmap_atomic(src);
- else
- kunmap(page);
- if (unlikely(error)) {
- if (atomic) {
- atomic = 0;
- goto redo2;
- }
- if (!ret)
- ret = error;
- break;
- }
- ret += chars;
- /* Insert it into the buffer array */
- buf->page = page;
- buf->ops = &anon_pipe_buf_ops;
- buf->offset = 0;
- buf->len = chars;
- buf->flags = 0;
- if (is_packetized(filp)) {
- buf->ops = &packet_pipe_buf_ops;
- buf->flags = PIPE_BUF_FLAG_PACKET;
- }
- pipe->nrbufs = ++bufs;
- pipe->tmp_page = NULL;
- total_len -= chars;
- if (!total_len)
- break;
- }
- if (bufs < pipe->buffers)
- continue;
- if (filp->f_flags & O_NONBLOCK) {
- if (!ret)
- ret = -EAGAIN;
- break;
- }
- if (signal_pending(current)) {
- if (!ret)
- ret = -ERESTARTSYS;
- break;
- }
- if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
- kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
- do_wakeup = 0;
- }
- pipe->waiting_writers++;
- pipe_wait(pipe);
- pipe->waiting_writers--;
- }
- out:
- mutex_unlock(&inode->i_mutex);
- if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
- kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
- }
- if (ret > 0) {
- int err = file_update_time(filp);
- if (err)
- ret = err;
- }
- return ret;
- }
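- /*
- * Behavioural note (added, not in the original source): the whole write runs
- * under i_mutex and a request of at most one page is copied in a single loop
- * iteration once a slot is free, so writes of up to PIPE_BUF bytes (4096 on
- * Linux) are never interleaved with other writers -- the POSIX atomicity
- * guarantee.  Larger writes may be split across slots and across sleeps.
- */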
- static ssize_t
- bad_pipe_r(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
- {
- return -EBADF;
- }
- static ssize_t
- bad_pipe_w(struct file *filp, const char __user *buf, size_t count,
- loff_t *ppos)
- {
- return -EBADF;
- }
- static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
- {
- struct inode *inode = filp->f_path.dentry->d_inode;
- struct pipe_inode_info *pipe;
- int count, buf, nrbufs;
- switch (cmd) {
- case FIONREAD:
- mutex_lock(&inode->i_mutex);
- pipe = inode->i_pipe;
- count = 0;
- buf = pipe->curbuf;
- nrbufs = pipe->nrbufs;
- while (--nrbufs >= 0) {
- count += pipe->bufs[buf].len;
- buf = (buf+1) & (pipe->buffers - 1);
- }
- mutex_unlock(&inode->i_mutex);
- return put_user(count, (int __user *)arg);
- default:
- return -ENOIOCTLCMD;
- }
- }
- /* No kernel lock held - fine */
- static unsigned int
- pipe_poll(struct file *filp, poll_table *wait)
- {
- unsigned int mask;
- struct inode *inode = filp->f_path.dentry->d_inode;
- struct pipe_inode_info *pipe = inode->i_pipe;
- int nrbufs;
- poll_wait(filp, &pipe->wait, wait);
- /* Reading only -- no need for acquiring the semaphore. */
- nrbufs = pipe->nrbufs;
- mask = 0;
- if (filp->f_mode & FMODE_READ) {
- mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
- if (!pipe->writers && filp->f_version != pipe->w_counter)
- mask |= POLLHUP;
- }
- if (filp->f_mode & FMODE_WRITE) {
- mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
- /*
- * Most Unices do not set POLLERR for FIFOs but on Linux they
- * behave exactly like pipes for poll().
- */
- if (!pipe->readers)
- mask |= POLLERR;
- }
- return mask;
- }
- static int
- pipe_release(struct inode *inode, int decr, int decw)
- {
- struct pipe_inode_info *pipe;
- mutex_lock(&inode->i_mutex);
- pipe = inode->i_pipe;
- pipe->readers -= decr;
- pipe->writers -= decw;
- if (!pipe->readers && !pipe->writers) {
- free_pipe_info(inode);
- } else {
- wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
- kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
- kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
- }
- mutex_unlock(&inode->i_mutex);
- return 0;
- }
- static int
- pipe_read_fasync(int fd, struct file *filp, int on)
- {
- struct inode *inode = filp->f_path.dentry->d_inode;
- int retval;
- mutex_lock(&inode->i_mutex);
- retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_readers);
- mutex_unlock(&inode->i_mutex);
- return retval;
- }
- static int
- pipe_write_fasync(int fd, struct file *filp, int on)
- {
- struct inode *inode = filp->f_path.dentry->d_inode;
- int retval;
- mutex_lock(&inode->i_mutex);
- retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_writers);
- mutex_unlock(&inode->i_mutex);
- return retval;
- }
- static int
- pipe_rdwr_fasync(int fd, struct file *filp, int on)
- {
- struct inode *inode = filp->f_path.dentry->d_inode;
- struct pipe_inode_info *pipe = inode->i_pipe;
- int retval;
- mutex_lock(&inode->i_mutex);
- retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
- if (retval >= 0) {
- retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
- if (retval < 0) /* this can happen only if on == T */
- fasync_helper(-1, filp, 0, &pipe->fasync_readers);
- }
- mutex_unlock(&inode->i_mutex);
- return retval;
- }
- static int
- pipe_read_release(struct inode *inode, struct file *filp)
- {
- return pipe_release(inode, 1, 0);
- }
- static int
- pipe_write_release(struct inode *inode, struct file *filp)
- {
- return pipe_release(inode, 0, 1);
- }
- static int
- pipe_rdwr_release(struct inode *inode, struct file *filp)
- {
- int decr, decw;
- decr = (filp->f_mode & FMODE_READ) != 0;
- decw = (filp->f_mode & FMODE_WRITE) != 0;
- return pipe_release(inode, decr, decw);
- }
- static int
- pipe_read_open(struct inode *inode, struct file *filp)
- {
- int ret = -ENOENT;
- mutex_lock(&inode->i_mutex);
- if (inode->i_pipe) {
- ret = 0;
- inode->i_pipe->readers++;
- }
- mutex_unlock(&inode->i_mutex);
- return ret;
- }
- static int
- pipe_write_open(struct inode *inode, struct file *filp)
- {
- int ret = -ENOENT;
- mutex_lock(&inode->i_mutex);
- if (inode->i_pipe) {
- ret = 0;
- inode->i_pipe->writers++;
- }
- mutex_unlock(&inode->i_mutex);
- return ret;
- }
- static int
- pipe_rdwr_open(struct inode *inode, struct file *filp)
- {
- int ret = -ENOENT;
- if (!(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
- return -EINVAL;
- mutex_lock(&inode->i_mutex);
- if (inode->i_pipe) {
- ret = 0;
- if (filp->f_mode & FMODE_READ)
- inode->i_pipe->readers++;
- if (filp->f_mode & FMODE_WRITE)
- inode->i_pipe->writers++;
- }
- mutex_unlock(&inode->i_mutex);
- return ret;
- }
- /*
- * The file_operations structs are not static because they
- * are also used in linux/fs/fifo.c to do operations on FIFOs.
- *
- * Pipes reuse fifos' file_operations structs.
- */
- const struct file_operations read_pipefifo_fops = {
- .llseek = no_llseek,
- .read = do_sync_read,
- .aio_read = pipe_read,
- .write = bad_pipe_w,
- .poll = pipe_poll,
- .unlocked_ioctl = pipe_ioctl,
- .open = pipe_read_open,
- .release = pipe_read_release,
- .fasync = pipe_read_fasync,
- };
- const struct file_operations write_pipefifo_fops = {
- .llseek = no_llseek,
- .read = bad_pipe_r,
- .write = do_sync_write,
- .aio_write = pipe_write,
- .poll = pipe_poll,
- .unlocked_ioctl = pipe_ioctl,
- .open = pipe_write_open,
- .release = pipe_write_release,
- .fasync = pipe_write_fasync,
- };
- const struct file_operations rdwr_pipefifo_fops = {
- .llseek = no_llseek,
- .read = do_sync_read,
- .aio_read = pipe_read,
- .write = do_sync_write,
- .aio_write = pipe_write,
- .poll = pipe_poll,
- .unlocked_ioctl = pipe_ioctl,
- .open = pipe_rdwr_open,
- .release = pipe_rdwr_release,
- .fasync = pipe_rdwr_fasync,
- };
- static void account_pipe_buffers(struct pipe_inode_info *pipe,
- unsigned long old, unsigned long new)
- {
- atomic_long_add(new - old, &pipe->user->pipe_bufs);
- }
- static bool too_many_pipe_buffers_soft(struct user_struct *user)
- {
- return pipe_user_pages_soft &&
- atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft;
- }
- static bool too_many_pipe_buffers_hard(struct user_struct *user)
- {
- return pipe_user_pages_hard &&
- atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard;
- }
- struct pipe_inode_info * alloc_pipe_info(struct inode *inode)
- {
- struct pipe_inode_info *pipe;
- pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
- if (pipe) {
- unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
- struct user_struct *user = get_current_user();
- if (!too_many_pipe_buffers_hard(user)) {
- if (too_many_pipe_buffers_soft(user))
- pipe_bufs = 1;
- pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL);
- }
- if (pipe->bufs) {
- init_waitqueue_head(&pipe->wait);
- pipe->r_counter = pipe->w_counter = 1;
- pipe->inode = inode;
- pipe->buffers = pipe_bufs;
- pipe->user = user;
- account_pipe_buffers(pipe, 0, pipe_bufs);
- return pipe;
- }
- free_uid(user);
- kfree(pipe);
- }
- return NULL;
- }
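- /*
- * Clarifying note (added, not in the original source): a user over the soft
- * limit still gets a pipe, but with a single buffer slot instead of
- * PIPE_DEF_BUFFERS; over the hard limit no buffer array is allocated at all,
- * so alloc_pipe_info() returns NULL and the caller fails the pipe creation.
- */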
- void __free_pipe_info(struct pipe_inode_info *pipe)
- {
- int i;
- account_pipe_buffers(pipe, pipe->buffers, 0);
- free_uid(pipe->user);
- for (i = 0; i < pipe->buffers; i++) {
- struct pipe_buffer *buf = pipe->bufs + i;
- if (buf->ops)
- buf->ops->release(pipe, buf);
- }
- if (pipe->tmp_page)
- __free_page(pipe->tmp_page);
- kfree(pipe->bufs);
- kfree(pipe);
- }
- void free_pipe_info(struct inode *inode)
- {
- __free_pipe_info(inode->i_pipe);
- inode->i_pipe = NULL;
- }
- static struct vfsmount *pipe_mnt __read_mostly;
- /*
- * pipefs_dname() is called from d_path().
- */
- static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
- {
- return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
- dentry->d_inode->i_ino);
- }
- static const struct dentry_operations pipefs_dentry_operations = {
- .d_dname = pipefs_dname,
- };
- static struct inode * get_pipe_inode(void)
- {
- struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
- struct pipe_inode_info *pipe;
- if (!inode)
- goto fail_inode;
- inode->i_ino = get_next_ino();
- pipe = alloc_pipe_info(inode);
- if (!pipe)
- goto fail_iput;
- inode->i_pipe = pipe;
- pipe->readers = pipe->writers = 1;
- inode->i_fop = &rdwr_pipefifo_fops;
- /*
- * Mark the inode dirty from the very beginning,
- * that way it will never be moved to the dirty
- * list because "mark_inode_dirty()" will think
- * that it already _is_ on the dirty list.
- */
- inode->i_state = I_DIRTY;
- inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
- inode->i_uid = current_fsuid();
- inode->i_gid = current_fsgid();
- inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- return inode;
- fail_iput:
- iput(inode);
- fail_inode:
- return NULL;
- }
- struct file *create_write_pipe(int flags)
- {
- int err;
- struct inode *inode;
- struct file *f;
- struct path path;
- struct qstr name = { .name = "" };
- err = -ENFILE;
- inode = get_pipe_inode();
- if (!inode)
- goto err;
- err = -ENOMEM;
- path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
- if (!path.dentry)
- goto err_inode;
- path.mnt = mntget(pipe_mnt);
- d_instantiate(path.dentry, inode);
- err = -ENFILE;
- f = alloc_file(&path, FMODE_WRITE, &write_pipefifo_fops);
- if (!f)
- goto err_dentry;
- f->f_mapping = inode->i_mapping;
- f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
- f->f_version = 0;
- return f;
- err_dentry:
- free_pipe_info(inode);
- path_put(&path);
- return ERR_PTR(err);
- err_inode:
- free_pipe_info(inode);
- iput(inode);
- err:
- return ERR_PTR(err);
- }
- void free_write_pipe(struct file *f)
- {
- free_pipe_info(f->f_dentry->d_inode);
- path_put(&f->f_path);
- put_filp(f);
- }
- struct file *create_read_pipe(struct file *wrf, int flags)
- {
- /* Grab pipe from the writer */
- struct file *f = alloc_file(&wrf->f_path, FMODE_READ,
- &read_pipefifo_fops);
- if (!f)
- return ERR_PTR(-ENFILE);
- path_get(&wrf->f_path);
- f->f_flags = O_RDONLY | (flags & O_NONBLOCK);
- return f;
- }
- int do_pipe_flags(int *fd, int flags)
- {
- struct file *fw, *fr;
- int error;
- int fdw, fdr;
- if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
- return -EINVAL;
- fw = create_write_pipe(flags);
- if (IS_ERR(fw))
- return PTR_ERR(fw);
- fr = create_read_pipe(fw, flags);
- error = PTR_ERR(fr);
- if (IS_ERR(fr))
- goto err_write_pipe;
- error = get_unused_fd_flags(flags);
- if (error < 0)
- goto err_read_pipe;
- fdr = error;
- error = get_unused_fd_flags(flags);
- if (error < 0)
- goto err_fdr;
- fdw = error;
- audit_fd_pair(fdr, fdw);
- fd_install(fdr, fr);
- fd_install(fdw, fw);
- fd[0] = fdr;
- fd[1] = fdw;
- return 0;
- err_fdr:
- put_unused_fd(fdr);
- err_read_pipe:
- path_put(&fr->f_path);
- put_filp(fr);
- err_write_pipe:
- free_write_pipe(fw);
- return error;
- }
- /*
- * sys_pipe() is the normal C calling standard for creating
- * a pipe. It's not the way Unix traditionally does this, though.
- */
- SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
- {
- int fd[2];
- int error;
- error = do_pipe_flags(fd, flags);
- if (!error) {
- if (copy_to_user(fildes, fd, sizeof(fd))) {
- sys_close(fd[0]);
- sys_close(fd[1]);
- error = -EFAULT;
- }
- }
- return error;
- }
- SYSCALL_DEFINE1(pipe, int __user *, fildes)
- {
- return sys_pipe2(fildes, 0);
- }
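- /*
- * Illustrative user-space usage (added, not in the original source):
- *
- *     int fd[2];
- *     if (pipe2(fd, O_CLOEXEC | O_NONBLOCK) < 0)
- *             perror("pipe2");
- *     // fd[0] is the read end and fd[1] the write end, matching the
- *     // fd_install() order in do_pipe_flags() above.
- */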
- /*
- * Allocate a new array of pipe buffers and copy the info over. Returns the
- * new pipe size if successful, or a negative error code on failure.
- */
- static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
- {
- struct pipe_buffer *bufs;
- /*
- * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
- * expect a lot of shrink+grow operations, just free and allocate
- * again like we would do for growing. If the pipe currently
- * contains more buffers than arg, then return busy.
- */
- if (nr_pages < pipe->nrbufs)
- return -EBUSY;
- bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL | __GFP_NOWARN);
- if (unlikely(!bufs))
- return -ENOMEM;
- /*
- * The pipe array wraps around, so just start the new one at zero
- * and adjust the indexes.
- */
- if (pipe->nrbufs) {
- unsigned int tail;
- unsigned int head;
- tail = pipe->curbuf + pipe->nrbufs;
- if (tail < pipe->buffers)
- tail = 0;
- else
- tail &= (pipe->buffers - 1);
- head = pipe->nrbufs - tail;
- if (head)
- memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
- if (tail)
- memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
- }
- account_pipe_buffers(pipe, pipe->buffers, nr_pages);
- pipe->curbuf = 0;
- kfree(pipe->bufs);
- pipe->bufs = bufs;
- pipe->buffers = nr_pages;
- return nr_pages * PAGE_SIZE;
- }
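- /*
- * Worked example (added, not in the original source): with buffers == 8,
- * curbuf == 6 and nrbufs == 5, the occupied slots are 6, 7, 0, 1, 2.
- * Then tail = (6 + 5) & 7 = 3 and head = 5 - 3 = 2, so the two slots
- * starting at curbuf (6 and 7) are copied to new slots 0..1 and the three
- * wrapped slots (0, 1, 2) to new slots 2..4, after which curbuf is reset
- * to 0.
- */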
- /*
- * Currently we rely on the pipe array holding a power-of-2 number
- * of pages.
- */
- static inline unsigned int round_pipe_size(unsigned int size)
- {
- unsigned long nr_pages;
- nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
- }
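- /*
- * Worked example (added, not in the original source), assuming
- * PAGE_SIZE == 4096: a request of 100000 bytes spans 25 pages, which is
- * rounded up to the next power of two, 32 pages, so
- * round_pipe_size(100000) == 131072.
- */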
- /*
- * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
- * will return an error.
- */
- int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
- size_t *lenp, loff_t *ppos)
- {
- int ret;
- ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
- if (ret < 0 || !write)
- return ret;
- pipe_max_size = round_pipe_size(pipe_max_size);
- return ret;
- }
- /*
- * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
- * location, so checking ->i_pipe is not enough to verify that this is a
- * pipe.
- */
- struct pipe_inode_info *get_pipe_info(struct file *file)
- {
- struct inode *i = file->f_path.dentry->d_inode;
- return S_ISFIFO(i->i_mode) ? i->i_pipe : NULL;
- }
- long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
- {
- struct pipe_inode_info *pipe;
- long ret;
- pipe = get_pipe_info(file);
- if (!pipe)
- return -EBADF;
- mutex_lock(&pipe->inode->i_mutex);
- switch (cmd) {
- case F_SETPIPE_SZ: {
- unsigned int size, nr_pages;
- size = round_pipe_size(arg);
- nr_pages = size >> PAGE_SHIFT;
- ret = -EINVAL;
- if (!nr_pages)
- goto out;
- if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
- ret = -EPERM;
- goto out;
- } else if ((too_many_pipe_buffers_hard(pipe->user) ||
- too_many_pipe_buffers_soft(pipe->user)) &&
- !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
- ret = -EPERM;
- goto out;
- }
- ret = pipe_set_size(pipe, nr_pages);
- break;
- }
- case F_GETPIPE_SZ:
- ret = pipe->buffers * PAGE_SIZE;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- out:
- mutex_unlock(&pipe->inode->i_mutex);
- return ret;
- }
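- /*
- * Illustrative user-space sketch (added, not in the original source):
- *
- *     long sz = fcntl(fd[1], F_SETPIPE_SZ, 100000);
- *     // sz is the rounded-up size (131072 with 4 KiB pages); the call fails
- *     // with EPERM if the size exceeds /proc/sys/fs/pipe-max-size for an
- *     // unprivileged caller, and with EBUSY if more slots are currently in
- *     // use than the requested size would provide.
- *     long cur = fcntl(fd[0], F_GETPIPE_SZ);
- */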
- static const struct super_operations pipefs_ops = {
- .destroy_inode = free_inode_nonrcu,
- .statfs = simple_statfs,
- };
- /*
- * pipefs should _never_ be mounted by userland - too much of security hassle,
- * no real gain from having the whole whorehouse mounted. So we don't need
- * any operations on the root directory. However, we need a non-trivial
- * d_name - pipe: will go nicely and kill the special-casing in procfs.
- */
- static struct dentry *pipefs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
- {
- return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
- &pipefs_dentry_operations, PIPEFS_MAGIC);
- }
- static struct file_system_type pipe_fs_type = {
- .name = "pipefs",
- .mount = pipefs_mount,
- .kill_sb = kill_anon_super,
- };
- static int __init init_pipe_fs(void)
- {
- int err = register_filesystem(&pipe_fs_type);
- if (!err) {
- pipe_mnt = kern_mount(&pipe_fs_type);
- if (IS_ERR(pipe_mnt)) {
- err = PTR_ERR(pipe_mnt);
- unregister_filesystem(&pipe_fs_type);
- }
- }
- return err;
- }
- fs_initcall(init_pipe_fs);