Diffstat (limited to 'usr/src')
-rw-r--r--  usr/src/uts/common/fs/sockfs/socksyscalls.c | 152
-rw-r--r--  usr/src/uts/common/syscall/sendfile.c       |  17
2 files changed, 74 insertions(+), 95 deletions(-)
diff --git a/usr/src/uts/common/fs/sockfs/socksyscalls.c b/usr/src/uts/common/fs/sockfs/socksyscalls.c
index 2329f4837e..2d8515c776 100644
--- a/usr/src/uts/common/fs/sockfs/socksyscalls.c
+++ b/usr/src/uts/common/fs/sockfs/socksyscalls.c
@@ -24,8 +24,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
@@ -2176,7 +2174,7 @@ typedef struct {
void
snf_smap_desbfree(snf_smap_desbinfo *snfi)
{
- if (!segmap_kpm) {
+ if (!IS_KPM_ADDR(snfi->snfi_base)) {
/*
* We don't need to call segmap_fault(F_SOFTUNLOCK) for
* segmap_kpm as long as the latter never falls back to
@@ -2196,9 +2194,8 @@ snf_smap_desbfree(snf_smap_desbinfo *snfi)
}
/*
- * Use segmap instead of bcopy to send down a chain of desballoca'ed mblks.
- * Each mblk contains a segmap slot of no more than MAXBSIZE. The total
- * length of a chain is no more than sd_qn_maxpsz.
+ * Use segmap instead of bcopy to send down a desballoca'ed mblk. The mblk
+ * contains a segmap slot of no more than MAXBSIZE.
*
* At the end of the whole sendfile() operation, we wait till the data from
* the last mblk is ack'ed by the transport before returning so that the
@@ -2206,13 +2203,13 @@ snf_smap_desbfree(snf_smap_desbinfo *snfi)
*/
int
snf_segmap(file_t *fp, vnode_t *fvp, u_offset_t fileoff, u_offset_t size,
- uint_t maxpsz, ssize_t *count, boolean_t nowait)
+ ssize_t *count, boolean_t nowait)
{
caddr_t base;
int mapoff;
vnode_t *vp;
- mblk_t *mp, *mp1;
- int iosize, iosize1;
+ mblk_t *mp;
+ int iosize;
int error;
short fflag;
int ksize;
@@ -2228,80 +2225,69 @@ snf_segmap(file_t *fp, vnode_t *fvp, u_offset_t fileoff, u_offset_t size,
error = EINTR;
break;
}
- iosize = 0;
- mp = NULL;
- do {
- mapoff = fileoff & MAXBOFFSET;
- iosize1 = MAXBSIZE - mapoff;
- if (iosize1 > size)
- iosize1 = size;
- /*
- * we don't forcefault because we'll call
- * segmap_fault(F_SOFTLOCK) next.
- *
- * S_READ will get the ref bit set (by either
- * segmap_getmapflt() or segmap_fault()) and page
- * shared locked.
- */
- base = segmap_getmapflt(segkmap, fvp, fileoff, iosize1,
- segmap_kpm ? SM_FAULT : 0, S_READ);
- snfi = kmem_alloc(sizeof (*snfi), KM_SLEEP);
- snfi->snfi_len = (size_t)roundup(mapoff + iosize1,
- PAGESIZE) - (mapoff & PAGEMASK);
- /*
- * We must call segmap_fault() even for segmap_kpm
- * because that's how error gets returned.
- * (segmap_getmapflt() never fails but segmap_fault()
- * does.)
- */
- if (segmap_fault(kas.a_hat, segkmap,
- (caddr_t)(uintptr_t)(((uintptr_t)base + mapoff) &
- PAGEMASK), snfi->snfi_len, F_SOFTLOCK,
- S_READ) != 0) {
- (void) segmap_release(segkmap, base, 0);
- kmem_free(snfi, sizeof (*snfi));
- freemsg(mp);
- error = EIO;
- goto out;
- }
- snfi->snfi_frtn.free_func = snf_smap_desbfree;
- snfi->snfi_frtn.free_arg = (caddr_t)snfi;
- snfi->snfi_base = base;
- snfi->snfi_mapoff = mapoff;
- mp1 = esballoca((uchar_t *)base + mapoff,
- iosize1, BPRI_HI, &snfi->snfi_frtn);
-
- if (mp1 == NULL) {
- (void) segmap_fault(kas.a_hat, segkmap,
- (caddr_t)(uintptr_t)(((uintptr_t)base +
- mapoff) & PAGEMASK), snfi->snfi_len,
- F_SOFTUNLOCK, S_OTHER);
- (void) segmap_release(segkmap, base, 0);
- kmem_free(snfi, sizeof (*snfi));
- freemsg(mp);
- error = EAGAIN;
- goto out;
- }
- VN_HOLD(fvp);
- snfi->snfi_vp = fvp;
- mp1->b_wptr += iosize1;
-
- /* Mark this dblk with the zero-copy flag */
- mp1->b_datap->db_struioflag |= STRUIO_ZC;
- if (mp == NULL)
- mp = mp1;
- else
- linkb(mp, mp1);
- iosize += iosize1;
- fileoff += iosize1;
- size -= iosize1;
- } while (iosize < maxpsz && size != 0);
+ mapoff = fileoff & MAXBOFFSET;
+ iosize = MAXBSIZE - mapoff;
+ if (iosize > size)
+ iosize = size;
+ /*
+ * we don't forcefault because we'll call
+ * segmap_fault(F_SOFTLOCK) next.
+ *
+ * S_READ will get the ref bit set (by either
+ * segmap_getmapflt() or segmap_fault()) and page
+ * shared locked.
+ */
+ base = segmap_getmapflt(segkmap, fvp, fileoff, iosize,
+ segmap_kpm ? SM_FAULT : 0, S_READ);
+
+ snfi = kmem_alloc(sizeof (*snfi), KM_SLEEP);
+ snfi->snfi_len = (size_t)roundup(mapoff + iosize,
+ PAGESIZE) - (mapoff & PAGEMASK);
+ /*
+ * We must call segmap_fault() even for segmap_kpm
+ * because that's how error gets returned.
+ * (segmap_getmapflt() never fails but segmap_fault()
+ * does.)
+ */
+ if (segmap_fault(kas.a_hat, segkmap,
+ (caddr_t)(uintptr_t)(((uintptr_t)base + mapoff) & PAGEMASK),
+ snfi->snfi_len, F_SOFTLOCK, S_READ) != 0) {
+ (void) segmap_release(segkmap, base, 0);
+ kmem_free(snfi, sizeof (*snfi));
+ error = EIO;
+ goto out;
+ }
+ snfi->snfi_frtn.free_func = snf_smap_desbfree;
+ snfi->snfi_frtn.free_arg = (caddr_t)snfi;
+ snfi->snfi_base = base;
+ snfi->snfi_mapoff = mapoff;
+ mp = esballoca((uchar_t *)base + mapoff, iosize, BPRI_HI,
+ &snfi->snfi_frtn);
+
+ if (mp == NULL) {
+ (void) segmap_fault(kas.a_hat, segkmap,
+ (caddr_t)(uintptr_t)(((uintptr_t)base + mapoff)
+ & PAGEMASK), snfi->snfi_len, F_SOFTUNLOCK, S_OTHER);
+ (void) segmap_release(segkmap, base, 0);
+ kmem_free(snfi, sizeof (*snfi));
+ freemsg(mp);
+ error = EAGAIN;
+ goto out;
+ }
+ VN_HOLD(fvp);
+ snfi->snfi_vp = fvp;
+ mp->b_wptr += iosize;
+
+ /* Mark this dblk with the zero-copy flag */
+ mp->b_datap->db_struioflag |= STRUIO_ZC;
+ fileoff += iosize;
+ size -= iosize;
if (size == 0 && !nowait) {
ASSERT(!dowait);
dowait = B_TRUE;
- mp1->b_datap->db_struioflag |= STRUIO_ZCNOTIFY;
+ mp->b_datap->db_struioflag |= STRUIO_ZCNOTIFY;
}
VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL);
if ((error = kstrwritemp(vp, mp, fflag)) != 0) {
@@ -2511,10 +2497,6 @@ sosendfile64(file_t *fp, file_t *rfp, const struct ksendfilevec64 *sfv,
vp = fp->f_vnode;
stp = vp->v_stream;
- if (stp->sd_qn_maxpsz == INFPSZ)
- maxpsz = maxphys;
- else
- maxpsz = roundup(stp->sd_qn_maxpsz, MAXBSIZE);
/*
* When the NOWAIT flag is not set, we enable zero-copy only if the
* transfer size is large enough. This prevents performance loss
@@ -2536,8 +2518,12 @@ sosendfile64(file_t *fp, file_t *rfp, const struct ksendfilevec64 *sfv,
if (dozcopy) {
sf_stats.ss_file_segmap++;
error = snf_segmap(fp, fvp, sfv_off, (u_offset_t)sfv_len,
- maxpsz, &count, ((sfv->sfv_flag & SFV_NOWAIT) != 0));
+ &count, ((sfv->sfv_flag & SFV_NOWAIT) != 0));
} else {
+ if (stp->sd_qn_maxpsz == INFPSZ)
+ maxpsz = maxphys;
+ else
+ maxpsz = roundup(stp->sd_qn_maxpsz, MAXBSIZE);
sf_stats.ss_file_cached++;
error = snf_cache(fp, fvp, sfv_off, (u_offset_t)sfv_len,
maxpsz, &count);
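
The hunks above lean on the STREAMS loaned-buffer pattern: esballoca() wraps an existing buffer (here a segmap window) in an mblk without copying it, and the frtn_t free routine fires once the last reference to the dblk is released, which is what lets snf_smap_desbfree() unlock and release the segmap slot safely. Below is a minimal sketch of that pattern; my_desbinfo_t, my_desbfree() and wrap_buffer() are illustrative names, not code from this patch.

    #include <sys/types.h>
    #include <sys/stream.h>
    #include <sys/kmem.h>

    /* Hypothetical bookkeeping, analogous to snf_smap_desbinfo. */
    typedef struct my_desbinfo {
            frtn_t  di_frtn;        /* must outlive the mblk */
            caddr_t di_base;        /* buffer loaned to STREAMS */
    } my_desbinfo_t;

    /* Runs when the last reference to the dblk goes away. */
    static void
    my_desbfree(my_desbinfo_t *dip)
    {
            /* unpin/release dip->di_base here, then drop the bookkeeping */
            kmem_free(dip, sizeof (*dip));
    }

    /* Wrap an existing buffer in an mblk without copying it. */
    static mblk_t *
    wrap_buffer(caddr_t base, size_t len)
    {
            my_desbinfo_t *dip;
            mblk_t *mp;

            dip = kmem_alloc(sizeof (*dip), KM_SLEEP);
            dip->di_frtn.free_func = my_desbfree;
            dip->di_frtn.free_arg = (caddr_t)dip;
            dip->di_base = base;

            mp = esballoca((uchar_t *)base, len, BPRI_HI, &dip->di_frtn);
            if (mp == NULL) {
                    kmem_free(dip, sizeof (*dip));
                    return (NULL);
            }
            mp->b_wptr += len;      /* the data is already in place */
            return (mp);
    }

The simplification in this patch is that snf_segmap() now builds exactly one such mblk per loop iteration (a single segmap slot of at most MAXBSIZE) instead of chaining mblks up to maxpsz, and tags only the final mblk with STRUIO_ZCNOTIFY so the transport can report when it has been consumed.
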
diff --git a/usr/src/uts/common/syscall/sendfile.c b/usr/src/uts/common/syscall/sendfile.c
index 8c41e03599..5883ba7700 100644
--- a/usr/src/uts/common/syscall/sendfile.c
+++ b/usr/src/uts/common/syscall/sendfile.c
@@ -24,8 +24,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
@@ -75,8 +73,8 @@ extern int sosendfile64(file_t *, file_t *, const struct ksendfilevec64 *,
ssize32_t *);
extern int nl7c_sendfilev(struct sonode *, u_offset_t *, struct sendfilevec *,
int, ssize_t *);
-extern int snf_segmap(file_t *, vnode_t *, u_offset_t, u_offset_t, uint_t,
- ssize_t *, boolean_t);
+extern int snf_segmap(file_t *, vnode_t *, u_offset_t, u_offset_t, ssize_t *,
+ boolean_t);
#define readflg (V_WRITELOCK_FALSE)
#define rwflag (V_WRITELOCK_TRUE)
@@ -975,16 +973,11 @@ sendvec_chunk(file_t *fp, u_offset_t *fileoff, struct sendfilevec *sfv,
if (segmapit) {
boolean_t nowait;
- uint_t maxpsz;
nowait = (sfv->sfv_flag & SFV_NOWAIT) != 0;
- maxpsz = stp->sd_qn_maxpsz;
- if (maxpsz == INFPSZ)
- maxpsz = maxphys;
- maxpsz = roundup(maxpsz, MAXBSIZE);
error = snf_segmap(fp, readvp, sfv_off,
- (u_offset_t)sfv_len, maxpsz,
- (ssize_t *)&cnt, nowait);
+ (u_offset_t)sfv_len, (ssize_t *)&cnt,
+ nowait);
releasef(sfv->sfv_fd);
*count += cnt;
if (error)
@@ -1323,7 +1316,7 @@ sendfilev(int opcode, int fildes, const struct sendfilevec *vec, int sfvcnt,
* i) latency is important for smaller files. So if the
* data is smaller than 'tcp_slow_start_initial' times
* maxblk, then use sendvec_small_chunk which creates
- * maxblk size mblks and chains then together and sends
+ * maxblk size mblks and chains them together and sends
* them to TCP in one shot. It also leaves 'wroff' size
* space for the headers in each mblk.
*
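
The dispatch this comment describes can be sketched as follows; pick_send_path() and the enum are hypothetical stand-ins, since sendfilev() performs the comparison inline.

    #include <sys/types.h>

    typedef enum {
            SEND_SMALL_CHUNK,   /* sendvec_small_chunk(): chained maxblk mblks */
            SEND_CHUNK          /* sendvec_chunk(): the bulk path */
    } send_path_t;

    /*
     * Small transfers are latency-bound: if the payload fits within
     * tcp_slow_start_initial * maxblk, build the whole chain up front
     * and hand it to TCP in one shot; otherwise take the bulk path.
     */
    static send_path_t
    pick_send_path(ssize_t total_size, int maxblk, int slow_start_initial)
    {
            if (total_size <= (ssize_t)slow_start_initial * maxblk)
                    return (SEND_SMALL_CHUNK);
            return (SEND_CHUNK);
    }
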