/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/* Copyright 2013 OmniTI Computer Consulting, Inc. All rights reserved. */
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
#include <sys/inttypes.h>
#include <sys/sysmacros.h>
#include <sys/tuneable.h>
#include <sys/lwpchan_impl.h>
/*
* If set, we will not randomize mappings where the 'addr' argument is
* non-NULL and not an alignment.
*/
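/*
 * Sketch of the tunable this comment documents (the original declaration
 * is elided here); the name aslr_respect_mmap_hint is an assumption.
 */
int aslr_respect_mmap_hint = 1;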
{
int error;
/*
* Serialize brk operations on an address space.
* This also serves as the lock protecting p_brksize
* and p_brkpageszc.
*/
as_rangelock(p->p_as);
/*
* As a special case to aid the implementation of sbrk(3C), if given a
* new brk of 0, return the current brk. We'll hide this in brk(3C).
*/
if (nva == 0) {
as_rangeunlock(p->p_as);
return (base);
}
} else {
}
as_rangeunlock(p->p_as);
}
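/*
 * Illustrative userland sketch (not part of this kernel file): the
 * brk(0) special case above lets sbrk(3C) be written roughly as below.
 * __brk_raw() is a hypothetical stand-in for the raw system call stub
 * that returns the kernel's return value unchanged.
 */
#if 0	/* userland illustration only */
extern intptr_t __brk_raw(void *);	/* hypothetical raw syscall stub */

static void *
sbrk_sketch(intptr_t incr)
{
	char *oldbrk = (char *)__brk_raw(0);	/* current break */

	if (incr != 0 && __brk_raw(oldbrk + incr) != 0)
		return ((void *)-1);		/* out of memory */
	return (oldbrk);
}
#endif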
/*
* Algorithm: call arch-specific map_pgsz to get best page size to use,
* then call brk_internal().
* Returns 0 on success.
*/
static int
{
int err;
oszc = p->p_brkpageszc;
/*
* If p_brkbase has not yet been set, the first call
* to brk_internal() will initialize it.
*/
if (brkbase == 0) {
}
/*
* Covers two cases:
* 1. page_szc() returns -1 for invalid page size, so we want to
* ignore it in that case.
* 2. By design we never decrease page size, as it is more stable.
*/
/* If failed, back off to base page size. */
}
return (err);
}
/* If using szc failed, map with base page size and return. */
if (err != 0) {
if (szc != 0) {
}
return (err);
}
/*
* Round up brk base to a large page boundary and remap
* anything in the segment already faulted in beyond that
* point.
*/
/* Check that len is not negative. Update page size code for heap. */
p->p_brkpageszc = szc;
}
return (err); /* should always be 0 */
}
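/*
 * Simplified sketch of the flow described above (the original body is
 * elided and fragmentary here): ask the platform for the best heap page
 * size, never decrease the page size code, and fall back to the base
 * page size if the large-page attempt fails. The name brk_lpg_sketch is
 * hypothetical.
 */
static int
brk_lpg_sketch(caddr_t nva)
{
	proc_t *p = curproc;
	caddr_t bssbase = p->p_bssbase;
	size_t pgsz = map_pgsz(MAPPGSZ_HEAP, p, bssbase, nva - bssbase, 0);
	int szc = page_szc(pgsz);
	int err;

	/* page_szc() returns -1 for an invalid size; never shrink pages. */
	if (szc <= (int)p->p_brkpageszc)
		return (brk_internal(nva, p->p_brkpageszc));

	err = brk_internal(nva, szc);
	if (err != 0 && szc != 0)
		err = brk_internal(nva, 0);	/* back off to base pages */
	return (err);
}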
/*
* Returns 0 on success.
*/
int
{
int error;
/*
 * Extend the heap to brkszc alignment but use the current
 * p->p_brkpageszc for the newly created segment. This allows the new
 * extension segment to be concatenated successfully with the existing
 * brk segment.
 */
} else {
}
mutex_enter(&p->p_lock);
p->p_rctls, p);
mutex_exit(&p->p_lock);
/*
* If p_brkbase has not yet been set, the first call
* to brk() will initialize it.
*/
if (p->p_brkbase == 0)
/*
 * Before multiple page size support existed, p_brksize was the value
 * not rounded to the pagesize (i.e. it stored the exact user request
 * for heap size). If pgsz is greater than PAGESIZE, calculate the
 * heap size as the real new heap size by rounding it up to pgsz.
 * This is useful because we may want to know where the heap ends
 * without knowing the heap pagesize (e.g. some old code), and also
 * because if the heap pagesize changes we can update p_brkpageszc
 * but delay adding the new mapping while still knowing from
 * p_brksize where the heap really ends. The user-requested heap end
 * is stored in a libc variable.
 */
szc = 0;
}
} else {
}
/*
 * Use PAGESIZE to round up ova because we want to know the real value
 * of the current heap end in case p_brkpageszc has changed since
 * p_brksize was last computed.
 */
PAGESIZE);
mutex_enter(&p->p_lock);
RCA_SAFE);
mutex_exit(&p->p_lock);
return (ENOMEM);
}
}
/*
 * Add a new zfod mapping to extend the UNIX data segment.
 * AS_MAP_NO_LPOOB means use 0, and don't reapply OOB policies
 * via map_pgszcvec(). Use AS_MAP_HEAP to get intermediate
 * page sizes if ova is not aligned to szc's pgsz.
 */
if (szc > 0) {
pgsz);
} else {
}
} else {
}
&crargs);
if (error) {
return (error);
}
/*
* Release mapping to shrink UNIX data segment.
*/
}
return (0);
}
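/*
 * Illustrative sketch of the zfod extension mapping described in the
 * comments above (the original code is elided); brk_extend_sketch is a
 * hypothetical helper, not part of the original file.
 */
static int
brk_extend_sketch(proc_t *p, caddr_t nva, uint_t szc, size_t pgsz)
{
	struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
	/* Old heap end, rounded with PAGESIZE as explained above. */
	caddr_t ova = (caddr_t)P2ROUNDUP(
	    (uintptr_t)(p->p_brkbase + p->p_brksize), PAGESIZE);
	/* New heap end, rounded up to the heap page size. */
	caddr_t nend = (pgsz > PAGESIZE) ?
	    (caddr_t)P2ROUNDUP((uintptr_t)nva, pgsz) : nva;

	if (nend <= ova)
		return (0);	/* shrinking is handled separately */

	crargs.szc = (szc > 0) ? szc : AS_MAP_NO_LPOOB;
	crargs.lgrp_mem_policy_flags = LGRP_MP_FLAG_EXTEND_UP;
	return (as_map(p->p_as, ova, (size_t)(nend - ova),
	    segvn_create, &crargs));
}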
/*
* Grow the stack to include sp. Return 1 if successful, 0 otherwise.
* This routine assumes that the stack grows downward.
*/
int
{
int err;
/*
* Serialize grow operations on an address space.
* This also serves as the lock protecting p_stksize
* and p_stkpageszc.
*/
} else {
}
/*
* Set up translations so the process doesn't have to fault in
* the stack pages we just gave it.
*/
}
return ((err == 0 ? 1 : 0));
}
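/*
 * Illustrative sketch of the pre-faulting step mentioned in the comment
 * above (the original statement is elided): fault in the newly granted
 * range so the process does not take soft faults on it later. The name
 * grow_prefault_sketch is hypothetical.
 */
static void
grow_prefault_sketch(proc_t *p, size_t oldsize, size_t newsize)
{
	(void) as_fault(p->p_as->a_hat, p->p_as,
	    p->p_usrstack - newsize, newsize - oldsize, F_INVAL, S_WRITE);
}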
/*
* Algorithm: call arch-specific map_pgsz to get best page size to use,
* then call grow_internal().
* Returns 0 on success.
*/
static int
{
int err;
oszc = p->p_stkpageszc;
/*
* Covers two cases:
* 1. page_szc() returns -1 for invalid page size, so we want to
* ignore it in that case.
* 2. By design we never decrease page size, as it is more stable.
* This shouldn't happen as the stack never shrinks.
*/
/* failed, fall back to base page size */
}
return (err);
}
/*
* We've grown sufficiently to switch to a new page size.
* So we are going to remap the whole segment with the new page size.
*/
/* The grow with szc failed, so fall back to base page size. */
if (err != 0) {
if (szc != 0) {
}
return (err);
}
/*
* Round up stack pointer to a large page boundary and remap
* any pgsz pages in the segment already faulted in beyond that
* point.
*/
/* Check that len is not negative. Update page size code for stack. */
p->p_stkpageszc = szc;
}
return (err); /* should always be 0 */
}
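/*
 * Illustrative sketch of the remap step described above (the original
 * code is elided): the stack grows downward, so once a larger page size
 * is chosen, the already-mapped region from the first pgsz boundary at
 * or above the new stack pointer up to the stack base is the part
 * eligible for remapping with the new size. The helper name and range
 * convention are assumptions of this sketch.
 */
static void
stk_remap_range_sketch(proc_t *p, caddr_t sp, size_t pgsz,
    caddr_t *addrp, size_t *lenp)
{
	caddr_t addr = (caddr_t)P2ROUNDUP((uintptr_t)sp, pgsz);

	*addrp = addr;
	*lenp = (addr < p->p_usrstack) ? (size_t)(p->p_usrstack - addr) : 0;
}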
/*
* This routine assumes that the stack grows downward.
* Returns 0 on success, errno on failure.
*/
int
{
int error;
/*
 * Grow to growszc alignment but use the current p->p_stkpageszc for
 * the segvn_crargs szc passed to segvn_create. When memcntl later
 * increases the szc, this allows the new extension segment to be
 * concatenated successfully with the existing stack segment.
 */
szc = 0;
}
} else {
}
return (ENOMEM);
}
return (0);
}
}
/*
 * Extend the stack with the proposed new growszc, which differs from
 * p_stkpageszc only on a memcntl to increase the stack pagesize.
 * AS_MAP_NO_LPOOB means use 0, and don't reapply OOB policies via
 * map_pgszcvec(). Use AS_MAP_STACK to get intermediate page sizes
 * if not aligned to szc's pgsz.
 */
if (szc > 0) {
pgsz);
} else {
}
} else {
}
segvn_create, &crargs)) != 0) {
}
return (error);
}
return (0);
}
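/*
 * Illustrative sketch of the stack extension mapping described in the
 * comments above (the original code is elided); the helper name and its
 * parameters are hypothetical.
 */
static int
stk_extend_sketch(proc_t *p, caddr_t newbase, size_t delta, uint_t szc,
    size_t pgsz)
{
	struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

	if (szc > 0) {
		/* AS_MAP_STACK yields intermediate sizes for unaligned ends */
		crargs.szc = IS_P2ALIGNED(newbase, pgsz) ? szc : AS_MAP_STACK;
	} else {
		crargs.szc = AS_MAP_NO_LPOOB;
	}
	crargs.lgrp_mem_policy_flags = LGRP_MP_FLAG_EXTEND_DOWN;
	return (as_map(p->p_as, newbase, delta, segvn_create, &crargs));
}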
/*
 * Find an address for the user to map. If MAP_FIXED is not specified, we can
 * pick any address we want, but we will first try the value in *addrp if it
 * is non-NULL and _MAP_RANDOMIZE is not set. This implements a way to try
 * for a preferred address.
 */
int
{
return (0);
/* User supplied address was available */
} else {
/*
* No user supplied address or the address supplied was not
* available.
*/
}
return (ENOMEM);
return (0);
}
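/*
 * Illustrative sketch of the selection logic described above (the
 * original body is elided): honor MAP_FIXED, otherwise try the caller's
 * hint unless it is being randomized or is only an alignment request,
 * and fall back to the platform's map_addr() policy. The helper name is
 * hypothetical.
 */
static int
choose_addr_sketch(struct as *as, caddr_t *addrp, size_t len, offset_t off,
    int vacalign, uint_t flags)
{
	caddr_t basep = (caddr_t)((uintptr_t)*addrp & (uintptr_t)PAGEMASK);
	size_t lenp = len;

	if (flags & MAP_FIXED) {
		(void) as_unmap(as, *addrp, len);
		return (0);
	} else if (basep != NULL &&
	    (flags & (MAP_ALIGN | _MAP_RANDOMIZE)) == 0 &&
	    !as_gap(as, len, &basep, &lenp, 0, *addrp)) {
		/* The user-supplied address was available. */
		*addrp = basep;
	} else {
		/* No usable hint; let the platform pick. */
		map_addr(addrp, len, off, vacalign, flags);
		if (*addrp == NULL)
			return (ENOMEM);
	}
	return (0);
}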
/*
* Used for MAP_ANON - fast way to get anonymous pages
*/
static int
{
int error;
return (EACCES);
/*
* Use the user address. First verify that
* the address to be used is page aligned.
* Then make some simple bounds checks.
*/
return (EINVAL);
case RANGE_OKAY:
break;
case RANGE_BADPROT:
return (ENOTSUP);
case RANGE_BADADDR:
default:
return (ENOMEM);
}
}
/*
* No need to worry about vac alignment for anonymous
* pages since this is a "clone" object that doesn't
* yet exist.
*/
if (error != 0) {
return (error);
}
/*
* Use the seg_vn segment driver; passing in the NULL amp
* gives the desired "cloning" effect.
*/
}
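/*
 * Illustrative userland example (not part of this kernel file): a plain
 * MAP_ANON request such as the one below is what exercises the fast
 * anonymous path above. It assumes <sys/mman.h> in a user program.
 */
#if 0	/* userland illustration only */
void *
anon_pages_example(size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	return (p == MAP_FAILED ? NULL : p);
}
#endif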
static int
{
int error;
int in_crit = 0;
MAP_TEXT | MAP_INITDATA)) != 0) {
/* | MAP_RENAME */ /* not implemented, let user know */
return (EINVAL);
}
return (EINVAL);
}
return (EINVAL);
}
(MAP_FIXED | _MAP_RANDOMIZE)) {
return (EINVAL);
}
/*
* If it's not a fixed allocation and mmap ASLR is enabled, randomize
* it.
*/
flags |= _MAP_RANDOMIZE;
#if defined(__sparc)
/*
 * See if this is an "old mmap call". If so, remember this
 * fact and convert the flags value given to mmap to indicate
 * that the specified address in the system call must be used.
 * _MAP_NEW is set by all new uses of mmap.
 */
#endif
return (EINVAL);
return (EINVAL);
/* alignment needs to be a power of 2 >= page size */
return (EINVAL);
}
/*
* Check for bad lengths and file position.
* We let the VOP_MAP routine check for negative lengths
* since on some vnode types this might be appropriate.
*/
return (EINVAL);
/* discard lwpchan mappings, like munmap() */
/*
* Tell machine specific code that lwp has mapped shared memory
*/
/* EMPTY */
}
return (error);
return (EINVAL);
/* Can't execute code from "noexec" mounted filesystem. */
/*
* These checks were added as part of large files.
*
* Return ENXIO if the initial position is negative; return EOVERFLOW
* if (offset + len) would overflow the maximum allowed offset for the
* type of file descriptor being used.
*/
if (pos < 0)
return (ENXIO);
return (EOVERFLOW);
}
/* no write access allowed */
maxprot &= ~PROT_WRITE;
}
/*
* XXX - Do we also adjust maxprot based on protections
* of the vnode? E.g. if no execute permission is given
* on the vnode for the current user, maxprot probably
* should disallow PROT_EXEC also? This is different
* from the write access as this would be a per vnode
* test as opposed to a per fd test for writability.
*/
/*
 * Verify that the specified protections are not greater than
 * the maximum allowable protections. Also test to make sure
 * that the file descriptor allows read access, since "write
 * only" mappings are hard to do: normally we read from the
 * file before the page can be written.
 */
return (EACCES);
/*
* If the user specified an address, do some simple checks here
*/
/*
* Use the user address. First verify that
* the address to be used is page aligned.
* Then make some simple bounds checks.
*/
return (EINVAL);
case RANGE_OKAY:
break;
case RANGE_BADPROT:
return (ENOTSUP);
case RANGE_BADADDR:
default:
return (ENOMEM);
}
}
nbl_need_check(vp)) {
int svmand;
in_crit = 1;
if (error != 0)
goto done;
nop = NBL_READWRITE;
} else {
}
} else {
}
goto done;
}
}
/* discard lwpchan mappings, like munmap() */
/*
* Ok, now let the vnode map routine do its thing to set things up.
*/
if (error == 0) {
/*
* Tell machine specific code that lwp has mapped shared memory
*/
if (flags & MAP_SHARED) {
/* EMPTY */
}
/*
* Mark this as an executable vnode
*/
}
}
done:
if (in_crit)
return (error);
}
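/*
 * Illustrative sketch of the protection check described in the comments
 * above (the original condition is elided): the requested protections
 * must not exceed maxprot, and the descriptor must allow reads. The
 * helper name and parameters are hypothetical.
 */
static int
mmap_prot_check_sketch(uint_t uprot, uint_t maxprot, int fflag)
{
	if ((maxprot & uprot) != uprot || (fflag & FREAD) == 0)
		return (EACCES);
	return (0);
}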
#ifdef _LP64
/*
* LP64 mmap(2) system call: 64-bit offset, 64-bit address.
*
* The "large file" mmap routine mmap64(2) is also mapped to this routine
* by the 64-bit version of libc.
*
* Eventually, this should be the only version, and have smmap_common()
* folded back into it again. Some day.
*/
{
int error;
} else
}
#endif /* _LP64 */
#if defined(_SYSCALL32_IMPL) || defined(_ILP32)
/*
* ILP32 mmap(2) system call: 32-bit offset, 32-bit address.
*/
{
int error;
if (flags & _MAP_LOW32)
} else
}
/*
* ILP32 mmap64(2) system call: 64-bit offset, 32-bit address.
*
* Now things really get ugly because we can't use the C-style
* calling convention for more than 6 args, and 64-bit parameter
* passing on 32-bit systems is less than clean.
*/
struct mmaplf32a {
#ifdef _LP64
/*
* 32-bit contents, 64-bit cells
*/
#else
/*
* 32-bit contents, 32-bit cells
*/
#endif
};
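/*
 * Illustrative sketch of reassembling the 64-bit file offset from the
 * two 32-bit cells of the structure above (the original member names
 * and the endian handling are elided, so offhi/offlo are assumptions).
 */
static u_offset_t
mmaplf32_off_sketch(uint32_t offhi, uint32_t offlo)
{
	return (((u_offset_t)offhi << 32) | (u_offset_t)offlo);
}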
int
{
int error;
#ifdef _BIG_ENDIAN
#else
#endif
if (flags & _MAP_LOW32)
} else
if (error == 0)
return (error);
}
#endif /* _SYSCALL32_IMPL || _ILP32 */
int
{
/*
* Discard lwpchan mappings.
*/
return (0);
}
int
{
int error;
case RANGE_OKAY:
break;
case RANGE_BADPROT:
case RANGE_BADADDR:
default:
}
if (error)
return (0);
}
int
{
int error;
long llen;
model = get_udatamodel();
/*
* Validate form of address parameters.
*/
if (model == DATAMODEL_NATIVE) {
} else {
}
/*
* Loop over subranges of interval [addr : addr + len), recovering
* results internally and then copying them out to caller. Subrange
* is based on the size of MC_CACHE, defined above.
*/
if (rl != 0) {
}
if (error != 0)
}
return (0);
}
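/*
 * Illustrative sketch of the chunking described in the comment above
 * (the original loop is elided): walk the range MC_CACHE pages at a
 * time, gathering the per-page results for one subrange and copying
 * them out before moving on. MC_CACHE is assumed to be the page count
 * defined earlier in the original file; get_subrange_sketch stands in
 * for the per-subrange work and is hypothetical.
 */
static int
mincore_chunks_sketch(caddr_t addr, size_t len, char *vecp,
    int (*get_subrange_sketch)(caddr_t, size_t, char *))
{
	caddr_t ea = addr + len;
	size_t chunk;
	int err;

	while (addr < ea) {
		chunk = MIN((size_t)(ea - addr), (size_t)MC_CACHE * PAGESIZE);
		err = get_subrange_sketch(addr, chunk, vecp);
		if (err != 0)
			return (err);
		addr += chunk;
		vecp += chunk / PAGESIZE;
	}
	return (0);
}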