/*
* GRUB -- GRand Unified Bootloader
* Copyright (C) 2009, 2010 Free Software Foundation, Inc.
*
 * GRUB is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GRUB is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
*/
#include <grub/relocator.h>
#include <grub/relocator_private.h>
#include <grub/mm_private.h>
#include <grub/misc.h>
#include <grub/memory.h>
#include <grub/dl.h>
GRUB_MOD_LICENSE ("GPLv3+");
struct grub_relocator
{
  struct grub_relocator_chunk *chunks;
  grub_size_t relocators_size;
};
struct grub_relocator_subchunk
{
  enum {
    CHUNK_TYPE_REGION_START,
    CHUNK_TYPE_IN_REGION,
#if GRUB_RELOCATOR_HAVE_FIRMWARE_REQUESTS
    CHUNK_TYPE_FIRMWARE,
    CHUNK_TYPE_LEFTOVER,
#endif
  } type;
};
struct grub_relocator_chunk
{
  struct grub_relocator_chunk *next;
  void *srcv;
  grub_phys_addr_t target;
  struct grub_relocator_subchunk *subchunks;
  unsigned nsubchunks;
};
struct grub_relocator_extra_block
{
};
struct grub_relocator_fw_leftover
{
};
#endif
void *
get_virtual_current_address (grub_relocator_chunk_t in)
{
  return in->srcv;
}

grub_phys_addr_t
get_physical_target_address (grub_relocator_chunk_t in)
{
  return in->target;
}
struct grub_relocator *
grub_relocator_new (void)
{
  struct grub_relocator *ret;

  ret = grub_zalloc (sizeof (struct grub_relocator));
  if (!ret)
    return NULL;

  ret->relocators_size = grub_relocator_jumper_size;
  grub_dprintf ("relocator", "relocators_size=%lu\n",
                (unsigned long) ret->relocators_size);
  return ret;
}
#define max(a, b) (((a) > (b)) ? (a) : (b))
#define min(a, b) (((a) < (b)) ? (a) : (b))
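/* Note: being function-like macros, max() and min() evaluate each argument
   twice, so they must not be given expressions with side effects; e.g.
   max (a++, b) increments a twice whenever a > b.  */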
static inline int
{
}
static void
{
#endif
- (grub_addr_t) rb)
{
- (newreg_start
}
else
{
if (new_header == hb)
}
{
if (newregfirst == hb)
*regancestor = newreg;
{
do
{
if ((void *) h < (void *) (newreg + 1))
grub_fatal ("Failed to adjust memory region: %p, %p, %p, %p, %p",
if ((void *) h == (void *) (newreg + 1))
grub_dprintf ("relocator",
"Free start memory region: %p, %p, %p, %p, %p",
#endif
hp = h;
h = h->next;
}
}
}
}
static void
{
" hb = %p, hbp = %p, rb = %p, vaddr = 0x%lx\n",
#endif
{
#endif
}
{
if (foll)
{
{
}
}
}
else
{
if (foll)
{
}
else
{
}
{
}
}
}
static void
{
unsigned i;
return;
}
#endif
static void
{
{
case CHUNK_TYPE_REGION_START:
{
if (*rp)
{
h->magic = GRUB_MM_FREE_MAGIC;
break;
{
*hp = h;
{
}
}
else
{
}
}
else
{
/* Find where to insert this region.
Put a smaller one before bigger ones,
to prevent fragmentation. */
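            /* For example, with free regions of 1 MiB, 4 MiB and 16 MiB the
               list is kept as 1 MiB -> 4 MiB -> 16 MiB: small requests are
               then served from the small regions first and the large regions
               stay available for large contiguous allocations.  */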
break;
h->next = h;
h->magic = GRUB_MM_FREE_MAGIC;
}
break;
if (r2)
{
grub_free (g + 1);
}
break;
}
case CHUNK_TYPE_IN_REGION:
{
h->size
h->next = h;
h->magic = GRUB_MM_ALLOC_MAGIC;
grub_free (h + 1);
break;
}
case CHUNK_TYPE_FIRMWARE:
case CHUNK_TYPE_LEFTOVER:
{
{
}
{
}
#endif
}
break;
#endif
}
}
static int
int from_low_priv, int collisioncheck)
{
/* 128 is just in case of additional malloc (shouldn't happen). */
unsigned *counter;
int nallocs = 0;
unsigned j, N = 0;
grub_dprintf ("relocator",
"trying to allocate in 0x%lx-0x%lx aligned 0x%lx size 0x%lx\n",
return 0;
  /* We have to avoid any allocations when filling scanline events,
     hence the two stages.  */
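  /* Stage 1 below only counts how many events can possibly be needed, so
     that the events array can be allocated once, up front; stage 2 then
     fills the events in without touching the allocator, whose own free
     lists are exactly what is being scanned.  */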
for (r = grub_mm_base; r; r = r->next)
{
p = r->first;
do
{
maxevents += 2;
p = p->next;
}
while (p != r->first);
maxevents += 4;
}
if (collisioncheck && rel)
{
maxevents += 2;
}
{
maxevents += 2;
}
for (r = grub_mm_base; r; r = r->next)
maxevents += 2;
#endif
{
{
int l = 0;
unsigned i;
for (i = 0; i < GRUB_RELOCATOR_FIRMWARE_REQUESTS_QUANT; i++)
{
maxevents++;
}
if (l)
maxevents++;
}
}
#endif
{
return 0;
}
if (collisioncheck && rel)
{
{
N++;
N++;
}
}
for (r = grub_mm_base; r; r = r->next)
{
(unsigned long) r - r->pre_size,
(unsigned long) (r + 1) + r->size);
N++;
N++;
}
{
{
N++;
N++;
}
}
N += grub_relocator_firmware_fill_events (events + N);
{
{
unsigned i;
int l = 0;
for (i = 0; i < GRUB_RELOCATOR_FIRMWARE_REQUESTS_QUANT; i++)
{
{
N++;
}
}
if (l)
{
N++;
}
}
}
#endif
#endif
/* No malloc from this point. */
grub_mm_base = NULL;
{
if (p->magic == GRUB_MM_ALLOC_MAGIC)
continue;
do
{
if (p->magic != GRUB_MM_FREE_MAGIC)
if (p == (grub_mm_header_t) (r + 1))
{
N++;
- sizeof (struct grub_mm_header);
N++;
}
else
{
N++;
N++;
}
pa = p;
}
}
/* Put ending events after starting events. */
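  /* Since the sort that follows is stable, this pre-pass guarantees that a
     start event is processed before an end event at the same address, so
     two ranges that merely touch behave as one continuous range instead of
     leaving a zero-length hole between them.  */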
{
for (j = 0; j < N; j++)
else
t = eventt;
events = t;
}
{
unsigned i;
i++)
{
for (j = 0; j < N; j++)
& DIGITSORT_MASK) + 1]++;
for (j = 0; j <= DIGITSORT_MASK; j++)
for (j = 0; j < N; j++)
& DIGITSORT_MASK)]++] = events[j];
t = eventt;
events = t;
}
}
#endif
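/* The loop above is a least-significant-digit radix sort: each pass is a
   stable counting sort on one digit of the event address (the digit being
   the bits selected by DIGITSORT_MASK), so after the final pass the events
   are ordered by address.  A minimal standalone sketch of the same
   technique, with illustrative names that are not part of this file,
   sorting 32-bit keys byte by byte:

     static void
     radix_sort_u32 (grub_uint32_t *a, grub_uint32_t *tmp, unsigned n)
     {
       unsigned pass, i, count[256];

       for (pass = 0; pass < sizeof (grub_uint32_t); pass++)
         {
           grub_uint32_t *t;
           grub_memset (count, 0, sizeof (count));
           for (i = 0; i < n; i++)
             count[(a[i] >> (8 * pass)) & 0xff]++;
           for (i = 1; i < 256; i++)
             count[i] += count[i - 1];
           for (i = n; i-- > 0; )
             tmp[--count[(a[i] >> (8 * pass)) & 0xff]] = a[i];
           t = a;
           a = tmp;
           tmp = t;
         }
     }

   After the even number of passes the sorted data ends up back in the
   buffer the caller passed as a.  */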
/* Now events are nicely sorted. */
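/* The search below is a sweep over the sorted events: every *_START event
   increases and every *_END event decreases a nesting counter, and a
   candidate address is found where the sweep switches from "unusable" to
   "usable" (inside free memory, outside any collision or firmware-reserved
   range).  Reduced to its core, and with illustrative names that are not
   part of this file, the idea is:

     struct ev
     {
       grub_addr_t addr;
       int delta;
     };

     static grub_addr_t
     find_gap (struct ev *e, unsigned n, grub_size_t size)
     {
       unsigned i;
       int depth = 0;
       grub_addr_t start = 0;

       for (i = 0; i < n; i++)
         {
           if (depth == 0 && e[i].delta > 0)
             start = e[i].addr;
           depth += e[i].delta;
           if (depth == 0 && e[i].addr - start >= size)
             return start;
         }
       return 0;
     }

   Here delta is +1 for a start event and -1 for an end event; the function
   returns the first address followed by at least size contiguous free
   bytes, or 0 if there is none.  The real code additionally handles
   alignment, the requested direction (from_low_priv) and the several event
   classes seen in the switch below.  */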
{
int nlefto = 0;
#else
const int nlefto = 0;
#endif
from_low_priv ? j++ : j--)
{
&& !nblockfw))));
{
case REG_FIRMWARE_START:
nstartedfw++;
break;
case REG_FIRMWARE_END:
nstartedfw--;
break;
case FIRMWARE_BLOCK_START:
nblockfw++;
break;
case FIRMWARE_BLOCK_END:
nblockfw--;
break;
#endif
case REG_LEFTOVER_START:
nlefto++;
break;
case REG_LEFTOVER_END:
nlefto--;
break;
#endif
case COLLISION_START:
ncollisions++;
break;
case COLLISION_END:
ncollisions--;
break;
case IN_REG_START:
case REG_BEG_START:
nstarted++;
break;
case IN_REG_END:
case REG_BEG_END:
nstarted--;
break;
}
&& !nblockfw)));
if (!isinsidebefore && isinsideafter)
{
/* Found a usable address.  */
goto found;
}
{
goto found;
}
}
}
return 0;
{
#endif
int last_lo = 0;
#endif
int last_start = 0;
for (j = 0; j < N; j++)
{
int typepre;
if (ncol)
typepre = -1;
else if (regbeg)
else if (inreg)
#endif
else
typepre = -1;
{
if (alloc_end > alloc_start)
{
switch (typepre)
{
case CHUNK_TYPE_REGION_START:
/* TODO: maintain a reverse lookup tree for hancestor. */
{
unsigned k;
for (k = 0; k < N; k++)
}
break;
case CHUNK_TYPE_IN_REGION:
{
unsigned k;
for (k = 0; k < N; k++)
}
break;
case CHUNK_TYPE_FIRMWARE:
{
(unsigned long) fstart,
(unsigned long) fend);
#endif
/* The failure here can be very expensive. */
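          /* A failed firmware request invalidates the pass in progress, so
             the search bounds are adjusted and the whole scan is restarted
             (the goto retry below), which is why a failure here is
             costly.  */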
{
if (from_low_priv)
else
goto retry;
}
break;
}
#endif
case CHUNK_TYPE_LEFTOVER:
{
}
break;
#endif
}
nallocs++;
}
}
{
case REG_BEG_START:
case IN_REG_START:
regbeg++;
else
inreg++;
last_start = j;
break;
case REG_BEG_END:
case IN_REG_END:
if (regbeg)
regbeg--;
else
inreg--;
break;
case REG_FIRMWARE_START:
fwin++;
break;
case REG_FIRMWARE_END:
fwin--;
break;
case FIRMWARE_BLOCK_START:
fwb++;
break;
case FIRMWARE_BLOCK_END:
fwb--;
break;
#endif
case REG_LEFTOVER_START:
fwlefto++;
last_lo = j;
break;
case REG_LEFTOVER_END:
fwlefto--;
break;
#endif
case COLLISION_START:
ncol++;
break;
case COLLISION_END:
ncol--;
break;
}
}
}
/* Malloc is available again. */
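  /* grub_mm_base was cleared above ("No malloc from this point") exactly so
     that nothing could allocate from the heap while its free lists were
     being rewritten; from here on grub_malloc may be used again.  */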
{
int last_start = 0;
#endif
unsigned cural = 0;
int oom = 0;
oom = 1;
for (j = 0; j < N; j++)
{
int typepre;
if (ncol)
typepre = -1;
else if (regbeg)
else if (inreg)
#endif
else
typepre = -1;
{
if (!oom)
if (alloc_end > alloc_start)
{
(unsigned long) alloc_start,
if (typepre == CHUNK_TYPE_REGION_START
|| typepre == CHUNK_TYPE_IN_REGION)
{
}
|| typepre == CHUNK_TYPE_FIRMWARE
#endif
))
{
if (!ne)
{
oom = 1;
}
else
{
if (extra_blocks)
extra_blocks = ne;
}
}
{
{
if (fstart != alloc_start)
{
oom = 1;
ne = extra_blocks;
}
if (lo1)
{
if (leftovers)
}
if (lo2)
{
if (leftovers)
}
}
}
if (typepre == CHUNK_TYPE_LEFTOVER)
{
}
#endif
if (!oom)
cural++;
else
free_subchunk (&tofree);
}
}
{
case REG_BEG_START:
case IN_REG_START:
regbeg++;
else
inreg++;
last_start = j;
break;
case REG_BEG_END:
case IN_REG_END:
break;
case REG_FIRMWARE_START:
fwin++;
break;
case REG_FIRMWARE_END:
fwin--;
break;
case FIRMWARE_BLOCK_START:
fwb++;
break;
case FIRMWARE_BLOCK_END:
fwb--;
break;
#endif
case REG_LEFTOVER_START:
fwlefto++;
break;
case REG_LEFTOVER_END:
fwlefto--;
break;
#endif
case COLLISION_START:
ncol++;
break;
case COLLISION_END:
ncol--;
break;
}
}
if (oom)
{
unsigned i;
for (i = 0; i < cural; i++)
return 0;
}
}
(unsigned long) size);
return 1;
}
static void
{
*min_addr = 0;
/* Keep chunks in memory in the same order as they'll be after relocation. */
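  /* When the temporary copies already sit in target order, each chunk can
     later be moved into place with one forward or backward copy without
     overwriting a chunk that has not been relocated yet.  */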
{
}
}
{
if (!chunk)
return grub_errno;
grub_dprintf ("relocator",
"min_addr = 0x%llx, max_addr = 0x%llx, target = 0x%llx\n",
(unsigned long long) target);
do
{
/* A trick to improve Linux allocation. */
#if defined (__i386__) || defined (__x86_64__)
if (target < 0x100000)
{
break;
}
#endif
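      /* If that special attempt for a sub-1 MiB target succeeds, the break
         above leaves the do/while (0) early and the generic attempts below
         are skipped.  */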
break;
break;
{
break;
}
}
while (0);
{
}
(unsigned long) rel->relocators_size);
(unsigned long) rel->relocators_size);
#ifdef DEBUG_RELOCATOR
grub_mm_check ();
#endif
return GRUB_ERR_NONE;
}
int preference)
{
if (min_addr < 0x1000)
min_addr = 0x1000;
if (!chunk)
return grub_errno;
{
return GRUB_ERR_NONE;
}
do
{
break;
{
break;
}
}
while (0);
{
int found = 0;
{
if (type != GRUB_MEMORY_AVAILABLE)
return 0;
return 0;
found = 1;
return 0;
}
if (!found)
}
while (1)
{
{
else
break;
}
if (!chunk2)
break;
}
(unsigned long) rel->relocators_size);
(unsigned long) rel->relocators_size);
#ifdef DEBUG_RELOCATOR
grub_mm_check ();
#endif
return GRUB_ERR_NONE;
}
void
{
if (!rel)
return;
{
unsigned i;
for (i = 0; i < chunk->nsubchunks; i++)
}
}
{
unsigned j;
(unsigned long) rel->relocators_size);
if (relsize)
{
unsigned i;
{
{
nchunks++;
}
}
{
return grub_errno;
}
for (j = 0; j < 256; j++)
{
}
for (i = 1; i < GRUB_CPU_SIZEOF_VOID_P; i++)
{
for (j = 0; j < nchunks; j++)
for (j = 0; j < 256; j++)
for (j = 0; j < nchunks; j++)
}
}
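  /* The loops above counting-sort the chunks by address one byte at a time
     (the same radix-sort idea as in malloc_in_range), so the loop below
     emits the relocator stubs in a fixed order: a backward or a forward
     copy depending on whether the chunk moves up or down in memory, which
     keeps overlapping source and target ranges from corrupting each
     other.  */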
for (j = 0; j < nchunks; j++)
{
{
grub_cpu_relocator_backward ((void *) rels,
}
{
grub_cpu_relocator_forward ((void *) rels,
}
}
return GRUB_ERR_NONE;
}
void
{
for (r = grub_mm_base; r; r = r->next)
{
if (p->magic == GRUB_MM_ALLOC_MAGIC)
continue;
do
{
if (p->magic != GRUB_MM_FREE_MAGIC)
pa = p;
}
}
}