/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms
* of the Common Development and Distribution License
* (the "License"). You may not use this file except
* in compliance with the License.
*
 * You can obtain a copy of the license at
 * usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing
 * permissions and limitations under the License.
*
* When distributing Covered Code, include this CDDL
* HEADER in each file and include the License file at
* usr/src/OPENSOLARIS.LICENSE. If applicable,
* add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your
* own identifying information: Portions Copyright [yyyy]
* [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* routine to benchmark cache-to-cache transfer times... uses
* solaris features to find and bind to cpus in the current
* processor set, so not likely to work elsewhere.
*/
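/*
 * in outline (inferred from the code below): pset_info() lists the cpus
 * in the current processor set, processor_bind() pins each worker
 * thread to one of them, and the benchmark then walks a pointer chain
 * that another cpu's cache last wrote, so (ideally) every load is
 * satisfied cache-to-cache rather than from memory.
 */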
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <fcntl.h>
#include <string.h>
#include <sys/types.h>
#include <sys/processor.h>
#include <sys/procset.h>
#include <sys/pset.h>
#include <errno.h>

#include "libmicro.h"
static long			opts = 1024*512;

typedef struct {
	long			**ts_data;	/* buffer holding the pointer chain */
	long			ts_result;	/* sink so the chain walk isn't optimized away */
} tsd_t;

static unsigned int		ncpu = 1024;
static tsd_t			*thread_data[1024];
static processorid_t		cpus[1024];
static int			cur_thread = 0;	/* next worker slot/cpu; assumes workers init serially */
int traverse_ptrchain(long **, int, int);
int
benchmark_init()
{
	lm_tsdsize = sizeof (tsd_t);

	(void) sprintf(lm_optstr, "s:");

	(void) sprintf(lm_usage,
	    "       [-s size] size of access area in bytes"
	    " (default %ld)\n"
	    "notes: measures cache to cache transfer times on Solaris\n",
	    opts);

	(void) sprintf(lm_header, "%8s", "size");

	return (0);
}
int
benchmark_optswitch(int opt, char *optarg)
{
	switch (opt) {
	case 's':
		opts = sizetoll(optarg);
		break;
	default:
		return (-1);
	}

	return (0);
}
int
benchmark_initrun()
{
	if (pset_info(PS_MYID, NULL, &ncpu, cpus) < 0) {
		perror("pset_info");
		return (1);
	}

	return (0);
}
int
benchmark_initworker(void *tsd)
{
	tsd_t			*ts = (tsd_t *)tsd;
	int			i, j;

	ts->ts_data = malloc(opts);
	if (ts->ts_data == NULL) {
		return (1);
	}

	/*
	 * bind each worker to the next cpu in the set, round robin
	 */
	if (processor_bind(P_LWPID, P_MYID, cpus[cur_thread % ncpu],
	    NULL) < 0) {
		perror("processor_bind:");
		return (1);
	}

	/*
	 * use lmbench style backwards stride
	 */
	for (i = 0; i < opts / sizeof (long); i++) {
		j = i - 128;
		if (j < 0)
			j = j + opts / sizeof (long);
		ts->ts_data[i] = (long *)&(ts->ts_data[j]);
	}
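	/*
	 * the loop above makes ts_data[i] point at ts_data[i - 128]
	 * (wrapping at the front of the buffer), so a chase through the
	 * chain strides backwards 128 longs per hop, far enough to stay
	 * ahead of next-line hardware prefetchers.
	 */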
	thread_data[cur_thread++] = ts;

	return (0);
}
/*
 * here we walk each thread's chain in order, causing inherent
 * serialization. this is normally not a good idea, but in this case
 * we're trying to measure cache-to-cache transfer times, and if we run
 * threads in parallel we're likely to see saturation effects rather
 * than cache-to-cache transfers, esp. on wimpy memory platforms like
 * P4. serializing keeps exactly one chain walk in flight, so each miss
 * is serviced by the cache that last touched the line.
 */
/*ARGSUSED*/
int
benchmark(void *tsd, result_t *res)
{
	tsd_t			*ts;
	int			i, j;

	for (j = 0; j < lm_optB; j++)
		for (i = 0; i < lm_optT; i++) {
			ts = thread_data[i];
			ts->ts_result += traverse_ptrchain(
			    (long **)ts->ts_data, 100, 0);
		}

	/* each traverse_ptrchain() call performs 100 dependent loads */
	res->re_count = lm_optB * lm_optT * 100;

	return (0);
}
int
traverse_ptrchain(long **ptr, int count, int value)
{
	int i;

	for (i = 0; i < count; i += 10) {
		/* ten dependent loads per pass; each must wait on the last */
		ptr = (long **)*ptr; ptr = (long **)*ptr;
		ptr = (long **)*ptr; ptr = (long **)*ptr;
		ptr = (long **)*ptr; ptr = (long **)*ptr;
		ptr = (long **)*ptr; ptr = (long **)*ptr;
		ptr = (long **)*ptr; ptr = (long **)*ptr;
		if (value != 0)
			**ptr = value;
	}

	return ((int)*ptr);	/* bogus return */
}
char *
benchmark_result()
{
	static char		result[256];

	(void) sprintf(result, "%8ld", opts);

	return (result);
}
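/*
 * example invocation (hypothetical sizes; -s is this benchmark's
 * option, -T is libmicro's standard threads-per-process option):
 *
 *	./cachetocache -T 2 -s 512k
 *
 * pins two threads to two cpus in the current processor set and
 * reports the time to walk a 512KB chain last owned by the other cpu.
 */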