mod_proxy_balancer.c revision ad9438d62e51f5785e0175c2d3e5308df5968a6c
"proxy: BALANCER: canonicalising URL %s",
url);
 * We break the URL into host, port, path, search
"error parsing URL %s: %s",
 * now parse path/search args, according to rfc1738:
 * process the path. With proxy-noncanon set (by
 * mod_proxy) we use the raw, unparsed uri
path =
url;
/* this is the raw path */

/* If the worker is not initialized check whether its scoreboard
 * slot is already initialized.
 */

/* Set to the original configuration */

/* Set default number of attempts to the number of
 * workers.
 */

/* Retrieve the parameter with the given name.
 * Something like 'JSESSIONID=12345...N'
 */

/* Session path was found, get its value */

/* Session cookie was found, get its value */

/* Find the worker that has the 'route' defined. */

/* If the worker is in error state run
 * retry on that worker. It will be marked as
 * operational if the retry timeout has elapsed.
 * The worker might still be unusable, but we try
 * anyway.
 */

/* We have a worker that is unusable.
 * It can be in error or disabled, but in case
 * it has a redirection set use that redirection worker.
 * This makes it possible to safely remove the member from the
 * balancer. Of course you will need some kind of
 * session replication between those two remote workers.
 */

/* Check if the redirect worker is usable */

/* If the worker is in error state run
 * retry on that worker. It will be marked as
 * operational if the retry timeout has elapsed.
 * The worker might still be unusable, but we try
 * anyway.
 */

/* Try to find the sticky route inside url */
"proxy: BALANCER: Found value %s for "
"proxy: BALANCER: Found value %s for "

/* If we found a value for sticksession, find the first '.' within.
 * Everything after '.' (if present) is our route.
 */
"proxy: BALANCER: Found route %s",
route);
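/* A minimal standalone sketch (hypothetical helper name, plain C) of
 * the route extraction described above: everything after the first
 * '.' in the sticky-session value is treated as the route. */
#include <stdio.h>
#include <string.h>

static const char *route_from_session(const char *session)
{
    const char *dot = strchr(session, '.');
    /* no '.' or nothing after it means no route */
    return (dot && dot[1] != '\0') ? dot + 1 : NULL;
}

int main(void)
{
    const char *route = route_from_session("JSESSIONID=12345.worker1");
    printf("route: %s\n", route ? route : "(none)");  /* prints: route: worker1 */
    return 0;
}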
/* We have a route in path or in cookie.
 * Find the worker that has this route defined.
 * Notice that the route of the worker chosen is different from
 * the route supplied by the client.
 */
"proxy: BALANCER: Route changed from %s to %s",
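/* A minimal stand-in sketch (hypothetical struct and helper) of the
 * lookup just described: scan the members for one whose configured
 * route string matches the route the client supplied. */
#include <string.h>

struct rw_member { const char *route; int usable; };

static int find_route_worker_idx(const struct rw_member *m, int n,
                                 const char *route)
{
    int i;
    for (i = 0; i < n; i++)
        if (m[i].usable && m[i].route && strcmp(m[i].route, route) == 0)
            return i;
    return -1;   /* no member owns this route: fall back to normal election */
}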
"proxy: BALANCER: (%s). Lock failed for find_best_worker()",
balancer->
name);
PROXY_THREAD_UNLOCK(balancer);
"proxy: BALANCER: (%s). Unlock failed for find_best_worker()",
balancer->
name);
/* All the workers are in error state or disabled.
 * If the balancer has a timeout, sleep for a while
 * and try again to find a worker. The chances are
 * that some other thread will release a connection.
 * By default the timeout is not set, and the server
 * returns SERVER_BUSY.
 */

/* XXX: This can perhaps be built using some
 * smarter mechanism, like a thread condition variable.
 * But since the statuses can come from
 * different children, use the provided algo.
 */

/* Set the timeout to 0 so that we don't
 * end up in an infinite loop.
 */

/* restore the timeout */

/* we break the URL into host, port, uri */
"missing worker. URI cannot be parsed: ",
url,
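/* A standalone sketch of the sleep-and-retry logic described above
 * (hypothetical names; the real code sleeps with apr_sleep() and
 * temporarily clears the balancer's timeout): poll in 1/100th steps
 * until a worker shows up or the timeout elapses. */
#include <stddef.h>
#include <unistd.h>

typedef struct worker worker;

static worker *wait_for_worker(worker *(*try_find)(void), long timeout_us)
{
    long step = timeout_us / 100;   /* poll in 1/100th increments */
    long waited = 0;
    worker *w;

    if (step <= 0)
        step = 1;
    while (waited < timeout_us) {
        if ((w = try_find()) != NULL)
            return w;               /* another thread released a connection */
        usleep((useconds_t)step);   /* stand-in for apr_sleep() */
        waited += step;
    }
    return NULL;                    /* timed out; caller reports SERVER_BUSY */
}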
/* If all workers are in error state force the recovery. */
"proxy: BALANCER: (%s). Forcing recovery for worker (%s)",
/* Step 1: check if the url is for us.
 * The url we can handle starts with 'balancer://'.
 * If a balancer is already provided, skip the search
 * for one, because this is a failover attempt.
 */

/* Step 2: Lock the LoadBalancer.
 * XXX: perhaps we need the process lock here.
 */
"proxy: BALANCER: (%s). Lock failed for pre_request",
/* Step 3: force recovery */

/* Step 4: find the session route */

/* We have a sticky load balancer.
 * Update the workers' status
 * so that even session routes are
 * taken into account.
 */

/* Take into account only the workers that are
 * not in error state and not disabled.
 * TODO: Abstract the below, since this is dependent
 * on the LB implementation.
 */

/* We have a route provided that doesn't match the
 * balancer name. See if the provided route is a
 * member of the same balancer, in which case return 503.
 */
"proxy: BALANCER: (%s). All workers are in error state for route (%s)",
"proxy: BALANCER: (%s). Unlock failed for pre_request",
"proxy: BALANCER: (%s). Unlock failed for pre_request",
"proxy: BALANCER: (%s). All workers are in error state",
/* This balancer has sticky sessions and the client either has not
 * supplied any routing information or all workers for this route,
 * including possible redirect and hotstandby workers, are in error
 * state, but we have found another working worker for this
 * balancer where we can send the request. Thus notice that we have
 * changed the route to the backend.
 */

/* Rewrite the url from 'balancer://url'
 * to the 'worker_scheme://worker_hostname[:worker_port]/url'.
 * This replaces the balancer's fictional name with the
 * real hostname of the elected worker.
 */

/* Add the session route to request notes if present */

/* Add session info to env. */
"BALANCER_SESSION_STICKY",
sticky);
"BALANCER_SESSION_ROUTE",
route);
"proxy: BALANCER (%s) worker (%s) rewritten to %s",
"proxy: BALANCER: (%s). Lock failed for post_request",
/* TODO: placeholder for post_request actions */
"proxy: BALANCER: (%s). Unlock failed for post_request",
/* Recalculate lbfactors */

/* Special case: if there is only one worker, its
 * load factor will always be 1.
 */

/* Update the status entries */

/* balancer_init() will be called twice during startup. So, only
 * set up the static data the second time through. */

/* Retrieve a UUID and store the nonce for the lifetime of
 * the process.
 */

/* Manages the loadfactors and member status */

/* Special case: workers are allowed path information */

/* Check that the supplied nonce matches this server's nonce;
 * otherwise ignore all parameters, to prevent a CSRF attack. */

/* First set the params */

/* Note that it is not possible to set the proxy_balancer because it is not
 * in shared memory.
 */
ap_rputs(
"<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n", r);
"</httpd:scheme>\n",
NULL);
"</httpd:hostname>\n",
NULL);
ap_rprintf(r,
" <httpd:loadfactor>%d</httpd:loadfactor>\n",
"<html><head><title>Balancer Manager</title></head>\n", r);
ap_rputs(
"<body><h1>Load Balancer Manager for ", r);
ap_rputs(
"<hr />\n<h3>LoadBalancer Status for ", r);
ap_rputs(
"\n\n<table border=\"0\" style=\"text-align: left;\"><tr>" "<th>StickySession</th><th>Timeout</th><th>FailoverAttempts</th><th>Method</th>" ap_rputs(
"\n\n<table border=\"0\" style=\"text-align: left;\"><tr>" "<th>Route</th><th>RouteRedir</th>" "<th>Factor</th><th>Set</th><th>Status</th>" "<th>Elected</th><th>To</th><th>From</th>" ap_rputs(
"<h3>Edit worker settings for ", r);
ap_rputs(
"<table><tr><td>Load factor:</td><td><input name=\"lf\" type=text ", r);
ap_rputs(
"<tr><td>LB Set:</td><td><input name=\"ls\" type=text ", r);
ap_rputs(
"<tr><td>Route:</td><td><input name=\"wr\" type=text ", r);
ap_rputs(
"<tr><td>Route Redirect:</td><td><input name=\"rr\" type=text ", r);
ap_rputs(
"<tr><td>Status:</td><td>Disabled: <input name=\"dw\" value=\"Disable\" type=radio", r);
ap_rputs(
"> | Enabled: <input name=\"dw\" value=\"Enable\" type=radio", r);
ap_rputs(
"<tr><td colspan=2><input type=submit value=\"Submit\"></td></tr>\n", r);
ap_rvputs(r,
"<input type=hidden name=\"nonce\" value=\"",
/* Initialize shared scoreboard data */

/* The idea behind the find_best_byrequests scheduler is the following:
 *
 * lbfactor is "how much we expect this worker to work", or "the worker's
 * normalized work quota".
 *
 * lbstatus is "how urgent this worker has to work to fulfill its quota
 * of work".
 *
 * We distribute each worker's work quota to the worker, and then look
 * which of them needs to work most urgently (biggest lbstatus). This
 * worker is then selected for work, and its lbstatus reduced by the
 * total work quota we distributed to all workers. Thus the sum of all
 * lbstatus does not change.(*)
 *
 * If some workers are disabled, the others will
 * still be scheduled correctly.
 *
 * If a balancer is configured as follows:
 *
 * worker     a    b    c    d
 * lbfactor  25   25   25   25
 *
 * And b gets disabled, the following schedule is produced:
 *
 *    a c d a c d a c d ...
 *
 * Note that the above lbfactor setting is the *exact* same as:
 *
 * worker     a    b    c    d
 * lbfactor   1    1    1    1
 *
 * Asymmetric configurations work as one would expect. For
 * example:
 *
 * worker     a    b    c    d
 * lbfactor   1    1    1    2
 *
 * would have a, b and c all handling about the same
 * amount of load with d handling twice what a or b
 * or c handles individually. So we could see:
 *
 *  b a d c d a c d b d ...
 */
"proxy: Entering byrequests for BALANCER (%s)",
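/* A standalone sketch of one byrequests election as described above
 * (hypothetical struct; the real code works on the shared scoreboard):
 * credit every usable worker with its quota, elect the most "urgent"
 * one, then debit it by the total quota so the sum of all lbstatus
 * stays constant. */
#include <stdio.h>

struct br_member {
    const char *name;
    int lbfactor;    /* configured work quota */
    int lbstatus;    /* accumulated urgency */
    int usable;      /* 0 if in error state or disabled */
};

static struct br_member *byrequests(struct br_member *m, int n)
{
    struct br_member *best = NULL;
    int total = 0, i;

    for (i = 0; i < n; i++) {
        if (!m[i].usable)
            continue;
        m[i].lbstatus += m[i].lbfactor;
        total += m[i].lbfactor;
        if (!best || m[i].lbstatus > best->lbstatus)
            best = &m[i];
    }
    if (best)
        best->lbstatus -= total;   /* keeps the lbstatus sum invariant */
    return best;
}

int main(void)
{
    /* equal factors, b disabled: reproduces the 'a c d a c d ...' schedule */
    struct br_member m[] = {
        {"a", 1, 0, 1}, {"b", 1, 0, 0}, {"c", 1, 0, 1}, {"d", 1, 0, 1}
    };
    int i;
    for (i = 0; i < 9; i++)
        printf("%s ", byrequests(m, 4)->name);
    printf("\n");
    return 0;
}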
/* First try to see if we have an available candidate */

/* If the worker is in error state run
 * retry on that worker. It will be marked as
 * operational if the retry timeout has elapsed.
 * The worker might still be unusable, but we try
 * anyway.
 */

/* Take into account only the workers that are
 * not in error state and not disabled.
 */

/* The idea behind the find_best_bytraffic scheduler is the following:
 *
 * We know the amount of traffic (bytes in and out) handled by each
 * worker. We normalize that traffic by each worker's weight. So assuming
 * a setup as below:
 *
 * worker     a    b    c
 * lbfactor   1    1    3
 *
 * the scheduler will allow worker c to handle 3 times the
 * traffic of a and b. If each request/response results in the
 * same amount of traffic, then c would be accessed 3 times as
 * often as a or b. If, for example, a handled a request that
 * resulted in a large i/o bytecount, then b and c would be
 * chosen more often, to even things out.
 */
"proxy: Entering bytraffic for BALANCER (%s)",
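/* A standalone sketch of one bytraffic election as described above
 * (hypothetical fields): elect the worker whose byte count, normalized
 * by its weight, is currently the lowest. */
struct bt_member { long transferred, read; int lbfactor, usable; };

static int bytraffic(const struct bt_member *m, int n)
{
    int best = -1, i;
    long best_traffic = 0;

    for (i = 0; i < n; i++) {
        long t;
        if (!m[i].usable)
            continue;
        t = (m[i].transferred + m[i].read) / m[i].lbfactor;
        if (best < 0 || t < best_traffic) {
            best = i;
            best_traffic = t;
        }
    }
    return best;   /* index of the elected worker, or -1 if none usable */
}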
/* First try to see if we have an available candidate */

/* If the worker is in error state run
 * retry on that worker. It will be marked as
 * operational if the retry timeout has elapsed.
 * The worker might still be unusable, but we try
 * anyway.
 */

/* Take into account only the workers that are
 * not in error state and not disabled.
 */
"proxy: Entering bybusyness for BALANCER (%s)",
/* First try to see if we have an available candidate */

/* If the worker is in error state run
 * retry on that worker. It will be marked as
 * operational if the retry timeout has elapsed.
 * The worker might still be unusable, but we try
 * anyway.
 */

/* Take into account only the workers that are
 * not in error state and not disabled.
 */
"proxy: selected worker \"%s\" by busy factor %i (request lbstatus %i)",
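/* A standalone sketch of one bybusyness election as described above
 * (hypothetical fields): elect the usable worker with the fewest
 * requests currently in flight. The real lbmethod additionally breaks
 * ties between equally busy workers. */
struct bb_member { int busy, usable; };

static int bybusyness(const struct bb_member *m, int n)
{
    int best = -1, i;

    for (i = 0; i < n; i++) {
        if (!m[i].usable)
            continue;
        if (best < 0 || m[i].busy < m[best].busy)
            best = i;
    }
    return best;
}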
/* How to add additional lbmethods:
 *   1. Create func which determines "best" candidate worker
 *      (eg: find_best_bytraffic, above)
 *   2. Register it as a provider.
 */

/* Only the mpm_winnt has a child init hook handler.
 * Make sure that we are called after the mpm
 * initializes and after mod_proxy.
 */

    NULL,       /* create per-directory config structure */
    NULL,       /* merge per-directory config structures */
    NULL,       /* create per-server config structure */
    NULL,       /* merge per-server config structures */
    NULL,       /* command apr_table_t */
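/* A sketch of the two steps above, assuming the classic
 * proxy_balancer_method layout (name, finder function, opaque context)
 * from mod_proxy.h; "mymethod" and find_best_mymethod are hypothetical
 * names. */
#include "mod_proxy.h"
#include "ap_provider.h"

static proxy_worker *find_best_mymethod(proxy_balancer *balancer,
                                        request_rec *r)
{
    /* Step 1: determine and return the "best" candidate worker here */
    return NULL;
}

static const proxy_balancer_method mymethod =
{
    "mymethod",
    &find_best_mymethod,
    NULL
};

static void mymethod_register_hook(apr_pool_t *p)
{
    /* Step 2: register it as a provider under the lbmethod group */
    ap_register_provider(p, PROXY_LBMETHOD, "mymethod", "0", &mymethod);
}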