Lines Matching defs:subcache

66 /* How many indexes each subcache's queue has */
68 /* How large each subcache is, including the queue and data */
70 /* How far into each subcache the data area is (optimisation) */
72 /* How large the data area in each subcache is (optimisation) */
77 * Subcache structure - the start of each subcache, followed by
88 * Index structure - each subcache has an array of these
93 /* location within the subcache's data area */
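Taken together, the comment fragments above outline the shared-memory layout: a header carrying per-subcache sizing fields, a small SHMCBSubcache bookkeeping block at the start of each subcache, and an array of SHMCBIndex slots sitting in front of the data area. A minimal sketch of what those structures look like; any field not visible in the listing is an assumption:

    #include <apr_time.h>

    /* Sketch only: fields not visible in the listing (subcache_num, header_size,
     * expires, removed, ...) are assumptions. */
    typedef struct {
        unsigned int subcache_num;         /* number of subcaches (assumed) */
        unsigned int header_size;          /* offset of the first subcache (assumed) */
        unsigned int index_num;            /* how many indexes each subcache's queue has */
        unsigned int subcache_size;        /* how large each subcache is, queue + data */
        unsigned int subcache_data_offset; /* how far into each subcache the data area is */
        unsigned int subcache_data_size;   /* how large the data area in each subcache is */
    } SHMCBHeader;

    /* Subcache structure - the start of each subcache, followed by the queue. */
    typedef struct {
        unsigned int idx_pos, idx_used;    /* cyclic queue of SHMCBIndex slots */
        unsigned int data_pos, data_used;  /* cyclic data buffer */
    } SHMCBSubcache;

    /* Index structure - each subcache has an array of these. */
    typedef struct {
        apr_time_t expires;                /* assumed: expiry time of the entry */
        unsigned int data_pos;             /* location within the subcache's data area */
        unsigned int data_used;            /* assumed: id_len + payload length */
        unsigned int id_len;               /* assumed: length of the id prefix */
        unsigned char removed;             /* assumed: tombstone set by remove */
    } SHMCBIndex;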
124 * Each subcache is prefixed by the SHMCBSubcache structure.
126 * The subcache's "Data" segment is a single cyclic data buffer, of
129 * buffer is subcache->data_pos; the buffer's length is
130 * subcache->data_used.
133 * which is used as a cyclic queue; subcache->idx_pos gives the array
134 * index of the first in use, subcache->idx_used gives the number in
138 * The ID and data segment are stored consecutively in the subcache's
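The expire and store fragments further down manipulate these two cyclic regions through SHMCB_CYCLIC_INCREMENT and SHMCB_CYCLIC_SPACE. Their definitions are not part of the listing; the following is a sketch consistent with how the call sites use them:

    /* Advance a position by 'add' slots/bytes, wrapping at 'size'. */
    #define SHMCB_CYCLIC_INCREMENT(pos, add, size) (((pos) + (add)) % (size))

    /* Distance from 'start' forward to 'end' within a cyclic region of 'size',
     * i.e. how many bytes the (possibly wrapped) span from start to end covers. */
    #define SHMCB_CYCLIC_SPACE(start, end, size) \
        ((end) >= (start) ? ((end) - (start)) : ((end) + (size) - (start)))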
153 * a pointer to the corresponding subcache. */
160 * pointer to the corresponding subcache. */
169 /* This macro takes a pointer to a subcache and a zero-based index and returns
176 /* This macro takes a pointer to the header and a subcache and returns a
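The four macro comments above cover the address arithmetic that carves the flat segment into subcaches, index-queue slots and data areas. A hedged reconstruction, reusing the struct sketch above and assuming a header_size field and a power-of-two subcache_num:

    /* Subcache number -> pointer to that subcache. */
    #define SHMCB_SUBCACHE(pHeader, num) \
        ((SHMCBSubcache *)((unsigned char *)(pHeader) + (pHeader)->header_size + \
                           (num) * (pHeader)->subcache_size))

    /* Id -> pointer to the corresponding subcache: mask the first byte of the id. */
    #define SHMCB_MASK(pHeader, id) \
        SHMCB_SUBCACHE((pHeader), *(id) & ((pHeader)->subcache_num - 1))

    /* Subcache + zero-based index -> the SHMCBIndex slot in the queue that
     * immediately follows the SHMCBSubcache structure. */
    #define SHMCB_INDEX(pSubcache, num) \
        ((SHMCBIndex *)((unsigned char *)(pSubcache) + sizeof(SHMCBSubcache)) + (num))

    /* Header + subcache -> start of that subcache's data area. */
    #define SHMCB_DATA(pHeader, pSubcache) \
        ((unsigned char *)(pSubcache) + (pHeader)->subcache_data_offset)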
251 /* Prototypes for low-level subcache operations */
256 SHMCBSubcache *subcache,
273 SHMCBSubcache *subcache,
282 * subcache internals are deferred to shmcb_subcache_*** functions lower down
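Only the SHMCBSubcache parameter of these prototypes survives in the listing; the remaining parameters below are pieced together from the call sites further down and should be treated as approximations (server_rec is the usual httpd type):

    static void shmcb_subcache_expire(server_rec *s, SHMCBHeader *header,
                                      SHMCBSubcache *subcache, apr_time_t now);
    static int shmcb_subcache_store(server_rec *s, SHMCBHeader *header,
                                    SHMCBSubcache *subcache,
                                    unsigned char *data, unsigned int data_len,
                                    const unsigned char *id, unsigned int id_len,
                                    apr_time_t expiry);
    static int shmcb_subcache_retrieve(server_rec *s, SHMCBHeader *header,
                                       SHMCBSubcache *subcache,
                                       const unsigned char *id, unsigned int idlen,
                                       unsigned char *dest, unsigned int *destlen);
    static int shmcb_subcache_remove(server_rec *s, SHMCBHeader *header,
                                     SHMCBSubcache *subcache,
                                     const unsigned char *id, unsigned int idlen);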
431 /* Convert the subcache size (in bytes) to a value that is suitable for
459 SHMCBSubcache *subcache = SHMCB_SUBCACHE(header, loop);
460 subcache->idx_pos = subcache->idx_used = 0;
461 subcache->data_pos = subcache->data_used = 0;
486 SHMCBSubcache *subcache = SHMCB_MASK(header, id);
490 "socache_shmcb_store (0x%02x -> subcache %d)",
498 tryreplace = shmcb_subcache_remove(s, header, subcache, id, idlen);
499 if (shmcb_subcache_store(s, header, subcache, encoded,
523 SHMCBSubcache *subcache = SHMCB_MASK(header, id);
527 "socache_shmcb_retrieve (0x%02x -> subcache %d)",
531 rv = shmcb_subcache_retrieve(s, header, subcache, id, idlen,
548 SHMCBSubcache *subcache = SHMCB_MASK(header, id);
552 "socache_shmcb_remove (0x%02x -> subcache %d)",
559 if (shmcb_subcache_remove(s, header, subcache, id, idlen) == 0) {
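Each of the three public entry points above follows the same shape: pick the subcache from the first byte of the id with SHMCB_MASK, log which subcache was chosen, then defer to the matching shmcb_subcache_* helper. A stripped-down sketch of the remove path; locking, logging and the real socache provider signature are left out, and the return codes are assumptions:

    static apr_status_t shmcb_remove_sketch(server_rec *s, SHMCBHeader *header,
                                            const unsigned char *id,
                                            unsigned int idlen)
    {
        /* The first byte of the id selects one of the subcaches. */
        SHMCBSubcache *subcache = SHMCB_MASK(header, id);

        if (shmcb_subcache_remove(s, header, subcache, id, idlen) == 0)
            return APR_SUCCESS;    /* entry found and dropped */
        return APR_NOTFOUND;       /* no entry with that id */
    }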
590 SHMCBSubcache *subcache = SHMCB_SUBCACHE(header, loop);
591 shmcb_subcache_expire(s, header, subcache, now);
592 total += subcache->idx_used;
593 cache_total += subcache->data_used;
594 if (subcache->idx_used) {
595 SHMCBIndex *idx = SHMCB_INDEX(subcache, subcache->idx_pos);
615 ap_rprintf(r, "subcaches: <b>%d</b>, indexes per subcache: <b>%d</b><br>",
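The status fragments suggest the hook first expires every subcache, then accumulates entry and byte counts before printing them. A sketch of that aggregation loop; the oldest-entry statistic and the surrounding request handling are assumptions:

    unsigned int loop, total = 0, cache_total = 0;
    apr_time_t now = apr_time_now();
    apr_time_t oldest_expiry = 0;

    for (loop = 0; loop < header->subcache_num; loop++) {
        SHMCBSubcache *subcache = SHMCB_SUBCACHE(header, loop);

        shmcb_subcache_expire(s, header, subcache, now);
        total += subcache->idx_used;        /* live entries across all subcaches */
        cache_total += subcache->data_used; /* bytes of id + data in use */

        if (subcache->idx_used) {
            /* The oldest live entry sits at idx_pos; presumably used for an
             * age/expiry statistic (the 'expires' field is an assumption). */
            SHMCBIndex *idx = SHMCB_INDEX(subcache, subcache->idx_pos);
            if (!oldest_expiry || idx->expires < oldest_expiry)
                oldest_expiry = idx->expires;
        }
    }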
693 SHMCBSubcache *subcache = SHMCB_SUBCACHE(header, loop);
694 rv = shmcb_subcache_iterate(instance, s, userctx, header, subcache,
705 SHMCBSubcache *subcache, apr_time_t now)
708 unsigned int new_idx_pos = subcache->idx_pos;
711 while (loop < subcache->idx_used) {
712 idx = SHMCB_INDEX(subcache, new_idx_pos);
729 if (loop == subcache->idx_used) {
731 subcache->idx_used = 0;
732 subcache->data_used = 0;
735 unsigned int diff = SHMCB_CYCLIC_SPACE(subcache->data_pos,
739 subcache->idx_used -= loop;
740 subcache->idx_pos = new_idx_pos;
742 subcache->data_used -= diff;
743 subcache->data_pos = idx->data_pos;
747 "we now have %u socache entries", subcache->idx_used);
751 SHMCBSubcache *subcache,
763 "inserting socache entry larger (%d) than subcache data area (%d)",
769 shmcb_subcache_expire(s, header, subcache, apr_time_now());
775 if (header->subcache_data_size - subcache->data_used < total_len
776 || subcache->idx_used == header->index_num) {
779 idx = SHMCB_INDEX(subcache, subcache->idx_pos);
781 "about to force-expire, subcache: idx_used=%d, "
782 "data_used=%d", subcache->idx_used, subcache->data_used);
787 subcache->idx_pos = SHMCB_CYCLIC_INCREMENT(subcache->idx_pos, 1,
789 subcache->idx_used--;
790 if (!subcache->idx_used) {
792 subcache->data_used = 0;
796 idx2 = SHMCB_INDEX(subcache, subcache->idx_pos);
797 subcache->data_used -= SHMCB_CYCLIC_SPACE(idx->data_pos, idx2->data_pos,
799 subcache->data_pos = idx2->data_pos;
805 } while (header->subcache_data_size - subcache->data_used < total_len);
808 "finished force-expire, subcache: idx_used=%d, "
809 "data_used=%d", subcache->idx_used, subcache->data_used);
820 id_offset = SHMCB_CYCLIC_INCREMENT(subcache->data_pos, subcache->data_used,
823 SHMCB_DATA(header, subcache), id_offset,
825 subcache->data_used += id_len;
827 data_offset = SHMCB_CYCLIC_INCREMENT(subcache->data_pos, subcache->data_used,
830 SHMCB_DATA(header, subcache), data_offset,
832 subcache->data_used += data_len;
834 new_idx = SHMCB_CYCLIC_INCREMENT(subcache->idx_pos, subcache->idx_used,
836 idx = SHMCB_INDEX(subcache, new_idx);
842 subcache->idx_used++;
847 "finished insert, subcache: idx_pos/idx_used=%d/%d, "
849 subcache->idx_pos, subcache->idx_used,
850 subcache->data_pos, subcache->data_used);
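The store fragments run from the oversize check through the force-expire loop to the cyclic copy of id, data and the new index slot. A hedged reconstruction of that whole path; cyclic_write() is a hypothetical stand-in for the file's own cyclic-copy helpers, and the index field names are assumptions:

    /* Hypothetical helper: copy 'len' bytes into the cyclic data area at
     * 'offset', wrapping at the end of the buffer. */
    static void cyclic_write(unsigned char *buf, unsigned int buf_size,
                             unsigned int offset, const unsigned char *src,
                             unsigned int len)
    {
        unsigned int i;
        for (i = 0; i < len; i++)
            buf[(offset + i) % buf_size] = src[i];
    }

    static int shmcb_subcache_store(server_rec *s, SHMCBHeader *header,
                                    SHMCBSubcache *subcache,
                                    unsigned char *data, unsigned int data_len,
                                    const unsigned char *id, unsigned int id_len,
                                    apr_time_t expiry)
    {
        unsigned int total_len = id_len + data_len;
        unsigned int id_offset, data_offset, new_idx;
        SHMCBIndex *idx;

        /* An entry larger than the whole data area can never fit
         * ("inserting socache entry larger than subcache data area"). */
        if (total_len > header->subcache_data_size)
            return -1;

        /* Drop anything already expired, then force-expire the oldest entries
         * until there is room for id + data and a free index slot. */
        shmcb_subcache_expire(s, header, subcache, apr_time_now());
        while (header->subcache_data_size - subcache->data_used < total_len
               || subcache->idx_used == header->index_num) {
            idx = SHMCB_INDEX(subcache, subcache->idx_pos);
            subcache->idx_pos = SHMCB_CYCLIC_INCREMENT(subcache->idx_pos, 1,
                                                       header->index_num);
            subcache->idx_used--;
            if (!subcache->idx_used) {
                subcache->data_used = 0;
            }
            else {
                SHMCBIndex *idx2 = SHMCB_INDEX(subcache, subcache->idx_pos);
                subcache->data_used -= SHMCB_CYCLIC_SPACE(idx->data_pos,
                                                          idx2->data_pos,
                                                          header->subcache_data_size);
                subcache->data_pos = idx2->data_pos;
            }
        }

        /* Append id then data at the tail of the cyclic buffer. */
        id_offset = SHMCB_CYCLIC_INCREMENT(subcache->data_pos, subcache->data_used,
                                           header->subcache_data_size);
        cyclic_write(SHMCB_DATA(header, subcache), header->subcache_data_size,
                     id_offset, id, id_len);
        subcache->data_used += id_len;

        data_offset = SHMCB_CYCLIC_INCREMENT(subcache->data_pos, subcache->data_used,
                                             header->subcache_data_size);
        cyclic_write(SHMCB_DATA(header, subcache), header->subcache_data_size,
                     data_offset, data, data_len);
        subcache->data_used += data_len;

        /* Claim the next free index slot for the new entry. */
        new_idx = SHMCB_CYCLIC_INCREMENT(subcache->idx_pos, subcache->idx_used,
                                         header->index_num);
        idx = SHMCB_INDEX(subcache, new_idx);
        idx->expires = expiry;          /* field names from here on are assumed */
        idx->data_pos = id_offset;
        idx->data_used = total_len;
        idx->id_len = id_len;
        idx->removed = 0;
        subcache->idx_used++;
        return 0;
    }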
855 SHMCBSubcache *subcache,
863 pos = subcache->idx_pos;
865 while (loop < subcache->idx_used) {
866 SHMCBIndex *idx = SHMCB_INDEX(subcache, pos);
877 SHMCB_DATA(header, subcache),
893 dest, SHMCB_DATA(header, subcache),
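The retrieve fragments show the same head-to-tail walk of the index queue, this time comparing each entry's stored id against the requested one and copying the payload out on a match. A sketch using hypothetical cyclic_cmp()/cyclic_read() helpers in place of whatever the file really uses; the match conditions and return codes are assumptions:

    /* Hypothetical cyclic compare/read helpers over the data area. */
    static int cyclic_cmp(const unsigned char *buf, unsigned int buf_size,
                          unsigned int offset, const unsigned char *id,
                          unsigned int len)
    {
        unsigned int i;
        for (i = 0; i < len; i++)
            if (buf[(offset + i) % buf_size] != id[i])
                return 1;
        return 0;
    }

    static void cyclic_read(const unsigned char *buf, unsigned int buf_size,
                            unsigned int offset, unsigned char *dest,
                            unsigned int len)
    {
        unsigned int i;
        for (i = 0; i < len; i++)
            dest[i] = buf[(offset + i) % buf_size];
    }

    static int shmcb_subcache_retrieve(server_rec *s, SHMCBHeader *header,
                                       SHMCBSubcache *subcache,
                                       const unsigned char *id, unsigned int idlen,
                                       unsigned char *dest, unsigned int *destlen)
    {
        unsigned int pos = subcache->idx_pos;
        unsigned int loop = 0;
        unsigned char *data_area = SHMCB_DATA(header, subcache);

        (void)s;  /* only used for logging in the real function */

        while (loop < subcache->idx_used) {
            SHMCBIndex *idx = SHMCB_INDEX(subcache, pos);

            /* Match on id length, tombstone flag and a cyclic id comparison. */
            if (!idx->removed && idx->id_len == idlen
                && cyclic_cmp(data_area, header->subcache_data_size,
                              idx->data_pos, id, idlen) == 0) {
                unsigned int data_offset =
                    SHMCB_CYCLIC_INCREMENT(idx->data_pos, idx->id_len,
                                           header->subcache_data_size);
                unsigned int data_len = idx->data_used - idx->id_len;

                if (data_len > *destlen)
                    return -1;               /* caller's buffer too small */
                cyclic_read(data_area, header->subcache_data_size,
                            data_offset, dest, data_len);
                *destlen = data_len;
                return 0;
            }
            pos = SHMCB_CYCLIC_INCREMENT(pos, 1, header->index_num);
            loop++;
        }
        return -1;                           /* not found */
    }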
918 SHMCBSubcache *subcache,
925 pos = subcache->idx_pos;
926 while (loop < subcache->idx_used) {
927 SHMCBIndex *idx = SHMCB_INDEX(subcache, pos);
933 SHMCB_DATA(header, subcache),
957 SHMCBSubcache *subcache,
968 pos = subcache->idx_pos;
969 while (loop < subcache->idx_used) {
970 SHMCBIndex *idx = SHMCB_INDEX(subcache, pos);
1006 SHMCB_DATA(header, subcache),
1011 SHMCB_DATA(header, subcache),
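The iterate fragments end the listing with one more pass over the index queue, apparently copying each live entry's id and payload out of the cyclic data area (the two SHMCB_DATA references) before handing them to a callback. A hedged sketch of that loop, reusing the hypothetical cyclic_read() from the retrieve sketch; the callback type and the temporary-buffer handling are assumptions (the real code uses the socache API's iterator type):

    #include <apr_pools.h>

    /* Assumed callback shape for the sketch. */
    typedef apr_status_t (*shmcb_iter_fn)(void *userctx,
                                          const unsigned char *id, unsigned int idlen,
                                          const unsigned char *data,
                                          unsigned int datalen);

    static apr_status_t shmcb_subcache_iterate_sketch(server_rec *s, void *userctx,
                                                      SHMCBHeader *header,
                                                      SHMCBSubcache *subcache,
                                                      shmcb_iter_fn iter,
                                                      apr_pool_t *pool)
    {
        unsigned int pos = subcache->idx_pos;
        unsigned int loop = 0;
        unsigned char *data_area = SHMCB_DATA(header, subcache);
        apr_status_t rv = APR_SUCCESS;

        (void)s;  /* only used for logging in the real function */

        while (loop < subcache->idx_used) {
            SHMCBIndex *idx = SHMCB_INDEX(subcache, pos);

            if (!idx->removed) {
                /* Copy id and payload out of the cyclic buffer into flat
                 * temporary storage before invoking the callback. */
                unsigned int data_len = idx->data_used - idx->id_len;
                unsigned char *tmp = apr_palloc(pool, idx->data_used);

                cyclic_read(data_area, header->subcache_data_size,
                            idx->data_pos, tmp, idx->data_used);
                rv = iter(userctx, tmp, idx->id_len, tmp + idx->id_len, data_len);
                if (rv != APR_SUCCESS)
                    break;
            }
            pos = SHMCB_CYCLIC_INCREMENT(pos, 1, header->index_num);
            loop++;
        }
        return rv;
    }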