better cache sizing

John Cupitt 2017-02-22 20:11:52 +00:00
parent 4a9d10a0c8
commit 09774740a7
4 changed files with 40 additions and 19 deletions

TODO

@@ -1,20 +1,3 @@
- seq no longer stalls ahead threads ... instead, we rely on sinkdisk
interlocks to limit how far ahead or behind threads can get
reexamine cache sizing ... are they too large?
is thumbnail thinstrip or fatstrip? how does that affect seq cache sizing?
add something to linecache (and tilecache?) to get it to display cache size
sinkdisk could use a smaller wbuffer for thinstrip .. thinstrip * nthreads high
... this would reduce the size of the input caches we need during shrink
same for fatstrip ... would it be less than tile-height?
- vips linecache has access there twice!

	$ vips linecache


@@ -382,7 +382,7 @@ vips_block_cache_build( VipsObject *object )
		build( object ) )
		return( -1 );
-	VIPS_DEBUG_MSG( "vips_block_cache_build: max size = %g MB\n",
+	g_info( "vips_block_cache_build: max size = %g MB",
		(cache->max_tiles * cache->tile_width * cache->tile_height *
			VIPS_IMAGE_SIZEOF_PEL( cache->in )) / (1024 * 1024.0) );

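As a rough sense of scale (hypothetical numbers, not from the commit): a cache holding 250 tiles of 128 x 128 8-bit RGB pixels would be reported as 250 * 128 * 128 * 3 / (1024 * 1024), about 11.7 MB.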

@@ -200,6 +200,9 @@ sink_free( Sink *sink )
void
vips_sink_base_init( SinkBase *sink_base, VipsImage *image )
{
+	int tiles_across;
+	int tiles_down;
+
	/* Always clear kill before we start looping. See the
	 * call to vips_image_iskilled() below.
	 */
@@ -208,12 +211,42 @@ vips_sink_base_init( SinkBase *sink_base, VipsImage *image )
	sink_base->im = image;
	sink_base->x = 0;
	sink_base->y = 0;
+	sink_base->processed = 0;

	vips_get_tile_size( image,
		&sink_base->tile_width, &sink_base->tile_height,
		&sink_base->nlines );
-	sink_base->processed = 0;
+
+	/* vips_get_tile_size() is very conservative about setting nlines. We
+	 * know that we are the final element in the pipeline, so we can be
+	 * much tighter.
+	 */
+
+	/* Number of tiles we need across to fill the image width. This will
+	 * be 1 for fat/thin strip.
+	 */
+	tiles_across =
+		VIPS_ROUND_UP( image->Xsize, sink_base->tile_width ) /
+		sink_base->tile_width;
+
+	/* Number of rows of tiles we need to have at least 1 tile per
+	 * thread.
+	 */
+	tiles_down =
+		VIPS_ROUND_UP( vips_concurrency_get(), tiles_across ) /
+		tiles_across;
+
+	/* Therefore the number of scanlines to have at least one tile per
+	 * thread in a buffer.
+	 */
+	sink_base->nlines = tiles_down * sink_base->tile_height;
+
+	g_info( _( "%s %s: %d x %d pixels, %d threads, %d x %d tiles, "
+		"%d lines in buffer" ),
+		g_get_prgname(), image->filename,
+		image->Xsize, image->Ysize,
+		vips_concurrency_get(),
+		sink_base->tile_width, sink_base->tile_height,
+		sink_base->nlines );
}

static int

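To make the new sizing concrete, here is a minimal standalone sketch of the same arithmetic as the vips_sink_base_init() change above. The image width, tile size, and thread count are made-up example values, and ROUND_UP is a local stand-in for VIPS_ROUND_UP (round up to the next multiple):

#include <stdio.h>

/* Round N up to the next multiple of A (what VIPS_ROUND_UP does). */
#define ROUND_UP( N, A ) ((((N) + (A) - 1) / (A)) * (A))

int
main( void )
{
	/* Hypothetical example values: a 10000-pixel-wide image, 128 x 128
	 * tiles, 8 worker threads.
	 */
	int image_width = 10000;
	int tile_width = 128;
	int tile_height = 128;
	int n_threads = 8;

	/* Tiles needed across the image width. */
	int tiles_across = ROUND_UP( image_width, tile_width ) / tile_width;

	/* Rows of tiles needed so every thread can have at least one tile. */
	int tiles_down = ROUND_UP( n_threads, tiles_across ) / tiles_across;

	/* Scanlines the sink has to buffer. */
	int nlines = tiles_down * tile_height;

	printf( "tiles_across = %d, tiles_down = %d, nlines = %d\n",
		tiles_across, tiles_down, nlines );

	return( 0 );
}

With these numbers a single row of 79 tiles already gives each of the 8 threads a tile, so tiles_down is 1 and only 128 scanlines need to be buffered.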

@@ -238,6 +238,11 @@ wbuffer_new( Write *write )
		return( NULL );
	}

+	g_info( "wbuffer_new: %g MB",
+		(write->sink_base.nlines *
+		VIPS_IMAGE_SIZEOF_LINE( write->sink_base.im )) /
+		(1024 * 1024.0) );
+
	return( wbuffer );
}
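Continuing the hypothetical numbers from the sketch above: an 8-bit RGB image 10000 pixels across has a 10000 * 3 = 30000 byte scanline, so a 128-line write buffer would be reported as roughly 30000 * 128 / (1024 * 1024), about 3.7 MB.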