better rgb16 -> scrgb handling

helps new sharpen tests
John Cupitt 2016-02-26 09:08:42 +00:00
parent 04cc13e321
commit d33306836f
3 changed files with 12 additions and 22 deletions

TODO

@@ -2,19 +2,9 @@
or was it 1703?
- try
$ vips cast babe.jpg x.v ushort
$ vips colourspace x.v x2.v labs
$ vips avg x2.v
27.403674
$ vips cast babe.jpg x.v short
$ vips colourspace x.v x2.v labs
$ vips avg x2.v
10356.775608
how odd! cause of break in test suite
- colour ops are doing cast + extract_band before every op ... for something
like colourspace, which can potentially do many ops in a row, this is a LOT
of casting and extracting
- could load pdf thumbnails?

sRGB2scRGB.c

@@ -15,6 +15,8 @@
* - cut about to make sRGB2scRGB.c
* 12/2/15
* - add 16-bit alpha handling
+* 26/2/16
+* - look for RGB16 tag, not just ushort, for the 16-bit path
*/
/*
@@ -188,7 +190,7 @@ vips_sRGB2scRGB_build( VipsObject *object )
if( vips_check_bands_atleast( class->nickname, in, 3 ) )
return( -1 );
-format = in->BandFmt == VIPS_FORMAT_USHORT ?
+format = in->Type == VIPS_INTERPRETATION_RGB16 ?
VIPS_FORMAT_USHORT : VIPS_FORMAT_UCHAR;
if( vips_cast( in, &t[0], format, NULL ) )
return( -1 );
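
A rough sketch of what this change means in practice, using the gi Python binding that the test suite below uses (babe.jpg is the image named in the TODO; enum and method names are the usual introspection ones and the printed values are not claimed):

    from gi.repository import Vips

    im = Vips.Image.new_from_file("babe.jpg")

    # A bare cast leaves the interpretation as sRGB, so sRGB2scRGB now
    # keeps using the 8-bit tables for it.
    ushort = im.cast(Vips.BandFormat.USHORT)

    # To take the 16-bit path, scale to the 16-bit range and tag the
    # image as RGB16 explicitly.
    rgb16 = (im * 256).cast(Vips.BandFormat.USHORT)
    rgb16 = rgb16.copy(interpretation=Vips.Interpretation.RGB16)

    print(ushort.colourspace(Vips.Interpretation.LABS).avg())
    print(rgb16.colourspace(Vips.Interpretation.LABS).avg())
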
@@ -256,11 +258,11 @@ vips_sRGB2scRGB_init( VipssRGB2scRGB *sRGB2scRGB )
* @out: output image
* @...: %NULL-terminated list of optional named arguments
*
-* Convert an sRGB image to scRGB. The input image can be 8 or 16-bit
-* unsigned int.
+* Convert an sRGB image to scRGB. The input image can be 8 or 16-bit.
*
-* If the input image is unsigned 16-bit, any extra channels after RGB are
-* divided by 256. Thus, scRGB alpha is always 0 - 255.99.
+* If the input image is tagged as #VIPS_INTERPRETATION_RGB16, any extra
+* channels after RGB are divided by 256. Thus, scRGB alpha is
+* always 0 - 255.99.
*
* See also: vips_scRGB2XYZ(), vips_scRGB2sRGB(), vips_rad2float().
*
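
The alpha behaviour described in the doc comment can be illustrated the same way (again a sketch with the gi binding; the band values and image size are made up):

    from gi.repository import Vips

    # Four bands: RGB plus a fully opaque 16-bit alpha, tagged as RGB16.
    rgba = Vips.Image.black(16, 16, bands=4) + [1000, 2000, 3000, 65535]
    rgba = rgba.cast(Vips.BandFormat.USHORT)
    rgba = rgba.copy(interpretation=Vips.Interpretation.RGB16)

    scrgb = rgba.sRGB2scRGB()

    # The extra band is divided by 256 on the 16-bit path, so full alpha
    # comes out just under 256 (65535 / 256).
    print(scrgb.extract_band(3).avg())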

test_convolution.py

@@ -97,7 +97,9 @@ class TestConvolution(unittest.TestCase):
def setUp(self):
im = Vips.Image.mask_ideal(100, 100, 0.5, reject = True, optical = True)
self.colour = im * [1, 2, 3] + [2, 3, 4]
+self.colour = self.colour.copy(interpretation = Vips.Interpretation.SRGB)
self.mono = self.colour.extract_band(1)
+self.mono = self.mono.copy(interpretation = Vips.Interpretation.B_W)
self.all_images = [self.mono, self.colour]
self.sharp = Vips.Image.new_from_array([[-1, -1, -1],
[-1, 16, -1],
@@ -223,10 +225,6 @@ class TestConvolution(unittest.TestCase):
for sigma in [0.5, 1, 1.5, 2]:
im = im.cast(fmt)
-if im.bands == 3:
-im = im.copy(interpretation = Vips.Interpretation.SRGB)
-elif im.bands == 1:
-im = im.copy(interpretation = Vips.Interpretation.B_W)
sharp = im.sharpen(sigma = sigma)
# hard to test much more than this