/*
* processimage.c
*
* Definition of the function
*
* processImage() --
* Find the pixel counts, centers-of-mass, and bounding boxes of
* all the four-connected components in a binary (black-or-white)
* image.
* The "components" consist of regions of white pixels (with value
* 0xff). "Four-connected" means that white pixels that border
* horizontally or vertically are in the same component. There is
* no connection between obliquely neighbouring pixels.
*
* This file also documents the algorithm used by the function
* 'processImage()' [see "ALGORITHM" below].
*
*/
/****************************************************************************
* DEPENDENCIES:
*
* +------------------------------------------------------------+
* | main.c = main() function |
* +-------------------------------------------+----------------+
* | processimage.c, .h | testdata.c, .h |
* +---------------+--------------+------------+ |
* | lineacc.c,.h | lmap.c,.h | debug.c,.h | |
* +---------------+--------------+------------+----------------+
* | image.h, comp.h, label.h |
* +------------------------------------------------------------+
*
* A block uses the blocks vertically below it.
*
* processimage.c --- This file, contains the algorithm.
*
* lineacc.c, .h }-- Auxiliary datastructures used by processimage.c
* lmap.c, .h } for holding temporary data
*
* debug.c, .h --- Optional debug output to stdout
*
* testdata.c, .h --- Testcases, used by main.c
*
* image.h }-- Basic datastructures and typedefs used "everywhere"
* comp.h }
* label.h }
*
*/
/****************************************************************************
* ALGORITHM:
*
* >> Basic idea
*
* The algorithm is derived from the algorithms for "Connected Components
* Labeling" that I read about in Haralick and Shapiro, _Computer and
* Robot Vision_, Addison-Wesley, 1992.
*
* Connected Components Labeling means that each white pixel in the image
* is labeled with an integer ID number (called a LABEL) that is unique for
* one of the connected components. (Each component consisting of four-
* connected white pixels.)
* We start with an unlabeled image (all labels having the value
* LABEL_EMPTY). At the end, each white pixel should have a valid
* (nonempty) label value (the black pixels remain unlabeled).
*
* The basic idea of the algorithm is as follows. We run through the
* image line by line ("line" = horizontal row of pixels) from top to
* bottom, going through each individual line from left to right. During
 * this loop, for each still-unlabeled white pixel P we encounter, we
* give this pixel P a label number derived from the labels on the two
* immediate neighbour pixels of P that were already processed in the loop,
* i.e. the left and upper neighbours of P.
* When our white pixel P (that is to be labeled) has no white pixels as
 * its left or upper neighbours, then we give the pixel P a new unused
* label number. When our white pixel P does have a white pixel neighbour
* to its left or above it, then we propagate the label of this neighbour
* into our pixel P.
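 *
 * In code, the basic rule can be sketched as follows (an illustration
 * only; newUnusedLabel() is a placeholder, and the handling of two
 * white neighbours with DIFFERENT labels is discussed further below):
 *
 *     if ( idLeft == LABEL_EMPTY && idUp == LABEL_EMPTY )
 *         idThis = newUnusedLabel();   // no white neighbour seen yet
 *     else if ( idLeft != LABEL_EMPTY )
 *         idThis = idLeft;             // propagate the left neighbour
 *     else
 *         idThis = idUp;               // propagate the upper neighbour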
*
* Example input image:
* +---+---+---+---+---+---+
* | X | X | X | . | X | . | X = white pixel
* +---+---+---+---+---+---+ . = black pixel
* | . | . | X | . | X | . |
* +---+---+---+---+---+---+
* | . | . | X | X | X | . |
* +---+---+---+---+---+---+
*
* Running the labeling algorithm described above and issuing new label
* numbers in order 1, 2, ... produces:
* +---+---+---+---+---+---+
* | 1 | 1 | 1 | . | 2 | . |
* +---+---+---+---+---+---+
* | . | . | 1 | . | 2 | . |
* +---+---+---+---+---+---+
* | . | . | 1 | 1 | P | . |
* +---+---+---+---+---+---+
*
* In the last diagram, we see the case arising that the white pixel P
* to be labeled has a left AND an upper white-pixel neighbour with
* different label numbers. For Connected Components Labeling, where
* an explicit labeled output image is desired (with a unique label for each
* component), this case is troublesome, and needs at least a second pass
* over the image.
 * However, as explained in detail below, for OUR purpose the above case
 * turns out not to be troublesome; it is sufficient to iterate ONCE
 * (and only once) over the input image in a way similar to Connected
 * Components Labeling, without storing or creating
* the complete labeled image that is the output of Connected Components
* Labeling.
*
*
* >> The output quantities we have to compute
*
* OUR GOAL is not the labeled output image that is created by
* Connected Components Labeling. Our goal is simpler, namely we only
* have to determine for each connected component: the number of pixels
* in it, its bounding box, and its center of mass.
*
* The total number of pixels in the component and the bounding box
* can be determined by a function that is fed all the pixels of the
* component one by one, where the function keeps "running sums" for
* the total and for min_x, min_y, max_x, max_y.
*
* The center of mass (xC,yC) of a component is
*
* xC = SUM_i [ x_i ] / n
* yC = SUM_i [ y_i ] / n
*
* where i in the sum runs over all pixels in the component, and where
* n is the total number of pixels in the component. The two sums
*
* xS = SUM_i [ x_i ]
* yS = SUM_i [ y_i ]
*
* can also be computed by a function that is fed all the pixels of the
* component one by one, keeping running sums for xS and yS. Just before
* the data for the component is written to output, we divide the sums xS
* and yS by n to obtain the desired (xC,yC).
*
* Therefore all output quantities we have to compute can be determined
 * by a counting function that is given the pixels of the components one
* by one. This function needs a memory, namely for each component it needs
* a separate set of running sums (for n, min_x, min_y, max_x, max_y, xS,
* and yS). My implementation uses comp_t for these running sums for
* a component (using bary_x and bary_y for holding xS and yS).
*
* All our output quantities can therefore be determined by looping once
* through the image pixels, and for each pixel giving the counting function
* the pixel's x and y, and its label (so that the function knows which
* counts to add to.)
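 *
 * As an illustration, a minimal sketch of such a counting record and
 * its update step is shown below. The names are simplified placeholders
 * (the real fields live in comp_t, see comp.h); this only pictures the
 * idea, it is not the actual implementation.
 *
 *     typedef struct {
 *         long n;                           // number of pixels
 *         int  min_x, min_y, max_x, max_y;  // bounding box
 *         long xS, yS;                      // running sums of x and y
 *     } sums_t;
 *
 *     // Feed one pixel (x,y) of a component into its running sums.
 *     // (A new component starts from an all-zero sums_t.)
 *     static void sums_addPixel( sums_t * s, int x, int y )
 *     {
 *         if ( s->n == 0 )
 *         {
 *             s->min_x = s->max_x = x;
 *             s->min_y = s->max_y = y;
 *         }
 *         else
 *         {
 *             if ( x < s->min_x ) s->min_x = x;
 *             if ( x > s->max_x ) s->max_x = x;
 *             if ( y < s->min_y ) s->min_y = y;
 *             if ( y > s->max_y ) s->max_y = y;
 *         }
 *         s->n  += 1;
 *         s->xS += x;
 *         s->yS += y;
 *     }
 *
 *     // Just before a component is written to output:
 *     //     xC = (double)s->xS / s->n;
 *     //     yC = (double)s->yS / s->n;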
*
*
* >> The component counting algorithm (introduction)
*
* For our purposes, we do NOT need to create the complete labeled image
* that is the output of Connected Components Labeling. The only thing we
* need the labels for is to identify which set of running sums (comp_t)
* each white pixel that we encounter should be added to as we loop through
* the pixels of the image. Consider again the earlier example:
* +---+---+---+---+---+---+
* | 1 | 1 | 1 | . | 2 | . |
* +---+---+---+---+---+---+ In P:
* | . | . | 1 | . | 2 | . | n1 = 6
* +---+---+---+---+---+---+ n2 = 2
* | . | . | 1 | 1 | P | . |
* +---+---+---+---+---+---+
*
* As we iterate through the pixels from left to right and from top
* to bottom, we add the labeled pixels (1, 2) to the comp_t running
* sums for the two components 1 and 2 respectively. The diagram
* above shows also the values of the running totals
* n1 = comp_t::pixels for component 1 and n2 = comp_t::pixels for
* component 2, just before reaching pixel P.
*
* Arriving at pixel P, where the left and upper neighbours have different
* labels, we discover that labels 1 and 2 are not for different components
* but for the same component. All we have to do here is to add the running
* sums (comp_t) for the components 1 and 2 together, store the result
* under one of these labels (in my implementation I always take the left
* label, here "1"), and continue processing with the label of the combined
* component (1). Component 2 has now been merged into the component 1,
* and the comp_t for label 2 should be thrown away.
*
* Finally we of course also have to add pixel P itself to the comp_t of
* the merged component 1.
*
* The result is:
* +---+---+---+---+---+---+
* | 1 | 1 | 1 | . | 2 | . |
* +---+---+---+---+---+---+ In P:
* | . | . | 1 | . | 2 | . | n1 = 6 + 2 + 1 = 9
* +---+---+---+---+---+---+
* | . | . | 1 | 1 | 1 | . |
* +---+---+---+---+---+---+
* We see that after processing pixel P, we have arrived at the correct
 * count n1 = comp_t::pixels for the connected component.
*
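 * Continuing the sums_t sketch from the previous section, the merge
 * step at pixel P can be pictured as follows (again an illustration
 * only, not the actual lm_mergeCompIntoComp() implementation):
 *
 *     // Merge the running sums of component 'src' into component 'dst'
 *     // (here: the sums of label 2 are added into the sums of label 1).
 *     static void sums_merge( sums_t * dst, sums_t const * src )
 *     {
 *         if ( src->n == 0 ) return;                  // nothing to merge
 *         if ( dst->n == 0 ) { *dst = *src; return; } // dst was empty
 *         dst->n  += src->n;
 *         dst->xS += src->xS;
 *         dst->yS += src->yS;
 *         if ( src->min_x < dst->min_x ) dst->min_x = src->min_x;
 *         if ( src->min_y < dst->min_y ) dst->min_y = src->min_y;
 *         if ( src->max_x > dst->max_x ) dst->max_x = src->max_x;
 *         if ( src->max_y > dst->max_y ) dst->max_y = src->max_y;
 *     }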
*
* >> The component counting algorithm (completed)
*
* In the last diagram above, the labels "2" on the pixels above P are wrong.
* For that example image, this is irrelevant because in that example,
 * these labels are not "visible" anymore (not inspected anymore by the
* algorithm) after we proceed to the next pixel after P (if we only take
* one loop over the image).
*
* The next example shows that we can not simply forget about labels in the
* row above, and shows that the algorithm needs a small extra ingredient:
*
* +---+---+---+---+---+---+---+
* | X | X | . | X | X | X | . | X = white pixel
* +---+---+---+---+---+---+---+ . = black pixel
* | . | X | . | X | . | X | . |
* +---+---+---+---+---+---+---+
* | . | X | X | X | . | X | X |
* +---+---+---+---+---+---+---+
*
* After running our algorithm (as described so far) up to the pixel in the
* middle of the bottom row (pixel P) we have:
*
* +---+---+---+---+---+---+---+
* | 1 | 1 | . | 2 | 2 | 2 | . | At P:
* +---+---+---+---+---+---+---+ n1 = 5
* | . | 1 | . | 2 | . | 2 | . | n2 = 5
* +---+---+---+---+---+---+---+
* | . | 1 | 1 | P | . | X | X |
* +---+---+---+---+---+---+---+
*
* Pixel P now gets label "1", and we add the counts for component 2
* into the counts for component 1:
*
* +---+---+---+---+---+---+---+
* | 1 | 1 | . | 2 | 2 | 2 | . |
* +---+---+---+---+---+---+---+ At Q:
* | . | 1 | . | 2 | . | 2 | . | n1 = 5 + 5 + 1 = 11
* +---+---+---+---+---+---+---+
* | . | 1 | 1 | 1 | . | Q | X |
* +---+---+---+---+---+---+---+
*
* Continuing to the next white pixel, Q, we encounter in the pixel above
* Q the label "2", which is the label of a component that has been merged
* into the combined component 1.
*
* At pixel Q, the algorithm needs one more piece of information, namely
* the information that label 2 is no longer in use, and belongs to a
* component that has been merged into component 1. That is, we need the
* information that label 2 has to be mapped to label 1.
*
* This information must be created at pixel P. At that pixel, we have
* to register (store) the information that label 2 no longer has its own
* comp_t, and instead maps to label 1. Every time we merge the comp_t
* of label K into the comp_t of label L, we have to register the mapping
*
* label K --> label L
*
* of a label (K) that no longer has a comp_t, to the label (L) which does
 * have a comp_t and into which the component K has been merged.
* The algorithm therefore has to keep a list of mappings. This set
* of mappings is called the "set of equivalences" or the "equivalence
* mapping list".
*
* Considering again pixel Q, we see that every time we inspect a pixel's
* upper neighbour and retrieve its label (idUp), we have to look up this
 * label in the equivalence mapping list, and (if found) replace it by the
* label it maps to.
*
* At pixel Q we therefore retrieve the mapping 2 --> 1 from the
* equivalence mapping list. With this new label number, we then
* continue normal processing: We give to pixel Q the label 1, and add
* the pixel to the comp_t of component 1.
*
* This ensures that the labels of all components that have been merged
* into other components have an entry in the equivalence mapping list.
 * And it ensures that any time a label L is encountered that no longer
* has a comp_t, it is replaced by the label of a component that does have
* a comp_t, namely the component into which L has been merged.
*
* The equivalence mapping list tells us how to remap the "old" labels in
* the line above to the latest current labels, i.e. the labels containing
* the comp_t in which the counts are stored. The equivalence mapping list
* "reroutes" things so that counts are added to the correct newest labels,
 * instead of being added to "old" labels.
*
* This gets us the correct counts (number of pixels, bounding box, xS,
* yS) for each connected component after ONE single loop through the image.
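 *
 * A minimal array-based sketch of such an equivalence mapping (an
 * illustration of the idea only; in this code the mapping is part of
 * lm_t, accessed through lm_addEquiv() and lm_getEquiv(), and the
 * constant MAX_LABELS below is hypothetical):
 *
 *     // One entry per possible label; LABEL_EMPTY means "no mapping".
 *     //   equiv[K] == L   means   label K --> label L
 *     label_t equiv[ MAX_LABELS ];       // initialized to LABEL_EMPTY
 *
 *     // At pixel P: label idUp has been merged into label idLeft.
 *     equiv[ idUp ] = idLeft;
 *
 *     // At pixel Q: resolve the label read from the line above.
 *     label_t id = idUp;
 *     if ( equiv[ id ] != LABEL_EMPTY )
 *     {
 *         id = equiv[ id ];              // e.g. 2 --> 1
 *     }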
*
*/
/*****************************************************************************
* ALGORITHM OPTIMIZATIONS AND IMPLEMENTATION
*
* >> Algorithm optimization 1: lineAcc
*
* The diagrams for the examples above suggested that a separate image is
* used to store the label numbers for each pixel. However this is not
* necessary for our purpose. We only loop through the image once, line
* by line. At any pixel P during the loop, we only need the label of
* the pixel left of P and the label of the pixel above P. Therefore it
* suffices for us to store only ONE LINE of label data.
*
* I use an array of label numbers, called the "line accumulator" or lineAcc,
* of the same size as the width of the image. The input image is processed
* line by line, the lineAcc moving from top to bottom over the input image:
*
* +---------------------------+
* | |
* | |
* +-----------------------------+ |
* | | | lineAcc
* +-----------------------------+ V
* | |
* | |
* | |
* | |
* | |
* +---------------------------+
*
* lineAcc is empty initially (all elements set to LABEL_EMPTY).
*
* The lineAcc is moved over the image line by line, top to bottom. In
* each successive vertical position of lineAcc, the pixels of that
* line of the image are iterated through left to right.
*
* Consider the algorithm in pixel P, at pixel coordinates (iX,iY). The
* elements left of iX in lineAcc have been overwritten by the labels
* of the pixels on line iY left of pixel P. The elements in lineAcc
* at iX and further to the right still hold the labels of the pixels
* in the line iY-1 above. (lineAcc is represented by the hashes #.)
*
* +---+---+---+---+---+---+---+
* | | | | | | | |
* +---+---+---##################
* | | | # # # # # lineAcc
* ###############################
* lineAcc # # # # P | | | | --- iY
* ##############---+---+---+---+
* | | | | | | | |
* +---+---+---+---+---+---+---+
* | | | | | | | |
* +---+---+---+---+---+---+---+
* | | | | | | | |
* +---+---+---+---+---+---+---+
* | | | | | | | |
* +---+---+---+---+---+---+---+
* |
* iX
*
* The new label value Lp of pixel P is determined from the label of the
 * pixel to its left (which is lineAcc[iX-1]), and from the label of the
 * pixel above, which at that point still sits in lineAcc[iX], left over from
* the processing of the previous line. The label value Lp is then
* written into lineAcc[iX], overwriting its previous contents (which
* isn't needed anymore). Then we advance to the next pixel Q on the
* right and repeat.
*
* +---+---+---+---+---+---+---+
* | | | | | | | |
* +---+---+---+---##############
* | | | | # # # # lineAcc
* ###############################
* lineAcc # # # # # Q | | | --- iY
* ##################---+---+---+
* | | | | | | | |
* +---+---+---+---+---+---+---+
* | | | | | | | |
* +---+---+---+---+---+---+---+
* | | | | | | | |
* +---+---+---+---+---+---+---+
* | | | | | | | |
* +---+---+---+---+---+---+---+
* |
* iX
*
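 * In code, one per-pixel step of this scheme looks roughly as follows.
 * (A simplified sketch: lineAcc is shown as a plain array, and
 * deriveLabel() is a placeholder for the labeling rule described in the
 * ALGORITHM section; the real loop is in processOneLine() below.)
 *
 *     label_t idLeft = (iX > 0) ? lineAcc[iX-1]   // already holds line iY
 *                               : LABEL_EMPTY;    // no pixel left of P
 *     label_t idUp   = lineAcc[iX];               // still holds line iY-1
 *
 *     label_t idThis = deriveLabel( idLeft, idUp );
 *
 *     lineAcc[iX] = idThis;   // overwrite: the value from line iY-1 is
 *                             // not needed anymore; advance to iX+1
 *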
*
** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
*
* >> Algorithm optimization 2:
* lm_t, and Recycling of label numbers
*
* The labels only serve as references to the data associated with
* the label. This data consists of:
*
* - The comp_t data of the component, holding the running
* sums for the quantities to be computed for the component
*
* - The equivalence mapping of the label to another label.
*
* For the algorithm to be fast, it is necessary that given a label number,
* the data for the label can be accessed very quickly.
*
* Logically, the functionality needed is a so-called MAP datastructure
* (associative array) where the key into the map is the label number, and
* where the data values stored for a given key are the ones listed above.
*
* The fastest map implementation for integer keys is a simple array,
* where the array index is the integer key.
*
* Images can be very big, with many components, and many instances of
* merging components. This can lead to "sparse" sets of labels for which
* data needs to be stored, even if the new labels issued are successive
* integers. This would make the array implementation wasteful of memory
* space, and would suggest that a more elaborate map implementation,
* e.g. with balanced trees, would be better. However, the latter lose
* some of the unmatched speed of the array.
*
* The array implementation can however be made to work, if we recycle
* label numbers. "Recycle" means that during the processing of the image,
* once it is certain the label number of a component is no longer needed,
* we reuse the label number for a new component. This limits the number
 * of different label numbers drastically (compared to using the successive
* integers 1, 2, ... for each new component encountered, throughout
* the image).
*
 * In our line-by-line processing, we determine the labels of the
 * current line from the labels of two lines, namely the current line
 * and the line above it. Therefore the largest number of DIFFERENT
 * labels that the algorithm has to keep track of at any time is the
 * maximum number of different components that can fit inside two
 * successive lines. For our four-connected components this maximum
 * equals the width SizeX of the image in pixels: within two successive
 * lines, the two pixels of any one column are vertical neighbours and
 * therefore belong to the same component, so each column can contribute
 * at most one distinct component. The diagram below shows a pattern
 * that reaches this maximum.
*
* +---+---+---+---+---+---+---+
* | X | . | X | . | X | . | X | X = white pixel
* +---+---+---+---+---+---+---+ . = black pixel
* | . | X | . | X | . | X | . |
* +---+---+---+---+---+---+---+
* | |
* |<-------SizeX pixels------>|
*
* All the label numbers that were once used in the lines further above
* can no longer influence the algorithm, and can therefore be recycled
* for use for new components. Before reusing an old label number, its
* comp_t (when not empty) is written to output as a completed component,
* and then cleared so that the new component starts with zero counts.
*
* With label recycling, SizeX different labels therefore suffice, which
* makes the array implementation very feasible (an array of size SizeX).
*
* Recycling of label numbers means that lists of free and used labels
* must be kept. The "used-list" must be random-access, i.e. it must
 * be possible to return ANY of the used labels to the free list.
*
* Since the array implementation is the fastest possible, I chose to use
 * that implementation. I use one and the same array for the map
* from labels to comp_t, for the equivalence mapping list (which is a map
* from labels to labels), and also for the free-list and the used-list.
 * I called the resulting data structure lm_t (for Label Map); it is
 * implemented in lmap.h and lmap.c. The header file 'lmap.h' contains
 * documentation about the implementation details.
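 *
 * As a rough illustration of the idea (NOT the actual lm_t layout,
 * which is documented in lmap.h), one can picture one record per
 * possible label, all held in a single array indexed by label number;
 * the struct and MAX_LABELS below are hypothetical:
 *
 *     typedef struct {
 *         comp_t  comp;        // running sums owned by this label
 *         label_t equiv;       // label this one maps to, or LABEL_EMPTY
 *         int     lastLineNr;  // line where this label was last seen
 *         label_t prev, next;  // links of the free-list / used-list
 *     } lmEntry_t;
 *
 *     lmEntry_t entries[ MAX_LABELS ];   // MAX_LABELS is about SizeX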
*
*/
#include "comp.h" /* comp_t */
#include "image.h" /* image_t, pix_t, pixel values. */
#include "label.h" /* label_t, LABEL_EMPTY */
#include "lineacc.h" /* lineAcc_t */
#include "lmap.h" /* lm_t */
#include "debug.h" /* DBG_print(), DBG_xxx_dump() */
#include <assert.h>  /* assert() */
/* Forward declaration */
static
void processOneLine(
pix_t const * in_pStartOfLine,
int in_sizeX,
int in_iY,
lineAcc_t * io_pLineAcc,
lm_t * io_pLabelMap,
void (*usrOutComp)( comp_t const * ) );
/*
* processImage()
* -- Find the pixel counts, centers-of-mass, and bounding boxes of
* all the four-connected components in a binary (black-or-white)
* image.
*
* Parameters:
*
* in_pImg = The input image. Pixels must have value
* either 0x00 (blank) or 0xff (component pixel).
*
* io_pLineAcc = Pointer to lineAcc_t instance. Used internally
* in the function to hold component label IDs for
* the pixels in one line.
*
* io_pLabelMap = Pointer to lm_t instance. Used internally in the
* function to store all data for each component label.
*
* usrOutComp = User-defined function. Called once for each
* completed component. This is how the output
* components are transferred to the caller.
*
* io_pLineAcc and io_pLabelMap are only used internally by the function,
* and on exit do not contain data useful to the caller. However they must
* be instantiated by the caller (see accompanying 'main()' function). I
* chose to have the caller instantiate them because for images of the same
* width, both datastructures can be reused, which saves the allocation
* and deallocation time. Thus for processing a set of images (all of
* the same width), the user allocates io_pLineAcc and io_pLabelMap once,
* then processes all his images, and then deallocates the two temporary
* datastructures.
*
* If a more user-friendly function is desired for processing just one
 * single image, then it's trivial to write a wrapper function that takes only
* the two parameters in_pImg and usrOutComp, and that (instead of the
* input-output parameters io_pLineAcc and io_pLabelMap) has local lineAcc_t
* and lm_t variables and (de)allocates these locally.
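 *
 * Such a wrapper could look roughly like the sketch below. The
 * lineAcc_create()/lm_create() and matching destroy calls are
 * placeholders for whatever allocation routines the accompanying
 * main() actually uses to instantiate the two data structures:
 *
 *     void processSingleImage(
 *         image_t const * in_pImg,
 *         void (*usrOutComp)( comp_t const * ) )
 *     {
 *         lineAcc_t * pLineAcc  = lineAcc_create( in_pImg->size_x );
 *         lm_t *      pLabelMap = lm_create( in_pImg->size_x );
 *
 *         processImage( in_pImg, pLineAcc, pLabelMap, usrOutComp );
 *
 *         lm_destroy( pLabelMap );
 *         lineAcc_destroy( pLineAcc );
 *     }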
*
*/
void processImage(
image_t const * in_pImg,
lineAcc_t * io_pLineAcc,
lm_t * io_pLabelMap,
void (*usrOutComp)( comp_t const * ) )
{
/* Begin the processing of the image with cleared LabelMap and
* LineAccumulator.
*/
lm_init( io_pLabelMap );
lineAcc_init( io_pLineAcc );
/*
* Loop over the successive horizontal lines of the image.
*
* Each iteration advances the lineAccumulator to the next line
* of the input image.
*/
pix_t const * pStartOfLine = in_pImg->data; /*init*/
int iY;
for ( iY = 0; iY < in_pImg->size_y; iY++ )
{
DBG_imgLine_dump( pStartOfLine, in_pImg->size_x );
/* Compute the value of lineAcc for this line (= iY) from
* - The current input line, and
* - The previous contents of lineAcc (containing the information
* from the upper part of the image needed for this line).
*/
processOneLine(
pStartOfLine,
in_pImg->size_x,
iY,
io_pLineAcc, io_pLabelMap,
usrOutComp );
DBG_lineAcc_dump( io_pLineAcc );
/*
* Purge from labelMap all labels that are no longer used,
* and write completed components to output.
*/
lm_purgeOldLabels( io_pLabelMap, iY, usrOutComp );
/* Advance to the next line of input pixels.
*/
pStartOfLine += in_pImg->size_x;
}
/*
* Finally: Write all components remaining in labelMap to output.
*/
lm_purgeOldLabels(
io_pLabelMap,
in_pImg->size_y, /*past the last line*/
usrOutComp );
}
/*
* processOneLine()
* -- Process one horizontal line of the input image.
*
* Used by: 'processImage()'.
*
* This function takes the following inputs:
*
* - The previous lineAcc, holding data for the previous line in the image;
* - The pixel data of the current line in the input image.
*
 * and from this information, it updates lineAcc so that lineAcc now
* holds data for the current line of the input image.
*
 * The function moves from left to right through the input line; at each
 * pixel it considers the pixel to its left, and the pixel above it
 * (stored in the input lineAcc).
*
* During this processing, the function also updates the data for the
 * connected components (stored in io_pLabelMap): it adds the scanned
* pixels to components, merges previous components together, and creates
* new labels and new components.
*
* Parameters:
*
* in_pStartOfLine = The input line.
* in_sizeX = Nr of pixels in input line.
* in_iY = Current line number within image
*
* io_pLineAcc = Pointer to lineAcc_t instance. On entering the
* function, this holds the lineAcc for the previous
* line. The function replaces its contents so that
* on exit it contains the data for the current line.
*
* io_pLabelMap = Pointer to lm_t instance, used to store all data
* for each component label, throughout the processing
* of the image.
*
* usrOutComp = User-defined function, for writing each completed
* component to output.
*/
static
void processOneLine(
pix_t const * in_pStartOfLine,
int in_sizeX,
int in_iY,
lineAcc_t * io_pLineAcc,
lm_t * io_pLabelMap,
void (*usrOutComp)( comp_t const * ) )
{
int iX;
for ( iX = 0; iX < in_sizeX; iX++ )
{
pix_t pixThis = in_pStartOfLine[iX];
if ( pixThis == PIX_BLACK )
{
lineAcc_put( io_pLineAcc, iX, LABEL_EMPTY );
}
else
if ( pixThis == PIX_WHITE )
{
/* Get the label idLeft of the pixel to the left. */
label_t idLeft = LABEL_EMPTY; /*init*/
if ( iX > 0 )
{
idLeft = lineAcc_get( io_pLineAcc, iX-1 );
}
/* Get the label idUp of the pixel above. */
label_t idUp = lineAcc_get( io_pLineAcc, iX );
/*
* Determine, from idLeft and idUp, the label 'idThis'
* of the current pixel.
*/
label_t idThis = LABEL_EMPTY; /*init*/
if ( (idLeft == LABEL_EMPTY) && (idUp == LABEL_EMPTY) )
/* Both Left and Up are black pixels
* ==> Give the current pixel a new label.
*/
{
idThis = lm_getFreeLabel( io_pLabelMap );
}
else
if ( idLeft == LABEL_EMPTY )
/* Left is black and Up is white
* ==> Propagate label idUp of Upper pixel.
* If idUp is in the equivalence mapping list, then
* first replace idUp by its equivalence.
*/
{
idThis = idUp;
/* Lookup idUp in Equivalences-List; replace by equivalence. */
label_t idEq = lm_getEquiv( io_pLabelMap, idUp );
if ( idEq != LABEL_EMPTY )
{
DBG_print( "\tiX=%d: retr %u --> %u\n", iX, idUp, idEq );
idThis = idEq;
}
}
else
if ( idUp == LABEL_EMPTY )
/* Left is white and Up is black
* ==> Propagate label idLeft of Left pixel.
*/
{
idThis = idLeft;
}
else
/* Both Left and Up are white pixels.
* ==> If idLeft != idUp, then merge component idUp into
 * component idLeft, and register that label idUp now
* maps to label idLeft.
* Unconditionally, the current pixel always gets the
* label idLeft.
*/
{
if ( idLeft != idUp )
{
DBG_print( "\tiX=%d: AddEq %u --> %u\n", iX, idUp, idLeft );
/* Remember that idUp is the same component as idLeft. */
lm_addEquiv( io_pLabelMap, idUp, idLeft );
/* Add all counts of component idUp into component idLeft;
 * then delete component idUp from the component list.
*/
lm_mergeCompIntoComp(
io_pLabelMap,
idLeft/*dst*/,
idUp /*src*/ );
}
idThis = idLeft;
}
/* Assign the label value idThis to the current pixel.
*/
lineAcc_put( io_pLineAcc, iX, idThis );
/* Add the current pixel to the comp_t for component 'idThis'.
*/
lm_addPixelToComp( io_pLabelMap, idThis, iX, in_iY );
/* Mark label 'idThis' as used in this line 'in_iY'.
* (On each label, we store the line number where the label
* was last seen. This allows us to detect label numbers that
* are no longer in use.)
*/
lm_updateLineNr( io_pLabelMap, idThis, in_iY );
}
else
{ /* Error: illegal pixel value in input */
assert( 0 );
}
}/*iX*/
}