#include "linalg.h"
#include <stdlib.h>
#include <stdbool.h>
#include <assert.h>
#include <math.h>
#include <stdio.h>

typedef struct {
    double summand;
    double correction;
} positive_sum_t;

void positive_sum_init( positive_sum_t *sum, double initial_value ) {
    assert( initial_value >= 0.0 );

    sum->summand = initial_value;
    sum->correction = 0.0;
}

double positive_sum_value( positive_sum_t *sum ) {
    return sum->summand;
}

void positive_sum( positive_sum_t *sum, double addend ) {
    double corrected_smaller, corrected_sum;
    assert( (addend >= 0.0) || isnan( addend ) );

    if ( sum->summand < addend ) {
        corrected_smaller = sum->summand - sum->correction;
        corrected_sum = addend + corrected_smaller;
        sum->correction = (corrected_sum - addend) - corrected_smaller;
    } else {
        corrected_smaller = addend - sum->correction;
        corrected_sum = sum->summand + corrected_smaller;
        sum->correction = (corrected_sum - sum->summand) - corrected_smaller;
    }

    sum->summand = corrected_sum;
}

typedef struct {
    double summand[2];
    double correction[2];
} mixed_sum_t;

void mixed_sum_init( mixed_sum_t *sum, double initial_value ) {
    if ( initial_value > 0 ) {
        sum->summand[0] = initial_value;
        sum->summand[1] = 0.0;
    } else {
        sum->summand[0] = 0.0;
        sum->summand[1] = initial_value;
    }

    sum->correction[0] = 0.0;
    sum->correction[1] = 0.0;
}

double mixed_sum_value( mixed_sum_t *sum ) {
    return (sum->summand[0] + sum->summand[1])
       - (sum->correction[0] + sum->correction[1]);
}

void mixed_sum( mixed_sum_t *sum, double addend ) {
    double corrected_smaller, corrected_sum;
    size_t index = (addend > 0) ? 0 : 1;

    if ( fabs( sum->summand[index] ) < fabs( addend ) ) {
        corrected_smaller = sum->summand[index] - sum->correction[index];
        corrected_sum = addend + corrected_smaller;
        sum->correction[index] = (corrected_sum - addend) - corrected_smaller;
    } else {
        corrected_smaller = addend - sum->correction[index];
        corrected_sum = sum->summand[index] + corrected_smaller;
        sum->correction[index] = (corrected_sum - sum->summand[index]) - corrected_smaller;
    }

    sum->summand[index] = corrected_sum;
}
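
/**********************************************************
 * Note on the two accumulators above
 *
 * Both positive_sum and mixed_sum implement a form of
 * compensated (Kahan-style) summation:  the 'correction'
 * term captures the low-order bits that are lost when a
 * large running total and a much smaller addend are added
 * in floating point.  A minimal usage sketch (illustrative
 * only; the variable names are hypothetical):
 *
 *     mixed_sum_t acc;
 *     size_t      k;
 *
 *     mixed_sum_init( &acc, 0.0 );
 *
 *     for ( k = 0; k < 1000000; ++k ) {
 *         mixed_sum( &acc,  1.0e-8 );   // positive terms
 *         mixed_sum( &acc, -1.0e-8 );   // negative terms are
 *     }                                 //  accumulated separately
 *
 *     printf( "%f\n", mixed_sum_value( &acc ) );
 **********************************************************/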

/**********************************************************
 * Generate a random number
 *
 * Preconditions:
 *   - None
 *
 * Generate a random number that is either
 *   1. uniformly distributed on the interval (0, 1)
 *   2. uniformly distributed on the interval (-1, 1)
 *   3. standard-normally distributed
 *        - mean zero with a standard deviation of one
 *        - we use the Box-Muller transform
 **********************************************************/

double get_random( distribution_t dist ) {
    double return_value;
    static size_t n = 0;
    static double r, t;

    switch( dist ) {
        case UNIFORM_POSITIVE:
            do {
                return_value = drand48();
            } while ( return_value == 0.0 );

            break;
        case UNIFORM_SYMMETRIC:
            do {
                return_value = 2.0*drand48() - 1.0;
            } while ( return_value == -1.0 );

            break;
        case STANDARD_NORMAL:
            if ( n == 0 ) {
                r = sqrt( -2.0*log( 1.0 - drand48() ) );
                t = 2.0*M_PI*drand48();
                n = 1;
                return_value = r*cos(t);
            } else {
                n = 0;
                return_value = r*sin(t);
            }
            break;
        default:
            assert( false );
    }

    return return_value;
}
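
/**********************************************************
 * Example use of get_random (an illustrative sketch; it
 * assumes the caller has already seeded the generator,
 * for example with srand48):
 *
 *     double x = get_random( UNIFORM_POSITIVE );   // x in ( 0, 1)
 *     double y = get_random( UNIFORM_SYMMETRIC );  // y in (-1, 1)
 *     double z = get_random( STANDARD_NORMAL );    // mean 0, std. dev. 1
 **********************************************************/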

/**********************************************************
 * Select string to print based on language
 *
 * Preconditions:
 *   - None
 *
 * Select the format of printing based on the language
 * indicated.
 **********************************************************/

int vector_print_language( char *maple, char *matlab, char *c_mathematica, output_t language ) {
    int return_value = 0;

    switch( language ) {
        case MAPLE:
            return_value = printf( "%s", maple );
            break;
        case MATLAB:
            return_value = printf( "%s", matlab );
            break;
        case C:
        case MATHEMATICA:
            return_value = printf( "%s", c_mathematica );
            break;
        default:
            assert( false );
    }

    return return_value;
}

/**********************************************************
 * ****************************************************** *
 * *                                                    * *
 * *               Vector data structure                * *
 * *                                                    * *
 * ****************************************************** *
 **********************************************************/

/**********************************************************
 * Create a vector
 *
 * Preconditions:
 *   - The vector has not been previously initialized.
 *
 * Allocate the memory for the entries of a vector of
 * dimension 'n' but do not initialize any entries.
 **********************************************************/

void vector_init( vector_t *this, size_t n ) {
    this->dimension = n;
    this->a_values = (double *)malloc( n*sizeof( double ) );
}

/**********************************************************
 * Create a constant vector
 *
 * Preconditions:
 *   - The vector has not been previously initialized,
 *     which cannot be verified
 *
 * Allocate the memory for the entries of a vector of
 * dimension 'n' and assign each entry the scalar
 * 'value':
 *
 *     this[j] <- value             for j = 0, ..., N - 1
 **********************************************************/

void vector_init_scalar( vector_t *this, size_t n, double value ) {
    size_t i;

    this->dimension = n;
    this->a_values = (double *)malloc( n*sizeof( double ) );

    for ( i = 0; i < n; ++i ) {
        this->a_values[i] = value;
    }
}

/**********************************************************
 * Create a copy of an existing vector
 *
 * Preconditions:
 *   - 'this' vector has not been previously initialized,
 *     which cannot be verified
 *
 * Allocate the memory for the entries of a vector of
 * dimension 'n' and assign each entry the corresponding
 * entry in 'v':
 *
 *     this[j] <- v[j]              for j = 0, ..., N - 1
 **********************************************************/

void vector_init_vector( vector_t *this, vector_t *v ) {
    size_t i;

    this->dimension = v->dimension;
    this->a_values = (double *)malloc( (v->dimension)*sizeof( double ) );

    for ( i = 0; i < v->dimension; ++i ) {
        this->a_values[i] = v->a_values[i];
    }
}

/**********************************************************
 * Destroy this vector
 *
 * Preconditions:
 *   - The data structure was properly initialized,
 *     which cannot be verified
 *
 * Deallocate the memory allocated for the array of
 * entries, and set the corresponding pointer to NULL.
 **********************************************************/

void vector_destroy( vector_t *this ) {
    free( this->a_values );
    this->a_values = NULL;
}

/**********************************************************
 * Dimension of a vector
 *
 * Preconditions:
 *   - None
 *
 * Return the dimension of the vector.
 **********************************************************/

size_t vector_dimension( vector_t *this ) {
    return this->dimension;
}

/**********************************************************
 * Access an entry of a vector
 *
 * Preconditions:
 *   - The index is less than the dimension of this vector
 *
 *             th
 * Return the i   entry of this vector:  this[i]
 **********************************************************/

double vector_get( vector_t *this, size_t i ) {
    assert( i < this->dimension );
    return this->a_values[i];
}

/**********************************************************
 * Assign an entry in a vector
 *
 * Preconditions:
 *   - The index is less than the dimension of this vector
 *
 *             th
 * Assign the i   entry of this vector the scalar 'value':
 *     this[i] <- value
 **********************************************************/

void vector_set( vector_t *this, size_t i, double value ) {
    assert( this->dimension > i );
    this->a_values[i] = value;
}

/**********************************************************
 * Vector assignment of a vector
 *
 * Preconditions:
 *   - The dimensions of 'this' and 'v' are equal
 *
 * For each entry in 'this' vector of dimension N,
 * assign it the value of the corresponding entry in 'v':
 *     this[j] <- v[j]               for j = 0, ..., N - 1
 **********************************************************/

void vector_vector_assign( vector_t *this, vector_t *v ) {
    size_t i;

    assert( this->dimension == v->dimension );

    for ( i = 0; i < this->dimension; ++i ) {
        this->a_values[i] = v->a_values[i];
    }
}

/**********************************************************
 * Scalar assignment of a vector
 *
 * Preconditions:
 *   - None
 *
 * For each entry in 'this' vector of dimension N,
 * assign it the scalar 'value':
 *     this[j] <- value          for j = 0, ..., N - 1
 **********************************************************/

void vector_scalar_assign( vector_t *this, double value ) {
    size_t i;

    for ( i = 0; i < this->dimension; ++i ) {
        this->a_values[i] = value;
    }
}

/**********************************************************
 * Vector-scalar multiplication
 *
 * Preconditions:
 *   - None
 *
 * Multiply each entry of this vector of dimension N
 * by 'value', so for each entry,
 *     this[j] <- value * this[j]   for j = 0, ..., N - 1
 **********************************************************/

void vector_scalar_multiply( vector_t *this, double value ) {
    size_t i;

    for ( i = 0; i < this->dimension; ++i ) {
        this->a_values[i] *= value;
    }
}

/**********************************************************
 * Vector-scalar addition
 *
 * Preconditions:
 *   - None
 *
 * Add the scalar 'value' onto each entry of this vector
 * of dimension N, so for each entry,
 *     this[j] <- this[j] + value    for j = 0, ..., N - 1
 **********************************************************/

void vector_scalar_add( vector_t *this, double value ) {
    size_t i;

    for ( i = 0; i < this->dimension; ++i ) {
        this->a_values[i] += value;
    }
}

/**********************************************************
 * Vector-vector addition
 *
 * Preconditions:
 *   - The dimensions of 'this' and 'v' are equal.
 *
 * Add the vector 'v' onto this vector, each of
 * dimension N, so for each entry,
 *     this[j] <- this[j] + v[j]     for j = 0, ..., N - 1
 **********************************************************/

void vector_vector_add( vector_t *this, vector_t *v ) {
    size_t i;

    assert( this->dimension == v->dimension );

    for ( i = 0; i < this->dimension; ++i ) {
        this->a_values[i] += v->a_values[i];
    }
}

/**********************************************************
 * Addition of a scalar multiple of a vector to a vector
 *
 * Preconditions:
 *   - The dimensions of 'this' and 'v' are equal.
 *
 * Add a scalar multiple of the vector 'v' onto this
 * vector (each of dimension N), so for each entry,
 *     this[j] <- this[j] + value*v[j]
 *                                   for j = 0, ..., N - 1
 **********************************************************/

void vector_vector_scalar_add( vector_t *this, vector_t *v, double value ) {
    size_t i;

    assert( this->dimension == v->dimension );

    for ( i = 0; i < this->dimension; ++i ) {
        this->a_values[i] += value*v->a_values[i];
    }
}

/**********************************************************
 * Inner product
 *
 * Preconditions:
 *   - The dimensions of 'u' and 'v' are equal.
 *
 * Calculate the inner product of two vectors,
 * each of dimension N:
 *
 *                N
 *              -----
 *               \
 *     <u, v> =   )   u[j] v[j]
 *               /
 *              -----
 *              j = 1
 **********************************************************/

double inner_product( vector_t *u, vector_t *v ) {
    size_t i;
    mixed_sum_t sum;

    assert( u->dimension == v->dimension );

    mixed_sum_init( &sum, 0.0 );

    for ( i = 0; i < u->dimension; ++i ) {
        mixed_sum( &sum, u->a_values[i]*v->a_values[i] );
    }

    return mixed_sum_value( &sum );
}
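
/**********************************************************
 * Example (an illustrative sketch):  the inner product of
 * (1, 2, 3) with itself is 1 + 4 + 9 = 14.
 *
 *     vector_t u;
 *
 *     vector_init( &u, 3 );
 *     vector_set( &u, 0, 1.0 );
 *     vector_set( &u, 1, 2.0 );
 *     vector_set( &u, 2, 3.0 );
 *
 *     printf( "%f\n", inner_product( &u, &u ) );   // prints 14.000000
 *
 *     vector_destroy( &u );
 **********************************************************/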

/**********************************************************
 * Vector projection
 *
 * Preconditions:
 *   - The dimensions of 'this' and 'v' are equal.
 *
 * Project 'this' vector onto the vector 'v':
 *
 *             <this, v>
 *     this <- --------- v
 *              <v, v>
 *
 * If 'v' is the zero vector, then
 *         this <- 0
 **********************************************************/

void vector_project( vector_t *this, vector_t *v ) {
    size_t i;
    double multiplier;

    assert( this->dimension == v->dimension );

    multiplier = inner_product( v, v );

    if ( multiplier == 0.0 ) {
        for ( i = 0; i < this->dimension; ++i ) {
            this->a_values[i] = 0.0;
        }
    } else {
        multiplier = inner_product( v, this )/multiplier;

        for ( i = 0; i < this->dimension; ++i ) {
            this->a_values[i] = multiplier*v->a_values[i];
        }
    }
}
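
/**********************************************************
 * Worked example (illustrative):  projecting this = (1, 1)
 * onto v = (2, 0) uses the multiplier
 *
 *     <this, v>/<v, v> = 2/4 = 0.5
 *
 * so 'this' becomes 0.5*(2, 0) = (1, 0).
 **********************************************************/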

/**********************************************************
 * Vector perpendicularization
 *
 * Preconditions:
 *   - The dimensions of 'this' and 'v' are equal.
 *
 * Find the perpendicular component of 'this' vector in
 * relation to the vector 'v'.  This is equivalent to
 * subtracting off the projection of 'this' onto 'v' from
 * 'this':
 *
 *                    <this, v>
 *     this <- this - --------- v
 *                     <v, v>
 *
 * If 'v' is the zero vector, then 'this' is unchanged.
 **********************************************************/

void vector_perpendicularize( vector_t *this, vector_t *v ) {
    size_t i;
    double multiplier;

    assert( this->dimension == v->dimension );

    multiplier = inner_product( v, v );

    if ( multiplier != 0.0 ) {
        multiplier = inner_product( v, this )/multiplier;

        for ( i = 0; i < this->dimension; ++i ) {
            this->a_values[i] -= multiplier*v->a_values[i];
        }
    }
}

/**********************************************************
 * Vector norms
 *
 * Preconditions:
 *   - None
 *
 * Given this vector u of dimension N, this function
 * calculates one of:
 *
 *
 *     The 1-norm        The mean error
 *      N                     N
 *    -----                 -----
 *     \                  1  \
 *      )   | u  |        -   )   | u  |
 *     /        j         N  /        j
 *    -----                 -----
 *    j = 1                 j = 1
 *      The 2-norm          The 2-norm squared
 *         ------------
 *        / N                     N
 *       /-----                 -----
 *      /  \          2          \          2
 *     /    )   | u  |            )   | u  |
 * \  /    /        j            /        j
 *  \/    -----                 -----
 *        j = 1                 j = 1
 *
 * Root mean squared error   Mean squared error
 *         --------------
 *        /   N                    N
 *       /  -----                -----
 *      / 1  \          2      1  \          2
 *     /  -   )   | u  |       -   )   | u  |
 * \  /   N  /        j        N  /        j
 *  \/      -----                -----
 *          j = 1                j = 1
 *
 *     The infinity norm
 *    max    { | u  | }
 *  j = 1..N       j
 *
 * Note:  The purpose of calculating the two-normed
 *        squared is that this can be more efficiently
 *        calculated than <u, u>.
 **********************************************************/

double vector_norm( vector_t *this, norm_t n ) {
    size_t i;
    double return_value, value;
    positive_sum_t sum;

    positive_sum_init( &sum, 0.0 );

    switch ( n ) {
        case ONE_NORM:
        case MEAN_ERROR:
            for ( i = 0; i < this->dimension; ++i ) {
                positive_sum( &sum, fabs( this->a_values[i] ) );
            }

            return_value = positive_sum_value( &sum );

            if ( n == MEAN_ERROR ) {
                return_value /= (double)(this->dimension);
            }

            break;
        case TWO_NORM:
        case TWO_NORM_SQUARED:
        case ROOT_MEAN_SQUARED_ERROR:
        case MEAN_SQUARED_ERROR:
            for ( i = 0; i < this->dimension; ++i ) {
                positive_sum( &sum, this->a_values[i] * this->a_values[i] );
            }

            return_value = positive_sum_value( &sum );

            if ( (n == ROOT_MEAN_SQUARED_ERROR) || (n == MEAN_SQUARED_ERROR) ) {
                return_value /= (double)(this->dimension);
            }

            if ((n == TWO_NORM) || (n == ROOT_MEAN_SQUARED_ERROR)) {
                return_value = sqrt( return_value );
            }

            break;
        case INFINITY_NORM:
            return_value = 0.0;

            for ( i = 0; i < this->dimension; ++i ) {
                value = fabs( this->a_values[i] );

                if ( value > return_value ) {
                    return_value = value;
                }
            }

            break;
        default:
            assert( false );
    }

    return return_value;
}
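
/**********************************************************
 * Example (an illustrative sketch):  for the vector
 * (3, -4), the 1-norm is 7, the 2-norm is 5 and the
 * infinity norm is 4.
 *
 *     vector_t u;
 *
 *     vector_init( &u, 2 );
 *     vector_set( &u, 0,  3.0 );
 *     vector_set( &u, 1, -4.0 );
 *
 *     printf( "%f %f %f\n",
 *         vector_norm( &u, ONE_NORM ),
 *         vector_norm( &u, TWO_NORM ),
 *         vector_norm( &u, INFINITY_NORM ) );
 *     // prints 7.000000 5.000000 4.000000
 *
 *     vector_destroy( &u );
 **********************************************************/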

/**********************************************************
 * Normalization
 *
 * Preconditions:
 *   - The requested norm is not the 2-norm squared or the
 *     mean squared error, as these are not norms
 *
 * Divide each value in the vector by the given norm:
 *
 *                this[j]
 *     this[j] <- --------
 *                ||this||
 *                        *
 *
 * where * is one of 1-, 2- or infinity norms; or the
 * mean error or the root mean squared error.
 *
 * If 'this' is the zero vector, then it is unchanged.
 **********************************************************/

void vector_normalize( vector_t *this, norm_t n ) {
    size_t i;
    double nrm;

    assert( n != TWO_NORM_SQUARED );
    assert( n != MEAN_SQUARED_ERROR );

    nrm = vector_norm( this, n );

    if ( nrm != 0.0 ) {
        for ( i = 0; i < this->dimension; ++i ) {
            this->a_values[i] /= nrm;
        }
    }
}

/**********************************************************
 * Distance between two vectors
 *
 * Preconditions:
 *   - The dimension of u and v are the same
 *
 * Calculate the norm of the difference of the two
 * vectors without explicitly calculating the vector
 * difference.
 *  - This is more efficient in both memory and speed
 **********************************************************/

double vector_distance( vector_t *u, vector_t *v, norm_t n ) {
    size_t i;
    double delta, return_value;
    const size_t dimension = u->dimension;
    positive_sum_t sum;

    assert( u->dimension == v->dimension );

    positive_sum_init( &sum, 0.0 );

    switch ( n ) {
        case ONE_NORM:
        case MEAN_ERROR:
            for ( i = 0; i < dimension; ++i ) {
                positive_sum( &sum, fabs( u->a_values[i] - v->a_values[i] ) );
            }

            return_value = positive_sum_value( &sum );

            if ( n == MEAN_ERROR ) {
                return_value /= (double)(u->dimension);
            }

            break;
        case TWO_NORM:
        case TWO_NORM_SQUARED:
        case ROOT_MEAN_SQUARED_ERROR:
        case MEAN_SQUARED_ERROR:
            for ( i = 0; i < dimension; ++i ) {
                delta = u->a_values[i] - v->a_values[i];
                positive_sum( &sum, delta*delta );
            }

            return_value = positive_sum_value( &sum );

            if ( (n == ROOT_MEAN_SQUARED_ERROR) || (n == MEAN_SQUARED_ERROR) ) {
                return_value /= (double)(u->dimension);
            }

            if ((n == TWO_NORM) || (n == ROOT_MEAN_SQUARED_ERROR)) {
                return_value = sqrt( return_value );
            }

            break;
        case INFINITY_NORM:
            return_value = 0.0;

            for ( i = 0; i < dimension; ++i ) {
                delta = fabs( u->a_values[i] - v->a_values[i]  );

                if ( delta > return_value ) {
                    return_value = delta;
                }
            }

            break;

        default:
            assert( false );
    }

    return return_value;
}

/**********************************************************
 * Perform the Gram-Schmidt method
 *
 * Preconditions:
 *   - The dimension of the vectors are all equal
 *
 * Perform the Gram-Schmidt method on an array of 'n'
 * vectors, producing an array of 'n' mutually orthogonal
 * vectors, each of which is either of unit length or zero.
 **********************************************************/

void gram_schmidt( vector_t **vs, size_t n ) {
    size_t i, j;

    for ( i = 0; i < n; ++i ) {
        assert( i == 0 || vs[i]->dimension == vs[0]->dimension );

        for ( j = 0; j < i; ++j ) {
            vector_perpendicularize( vs[i], vs[j] );
        }

        vector_normalize( vs[i], TWO_NORM );
    }
}
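
/**********************************************************
 * Example (an illustrative sketch):  orthonormalize three
 * vectors in place.  The array holds pointers to vectors
 * that have already been initialized and filled, all with
 * the same dimension.
 *
 *     vector_t u, v, w;
 *     vector_t *vs[3] = { &u, &v, &w };
 *
 *     // ... initialize and fill u, v and w ...
 *
 *     gram_schmidt( vs, 3 );
 *
 * Afterwards, inner_product( vs[i], vs[j] ) is
 * approximately zero for i != j, and each non-zero vs[i]
 * has a 2-norm of approximately one.
 **********************************************************/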

/**********************************************************
 * Generate a random vector
 *
 * Preconditions:
 *   - The vector has been initialized.
 *
 * Fill the vector with entries drawn from the given
 * distribution:
 *  UNIFORM_POSITIVE   from ( 0, 1)
 *  UNIFORM_SYMMETRIC  from (-1, 1)
 *  STANDARD_NORMAL    normally distributed with a mean
 *                     of 0 and a standard deviation of 1
 **********************************************************/

void random_vector( vector_t *this, distribution_t dist ) {
    size_t i;

    for ( i = 0; i < this->dimension; ++i ) {
        this->a_values[i] = get_random( dist );
    }
}

/**********************************************************
 * Print a vector
 *
 * Preconditions:
 *   - None
 *
 * Print a vector so that it is valid input for one
 * of four programming languages, including:
 *   - a Maple column vector,
 *   - a Matlab column vector,
 *   - a Mathematica vector, or
 *   - an initializer for a C array.
 **********************************************************/

int vector_printf( vector_t *this, char *str, output_t language ) {
    int return_value;
    size_t i;

    if ( this->dimension == 0 ) {
        return_value = vector_print_language( "<NULL>", "[]", "{}", language );
    } else {
        return_value = vector_print_language( "<", "[\n ", "{", language );
        return_value += printf( str, this->a_values[0] );

        for ( i = 1; i < this->dimension; ++i ) {
            return_value += vector_print_language( ", ", "\n ", ", ", language );
            return_value += printf( str, this->a_values[i] );
        }

        return_value += vector_print_language( ">:", "];", "};", language );
    }

    return return_value;
}
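
/**********************************************************
 * Example (an illustrative sketch):  printing the vector
 * (1, 2) with the format string "%f",
 *
 *     vector_printf( &u, "%f", MAPLE );
 *     vector_printf( &u, "%f", MATLAB );
 *
 * produces, respectively,
 *
 *     <1.000000, 2.000000>:
 *
 *     [
 *      1.000000
 *      2.000000];
 **********************************************************/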

/**********************************************************
 * ****************************************************** *
 * *                                                    * *
 * *    Sparse matrix data structure                    * *
 * *                                                    * *
 * *        based on the new Yale sparse matrix format  * *
 * *                                                    * *
 * ****************************************************** *
 **********************************************************/

/**********************************************************
 * Sparse-matrix storage scheme
 *
 * The matrix is stored in a variant of the new Yale
 * (compressed sparse row) format:
 *   - the diagonal entries are stored in the dense array
 *     'a_diagonal_values',
 *   - the non-zero off-diagonal entries are stored row by
 *     row, in increasing order of column index, in
 *     'a_off_diagonal_values', with the matching column
 *     indices in 'a_column_indices', and
 *   - row 'i' owns the off-diagonal entries with indices
 *     a_row_indices[i], ..., a_row_indices[i + 1] - 1.
 **********************************************************/
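
/**********************************************************
 * A small illustrative example of the storage scheme:
 * the 3 x 3 matrix
 *
 *     [ 1  0  2
 *       0  3  0
 *       4  0  5 ]
 *
 * is stored as
 *
 *     a_diagonal_values     = { 1.0, 3.0, 5.0 }
 *     a_off_diagonal_values = { 2.0, 4.0 }
 *     a_column_indices      = { 2,   0   }
 *     a_row_indices         = { 0, 1, 1, 2 }
 **********************************************************/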

/**********************************************************
 * Create a matrix
 *
 * Preconditions:
 *   - The matrix has not been previously initialized.
 *
 * Allocate the memory for the entries of a matrix of
 * dimension 'n' with a maximum of 'N' off-diagonal
 * entries, but do not initialize any diagonal entries.
 **********************************************************/

void matrix_init( sparse_matrix_t *this, size_t n, size_t N ) {
    size_t i;

    this->dimension = n;
    this->max_off_diagonal_values = N;
    this->a_row_indices         = (size_t *)malloc( (n + 1)*sizeof( size_t ) );
    this->a_column_indices      = (size_t *)malloc(    N   *sizeof( size_t ) );
    this->a_diagonal_values     = (double *)malloc(    n   *sizeof( double ) );
    this->a_off_diagonal_values = (double *)malloc(    N   *sizeof( double ) );

    for ( i = 0; i < n; ++i ) {
        this->a_row_indices[i] = 0;
    }

    this->a_row_indices[n] = 0;
}

/**********************************************************
 * Create a diagonal matrix
 *
 * Preconditions:
 *   - The matrix has not been previously initialized,
 *     which cannot be verified
 *
 * Allocate the memory for the entries of a matrix of
 * dimension equal to the dimension of the vector
 * with a maximum of 'N' off-diagonal entries, and assign
 * each diagonal entry the corresponding entry in the
 * vector 'v':
 *
 *     this[j][j] <- v[j]            for j = 0, ..., N - 1
 **********************************************************/

void matrix_init_vector( sparse_matrix_t *this, vector_t *v, size_t N ) {
    size_t i;

    this->dimension = v->dimension;
    this->max_off_diagonal_values = N;
    this->a_row_indices         = (size_t *)malloc( (v->dimension + 1)*sizeof( size_t ) );
    this->a_column_indices      = (size_t *)malloc(         N         *sizeof( size_t ) );
    this->a_diagonal_values     = (double *)malloc(    v->dimension   *sizeof( double ) );
    this->a_off_diagonal_values = (double *)malloc(         N         *sizeof( double ) );

    for ( i = 0; i < v->dimension; ++i ) {
        this->a_row_indices[i] = 0;
        this->a_diagonal_values[i] = v->a_values[i];
    }

    this->a_row_indices[v->dimension] = 0;
}

/**********************************************************
 * Create a constant diagonal matrix
 *
 * Preconditions:
 *   - The matrix has not been previously initialized,
 *     which cannot be verified
 *
 * Allocate the memory for the entries of a matrix of
 * dimension 'n' with a maximum of 'N' off-diagonal
 * entries, and assign each diagonal entry the
 * scalar 'value':
 *
 *     this[j][j] <- value           for j = 0, ..., N - 1
 **********************************************************/

void matrix_init_scalar( sparse_matrix_t *this, size_t n, size_t N, double value ) {
    size_t i;

    this->dimension = n;
    this->max_off_diagonal_values = N;
    this->a_row_indices         = (size_t *)malloc( (n + 1)*sizeof( size_t ) );
    this->a_column_indices      = (size_t *)malloc(    N   *sizeof( size_t ) );
    this->a_diagonal_values     = (double *)malloc(    n   *sizeof( double ) );
    this->a_off_diagonal_values = (double *)malloc(    N   *sizeof( double ) );

    for ( i = 0; i < n; ++i ) {
        this->a_row_indices[i] = 0;
        this->a_diagonal_values[i] = value;
    }

    this->a_row_indices[n] = 0;
}

/**********************************************************
 * Destroy this matrix
 *
 * Preconditions:
 *   - The data structure was properly initialized,
 *     which cannot be verified
 *
 * Deallocate the memory allocated for the arrays of
 * entries, and set the corresponding pointers to NULL.
 **********************************************************/

void matrix_destroy( sparse_matrix_t *this ) {
    // Free the dynamically allocated memory of each of the four arrays
    free( this->a_row_indices );
    free( this->a_column_indices );
    free( this->a_diagonal_values );
    free( this->a_off_diagonal_values );

    // Set the pointers to zero so that they are not accidentally reused
    this->a_row_indices         = NULL;
    this->a_column_indices      = NULL;
    this->a_diagonal_values     = NULL;
    this->a_off_diagonal_values = NULL;
}

/**********************************************************
 * Dimension of a matrix
 *
 * Preconditions:
 *   - None
 *
 * Return the dimension of the matrix.
 **********************************************************/

size_t matrix_dimension( sparse_matrix_t *this ) {
    return this->dimension;
}

/**********************************************************
 * Density of a matrix
 *
 * Preconditions:
 *   - None
 *
 * Return the density of the matrix:  the fraction of its
 * entries that are non-zero.  Zero diagonal entries are
 * not counted.
 **********************************************************/

double matrix_density( sparse_matrix_t *this ) {
    size_t i;
    double count;

    count = 0.0;

    for ( i = 0; i < this->dimension; ++i ) {
        if ( this->a_diagonal_values[i] != 0.0 ) {
            ++count;
        }
    }

    // The number of stored off-diagonal entries is a_row_indices[dimension]
    return (
        count + (double)(this->a_row_indices[this->dimension])
    )/((double)(this->dimension) * (double)(this->dimension));
}

/**********************************************************
 * Validate the data structure
 *
 * Preconditions:
 *   - None
 *
 * Returns true if the data structure describes a matrix,
 * and false otherwise.
 **********************************************************/

bool is_valid( sparse_matrix_t *this ) {
    bool is_valid;
    size_t i, j;

    is_valid = true;

    if ( this->dimension == 0 ) {
        fprintf( stderr, "The dimension of the matrix is zero\n" );
        is_valid = false;
    }

    if ( this->max_off_diagonal_values < this->a_row_indices[this->dimension] ) {
        fprintf( stderr, "Too many entries (%d) off the diagonal (max %d)\n",
                 (int)(this->a_row_indices[this->dimension]), (int)(this->max_off_diagonal_values) );
        is_valid = false;
    }

    if ( this->a_row_indices[0] != 0 ) {
        fprintf( stderr, "The first row index entery is %d and not 0\n",
                 (int)(this->a_row_indices[0]) );
        is_valid = false;
    }

    for ( i = 0; is_valid && (i < this->dimension); ++i ) {
        if ( this->a_row_indices[i] > this->a_row_indices[i + 1] ) {
            fprintf( stderr, "Rows %d and %d are out of order: %d %d\n",
                (int)i, (int)(i + 1),
                (int)this->a_row_indices[i], (int)this->a_row_indices[i + 1] );

            is_valid = false;
            break;
        }

        for ( j = this->a_row_indices[i]; j < this->a_row_indices[i + 1]; ++j ) {
            if ( this->a_off_diagonal_values[j] == 0.0d ) {
                fprintf( stderr,
                    "The off-diagonal entry at location (%d, %d) with index %d is zero\n",
                    (int)i, (int)(this->a_column_indices[j]),
                    (int)j );
            }
        }

        for ( j = this->a_row_indices[i] + 1; j < this->a_row_indices[i + 1]; ++j ) {
            if ( this->a_column_indices[j - 1] >= this->a_column_indices[j] ) {
                fprintf( stderr,
                    "In row %d, columns %d and %d are out of order: %d %d\n",
                    (int)i, (int)(j - 1), (int)j,
                    (int)this->a_column_indices[j - 1], (int)this->a_column_indices[j] );

                is_valid = false;
                break;
            }
        }

        if ( this->a_row_indices[i] < this->a_row_indices[i + 1] ) {
            if ( this->a_column_indices[this->a_row_indices[i + 1] - 1]
                   >= this->dimension ) {
                fprintf( stderr,
                    "The last column index %d is beyond the dimension %d\n",
                    (int)(this->a_column_indices[this->a_row_indices[i + 1] - 1]),
                    (int)(this->dimension) );
                is_valid = false;
                break;
            }
        }
    }

    return is_valid;
}

/**********************************************************
 * Access an entry of a matrix
 *
 * Preconditions:
 *   - The indices are less than the dimension of this matrix
 *
 *                 th
 * Return the (i,j)   entry of this matrix:  this[i][j]
 *
 * Note that this algorithm does a linear search on the
 * range row_indices[i] to row_indices[i + 1] - 1.  For
 * reasonably sparse matrices, with fewer than 10 entries
 * per row, this is faster than a binary search.
 **********************************************************/

double matrix_get( sparse_matrix_t *this, size_t i, size_t j ) {
    size_t first, last, k;
    double return_value;

    assert( this->dimension > i );
    assert( this->dimension > j );

    if ( i == j ) {
        return_value = this->a_diagonal_values[i];
    } else {
        first = this->a_row_indices[i];
        last  = this->a_row_indices[i + 1];

        // If no stored entry has column index 'j', the entry is zero
        return_value = 0.0;

        for ( k = first; k < last; ++k ) {
            if ( this->a_column_indices[k] >= j ) {
                return_value = ( this->a_column_indices[k] == j ) ?
                                         this->a_off_diagonal_values[k] : 0.0;
                break;
            }
        }
    }

    return return_value;
}

/**********************************************************
 * Set an entry of a matrix
 *
 * Preconditions:
 *   - The indices are less than the dimension of this matrix
 *
 *              th
 * Set the (i,j)   entry of this matrix to 'value'
 *
 * This is a potentially expensive algorithm, with a runtime
 * of O(max_off_diagonal_values), as potentially all stored
 * values may have to be shifted.  If it is known that the
 * entry being set would already fit into the last position,
 * use the 'matrix_append' function.
 **********************************************************/

void matrix_set( sparse_matrix_t *this, size_t i, size_t j, double value ) {
    size_t k, m;

    assert( this->dimension > i );
    assert( this->dimension > j );

    if ( i == j ) {
        this->a_diagonal_values[i] = value;
    } else {
        for ( k = this->a_row_indices[i]; k < this->a_row_indices[i + 1] && this->a_column_indices[k] < j; ++k ) {
            // do nothing
        }

        if ( k < this->a_row_indices[i + 1] && this->a_column_indices[k] == j ) {
            // We found it!!!  Now replace it...
            if ( value == 0.0 ) {
                for ( m = k + 1; m < this->a_row_indices[this->dimension]; ++m ) {
                    this->a_column_indices[m - 1]      = this->a_column_indices[m];
                    this->a_off_diagonal_values[m - 1] = this->a_off_diagonal_values[m];
                }

                for ( m = i + 1; m <= this->dimension; ++m ) {
                    --( this->a_row_indices[m] );
                }
            } else {
                this->a_off_diagonal_values[k] = value;
            }
        } else {
            // We didn't find it...add it
            if ( value != 0.0 ) {
                // There must be room for one more off-diagonal entry
                assert( this->a_row_indices[this->dimension] < this->max_off_diagonal_values );

                // Shift the stored entries at positions k and beyond one
                // place to the right to open up position 'k'
                for ( m = this->a_row_indices[this->dimension]; m > k; --m ) {
                    this->a_column_indices[m]      = this->a_column_indices[m - 1];
                    this->a_off_diagonal_values[m] = this->a_off_diagonal_values[m - 1];
                }

                for ( m = i + 1; m <= this->dimension; ++m ) {
                    ++( this->a_row_indices[m] );
                }

                this->a_off_diagonal_values[k] = value;
                this->a_column_indices[k] = j;
            }
        }
    }
}

/**********************************************************
 * Set the last entry of a matrix
 *
 * Preconditions:
 *   - The indices are less than the dimension of this matrix,
 *     and (i, j) is such that if (i', j') is the last
 *     off-diagonal entry in the array, then either:
 *        i > i', or
 *        i = i' and j > j'
 *
 *              th
 * Set the (i,j)   entry of this matrix to 'value'
 *
 * This is an O(n) algorithm in the dimension 'n', as the
 * row indices following row 'i' must be updated.
 **********************************************************/

void matrix_append( sparse_matrix_t *this, size_t i, size_t j, double value ) {
    size_t k;

    assert( this->dimension > i );
    assert( this->dimension > j );
    assert( this->a_row_indices[this->dimension] <= this->max_off_diagonal_values );

    if ( i == j ) {
        this->a_diagonal_values[i] = value;
    } else {
        assert( this->a_row_indices[i + 1] == this->a_row_indices[this->dimension] );
        assert( (this->a_row_indices[i] == this->a_row_indices[i + 1])
             || (this->a_column_indices[this->a_row_indices[i + 1] - 1] <= j ) );

        if ( (this->a_row_indices[i] < this->a_row_indices[i + 1])
          && (this->a_column_indices[this->a_row_indices[i + 1] - 1] == j) ) {
            // We are either:
            //     removing the last value, or
            //     replacing it.
            if ( value == 0.0 ) {
                for ( k = i + 1; k <= this->dimension; ++k ) {
                    --( this->a_row_indices[k] );
                }
            } else {
                this->a_off_diagonal_values[this->a_row_indices[i + 1] - 1] = value;
            }
        } else {
            if ( value != 0.0 ) {
                // There must be room for one more off-diagonal entry
                assert( this->a_row_indices[this->dimension] < this->max_off_diagonal_values );

                this->     a_column_indices[this->a_row_indices[i + 1]] = j;
                this->a_off_diagonal_values[this->a_row_indices[i + 1]] = value;

                for ( k = i + 1; k <= this->dimension; ++k ) {
                    ++( this->a_row_indices[k] );
                }
            }
        }
    }
}
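
/**********************************************************
 * Example (an illustrative sketch):  build the 3 x 3
 * matrix from the storage-scheme example above.  Because
 * the entries are supplied in row-major order, the cheaper
 * matrix_append can be used instead of matrix_set.
 *
 *     sparse_matrix_t M;
 *
 *     matrix_init( &M, 3, 2 );
 *
 *     matrix_append( &M, 0, 0, 1.0 );
 *     matrix_append( &M, 0, 2, 2.0 );
 *     matrix_append( &M, 1, 1, 3.0 );
 *     matrix_append( &M, 2, 0, 4.0 );
 *     matrix_append( &M, 2, 2, 5.0 );
 *
 *     matrix_destroy( &M );
 **********************************************************/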

/**********************************************************
 * Assigning a matrix the values of a second matrix
 *
 * Preconditions:
 *   - The dimensions of 'this' and 'A' are equal
 *   - The memory allocated for off-diagonal entries of
 *     'this' is at least as great as the number of off-
 *     diagonal entries of 'A'
 *
 * For each entry in 'this' matrix of dimension N,
 * assign it the value of the corresponding entry in 'A':
 *     this[i][j] <- A[i][j]       for i = 0, ..., N - 1
 *                                     j = 0, ..., N - 1
 **********************************************************/

void matrix_matrix_assign( sparse_matrix_t *this, sparse_matrix_t *A ) {
    size_t i;

    assert( this->dimension == A->dimension );
    assert( this->max_off_diagonal_values >= A->a_row_indices[A->dimension] );

    for ( i = 0; i < this->dimension; ++i ) {
        this->a_diagonal_values[i] = A->a_diagonal_values[i];
        this->a_row_indices[i]     = A->a_row_indices[i];
    }

    this->a_row_indices[this->dimension] = A->a_row_indices[this->dimension];

    for ( i = 0; i < this->a_row_indices[this->dimension]; ++i ) {
        this->a_column_indices[i]      = A->a_column_indices[i];
        this->a_off_diagonal_values[i] = A->a_off_diagonal_values[i];
    }
}

/**********************************************************
 * Assigning a matrix the values of a vector
 *
 * Preconditions:
 *   - The dimensions of 'this' and 'v' are equal
 *
 * Assign the diagonal entries of the matrix to be those
 * of the vector and set all off-diagonal entries to
 * zero.
 **********************************************************/

void matrix_vector_assign( sparse_matrix_t *this, vector_t *v ) {
    size_t i;

    assert( this->dimension == v->dimension );

    for ( i = 0; i < this->dimension; ++i ) {
        this->a_diagonal_values[i] = v->a_values[i];
        this->a_row_indices[i]     = 0;
    }

    this->a_row_indices[this->dimension] = 0;
}

/**********************************************************
 * Assigning a matrix a scalar
 *
 * Preconditions:
 *   - None.
 *
 * The diagonal entries of 'this' are set to 'value' and
 * all off-diagonal entries are set to zero:
 *     this[j][j] <- value          for j = 0, ..., N - 1
 **********************************************************/

void matrix_scalar_assign( sparse_matrix_t *this, double value ) {
    size_t i;

    for ( i = 0; i < this->dimension; ++i ) {
        this->a_diagonal_values[i] = value;
        this->a_row_indices[i]     = 0;
    }

    this->a_row_indices[this->dimension] = 0;
}

/**********************************************************
 * Scalar multiplication of a matrix
 *
 * Preconditions:
 *   - None.
 *
 * All entries in the matrix are multiplied by the given
 * scalar 'value'.  If the scalar is 0.0, all off-diagonal
 * entries are set to zero.
 *     this[i][j] <- value*this[i][j]
 *                                  for i = 0, ..., N - 1
 *                                      j = 0, ..., N - 1
 *
 *  - Currently, this does not check if an underflow
 *    occurs, in which case, this would require updating
 *    the data structure to remove any newly formed
 *    zero entries off of the diagonal.
 **********************************************************/

void matrix_scalar_multiply( sparse_matrix_t *this, double value ) {
    size_t i;

    for ( i = 0; i < this->dimension; ++i ) {
        this->a_diagonal_values[i] *= value;
    }

    if ( value == 0.0 ) {
        for ( i = 0; i <= this->dimension; ++i ) {
            this->a_row_indices[i] = 0;
        }
    } else {
        for ( i = 0; i < this->a_row_indices[this->dimension]; ++i ) {
            this->a_off_diagonal_values[i] *= value;
        }
    }
}

/**********************************************************
 * Adding a matrix to a matrix
 *
 * Preconditions:
 *   - The dimensions of 'this', 'A' and 'B' are equal
 *
 * Replace 'this' matrix by the sum of the matrices
 * 'A' and 'B':
 *     this[i][j] <- A[i][j] + B[i][j]
 *                                  for i = 0, ..., N - 1
 *                                      j = 0, ..., N - 1
 *
 * Note:
 *   - We do not check whether or not there are sufficient
 *     positions available for off-diagonal entries in
 *     'this' matrix.
 *   - There is no equivalent of A += B for matrices as
 *     this algorithm could potentially be either quadratic
 *     in run-time or require a third matrix anyway.
 **********************************************************/

void matrix_matrix_add( sparse_matrix_t *this, sparse_matrix_t *A, sparse_matrix_t *B ) {
    size_t i;
    size_t ka, kb;
    double value;

    assert( this->dimension == A->dimension );
    assert( this->dimension == B->dimension );

    // Clear 'this':  the diagonal is zeroed and all off-diagonal
    // entries are removed
    matrix_scalar_multiply( this, 0.0 );

    ka = kb = 0;

    for ( i = 0; i < A->dimension; ++i ) {
        this->a_diagonal_values[i] = A->a_diagonal_values[i] + B->a_diagonal_values[i];

        // Merge the off-diagonal entries of row 'i' of 'A' and of 'B',
        // each stored in increasing order of column index
        while ( ka < A->a_row_indices[i + 1] && kb < B->a_row_indices[i + 1] ) {
            if ( A->a_column_indices[ka] < B->a_column_indices[kb] ) {
                matrix_append( this, i, A->a_column_indices[ka], A->a_off_diagonal_values[ka] );
                ++ka;
            } else if ( A->a_column_indices[ka] == B->a_column_indices[kb] ) {
                value = A->a_off_diagonal_values[ka] + B->a_off_diagonal_values[kb];

                if ( value != 0.0 ) {
                    matrix_append( this, i, A->a_column_indices[ka], value );
                }

                ++ka;
                ++kb;
            } else {
                matrix_append( this, i, B->a_column_indices[kb], B->a_off_diagonal_values[kb] );
                ++kb;
            }
        }

        // Append whatever remains of row 'i' in either 'A' or 'B'
        while ( ka < A->a_row_indices[i + 1] ) {
            matrix_append( this, i, A->a_column_indices[ka], A->a_off_diagonal_values[ka] );
            ++ka;
        }

        while ( kb < B->a_row_indices[i + 1] ) {
            matrix_append( this, i, B->a_column_indices[kb], B->a_off_diagonal_values[kb] );
            ++kb;
        }
    }
}

/**********************************************************
 * Add a vector to the diagonal of a matrix
 *
 * Preconditions:
 *   - The dimensions of 'this' and 'v' are equal.
 *
 * The corresponding entries of the vector are added to
 * the entries on the diagonal of the matrix.
 **********************************************************/

void matrix_vector_add( sparse_matrix_t *this, vector_t *v ) {
    size_t i;

    assert( this->dimension == v->dimension );

    for ( i = 0; i < this->dimension; ++i ) {
        this->a_diagonal_values[i] += v->a_values[i];
    }
}

/**********************************************************
 * Add a scalar to the diagonal of a matrix
 *
 * Preconditions:
 *   - None.
 *
 * The scalar 'value' is added to each diagonal entry of
 * the matrix:
 *     this[j][j] <- this[j][j] + value
 *                                  for j = 0, ..., N - 1
 * The off-diagonal entries are left unchanged.
 **********************************************************/

void matrix_scalar_add( sparse_matrix_t *this, double value ) {
    size_t i;

    for ( i = 0; i < this->dimension; ++i ) {
        this->a_diagonal_values[i] += value;
    }
}

/**********************************************************
 * Determine if a matrix is a diagonal matrix
 *
 * Preconditions:
 *   - None.
 *
 * This is true if there are no off-diagonal entries.
 **********************************************************/

bool is_diagonal( sparse_matrix_t *this ) {
    return ( this->a_row_indices[this->dimension] == 0 );
}

/**********************************************************
 * Determine if a matrix is upper triangular
 *
 * Preconditions:
 *   - None.
 *
 * This is true if all off-diagonal entries have a column
 * index greater than or equal to the row index.
 **********************************************************/

bool is_upper_triangular( sparse_matrix_t *this ) {
    size_t i;
    bool return_value = true;

    for ( i = 1; i < this->dimension; ++i ) {
        if (
            this->a_row_indices[i] < this->a_row_indices[i + 1]
            && this->a_column_indices[this->a_row_indices[i]] < i
        ) {
            return_value = false;
            break;
        }
    }

    return return_value;
}

/**********************************************************
 * Determine if a matrix is lower triangular
 *
 * Preconditions:
 *   - None.
 *
 * This is true if all off-diagonal entries have a column
 * index less than or equal to the row index.
 **********************************************************/

bool is_lower_triangular( sparse_matrix_t *this ) {
    size_t i;
    bool return_value = true;

    for ( i = 1; i < this->dimension; ++i ) {
        if (
            this->a_row_indices[i - 1] < this->a_row_indices[i]
            && this->a_column_indices[this->a_row_indices[i] - 1] >= i
        ) {
            return_value = false;
            break;
        }
    }

    return return_value;
}

/**********************************************************
 * Determine if a matrix is symmetric
 *
 * Preconditions:
 *   - None.
 *
 * This is potentially expensive if the matrix is less
 * sparse, as it is necessary to check that each off-
 * diagonal entry has a corresponding entry in transposed
 * position.
 *
 *     this[i][j] = this[j][i]      for i = 0, ..., N - 1
 *                                      j = 0, ..., N - 1
 **********************************************************/

bool is_symmetric( sparse_matrix_t *A ) {
    size_t i, j;
    bool symmetric = true;

    j = 0;

    for ( i = 0; symmetric && (i < A->dimension); ++i ) {
        while ( j < A->a_row_indices[i + 1] ) {
            if ( A->a_off_diagonal_values[j] != matrix_get( A, A->a_column_indices[j], i ) ) {
                symmetric = false;
                break;
            }

            ++j;
        }
    }

    return symmetric;
}

/**********************************************************
 * Determine if a matrix is skew-symmetric
 *
 * Preconditions:
 *   - None.
 *
 * This is potentially expensive if the matrix is less
 * sparse, as it is necessary to check that each off-
 * diagonal entry has a corresponding entry in transposed
 * position.  Separately, we check that all the diagonal
 * entries are zero.
 *
 *     this[i][j] = -this[j][i]     for i = 0, ..., N - 1
 *                                      j = 0, ..., N - 1
 **********************************************************/

bool is_skew_symmetric( sparse_matrix_t *A ) {
    size_t i, j;
    bool return_value = true;

    for ( i = 0; i < A->dimension; ++i ) {
        if ( A->a_diagonal_values[i] != 0.0 ) {
            return_value = false;
            break;
        }
    }

    j = 0;

    for ( i = 0; return_value && i < A->dimension; ++i ) {
        while ( j < A->a_row_indices[i + 1] ) {
            if ( A->a_off_diagonal_values[j] != -matrix_get( A, A->a_column_indices[j], i ) ) {
                return_value = false;
                break;
            }

            ++j;
        }
    }

    return return_value;
}

/**********************************************************
 * Print a matrix
 *
 * Preconditions:
 *   - None
 *
 * Print a matrix so that it is valid input for one
 * of four programming languages, including:
 *   - a Matlab matrix
 *
 * Currently, Maple, Mathematica and C are not
 * implemented...
 **********************************************************/

int matrix_printf( sparse_matrix_t *this, char *str, output_t language ) {
    int return_value;
    size_t i, j, k;

    assert( language == MATLAB );

    if ( this->dimension == 0 ) {
        return_value = printf( "[];" );
    } else {
        k = 0;

        return_value = printf( "[\n " );
        return_value += printf( str, this->a_diagonal_values[0] );

        for ( j = 1; j < this->dimension; ++j ) {
            return_value += printf( " " );

            if ( (k < this->a_row_indices[1]) && (j == this->a_column_indices[k]) ) {
                return_value += printf( str, this->a_off_diagonal_values[k] );
                ++k;
            } else {
                return_value += printf( str, 0.0 );
            }
        }

        for ( i = 1; i < this->dimension; ++i ) {
            return_value += printf( "\n" );

            for ( j = 0; j < this->dimension; ++j ) {
                return_value += printf( " " );

                if ( i == j ) {
                    return_value += printf( str, this->a_diagonal_values[i] );
                } else {
                    if ( (k < this->a_row_indices[i + 1]) && (j == this->a_column_indices[k]) ) {
                        return_value += printf( str, this->a_off_diagonal_values[k] );
                        ++k;
                    } else {
                        return_value += printf( str, 0.0 );
                    }
                }
            }
        }

        return_value += printf( "];" );
    }

    return return_value;
}

/**********************************************************
 * Print the density of the matrix
 *
 * Preconditions:
 *   - None
 *
 * Print matrix as a series of dashes or dots to represent
 * zeros, and 'x' to represent non-zero entries.
 *
 * For example, the matrix
 *     0      0.895  0      0      0      0
 *     0.895  0.082  0      0.608  0.630  0
 *     0      0      0      0.514  0      0
 *     0      0.608  0.514  0.420  0.331  0.691
 *     0      0.630  0      0.331  0      0
 *     0      0      0      0.691  0      0.135
 * would be printed as
 *       . x - - - -
 *       x x - x x -
 *       | | . x - -
 *       | x x x x x
 *       | x | x . -
 *       | | | x | x
 **********************************************************/

int density_print( double d ) {
    int return_value;

    if ( d == 0.0 ) {
        return_value = printf( " ." );
    } else if ( d > 0.0 ) {
        return_value = printf( " x" );
    } else {
        return_value = printf( " o" );
    }

    return return_value;
}

int matrix_density_print( sparse_matrix_t *this ) {
    int return_value = 0;
    size_t i, j, k;

    if ( this->dimension != 0 ) {
        k = 0;

        return_value = density_print( this->a_diagonal_values[0] );

        for ( j = 1; j < this->dimension; ++j ) {
            if ( (k < this->a_row_indices[1]) && (j == this->a_column_indices[k]) ) {
                return_value += density_print( this->a_off_diagonal_values[k] );
                ++k;
            } else {
                return_value += printf( " -" );
            }
        }

        for ( i = 1; i < this->dimension; ++i ) {
            return_value += printf( "\n" );

            for ( j = 0; j < this->dimension; ++j ) {
                if ( i == j ) {
                    return_value += density_print( this->a_diagonal_values[i] );
                } else {
                    if ( (k < this->a_row_indices[i + 1]) && (j == this->a_column_indices[k]) ) {
                        return_value += density_print( this->a_off_diagonal_values[k] );
                        ++k;
                    } else if ( i < j ) {
                        return_value += printf( " -" );
                    } else {
                        return_value += printf( " |" );
                    }
                }
            }
        }

        return_value += printf( "\n" );
    }

    return return_value;
}

/**********************************************************
 * Print a matrix data structure
 *
 * Preconditions:
 *   - None
 *
 * Print a sparse matrix by printing the various arrays.
 * For example, this is a matrix and its display:
 *
 * [0   4   8
 *  1   5   9
 *  2   6  10];
 *
 *            Dimension: 3 x 3
 * Off-diagonal entries: 6 (max 6)
 *    Diagonal entries: 0.000000 5.000000 10.000000
 *         Row indices: 0 2 4 6
 *      Column indices: 1 2 0 2 0 1
 * Off-diagonal values: 4.000000 8.000000 1.000000 9.000000 2.000000 6.000000
 **********************************************************/

int matrix_data_structure_printf( sparse_matrix_t *this, char *str ) {
    size_t i;

    printf(   "           Dimension: %d x %d", (int)( this->dimension ), (int)( this->dimension ) );
    printf( "\nOff-diagonal entries: %d (max %d)", (int)( this->a_row_indices[this->dimension] ), (int)( this->max_off_diagonal_values ) );

    printf( "\n    Diagonal entries:" );

    for ( i = 0; i < this->dimension; ++i ) {
        printf( " " );
        printf( str, this->a_diagonal_values[i] );
    }

    printf( "\n         Row indices:" );

    for ( i = 0; i <= this->dimension; ++i ) {
        printf( " %d", (int)( this->a_row_indices[i] ) );
    }

    printf( "\n      Column indices:" );

    for ( i = 0; i < this->a_row_indices[this->dimension]; ++i ) {
        printf( " %d", (int)( this->a_column_indices[i] ) );
    }

    printf( "\n Off-diagonal values:" );

    for ( i = 0; i < this->a_row_indices[this->dimension]; ++i ) {
        printf( " " );
        printf( str, this->a_off_diagonal_values[i] );
    }

    printf( "\n" );

    return 0;
}


/**********************************************************
 * Find a matrix-vector product
 *
 * Preconditions:
 *   - The dimensions of A and u are equal
 *
 * Calculate the matrix-vector product:
 *            n - 1
 *            -----
 *             \
 * this[i] =    )   A[i][j] u[j]    for i = 0, ..., N - 1
 *             /
 *            -----
 *            j = 0
 **********************************************************/

void matrix_vector_multiply( vector_t *this, sparse_matrix_t *A, vector_t *u ) {
    size_t i, j;
    mixed_sum_t sum;

    assert( this->dimension == A->dimension );
    assert( this->dimension == u->dimension );

    j = 0;

    for ( i = 0; i < this->dimension; ++i ) {
        mixed_sum_init( &sum, A->a_diagonal_values[i]*u->a_values[i] );

        for ( ; j < A->a_row_indices[i + 1]; ++j ) {
            mixed_sum( &sum,
                A->a_off_diagonal_values[j]*u->a_values[A->a_column_indices[j]] );
        }

        this->a_values[i] = mixed_sum_value( &sum );
    }
}
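
/**********************************************************
 * Example (an illustrative sketch):  multiplying the
 * 3 x 3 matrix M from the storage-scheme example,
 *
 *     [ 1  0  2
 *       0  3  0
 *       4  0  5 ]
 *
 * by the vector (1, 1, 1) gives (3, 3, 9):
 *
 *     vector_t u, y;
 *
 *     vector_init_scalar( &u, 3, 1.0 );
 *     vector_init( &y, 3 );
 *
 *     matrix_vector_multiply( &y, &M, &u );   // y <- M u = (3, 3, 9)
 *
 *     vector_destroy( &u );
 *     vector_destroy( &y );
 **********************************************************/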

/**********************************************************
 * Find a matrix-transposed--vector product
 *
 * Preconditions:
 *   - The dimensions of A and u are equal
 *
 * Calculate the matrix-vector product:
 *            n - 1
 *            -----
 *             \
 * this[i] =    )   A[j][i] u[j]    for i = 0, ..., N - 1
 *             /
 *            -----
 *            j = 0
 **********************************************************/

void matrix_transpose_vector_multiply( vector_t *this, sparse_matrix_t *A, vector_t *u ) {
    size_t i, j;

    assert( this->dimension == A->dimension );
    assert( this->dimension == u->dimension );

    for ( i = 0; i < this->dimension; ++i ) {
        this->a_values[i] = A->a_diagonal_values[i]*u->a_values[i];
    }

    j = 0;

    for ( i = 0; i < this->dimension; ++i ) {
        for ( ; j < A->a_row_indices[i + 1]; ++j ) {
            this->a_values[A->a_column_indices[j]] +=
                A->a_off_diagonal_values[j]*u->a_values[i];
        }
    }
}

/**********************************************************
 * Solve Ax = b for x when A is a diagonal matrix
 *
 * Preconditions:
 *   - The dimensions of A and b are equal
 *   - The matrix A is diagonal
 *
 * Calculate:
 *    this[i] = b[i]/A[i][i]        for i = 0, ..., N - 1
 **********************************************************/

void diagonal_solve( vector_t *this, sparse_matrix_t *A, vector_t *b ) {
    size_t i;

    assert( this->dimension == A->dimension );
    assert( this->dimension == b->dimension );
    assert( A->a_row_indices[A->dimension] == 0 );

    for ( i = 0; i < this->dimension; ++i ) {
        this->a_values[i] = b->a_values[i]/A->a_diagonal_values[i];
    }
}

/**********************************************************
 * Solve Ax = b for x when A is an upper-triangular matrix
 *
 * Preconditions:
 *   - The dimensions of A and b are equal
 *   - The matrix A is upper-triangular
 *
 * Perform backward substitution:
 *
 *                      N - 1
 *                      -----
 *                       \
 *            b[i] -      )     A[i][j] this[j]
 *                       /
 *                      -----
 *                    j = i + 1
 *  this[i] = ----------------------------------
 *                       A[i][i]
 *
 *                                  for i = N - 1, ..., 0
 **********************************************************/

void backward_substitute( vector_t *this, sparse_matrix_t *A, vector_t *b ) {
    size_t i, j;
    mixed_sum_t sum;

    assert( this->dimension == A->dimension );
    assert( this->dimension == b->dimension );

    j = A->a_row_indices[A->dimension] - 1;
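    // Note: both i and j are unsigned (size_t) and counted downward; when
    // either would drop below zero it wraps around to a very large value,
    // so the tests 'i < this->dimension' and
    // 'j < A->a_row_indices[this->dimension]' terminate the descending loops.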

    for ( i = this->dimension - 1; i < this->dimension; --i ) {
        // Verify that the matrix is upper triangular
        assert( (A->a_row_indices[i] == A->a_row_indices[i + 1])
             || (A->a_column_indices[A->a_row_indices[i]] > i) );

        mixed_sum_init( &sum, b->a_values[i] );
        this->a_values[i] = b->a_values[i];

        for ( ; j < A->a_row_indices[this->dimension] && j >= A->a_row_indices[i]; --j ) {
            mixed_sum( &sum,
                -A->a_off_diagonal_values[j]*this->a_values[A->a_column_indices[j]] );
        }

        this->a_values[i] = mixed_sum_value( &sum )/A->a_diagonal_values[i];
    }
}
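
/**********************************************************
 * Backward-substitution sketch (illustration only)
 *
 * A minimal dense sketch of the recurrence above, assuming
 * a full n x n upper-triangular matrix stored row-major in
 * 'a' with a non-zero diagonal.  The sparse routine above
 * implements the same recurrence but walks only the stored
 * off-diagonal entries, in reverse storage order, and
 * compensates the sums.  The helper is not part of the
 * library's interface.
 **********************************************************/

static void backward_substitute_dense_sketch( double *x, double *a, double *b, size_t n ) {
    size_t i, j;
    double sum;

    // i runs from n - 1 down to 0; the 'i-- > 0' test avoids
    // relying on unsigned wrap-around
    for ( i = n; i-- > 0; ) {
        sum = b[i];

        for ( j = i + 1; j < n; ++j ) {
            sum -= a[i*n + j]*x[j];
        }

        x[i] = sum/a[i*n + i];
    }
}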

/**********************************************************
 * Solve Ax = b for x when A is a lower-triangular matrix
 *
 * Preconditions:
 *   - The dimensions of A and b are equal
 *   - The matrix A is lower-triangular
 *
 * Perform forward substitution:
 *
 *                      i - 1
 *                      -----
 *                       \
 *            b[i] -      )     A[i][j] this[j]
 *                       /
 *                      -----
 *                      j = 0
 *  this[i] = ----------------------------------
 *                       A[i][i]
 *
 *                                  for i = 0, ..., N - 1
 **********************************************************/

void forward_substitute( vector_t *this, sparse_matrix_t *A, vector_t *b ) {
    size_t i, j;
    mixed_sum_t sum;

    assert( this->dimension == A->dimension );
    assert( this->dimension == b->dimension );

    j = 0;

    for ( i = 0; i < this->dimension; ++i ) {
        // Verify that the matrix is lower triangular
        assert( (A->a_row_indices[i] == A->a_row_indices[i + 1])
             || (A->a_column_indices[A->a_row_indices[i + 1] - 1] < i) );

        mixed_sum_init( &sum, b->a_values[i] );
        this->a_values[i] = b->a_values[i];


        for ( ; j < A->a_row_indices[i + 1]; ++j ) {
            mixed_sum( &sum,
                -A->a_off_diagonal_values[j]*this->a_values[A->a_column_indices[j]] );
        }

        this->a_values[i] = mixed_sum_value( &sum )/A->a_diagonal_values[i];
    }
}

/**********************************************************
 * Solve Ax = b using the Jacobi method
 *
 * Preconditions:
 *   - The dimensions of A, b and tmp are equal
 *   - The diagonal entries of A are non-zero
 *
 * Perform the Jacobi method for a maximum of
 * 'max_iterations' iterations
 *  - The vector 'tmp' is for intermediate vectors
 *  - Return if the difference between two consecutive
 *    approximations is less than 'epsilon_step'
 *  - The return value is the number of iterations
 *    performed
 *  - If the algorithm failed to converge, the number of
 *    iterations is 'max_iterations' + 1
 **********************************************************/

size_t jacobi( vector_t *this, sparse_matrix_t *A, vector_t *b,
             vector_t *tmp, size_t max_iterations, double epsilon_step ) {
    size_t i, j, k;
    mixed_sum_t sum;
    double error;

    assert( this->dimension ==   A->dimension );
    assert( this->dimension ==   b->dimension );
    assert( this->dimension == tmp->dimension );

    for ( i = 0; i < b->dimension; ++i ) {
        this->a_values[i] = b->a_values[i]/A->a_diagonal_values[i];
    }

    // Note: the intermediate vector 'tmp' does not need to be initialized;
    // every entry of it is assigned on each iteration before it is read.

    for ( k = 1; k <= max_iterations; ++k ) {
        for ( i = 0; i < A->dimension; ++i ) {
            mixed_sum_init( &sum, b->a_values[i] );

            for ( j = A->a_row_indices[i]; j < A->a_row_indices[i + 1]; ++j ) {
                mixed_sum( &sum,
                    -A->a_off_diagonal_values[j]*this->a_values[A->a_column_indices[j]] );
            }

            assert( A->a_diagonal_values[i] != 0.0d );
            tmp->a_values[i] = mixed_sum_value( &sum )/A->a_diagonal_values[i];
        }

        error = vector_distance( this, tmp, TWO_NORM );

        if ( isnan( error ) ) {
            k = max_iterations;
        } else if ( error < epsilon_step ) {
            break;
        } else {
            vector_vector_assign( this, tmp );
        }
    }

    return k;
}
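
/**********************************************************
 * Jacobi-sweep sketch (illustration only)
 *
 * A minimal dense sketch of one pass of the iteration
 * above, assuming a full n x n row-major matrix 'a' with a
 * non-zero diagonal.  Every component of the next iterate
 * 'x_new' is computed only from the previous iterate 'x',
 * which is what distinguishes Jacobi from the Gauss-Seidel
 * method below.  The helper is not part of the library's
 * interface.
 **********************************************************/

static void jacobi_sweep_dense_sketch( double *x_new, double *a, double *b,
                                       double *x, size_t n ) {
    size_t i, j;
    double sum;

    for ( i = 0; i < n; ++i ) {
        sum = b[i];

        for ( j = 0; j < n; ++j ) {
            if ( j != i ) {
                sum -= a[i*n + j]*x[j];
            }
        }

        x_new[i] = sum/a[i*n + i];
    }
}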

/**********************************************************
 * Solve Ax = b using the Gauss-Seidel method
 *
 * Preconditions:
 *   - The dimensions of A, b and tmp are equal
 *   - The diagonal entries of A are non-zero
 *
 * Perform the Gauss-Seidel method for a maximum of
 * 'max_iterations' iterations
 *  - The vector 'tmp' is for intermediate vectors
 *  - Return if the difference between two consecutive
 *    approximations is less than 'epsilon_step'
 *  - The return value is the number of iterations
 *    performed
 *  - If the algorithm failed to converge, the number of
 *    iterations is 'max_iterations' + 1
 **********************************************************/

size_t gauss_seidel( vector_t *this, sparse_matrix_t *A, vector_t *b,
             vector_t *tmp, size_t max_iterations, double epsilon_step ) {
    size_t i, j, k;
    mixed_sum_t sum;
    double error;

    assert( this->dimension ==   A->dimension );
    assert( this->dimension ==   b->dimension );
    assert( this->dimension == tmp->dimension );

    for ( i = 0; i < b->dimension; ++i ) {
        this->a_values[i] = b->a_values[i]/A->a_diagonal_values[i];
    }

    for ( k = 1; k <= max_iterations; ++k ) {
        vector_vector_assign( tmp, this );

        for ( i = 0; i < A->dimension; ++i ) {
            mixed_sum_init( &sum, b->a_values[i] );

            for ( j = A->a_row_indices[i]; j < A->a_row_indices[i + 1]; ++j ) {
                mixed_sum( &sum,
                    -A->a_off_diagonal_values[j]
                        *this->a_values[A->a_column_indices[j]] );
            }

            assert( A->a_diagonal_values[i] != 0.0d );
            this->a_values[i] = mixed_sum_value( &sum )/A->a_diagonal_values[i];
        }

        error = vector_distance( this, tmp, TWO_NORM );

        if ( isnan( error ) ) {
            k = max_iterations;
        } else if ( error < epsilon_step ) {
            break;
        }
    }

    return k;
}
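
/**********************************************************
 * Gauss-Seidel-sweep sketch (illustration only)
 *
 * A minimal dense sketch of one Gauss-Seidel sweep over a
 * full n x n row-major matrix 'a' with a non-zero
 * diagonal.  Unlike the Jacobi sketch, the sweep updates
 * 'x' in place, so later components already use the
 * freshly computed earlier ones, which is exactly what the
 * sparse routine above does by writing into
 * this->a_values[i] inside its loop.  The helper is not
 * part of the library's interface.
 **********************************************************/

static void gauss_seidel_sweep_dense_sketch( double *x, double *a, double *b, size_t n ) {
    size_t i, j;
    double sum;

    for ( i = 0; i < n; ++i ) {
        sum = b[i];

        for ( j = 0; j < n; ++j ) {
            if ( j != i ) {
                sum -= a[i*n + j]*x[j];
            }
        }

        x[i] = sum/a[i*n + i];
    }
}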

/**********************************************************
 * Solve Ax = b using the steepest-descent method
 *
 * Preconditions:
 *   - A is symmetric and positive definite
 *   - The dimensions of A, b, r and p are equal
 *   - The diagonal entries of A are non-zero
 *
 * Perform the steepest-descent method for a maximum of
 * 'max_iterations' iterations
 *  - The vectors 'r' and 'p' are for intermediate vectors
 *  - Return if the difference between two consecutive
 *    approximations is less than 'epsilon_step'
 *  - The return value is the number of iterations
 *    performed
 *  - If the algorithm failed to converge, the number of
 *    iterations is 'max_iterations' + 1
 *
 * The algorithm:
 *     Let r <- b - Ax
 *         p <- Ar
 *     Iterate:
 *                  <r, r>
 *         alpha <- ------
 *                  <p, r>
 *         x <- x + alpha*r
 *         r <- r - alpha*p
 *         p <- A*r
 *
 * Reference:  Yousef Saad
 *             Iterative Methods for Sparse Linear Systems
 *             SIAM, Philadelphia, 2003, p.138
 **********************************************************/

size_t steepest_descent( vector_t *this, sparse_matrix_t *A, vector_t *b,
                         vector_t *r, vector_t *p, size_t max_iterations, double epsilon_step ) {
    size_t i, k;
    double alpha, norm_r;

    assert( this->dimension == A->dimension );
    assert( this->dimension == b->dimension );
    assert( this->dimension == r->dimension );
    assert( this->dimension == p->dimension );

    // Initial approximation of the solution
    for ( i = 0; i < b->dimension; ++i ) {
        this->a_values[i] = b->a_values[i]/A->a_diagonal_values[i];
    }

    matrix_vector_multiply( r, A, this );
    vector_scalar_multiply( r, -1.0d );
    vector_vector_add( r, b );
    matrix_vector_multiply( p, A, r );

    for ( k = 1; k <= max_iterations; ++k ) {
        alpha = inner_product( r, r )/inner_product( p, r );
        vector_vector_scalar_add( this, r, alpha );

        norm_r = vector_norm( r, TWO_NORM );

        if ( isnan( norm_r ) ) {
            k = max_iterations;
        } else if ( fabs( alpha )*norm_r < epsilon_step ) {
            break;
        }

        vector_vector_scalar_add( r, p, -alpha );
        matrix_vector_multiply( p, A, r );
    }

    return k;
}
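
/**********************************************************
 * Steepest-descent-step sketch (illustration only)
 *
 * A minimal dense sketch of one step of the iteration
 * above for a symmetric positive-definite n x n row-major
 * matrix 'a':  alpha = <r, r>/<Ar, r>,  x <- x + alpha*r,
 * r <- r - alpha*Ar.  The temporary 'ar' must hold n
 * doubles; the helper is not part of the library's
 * interface.
 **********************************************************/

static void steepest_descent_step_dense_sketch( double *x, double *r, double *ar,
                                                double *a, size_t n ) {
    size_t i, j;
    double rr = 0.0, arr = 0.0, alpha;

    for ( i = 0; i < n; ++i ) {
        // ar = A*r
        ar[i] = 0.0;

        for ( j = 0; j < n; ++j ) {
            ar[i] += a[i*n + j]*r[j];
        }

        // Accumulate <r, r> and <Ar, r>
        rr  += r[i]*r[i];
        arr += ar[i]*r[i];
    }

    alpha = rr/arr;

    for ( i = 0; i < n; ++i ) {
        x[i] += alpha*r[i];
        r[i] -= alpha*ar[i];
    }
}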

/**********************************************************
 * Solve Ax = b using the minimum-residual method
 *
 * Preconditions:
 *   - A is positive definite
 *   - The dimensions of A, b, r and p are equal
 *   - The diagonal entries of A are non-zero
 *
 * Perform the minimum-residual method for a maximum of
 * 'max_iterations' iterations
 *  - The vectors 'r' and 'p' are for intermediate vectors
 *  - Return if the difference between two consecutive
 *    approximations is less than 'epsilon_step'
 *  - The return value is the number of iterations
 *    performed
 *  - If the algorithm failed to converge, the number of
 *    iterations is 'max_iterations' + 1
 *
 * The algorithm:
 *     Let r <- b - Ax
 *         p <- Ar
 *     Iterate:
 *                  <r, r>
 *         alpha <- ------
 *                  <p, p>
 *         x <- x + alpha*r
 *         r <- r - alpha*p
 *         p <- A*r
 *
 * Reference:  Yousef Saad
 *             Iterative Methods for Sparse Linear Systems
 *             SIAM, Philadelphia, 2003, p.140
 **********************************************************/

size_t minimal_residual( vector_t *this, sparse_matrix_t *A, vector_t *b,
                       vector_t *r, vector_t *p, size_t max_iterations, double epsilon_step ) {
    size_t i, k;
    double alpha, norm_r;

    assert( this->dimension == A->dimension );
    assert( this->dimension == b->dimension );
    assert( this->dimension == r->dimension );
    assert( this->dimension == p->dimension );

    for ( i = 0; i < b->dimension; ++i ) {
        this->a_values[i] = b->a_values[i]/A->a_diagonal_values[i];
    }

    matrix_vector_multiply( r, A, this );
    vector_scalar_multiply( r, -1.0d );
    vector_vector_add( r, b );

    matrix_vector_multiply( p, A, r );

    for ( k = 1; k <= max_iterations; ++k ) {
        alpha = inner_product( r, r )/inner_product( p, p );
        vector_vector_scalar_add( this, r, alpha );

        norm_r = vector_norm( r, TWO_NORM );

        if ( isnan( norm_r ) ) {
            k = max_iterations;
        } else if ( fabs( alpha )*norm_r < epsilon_step ) {
            break;
        }

        vector_vector_scalar_add( r, p, -alpha );
        matrix_vector_multiply( p, A, r );
    }

    return k;
}

/**********************************************************
 * Solve Ax = b using
 *        the residual-norm steepest-descent method
 *
 * Preconditions:
 *   - A is non-singular
 *   - The dimensions of A, b, r, v and Av are equal
 *   - The diagonal entries of A are non-zero
 *
 * Perform the residual-norm steepest-descent method for
 * a maximum of 'max_iterations' iterations
 *  - The vectors 'r', 'v' and 'Av' are for intermediate vectors
 *  - Return if the difference between two consecutive
 *    approximations is less than 'epsilon_step'
 *  - The return value is the number of iterations
 *    performed
 *  - If the algorithm failed to converge, the number of
 *    iterations is 'max_iterations' + 1
 *
 * The algorithm:
 *     Let r <- b - Ax
 *     Iterate:
 *               T
 *         v <- A *r
 *                          2
 *                  || v ||
 *                         2
 *         alpha = ----------
 *                          2
 *                 || Av ||
 *                         2
 *         x <- x + alpha*v
 *         r <- r - alpha*A*v
 *
 * Reference:  Yousef Saad
 *             Iterative Methods for Sparse Linear Systems
 *             SIAM, Philadelphia, 2003, p.142
 **********************************************************/

size_t residual_norm_steepest_descent(
          vector_t *this, sparse_matrix_t *A, vector_t *b,
          vector_t *r, vector_t *v, vector_t *Av,
          size_t max_iterations, double epsilon_step ) {
    size_t i, k;
    double alpha, norm_v;

    assert( this->dimension ==  A->dimension );
    assert( this->dimension ==  b->dimension );
    assert( this->dimension ==  r->dimension );
    assert( this->dimension ==  v->dimension );
    assert( this->dimension == Av->dimension );

    for ( i = 0; i < b->dimension; ++i ) {
        this->a_values[i] = b->a_values[i]/A->a_diagonal_values[i];
    }

    matrix_vector_multiply( r, A, this );
    vector_scalar_multiply( r, -1.0d );
    vector_vector_add( r, b );

    for ( k = 1; k <= max_iterations; ++k ) {
        matrix_transpose_vector_multiply( v, A, r );
        matrix_vector_multiply( Av, A, v );
        alpha = inner_product( v, v )/inner_product( Av, Av );

        vector_vector_scalar_add( this, v,  alpha );

        // The step taken this iteration is alpha*v, so its size is |alpha|*||v||
        norm_v = vector_norm( v, TWO_NORM );

        if ( isnan( norm_v ) ) {
            k = max_iterations;
        } else if ( fabs( alpha )*norm_v < epsilon_step ) {
            break;
        }

        vector_vector_scalar_add( r, Av, -alpha );
    }

    return k;
}

/**********************************************************
 * Find the LU decomposition of the matrix
 *
 * Preconditions:
 *   - The diagonal entries are non-zero
 *       - No permutation matrix is calculated
 *   - The dimensions of A, L and U are equal
 *   - The matrices L and U have sufficient memory to
 *     store the off-diagonal entries of the decomposition
 *
 * If the matrix is diagonally dominant, this method will
 * certainly be stable.
 *
 * There is no guarantee that the LU decomposition of a
 * sparse matrix will itself be sparse.
 *   - If a matrix A is tridiagonal, so are L and U
 *   - If a matrix A is band-diagonal, so are L and U
 *
 * This algorithm can be particularly slow if the matrix
 * A is not band-diagonal with a reasonably tight band.
 **********************************************************/

void lu( sparse_matrix_t *A, sparse_matrix_t *L, sparse_matrix_t *U ) {
    bool found, diagonal_used;
    size_t i, j;
    size_t *a_first_column_indices;
    size_t min_column, row;
    mixed_sum_t sum;
    double value;

    assert( A->dimension == L->dimension );
    assert( A->dimension == U->dimension );

    // We will temporarily use the diagonal entries of L to store indices
    assert( sizeof( size_t ) <= sizeof( double ) );
    a_first_column_indices = (size_t *)L->a_diagonal_values;

    matrix_scalar_multiply( L, 0.0d );
    matrix_scalar_multiply( U, 0.0d );

    for ( i = 0; i < A->dimension; ++i ) {
        for ( j = 0; j < i; ++j ) {
            a_first_column_indices[j] = U->a_row_indices[j];
        }

        a_first_column_indices[i] = A->a_row_indices[i];

        diagonal_used = false;

        while ( true ) {
            // Find the smallest remaining column index that contributes a
            // (potentially) non-zero value to this row of the factorization
            found = false;

            if ( a_first_column_indices[i] < A->a_row_indices[i + 1] ) {
                min_column = A->a_column_indices[a_first_column_indices[i]];
                found = true;
            }

            for ( j = L->a_row_indices[i]; j < L->a_row_indices[i + 1]; ++j ) {
                row = L->a_column_indices[j];

                if ( a_first_column_indices[row] < U->a_row_indices[row + 1] ) {
                    // There may be something in this row...

                    if ( !found || (
                        U->a_column_indices[a_first_column_indices[row]]
                            < min_column
                    ) ) {
                        min_column
                            = U->a_column_indices[a_first_column_indices[row]];
                        found = true;
                    }
                }
            }

            if ( !diagonal_used && (A->a_diagonal_values[i] != 0.0d) ) {
                if ( !found || (i < min_column) ) {
                    min_column = i;
                    found = true;
                }
            }

            if ( found ) {
                if ( min_column == i ) {
                    mixed_sum_init( &sum, A->a_diagonal_values[min_column] );
                    diagonal_used = true;
                } else {
                    if ( (a_first_column_indices[i] < A->a_row_indices[i + 1])
                      && (A->a_column_indices[a_first_column_indices[i]] == min_column) ) {
                        mixed_sum_init( &sum, A->a_off_diagonal_values[a_first_column_indices[i]] );
                        ++( a_first_column_indices[i] );
                    } else {
                        mixed_sum_init( &sum, 0.0d );
                    }
                }

                for ( j = L->a_row_indices[i]; j < L->a_row_indices[i + 1]; ++j ) {
                    row = L->a_column_indices[j];

                    if ( (a_first_column_indices[row] < U->a_row_indices[row + 1])
                      && (U->a_column_indices[a_first_column_indices[row]] == min_column) ) {
                        mixed_sum( &sum,
                                   -U->a_off_diagonal_values[a_first_column_indices[row]]*L->a_off_diagonal_values[j] );
                        ++( a_first_column_indices[row] );
                    }
                }

                value = mixed_sum_value( &sum );

                if ( value != 0.0d ) {
                    // If we are below the diagonal, update L, otherwise, update U

                    if ( min_column < i ) {
                        matrix_append( L, i, min_column, value/U->a_diagonal_values[min_column] );
                    } else {
                        matrix_append( U, i, min_column, value );
                    }
                }
            } else {
                break;
            }
        }
    }

    matrix_scalar_add( L, 1.0d );
}
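
/**********************************************************
 * Dense LU sketch (illustration only)
 *
 * A minimal dense sketch of the same factorization
 * (Doolittle form: unit lower-triangular L, no pivoting)
 * for a full n x n row-major matrix.  As in the routine
 * above, every pivot u[i][i] is assumed non-zero.  The
 * sparse routine computes the same entries, but stores and
 * visits only the non-zero positions and compensates the
 * sums.  The helper is not part of the library's
 * interface.
 **********************************************************/

static void lu_dense_sketch( double *l, double *u, double *a, size_t n ) {
    size_t i, j, k;

    // Start from L = I and U = 0
    for ( i = 0; i < n; ++i ) {
        for ( j = 0; j < n; ++j ) {
            l[i*n + j] = (i == j) ? 1.0 : 0.0;
            u[i*n + j] = 0.0;
        }
    }

    for ( i = 0; i < n; ++i ) {
        // Row i of U:  u[i][j] = a[i][j] - sum_{k < i} l[i][k] u[k][j]
        for ( j = i; j < n; ++j ) {
            u[i*n + j] = a[i*n + j];

            for ( k = 0; k < i; ++k ) {
                u[i*n + j] -= l[i*n + k]*u[k*n + j];
            }
        }

        // Column i of L:  l[j][i] = (a[j][i] - sum_{k < i} l[j][k] u[k][i])/u[i][i]
        for ( j = i + 1; j < n; ++j ) {
            l[j*n + i] = a[j*n + i];

            for ( k = 0; k < i; ++k ) {
                l[j*n + i] -= l[j*n + k]*u[k*n + i];
            }

            l[j*n + i] /= u[i*n + i];
        }
    }
}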

/**********************************************************
 * Generate a random step
 *
 * Preconditions:
 *   - The argument p has been initialized.
 *   - The parameter lambda is positive
 *
 * Generate a random step drawn from an exponential
 * distribution with parameter lambda.  Walking along an
 * array in steps drawn this way marks positions with
 * density 1 - exp(-lambda), which is approximately lambda
 * when lambda is small; corrected_density() below chooses
 * lambda so that the marked density matches the requested
 * density.
 **********************************************************/

size_t random_step( double *p, double lambda ) {
    double p_old;
    size_t step;

    assert( lambda > 0.0d );

    if ( *p == 0.0d ) {
        *p = -log(1.0d - drand48())/lambda;
        step = (size_t)floor( *p );
    } else {
        p_old = floor( *p );

        do {
            *p += -log(1.0d - drand48())/lambda;
        } while ( p_old == floor( *p ) );

        step = (size_t)floor( *p - p_old );
        assert( step > 0 );
    }

    return step;
}
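
/**********************************************************
 * Random-step sketch (illustration only)
 *
 * A minimal sketch of random_step in isolation: walk along
 * 100000 positions in exponentially distributed jumps with
 * an arbitrary illustrative rate lambda = 0.25 and count
 * how many positions are landed on.  The expected fraction
 * is 1 - exp(-lambda) rather than lambda itself, which is
 * why the correction below is needed.  The helper is not
 * part of the library's interface.
 **********************************************************/

static void random_step_sketch( void ) {
    size_t i = 0, hits = 0;
    double p = 0.0;
    double lambda = 0.25;

    while ( true ) {
        i += random_step( &p, lambda );

        if ( i >= 100000 ) {
            break;
        }

        ++hits;
    }

    printf( "fraction of positions hit: %f  (1 - exp(-lambda) = %f)\n",
            (double)hits/100000.0, 1.0 - exp( -lambda ) );
}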

/**********************************************************
 * Correct the density
 *
 * Preconditions:
 *   - 0 < density < 1
 *
 * The random entries are placed using exponentially
 * distributed gaps (a Poisson process), so more than one
 * arrival may land on the same position.  The rate of the
 * process therefore overstates the fraction of positions
 * that end up occupied.  Solving
 *     1 - exp(-lambda) = density
 * for lambda gives the parameter that produces the
 * requested density of occupied positions.
 **********************************************************/

double corrected_density( double density ) {
    assert( (density > 0.0d) && (density < 1.0d) );
    return log( 1.0d/(1.0d - density ) );
}
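
/**********************************************************
 * Corrected-density check (illustration only)
 *
 * A brief numerical check of the correction: a position
 * receives at least one arrival from a Poisson process
 * with rate corrected_density( d ) with probability
 * 1 - exp( -corrected_density( d ) ), which recovers d.
 * The densities below are arbitrary illustrative values,
 * and the helper is not part of the library's interface.
 **********************************************************/

static void corrected_density_check_sketch( void ) {
    double d;

    for ( d = 0.1; d < 0.95; d += 0.2 ) {
        printf( "density %f -> lambda %f -> 1 - exp(-lambda) = %f\n",
                d, corrected_density( d ), 1.0 - exp( -corrected_density( d ) ) );
    }
}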

/**********************************************************
 * negate-or-swap
 *
 * Preconditions:
 *   - None.
 *
 * Either set
 *     a <- -b
 * or
 *     a <-  b
 *     b <- -b
 *
 * A helper function for creating a skew-symmetric matrix.
 **********************************************************/

void negate_or_swap( double *a, double *b ) {
    if ( drand48() < 0.5 ) {
        *a = -*b;
    } else {
        *a =  *b;
        *b = -*b;
    }
}


void randomize_diagonal( sparse_matrix_t *A, matrix_shape_t shape, diagonal_t diag, distribution_t dist, double density ) {
    size_t i;

    // We must now update the diagonal to ensure it has the properties we expect
    //  - This must be done at the end, as the array for the diagonal is used
    //    throughout this algorithm to track entries for symmetric or skew-symmetric
    //    shapes.

    if ( (shape == SKEW_SYMMETRIC) || (diag == ZERO_DIAGONAL ) ) {
        for ( i = 0; i < A->dimension; ++i ) {
            A->a_diagonal_values[i] = 0.0;
        }
    } else if ( diag == DENSE_DIAGONAL ) {
        for ( i = 0; i < A->dimension; ++i ) {
            A->a_diagonal_values[i] = get_random( dist );
        }
    } else {
        assert( diag == RANDOM_DIAGONAL );

        for ( i = 0; i < A->dimension; ++i ) {
            A->a_diagonal_values[i] = ((drand48() < density) ? get_random( dist ) : 0.0);
        }
    }
}

size_t correct_symmetry( sparse_matrix_t *A, matrix_shape_t shape, size_t *a_first_column_indices, size_t i, size_t k ) {
    size_t m;

    // Go through all previous rows, and find any entries that need to be
    // copied to this i-th row to ensure either symmetry or skew-symmetry
    for ( m = 0; m < i; ++m ) {
        if ( (a_first_column_indices[m] < A->a_row_indices[m + 1])
                && (A->a_column_indices[a_first_column_indices[m]] == i) ) {
            assert( k < A->max_off_diagonal_values );

            // Add the new entry and update the data structure
            if ( shape == SYMMETRIC ) {
                A->a_off_diagonal_values[k] = A->a_off_diagonal_values[a_first_column_indices[m]];
            } else if ( shape == SKEW_SYMMETRIC ) {
                negate_or_swap( &( A->a_off_diagonal_values[k] ),
                                &( A->a_off_diagonal_values[a_first_column_indices[m]] ) );
            } else {
                assert( false );
            }

            A->a_column_indices[k] = m;

            ++( a_first_column_indices[m] );
            ++k;
        }
    }

    a_first_column_indices[i] = k;

    return k;
}

/**********************************************************
 * Generate a random matrix
 *
 * Preconditions:
 *   - The matrix has been initialized.
 *
 * Create a new matrix where each entry comes from the
 * given distribution:
 *
 *  UNIFORM_POSITIVE   from ( 0, 1)
 *  UNIFORM_SYMMETRIC  from (-1, 1)
 *  STANDARD_NORMAL    normally distributed with a mean
 *                     of 0 and a standard deviation of 1
 *
 * and where the local density of the sparse matrix is
 * approximately 'density'.  The properties of the
 * diagonal entries are defined by:
 *
 *   ZERO_DIAGONAL     all diagonal entries are zero
 *   RANDOM_DIAGONAL   the local density on the diagonal
 *                     matches 'density'
 *   DENSE_DIAGONAL    all diagonal entries have a value
 **********************************************************/

void random_matrix( sparse_matrix_t *A, matrix_shape_t shape, distribution_t dist, diagonal_t diag, double density ) {
    size_t i, j, k, width, offset;
    size_t *a_first_column_indices;
    double p, r, algorithm_density;

    assert( A->a_row_indices[0] == 0 );
    assert( sizeof( size_t ) <= sizeof( double ) );
    assert( (density > 0.0d) && (density < 1.0d) );

    algorithm_density = corrected_density( density );

    // For the symmetric and skew-symmetric shapes, it is necessary to track the locations
    // of entries that have not yet been added to the matrix to ensure the appropriate shape
    //   - For example, once the first row is generated, the matrix is not symmetric or
    //     skew-symmetric with respect to any of these entries.
    a_first_column_indices = (size_t *)A->a_diagonal_values;
    a_first_column_indices[0] = 0;

    i = 0;
    j = 0;
    k = 0;
    p = 0.0d;

    if ( shape == GENERIC ) {
        width = A->dimension;
        offset = 0;
    } else if ( (shape == SYMMETRIC) || (shape == SKEW_SYMMETRIC) || (shape == UPPER_TRIANGULAR) ) {
        width = A->dimension - 1;
        offset = 1;
    } else {
        assert( shape == LOWER_TRIANGULAR );
        width = 0;
        offset = 0;
    }

    while ( true ) {
        j += random_step( &p, algorithm_density );

        while ( j >= width ) {
            j -= width;
            ++i;

            if ( (shape == SYMMETRIC) || (shape == SKEW_SYMMETRIC) || (shape == UPPER_TRIANGULAR) ) {
                --width;
                ++offset;
            } else if ( shape == LOWER_TRIANGULAR ) {
                ++width;
            }

            A->a_row_indices[i] = k;

            if ( i == A->dimension ) {
                break;
            } else if ( (shape == SYMMETRIC) || (shape == SKEW_SYMMETRIC) ) {
                k = correct_symmetry( A, shape, a_first_column_indices, i, k );
            }
        }

        if ( i == A->dimension ) {
            break;
        }

        // Never place an entry on the diagonal; a diagonal position
        // only arises for a generic matrix when i == j
        if ( (shape != GENERIC) || (i != j) ) {
            assert( k < A->max_off_diagonal_values );

            do {
                r = get_random( dist );
            } while ( r == 0.0d );

            A->a_off_diagonal_values[k] = r;
            A->a_column_indices[k] = offset + j;
            ++k;
        }
    }

    randomize_diagonal( A, shape, diag, dist, density );
}

/**********************************************************
 * Generate a random diagonal matrix
 *
 * Preconditions:
 *   - The matrix has been initialized.
 *
 * Create a new diagonal matrix where each entry
 * comes from the given distribution:
 *
 *  UNIFORM_POSITIVE   from ( 0, 1)
 *  UNIFORM_SYMMETRIC  from (-1, 1)
 *  STANDARD_NORMAL    normally distributed with a mean
 *                     of 0 and a standard deviation of 1
 *
 * The properties of the diagonal entries are defined by:
 *
 *   ZERO_DIAGONAL     all diagonal entries are zero, which
 *                     yields the zero matrix; included only
 *                     for consistency with the other generators
 *   RANDOM_DIAGONAL   the local density on the diagonal
 *                     matches 'density'
 *   DENSE_DIAGONAL    all diagonal entries have a value
 **********************************************************/

void random_diagonal_matrix( sparse_matrix_t *A, distribution_t dist, diagonal_t diag, double density ) {
    size_t i;
    double p;

    matrix_scalar_multiply( A, 0.0d );

    if ( diag == DENSE_DIAGONAL ) {
        for ( i = 0; i < A->dimension; ++i ) {
            A->a_diagonal_values[i] = get_random( dist );
        }
    } else if ( diag == RANDOM_DIAGONAL ) {
        i = 0;
        p = 0.0d;

        while ( true ) {
            i += random_step( &p, density );

            if ( i >= A->dimension ) {
                break;
            }

            A->a_diagonal_values[i] = get_random( dist );
        }
    }
}

/**********************************************************
 * Generate a random band-diagonal matrix
 *
 * Preconditions:
 *   - The matrix has been initialized.
 *
 * Create a new band-diagonal matrix in which only entries
 * at most 'bands' positions from the diagonal may be
 * non-zero, with each value drawn from the given
 * distribution:
 *
 *  UNIFORM_POSITIVE   from ( 0, 1)
 *  UNIFORM_SYMMETRIC  from (-1, 1)
 *  STANDARD_NORMAL    normally distributed with a mean
 *                     of 0 and a standard deviation of 1
 *
 * and where the local density of the band-diagonal
 * component is approximately 'density'.
 *
 * The shape of the matrix may be one of
 *   GENERIC           no restraints
 *   UPPER_TRIANGULAR  A(i,j)  ==  0       if i > j
 *   LOWER_TRIANGULAR  A(i,j)  ==  0       if i < j
 *   SYMMETRIC         A(i,j)  ==  A(j,i)
 *   SKEW_SYMMETRIC    A(i,j)  == -A(j,i)
 *
 * The properties of the diagonal entries are defined by:
 *
 *   ZERO_DIAGONAL     all diagonal entries are zero
 *   RANDOM_DIAGONAL   the local density on the diagonal
 *                     matches 'density'
 *   DENSE_DIAGONAL    all diagonal entries have a value
 **********************************************************/

void random_band_diagonal_matrix( sparse_matrix_t *A, size_t bands,
                                  matrix_shape_t shape, distribution_t dist,
                                  diagonal_t diag, double density ) {
    size_t i, j, k, width, offset;
    size_t *a_first_column_indices;
    double p, r, algorithm_density;

    assert( A->a_row_indices[0] == 0 );
    assert( sizeof( size_t ) <= sizeof( double ) );
    assert( (density > 0.0d) && (density < 1.0d) );

    algorithm_density = corrected_density( density );

    // For the symmetric and skew-symmetric shapes, it is necessary to track the locations
    // of entries that have not yet been added to the matrix to ensure the appropriate shape
    //   - For example, once the first row is generated, the matrix is not symmetric or
    //     skew-symmetric with respect to any of these entries.
    a_first_column_indices = (size_t *)A->a_diagonal_values;
    a_first_column_indices[0] = 0;

    i = 0;
    j = 0;
    k = 0;
    p = 0.0d;

    width = (shape == GENERIC) ? 2*bands + 1 : bands + 1;
    offset = ( (shape == UPPER_TRIANGULAR) || (shape == SYMMETRIC) || (shape == SKEW_SYMMETRIC) ) ? 0 : bands;

    while ( true ) {
        // Step 'p' forward by a distance drawn from an exponential distribution
        // with parameter 'algorithm_density' (the corrected density).  This gives
        // the next position that may receive an entry, so that the resulting
        // density matches the requested one.
        j += random_step( &p, algorithm_density );

        // If we are beyond the end of the row:
        //   - update the data structure with respect to the row, and
        //   - go to the next row
        //
        // If we're going to the next row, then either
        //   - if we're beyond the end of the matrix, we're finished
        //   - if the shape is symmetric or skew-symmetric, we must
        //     add all the entries to the lower-triangular component from
        //     the upper-triangular component
        while ( j >= width ) {
            j -= width;
            ++i;

            A->a_row_indices[i] = k;

            if ( i == A->dimension ) {
                break;
            } else if ( (shape == SYMMETRIC) || (shape == SKEW_SYMMETRIC) ) {
                k = correct_symmetry( A, shape, a_first_column_indices, i, k );
            }
        }

        // Again, if we're beyond the end of the matrix, we're finished
        if ( i == A->dimension ) {
            break;
        }

        // If the position is a valid off-diagonal entry, place it into the matrix
        //  - If the matrix shape is generic, the new entry may be above
        //    or below the diagonal
        //  - If the shape is symmetric or skew-symmetric, the new entry
        //    will be placed only in the upper-triangular component
        if ( ((i + j) >= offset )
          && ((i + j) <  A->dimension + offset)
          && (   j    != offset) ) {
            assert( k < A->max_off_diagonal_values );

            do {
                r = get_random( dist );
            } while ( r == 0.0d );

            A->a_off_diagonal_values[k] = r;
            A->a_column_indices[k] = (i + j) - offset;
            ++k;
        }
    }
    randomize_diagonal( A, shape, diag, dist, density );
}
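
/**********************************************************
 * Band-indexing sketch (illustration only)
 *
 * A minimal sketch of the band indexing used above for a
 * small generic matrix: dimension 6 and bands 2 (arbitrary
 * illustrative values), so width = 2*bands + 1 and
 * offset = bands.  For each row i, the in-row positions
 * j = 0, ..., width - 1 map to columns (i + j) - offset,
 * and the three guards below are exactly those applied in
 * the routine: stay inside the matrix and skip the
 * diagonal.  Unlike the routine, which visits only the
 * randomly selected positions, the sketch enumerates every
 * position.  The helper is not part of the library's
 * interface.
 **********************************************************/

static void band_positions_sketch( void ) {
    size_t dimension = 6, bands = 2;
    size_t width = 2*bands + 1, offset = bands;
    size_t i, j;

    for ( i = 0; i < dimension; ++i ) {
        for ( j = 0; j < width; ++j ) {
            if ( ((i + j) >= offset )
              && ((i + j) <  dimension + offset)
              && (   j    != offset) ) {
                printf( "row %d may have an off-diagonal entry in column %d\n",
                        (int)( i ), (int)( (i + j) - offset ) );
            }
        }
    }
}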

/**********************************************************
 * Generate a random tridiagonal matrix
 *
 * Preconditions:
 *   - The matrix has been initialized.
 *
 * Create a new tridiagonal matrix where each entry
 * comes from the given distribution:
 *
 *  UNIFORM_POSITIVE   from ( 0, 1)
 *  UNIFORM_SYMMETRIC  from (-1, 1)
 *  STANDARD_NORMAL    normally distributed with a mean
 *                     of 0 and a standard deviation of 1
 *
 * The shape of the matrix can be one of
 *   GENERIC           no restraints
 *   UPPER_TRIANGULAR  A(i,j)  ==  0       if i > j
 *   LOWER_TRIANGULAR  A(i,j)  ==  0       if i < j
 *   SYMMETRIC         A(i,j)  ==  A(j,i)
 *   SKEW_SYMMETRIC    A(i,j)  == -A(j,i)
 **********************************************************/

void random_tridiagonal_matrix( sparse_matrix_t *A, matrix_shape_t shape, distribution_t dist ) {
    size_t i, k;

    assert( A->max_off_diagonal_values >= 2*A->dimension - 2 );
    assert( A->a_row_indices[0] == 0 );

    k = 0;

    //////// 1. Assign the 1st row /////////
    // 1a. Diagonal entry
    A->a_diagonal_values[0] = (shape == SKEW_SYMMETRIC) ? 0.0d : get_random( dist );
    // 1b. 2nd entry
    if ( shape != LOWER_TRIANGULAR ) {
        A->a_off_diagonal_values[0] = get_random( dist );
        A->a_column_indices[0] = 1;
        ++k;
    }

    // 1c. Update end of row position
    A->a_row_indices[1] = k;

    //////// 2. Assign the 2nd through 2nd-last rows /////////
    for ( i = 1; i < A->dimension - 1; ++i ) {
        // 2a. 1st entry in the row
        if ( (shape == GENERIC) || (shape == LOWER_TRIANGULAR) ) {
            A->a_off_diagonal_values[k] = get_random( dist );
            A->a_column_indices[k] = i - 1;
            ++k;
        } else if ( shape == SYMMETRIC ) {
            A->a_off_diagonal_values[k] = A->a_off_diagonal_values[k - 1];
            A->a_column_indices[k] = i - 1;
            ++k;
        } else if ( shape == SKEW_SYMMETRIC ) {
            negate_or_swap( &( A->a_off_diagonal_values[k] ),
                            &( A->a_off_diagonal_values[k - 1] ) );
            A->a_column_indices[k] = i - 1;
            ++k;
        } else {
            assert( shape == UPPER_TRIANGULAR );
        }

        // 2b. Diagonal entry
        A->a_diagonal_values[i] = (shape == SKEW_SYMMETRIC) ? 0.0d : get_random( dist );

        // 2c. 3rd entry in the row
        if ( shape != LOWER_TRIANGULAR ) {
            A->a_off_diagonal_values[k] = get_random( dist );
            A->a_column_indices[k] = i + 1;
            ++k;
        }

        // 2d. Update end of row position
        A->a_row_indices[i + 1] = k;
    }

    //////// 3. Assign the last row /////////

    // 3a. 1st entry in the row
    if ( (shape == GENERIC) || (shape == LOWER_TRIANGULAR) ) {
        A->a_off_diagonal_values[k] = get_random( dist );
        A->a_column_indices[k] = A->dimension - 2;
        ++k;
    } else if ( shape == SYMMETRIC ) {
        A->a_off_diagonal_values[k] = A->a_off_diagonal_values[k - 1];
        A->a_column_indices[k] = A->dimension - 2;
        ++k;
    } else if ( shape == SKEW_SYMMETRIC ) {
        negate_or_swap( &( A->a_off_diagonal_values[k] ),
                        &( A->a_off_diagonal_values[k - 1] ) );
        A->a_column_indices[k] = A->dimension - 2;
        ++k;
    } else {
        assert( shape == UPPER_TRIANGULAR );
    }

    // 3b. Diagonal entry
    A->a_diagonal_values[A->dimension - 1] = (shape == SKEW_SYMMETRIC) ? 0.0d : get_random( dist );

    // 3c. Update end of row position
    A->a_row_indices[A->dimension] = k;
}