386 $ af, ldaf, colequ, c, b, ldb, y,
387 $ ldy, berr_out, n_norms,
388 $ err_bnds_norm, err_bnds_comp, res,
389 $ ayb, dy, y_tail, rcond, ithresh,
390 $ rthresh, dz_ub, ignore_cwise,
399 INTEGER INFO, LDA, LDAF, LDB, LDY, N, NRHS, PREC_TYPE,
402 LOGICAL COLEQU, IGNORE_CWISE
403 DOUBLE PRECISION RTHRESH, DZ_UB
406 COMPLEX*16 A( lda, * ), AF( ldaf, * ), B( ldb, * ),
407 $ y( ldy, * ), res( * ), dy( * ), y_tail( * )
408 DOUBLE PRECISION C( * ), AYB( * ), RCOND, BERR_OUT( * ),
409 $ err_bnds_norm( nrhs, * ),
410 $ err_bnds_comp( nrhs, * )
416 INTEGER UPLO2, CNT, I, J, X_STATE, Z_STATE,
418 DOUBLE PRECISION YK, DYK, YMIN, NORMY, NORMX, NORMDX, DXRAT,
419 $ dzrat, prevnormdx, prev_dz_z, dxratmax,
420 $ dzratmax, dx_x, dz_z, final_dx_x, final_dz_z,
421 $ eps, hugeval, incr_thresh
426 INTEGER UNSTABLE_STATE, WORKING_STATE, CONV_STATE,
427 $ noprog_state, base_residual, extra_residual,
429 parameter( unstable_state = 0, working_state = 1,
430 $ conv_state = 2, noprog_state = 3 )
431 parameter( base_residual = 0, extra_residual = 1,
433 INTEGER FINAL_NRM_ERR_I, FINAL_CMP_ERR_I, BERR_I
434 INTEGER RCOND_I, NRM_RCOND_I, NRM_ERR_I, CMP_RCOND_I
435 INTEGER CMP_ERR_I, PIV_GROWTH_I
436 parameter( final_nrm_err_i = 1, final_cmp_err_i = 2,
438 parameter( rcond_i = 4, nrm_rcond_i = 5, nrm_err_i = 6 )
439 parameter( cmp_rcond_i = 7, cmp_err_i = 8,
441 INTEGER LA_LINRX_ITREF_I, LA_LINRX_ITHRESH_I,
443 parameter( la_linrx_itref_i = 1,
444 $ la_linrx_ithresh_i = 2 )
445 parameter( la_linrx_cwise_i = 3 )
446 INTEGER LA_LINRX_TRUST_I, LA_LINRX_ERR_I,
448 parameter( la_linrx_trust_i = 1, la_linrx_err_i = 2 )
449 parameter( la_linrx_rcond_i = 3 )
460 DOUBLE PRECISION DLAMCH
463 INTRINSIC abs, dble, dimag, max, min
466 DOUBLE PRECISION CABS1
469 cabs1( zdum ) = abs( dble( zdum ) ) + abs( dimag( zdum ) )
473 IF (info.NE.0)
RETURN
474 eps = dlamch(
'Epsilon' )
475 hugeval = dlamch(
'Overflow' )
477 hugeval = hugeval * hugeval
479 incr_thresh = dble(n) * eps
481 IF (lsame(uplo,
'L'))
THEN
482 uplo2 = ilauplo(
'L' )
484 uplo2 = ilauplo(
'U' )
488 y_prec_state = extra_residual
489 IF (y_prec_state .EQ. extra_y)
THEN
506 x_state = working_state
507 z_state = unstable_state
515 CALL zcopy( n, b( 1, j ), 1, res, 1 )
516 IF (y_prec_state .EQ. base_residual)
THEN
517 CALL zhemv(uplo, n, dcmplx(-1.0d+0), a, lda, y(1,j), 1,
518 $ dcmplx(1.0d+0), res, 1)
519 ELSE IF (y_prec_state .EQ. extra_residual)
THEN
520 CALL blas_zhemv_x(uplo2, n, dcmplx(-1.0d+0), a, lda,
521 $ y( 1, j ), 1, dcmplx(1.0d+0), res, 1, prec_type)
523 CALL blas_zhemv2_x(uplo2, n, dcmplx(-1.0d+0), a, lda,
524 $ y(1, j), y_tail, 1, dcmplx(1.0d+0), res, 1,
529 CALL zcopy( n, res, 1, dy, 1 )
530 CALL zpotrs( uplo, n, 1, af, ldaf, dy, n, info)
544 IF (yk .NE. 0.0d+0)
THEN
545 dz_z = max( dz_z, dyk / yk )
546 ELSE IF (dyk .NE. 0.0d+0)
THEN
550 ymin = min( ymin, yk )
552 normy = max( normy, yk )
555 normx = max(normx, yk * c(i))
556 normdx = max(normdx, dyk * c(i))
559 normdx = max(normdx, dyk)
563 IF (normx .NE. 0.0d+0)
THEN
564 dx_x = normdx / normx
565 ELSE IF (normdx .EQ. 0.0d+0)
THEN
571 dxrat = normdx / prevnormdx
572 dzrat = dz_z / prev_dz_z
576 IF (ymin*rcond .LT. incr_thresh*normy
577 $ .AND. y_prec_state .LT. extra_y)
580 IF (x_state .EQ. noprog_state .AND. dxrat .LE. rthresh)
581 $ x_state = working_state
582 IF (x_state .EQ. working_state)
THEN
583 IF (dx_x .LE. eps)
THEN
585 ELSE IF (dxrat .GT. rthresh)
THEN
586 IF (y_prec_state .NE. extra_y)
THEN
589 x_state = noprog_state
592 IF (dxrat .GT. dxratmax) dxratmax = dxrat
594 IF (x_state .GT. working_state) final_dx_x = dx_x
597 IF (z_state .EQ. unstable_state .AND. dz_z .LE. dz_ub)
598 $ z_state = working_state
599 IF (z_state .EQ. noprog_state .AND. dzrat .LE. rthresh)
600 $ z_state = working_state
601 IF (z_state .EQ. working_state)
THEN
602 IF (dz_z .LE. eps)
THEN
604 ELSE IF (dz_z .GT. dz_ub)
THEN
605 z_state = unstable_state
608 ELSE IF (dzrat .GT. rthresh)
THEN
609 IF (y_prec_state .NE. extra_y)
THEN
612 z_state = noprog_state
615 IF (dzrat .GT. dzratmax) dzratmax = dzrat
617 IF (z_state .GT. working_state) final_dz_z = dz_z
620 IF ( x_state.NE.working_state.AND.
621 $ (ignore_cwise.OR.z_state.NE.working_state) )
626 y_prec_state = y_prec_state + 1
637 IF (y_prec_state .LT. extra_y)
THEN
638 CALL zaxpy( n, dcmplx(1.0d+0), dy, 1, y(1,j), 1 )
649 IF (x_state .EQ. working_state) final_dx_x = dx_x
650 IF (z_state .EQ. working_state) final_dz_z = dz_z
654 IF (n_norms .GE. 1)
THEN
655 err_bnds_norm( j, la_linrx_err_i ) =
656 $ final_dx_x / (1 - dxratmax)
658 IF (n_norms .GE. 2)
THEN
659 err_bnds_comp( j, la_linrx_err_i ) =
660 $ final_dz_z / (1 - dzratmax)
671 CALL zcopy( n, b( 1, j ), 1, res, 1 )
672 CALL zhemv(uplo, n, dcmplx(-1.0d+0), a, lda, y(1,j), 1,
673 $ dcmplx(1.0d+0), res, 1)
676 ayb( i ) = cabs1( b( i, j ) )
682 $ a, lda, y(1, j), 1, 1.0d+0, ayb, 1)
integer function ilauplo(UPLO)
ILAUPLO
subroutine zla_lin_berr(N, NZ, NRHS, RES, AYB, BERR)
ZLA_LIN_BERR computes a component-wise relative backward error.
subroutine zcopy(N, ZX, INCX, ZY, INCY)
ZCOPY
subroutine zla_wwaddw(N, X, Y, W)
ZLA_WWADDW adds a vector into a doubled-single vector.
subroutine zla_porfsx_extended(PREC_TYPE, UPLO, N, NRHS, A, LDA, AF, LDAF, COLEQU, C, B, LDB, Y, LDY, BERR_OUT, N_NORMS, ERR_BNDS_NORM, ERR_BNDS_COMP, RES, AYB, DY, Y_TAIL, RCOND, ITHRESH, RTHRESH, DZ_UB, IGNORE_CWISE, INFO)
ZLA_PORFSX_EXTENDED improves the computed solution to a system of linear equations for symmetric or Hermitian positive-definite matrices by performing extra-precise iterative refinement.
subroutine zaxpy(N, ZA, ZX, INCX, ZY, INCY)
ZAXPY
double precision function dlamch(CMACH)
DLAMCH
subroutine zpotrs(UPLO, N, NRHS, A, LDA, B, LDB, INFO)
ZPOTRS
subroutine zhemv(UPLO, N, ALPHA, A, LDA, X, INCX, BETA, Y, INCY)
ZHEMV
subroutine zla_heamv(UPLO, N, ALPHA, A, LDA, X, INCX, BETA, Y, INCY)
ZLA_HEAMV computes a matrix-vector product using a Hermitian indefinite matrix to calculate error bounds.