% function [Qhat,SigmaQ,Fit] = xLSalgos(measX,measY,SigmaX,SigmaY,gamma,Qnom)
%
% Tests the recursive performance of the xLS algorithms on a particular
% dataset.
%
% Inputs:
%   measX:  noisy z(2)-z(1)
%   measY:  noisy integral(i(t)/3600 dt)
%   SigmaX: variance of X
%   SigmaY: variance of Y
%   gamma:  geometric forgetting factor (gamma = 1 for perfect memory)
%   Qnom:   nominal value of Q; used to initialize recursions if nonzero
%
% Outputs:
%   Qhat:   estimate of capacity at every time step
%           column 1 = WLS   - weighted, recursive
%           column 2 = WTLS  - weighted, but not recursive
%           column 3 = SCTLS - scaled confidence TLS; recursive and
%                      weighted, but using SigmaX(1) and SigmaY(1) only
%                      to determine the factor by which all SigmaX and
%                      SigmaY are assumed to be related
%           column 4 = AWTLS - recursive and weighted
%   SigmaQ: variance of Q, computed via Hessian method (columns
%           correspond to methods in the same way as for Qhat)
%   Fit:    goodness-of-fit metric for each method (columns
%           correspond to methods in the same way as for Qhat)

% Copyright (c) 2016 by Gregory L. Plett of
% University of Colorado Colorado Springs (UCCS).
%
% This work is licensed under a Creative Commons
% Attribution-NonCommercial-ShareAlike 4.0 Intl. License, v. 1.0
%
% It is provided "as is", without express or implied warranty, for
% educational and informational purposes only.
%
% This file is provided as a supplement to: Plett, Gregory L., "Battery
% Management Systems, Volume II, Equivalent-Circuit Methods," Artech House,
% 2015.
function [Qhat,SigmaQ,Fit] = xLSalgos(measX,measY,SigmaX,SigmaY,gamma,Qnom)

% Reserve some memory
Qhat = zeros(length(measX),4);
SigmaQ = Qhat;
Fit = Qhat;
K = sqrt(SigmaX(1)/SigmaY(1));

% Initialize some variables used for the recursive methods
c1 = 0; c2 = 0; c3 = 0; c4 = 0; c5 = 0; c6 = 0;
C1 = 0; C2 = 0; C3 = 0; C4 = 0; C5 = 0; C6 = 0;
if Qnom ~= 0,
  c1 = 1/SigmaY(1);       c2 = Qnom/SigmaY(1);         c3 = Qnom^2/SigmaY(1);
  c4 = 1/SigmaX(1);       c5 = Qnom/SigmaX(1);         c6 = Qnom^2/SigmaX(1);
  C1 = 1/(K^2*SigmaY(1)); C2 = K*Qnom/(K^2*SigmaY(1)); C3 = K^2*Qnom^2/(K^2*SigmaY(1));
  C4 = 1/SigmaX(1);       C5 = K*Qnom/SigmaX(1);       C6 = K^2*Qnom^2/SigmaX(1);
end

for iter = 1:length(measX),
  % Compute some variables used for the recursive methods
  c1 = gamma*c1 + measX(iter)^2/SigmaY(iter);
  c2 = gamma*c2 + measX(iter)*measY(iter)/SigmaY(iter);
  c3 = gamma*c3 + measY(iter)^2/SigmaY(iter);
  c4 = gamma*c4 + measX(iter)^2/SigmaX(iter);
  c5 = gamma*c5 + measX(iter)*measY(iter)/SigmaX(iter);
  c6 = gamma*c6 + measY(iter)^2/SigmaX(iter);

  C1 = gamma*C1 + measX(iter)^2/(K^2*SigmaY(iter));
  C2 = gamma*C2 + K*measX(iter)*measY(iter)/(K^2*SigmaY(iter));
  C3 = gamma*C3 + K^2*measY(iter)^2/(K^2*SigmaY(iter));
  C4 = gamma*C4 + measX(iter)^2/SigmaX(iter);
  C5 = gamma*C5 + K*measX(iter)*measY(iter)/SigmaX(iter);
  C6 = gamma*C6 + K^2*measY(iter)^2/SigmaX(iter);

  % Method 1: WLS
  Q = c2./c1;
  Qhat(iter,1) = Q;
  H = 2*c1;
  SigmaQ(iter,1) = 2/H;
  J = Q.^2.*c1 - 2*Q.*c2 + c3;
  Fit(iter,1) = gammainc(J/2,(iter-1)/2,'upper');

  % % Method 2: WTLS -- not recursive -- implementation 1
  % % this implementation works, but is slower than the FMINSEARCH method
  % Q = zeros(size(measX));
  % H = zeros(size(measX));
  % for k = 1:length(measX),
  %   x = measX(1:k);
  %   y = measY(1:k);
  %   sx = sqrt(SigmaX(1:k));
  %   sy = sqrt(SigmaY(1:k));
  %   Ctls2 = Qhat(k,1);
  %   for kk = 1:10,
  %     jacobian = sum((2*(Ctls2*x-y).*(Ctls2*y.*sx.^2+x.*sy.^2))./...
  %                ((Ctls2^2*sx.^2+sy.^2).^2));
  %     hessian = sum((2*sy.^4.*x.^2+sx.^4.*...
  %               (6*Ctls2^2*y.^2-4*Ctls2^3*x.*y) - ...
  %               sx.^2.*sy.^2.*...
  %               (6*Ctls2^2*x.^2-12*Ctls2*x.*y+2*y.^2))./...
  %               ((Ctls2^2*sx.^2+sy.^2).^3));
  %     Ctls2 = Ctls2 - jacobian/hessian;
  %   end
  %   Q(k) = Ctls2;
  %   H(k) = hessian;
  % end
  % Qhat(:,2) = Q;
  % SigmaQ(:,2) = 2./H;

  % Method 2: WTLS -- not recursive -- implementation 2
  % This implementation uses FMINSEARCH, and is faster than the method above
  x = measX(1:iter); y = measY(1:iter);
  sx = sqrt(SigmaX(1:iter)); sy = sqrt(SigmaY(1:iter));
  g = flipud((gamma.^(0:(iter-1)))'); % forgetting-factor weights
  Q = fminsearch(@(q) sum(g.*(y-q*x).^2./(sx.^2*q^2+sy.^2)),Qhat(iter,1));
  H = 2*sum(g.*((sy.^4.*x.^2+sx.^4.*(3*Q^2*y.^2-2*Q^3*x.*y) - ...
      sx.^2.*sy.^2.*(3*Q^2*x.^2-6*Q*x.*y+y.^2))./((Q^2*sx.^2+sy.^2).^3)));
  Qhat(iter,2) = Q;
  SigmaQ(iter,2) = 2/H;
  J = sum(g.*(y-Q*x).^2./(sx.^2*Q^2+sy.^2));
  Fit(iter,2) = gammainc(J/2,(2*iter-1)/2,'upper');

  % Method 3: SCTLS
  Q = (-c1+K^2*c3+sqrt((c1-K^2*c3)^2+4*K^2*c2^2))/(2*K^2*c2);
  Qhat(iter,3) = Q;
  % Hessian of the SCTLS cost function, evaluated at Q
  H = (-4*K^4*c2*Q^3+(6*K^4*c3-6*K^2*c1)*Q^2+12*K^2*c2*Q+ ...
      2*(c1-K^2*c3))/(Q^2*K^2+1)^3;
  SigmaQ(iter,3) = 2/H;
  J = (Q^2*c1 - 2*Q*c2 + c3)/(Q^2*K^2+1);
  Fit(iter,3) = gammainc(J/2,(2*iter-1)/2,'upper');

  % % Method 4a: AWTLS without pre-scaling
  % r = roots([c5 (-c1+2*c4-c6) (3*c2-3*c5) (c1-2*c3+c6) -c2]);
  % r = r(r==conj(r));
  % r = r(r>0);
  % Jr = ((1./(r.^2+1).^2).*(r.^4*c4-2*c5*r.^3+(c1+c6)*r.^2-2*c2*r+c3))';
  % Q = r(Jr==min(Jr));
  % J = min(Jr);
  % H = (2/(Q^2+1)^4)*(-2*c5*Q^5+(3*c1-6*c4+3*c6)*Q^4+(-12*c2+16*c5)*Q^3 ...
  %     +(-8*c1+10*c3+6*c4-8*c6)*Q^2+(12*c2-6*c5)*Q+(c1-2*c3+c6));
  % Qhat(iter,4) = Q;
  % SigmaQ(iter,4) = 2/H;
  % Fit(iter,4) = gammainc(J/2,(2*iter-1)/2,'upper');

  % Method 4b: AWTLS with pre-scaling
  r = roots([C5 (-C1+2*C4-C6) (3*C2-3*C5) (C1-2*C3+C6) -C2]);
  r = r(r==conj(r)); % discard complex roots
  r = r(r>0);        % discard negative roots
  Jr = ((1./(r.^2+1).^2).*(r.^4*C4-2*C5*r.^3+(C1+C6)*r.^2-2*C2*r+C3))';
  J = min(Jr);
  Q = r(Jr==J);      % keep the Q that minimizes the cost function
  H = (2/(Q^2+1)^4)*(-2*C5*Q^5+(3*C1-6*C4+3*C6)*Q^4+(-12*C2+16*C5)*Q^3 ...
      +(-8*C1+10*C3+6*C4-8*C6)*Q^2+(12*C2-6*C5)*Q+(C1-2*C3+C6));
  Qhat(iter,4) = Q/K;       % undo the pre-scaling of the estimate
  SigmaQ(iter,4) = 2/H/K^2;
  Fit(iter,4) = gammainc(J/2,(2*iter-1)/2,'upper');
end
Fit = real(Fit);
return
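
% ------------------------------------------------------------------------
% Usage sketch (illustration only, not part of the original supplement): a
% minimal, hypothetical example of calling xLSalgos on synthetic data. The
% values of N, Qtrue, and the noise variances below are assumptions chosen
% purely for demonstration, not values taken from the text.
%
%   N = 100; Qtrue = 10;                    % assumed sample count, capacity (Ah)
%   SigmaX = 1e-6*ones(N,1);                % assumed variance of each x = z(2)-z(1)
%   SigmaY = 1e-2*ones(N,1);                % assumed variance of each y = accumulated Ah
%   x = 0.4 + 0.2*rand(N,1);                % true SOC differences
%   y = Qtrue*x;                            % noise-free accumulated ampere-hours
%   measX = x + sqrt(SigmaX).*randn(N,1);   % noisy x measurements
%   measY = y + sqrt(SigmaY).*randn(N,1);   % noisy y measurements
%   [Qhat,SigmaQ,Fit] = xLSalgos(measX,measY,SigmaX,SigmaY,1,Qtrue);
%   plot(Qhat); legend('WLS','WTLS','SCTLS','AWTLS'); % compare the four estimates
% ------------------------------------------------------------------------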