# Set the model parameter
theta <- 0.3

# Define the density p(x)
p <- function(x){
  y <- 0.5*(1+theta*x)
  return(y)
}

# Some functions (e.g. curve) require a vectorized version
pv <- Vectorize(p)

# Plot the density
curve(pv,from = -1,to = 1,xlab = "x",ylab = "p(x)",lwd=2,main="Density")

# Find the maximum M of p(x)
M <- optimize(p, interval=c(-1, 1), maximum=TRUE)$objective

# Add a horizontal line at height M
abline(h=M,lty=2)

# Fix the state of the random number generator
set.seed(3000)

# Vector that will hold the accepted draws
xsim <- numeric(0)

# Number of simulated values we want
N <- 50

# Core rejection-sampling routine: propose (x,y) uniformly on [-1,1] x [0,M]
# and accept x whenever y <= p(x)
while(length(xsim) < N){
  x <- runif(1,min = -1,max = 1)
  y <- runif(1,min = 0,max = M)
  px <- p(x)
  if(y <= px){
    xsim <- c(xsim,x)
    color <- "green"
  }else{
    color <- "red"
  }
  points(x=x,y=y,pch=4,col=color)
  if(y <= px){
    points(x=x,y=0,pch=20)
  }
}

# Histogram of the simulated sample
hist(xsim,freq = FALSE,breaks = "FD")

# Add a kernel density estimate
lines(density(xsim),col="red")

# Compare against the true density
curve(pv,from = -1,to = 1,xlab = "x",ylab = "p(x)",lwd=2,main="Density")
lines(density(xsim),col="red")

# Negative log-likelihood
nlogL <- function(theta){
  -sum(log(0.5*(1+theta*xsim)))
}

# Vectorize it
nlogLv <- Vectorize(nlogL)

# Likelihood without the logarithm
L <- function(theta){
  prod(0.5*(1+theta*xsim))
}

# Vectorize it
Lv <- Vectorize(L)

# Plot both
layout(matrix(1:2,ncol = 2))
curve(Lv,from = -1,to = 1,n = 1024,xlab = expression(theta),ylab = expression(L(theta)),main="Likelihood",lwd=2)
curve(nlogLv,from = -1,to = 1,n = 1024,xlab = expression(theta),ylab = expression(-lnL(theta)),main="-Log-likelihood",lwd=2)

# Initial values for gradient descent
theta0 <- -0.8
alpha <- 0.015

# Add the starting value to the plot
layout(1)
curve(nlogLv,from = -1,to = 1,n = 1024,xlab = expression(theta),ylab = expression(-lnL(theta)),main="-Log-likelihood",lwd=2)
points(theta0,nlogL(theta0),pch=19,col="red")
text(theta0,nlogL(theta0)+2,"0",col="red")

# Gradient of the negative log-likelihood: -sum(x_i/(1+theta*x_i))
gradiente <- function(theta){
  -sum(xsim/(1+theta*xsim))
}

# Iterate: theta_{k+1} = theta_k - alpha * gradiente(theta_k)
theta1 <- theta0-alpha*gradiente(theta0)
points(theta1,nlogL(theta1),pch=19,col="blue")
text(theta1,nlogL(theta1)+2,"1",col="blue",cex=0.5)
theta2 <- theta1-alpha*gradiente(theta1)
points(theta2,nlogL(theta2),pch=19,col="blue")
text(theta2,nlogL(theta2)+2,"2",col="blue",cex=0.5)
thetak <- theta2
for(i in 3:8){
  thetakk <- thetak-alpha*gradiente(thetak)
  points(thetakk,nlogL(thetakk),pch=19,col="blue")
  text(thetakk,nlogL(thetakk)+2,i,col="blue",cex=0.5)
  thetak <- thetakk
  readline(prompt="Press [enter] to continue")
}

## Also plot the step size
# Initial values for gradient descent
theta0 <- -0.8
alpha <- 0.015
tolerancia <- 0.0001

# Add the starting value to the plot
layout(matrix(1:2,ncol = 2))
curve(nlogLv,from = -1,to = 1,n = 1024,xlab = expression(theta),ylab = expression(-lnL(theta)),main="-Log-likelihood",lwd=2)
points(theta0,nlogL(theta0),pch=19,col="blue")
theta1 <- theta0-alpha*gradiente(theta0)
points(theta1,nlogL(theta1),pch=19,col="blue")
plot(1,-alpha*gradiente(theta0),col="blue",pch=19,xlim = c(1,30),ylim = c(0,-alpha*gradiente(theta0)),main="Step",xlab = "Iteration",ylab = "Step")

# Iterate, recording the step actually taken at each iteration
thetak <- c(theta0,theta1)
pasok <- -alpha*gradiente(theta0)
for(i in 2:30){
  paso <- -alpha*gradiente(thetak[length(thetak)])
  thetakk <- thetak[length(thetak)]+paso
  thetak <- c(thetak,thetakk)
  pasok <- c(pasok,paso)
  curve(nlogLv,from = -1,to = 1,n = 1024,xlab = expression(theta),ylab = expression(-lnL(theta)),main="-Log-likelihood",lwd=2)
  points(thetak,nlogLv(thetak),pch=19,col="blue")
  plot(1:i,pasok,type="b",col="blue",pch=19,xlim = c(1,30),ylim = c(0,-alpha*gradiente(theta0)),main="Step",xlab = "Iteration",ylab = "Step")
  abline(h=tolerancia,lty=2,lwd=2)
  readline(prompt="Press [enter] to continue")
}

# Using R's optimize function
opt <- optimize(nlogL,interval = c(-1,1))
hat_theta <- opt$minimum
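
## A minimal sketch (not part of the original script): gradient descent that
## uses `tolerancia` as an actual stopping rule instead of a fixed number of
## iterations. It reuses `gradiente`, `alpha`, `theta0` and `tolerancia`
## defined above; `theta_gd`, `max_iter` and `paso` are names introduced here
## only for illustration.
theta_gd <- theta0
max_iter <- 1000
for(k in 1:max_iter){
  paso <- -alpha*gradiente(theta_gd)   # step actually taken this iteration
  theta_gd <- theta_gd + paso
  if(abs(paso) < tolerancia) break     # stop once the update is negligible
}
theta_gd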
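
## Also a sketch: put the three values side by side. `theta_gd` comes from the
## stopping-rule loop above, `hat_theta` from optimize(), and theta = 0.3 is
## the value used to simulate the data.
c(gradient_descent = theta_gd, optimize = hat_theta, true = theta)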