Advertisement
Not a member of Pastebin yet?
Sign Up —
it unlocks many cool features!
- library("markovchain")
# Compute the n-th matrix power A %*% A %*% ... (n factors) of a square matrix.
#
# The original implementation referenced the global `P` instead of the
# argument `A`, tested diagonalizability with `length(M) == dim(P)[1]`
# (length() of a matrix is its total entry count, not its rank, so the
# check was effectively always false), and the eigendecomposition route can
# return complex entries for real input. Exponentiation by squaring avoids
# all of that: it is exact for integer n, works for non-diagonalizable
# matrices, and needs only O(log n) multiplications.
#
# A: square numeric matrix.
# n: non-negative integer exponent; n = 0 yields the identity matrix.
matrix.power <- function(A, n) {
  if (n < 0 || n != round(n))
    stop("n must be a non-negative integer")
  result <- diag(nrow(A))  # identity; correct answer for n = 0
  base <- A
  while (n > 0) {
    if (n %% 2 == 1)
      result <- result %*% base
    n <- n %/% 2
    if (n > 0)
      base <- base %*% base
  }
  result
}
- S = c("A", "B", "C", "D")
- mu0 = c(1/8, 2/8, 1/8, 4/8)
- P = matrix(
- c(0.1, 0.1, 0.8, 0,
- 0.4, 0 , 0.5, 0.1,
- 0.1, 0 , 0 , 0.9,
- 0.4, 0.1, 0.5, 0),
- nrow = 4,
- ncol = 4,
- byrow = TRUE
- )
# Sanity checks on the model inputs.
# Floating-point sums are compared with a tolerance (all.equal) instead of
# exact `==`/`!=`: a row like 0.1 + 0.1 + 0.8 need not be exactly 1 in binary
# floating point, so the original exact comparisons could reject valid input.
if (!isTRUE(all.equal(sum(mu0), 1))) # initial distribution must sum to 1
  stop("Las probabilidades del vector inicial no suman 1")
for (i in seq_len(nrow(P))) {
  if (!isTRUE(all.equal(sum(P[i, ]), 1))) # each row of P must sum to 1
    stop("Las filas individuales de P no suman 1")
  if (P[i, i] == 1) # the problem statement forbids absorbing states
    stop("Hay algún estado absorbente presente")
}
- Q = new("markovchain", transitionMatrix = P,
- states = S, name = "Buffer")
- plot(Q, main = "Cadena de Markov", edge.arrow.size=0.5)
# Simulate `tests` trajectories of `n` steps each and accumulate, per state,
# the total number of visits across all runs.
tests <- 10000
n <- 20

visit_counts <- setNames(numeric(length(S)), S)
for (i in seq_len(tests)) {
  # BUG FIX: the original passed `M` to markovchainSequence, but `M` is not
  # defined at top level (it was a local variable inside matrix.power).
  # The chain object is `Q`.
  traj <- markovchainSequence(
    n, Q,
    t0 = sample(Q@states, 1, replace = TRUE, prob = mu0),
    include.t0 = TRUE
  )
  # Tabulate against the full state set so states absent from a run count
  # as 0; this replaces the original's ad-hoc merging of partial tables
  # (intersect/order of names) and keeps counts aligned positionally with S.
  visit_counts <- visit_counts + table(factor(traj, levels = S))
  if (i == 1) {
    # Plot the first simulated trajectory as a sample path over time.
    plot(0:n, factor(traj, levels = S), yaxt = "n",
         main = "Simulación de una Trayectoria",
         ylab = "Estado", xlab = "Tiempo", type = "l", col = "blue")
    axis(side = 2, at = 1:4, labels = S)
  }
}
K <- visit_counts  # downstream code (K = K / tests) expects this name
# Average visit counts per state across all simulated trajectories.
K = K / tests
# newP = P^n would be WRONG here: `^` is elementwise in R; we need the
# true matrix power, hence matrix.power.
newP = matrix.power(P, n)
muN = mu0 %*% newP # probability of being at the i-th node after 'n' steps
print("Promedio de visitas a los nodos (en n=20 pasos) simulada: ")
print(K)
print("Cantidad teorica de visitas a los nodos (en n=20 pasos): ")
print(muN*n)
# NOTE(review): muN*n scales the step-n distribution by n, but the expected
# number of visits over n steps is sum over t = 0..n of mu0 %*% P^t, not
# n * muN -- confirm this matches the comparison the author intended.
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement