1. Normal Distribution
set.seed(1)

# Shade the region between 0 and 5 under the N(0, sd = 5) density
cord.x <- c(0, seq(0, 5, 0.1), 5)
cord.y <- c(0, dnorm(seq(0, 5, 0.1), 0, 5), 0)
curve(dnorm(x, 0, 5), xlim = c(-15, 15), main = 'Normal Distribution, PDF',
      col = "darkgreen", xlab = "", ylab = "Density", type = "l",
      lwd = 2, cex = 2, cex.axis = .8)
polygon(cord.x, cord.y, col = 'skyblue')

# Monte Carlo estimate of P(0 <= X <= 5) for X ~ N(0, sd = 5)
runs <- 1000
xs <- rnorm(runs, mean = 0, sd = 5)
sum(xs >= 0 & xs <= 5) / runs
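As a quick check on the simulation (this comparison is an addition, not part of the original snippet), the same probability is available in closed form from pnorm:

# Exact P(0 <= X <= 5) for X ~ N(0, sd = 5), for comparison with the estimate above
pnorm(5, mean = 0, sd = 5) - pnorm(0, mean = 0, sd = 5)   # ~0.341

With 1000 runs the Monte Carlo estimate should land within a few percentage points of this value.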
2. Binomial Distribution
# One trial: flip 10 fair coins and check whether more than 5 come up heads
runs <- 1000
one.trial <- function() {
  sum(sample(c(0, 1), 10, replace = TRUE)) > 5
}

# Monte Carlo estimate vs. the exact binomial tail probability
sum(replicate(runs, one.trial())) / runs
pbinom(q = 5, size = 10, prob = 0.5, lower.tail = FALSE)
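A more compact variant of the same experiment (an alternative sketch, not part of the original code) draws the number of heads directly with rbinom instead of sampling individual flips:

# Each draw is the number of heads in 10 fair flips; the mean of the logical
# vector is the proportion of trials with more than 5 heads
mean(rbinom(runs, size = 10, prob = 0.5) > 5)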
3. Estimate Pi
# Throw random points into the square [-5, 5] x [-5, 5] and count how many land
# inside the inscribed circle of radius 5; that fraction times 4 estimates pi
runs <- 10000
xs <- runif(runs, min = -5, max = 5)
ys <- runif(runs, min = -5, max = 5)
in.circle <- xs^2 + ys^2 <= 5^2
(sum(in.circle) / runs) * 4

plot(xs, ys, pch = '.', col = ifelse(in.circle, "blue", "orange"),
     xlab = '', ylab = '', asp = 1,
     main = paste("MC Estimate of Pi =", (sum(in.circle) / runs) * 4))
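Because the Monte Carlo error shrinks roughly like 1/sqrt(runs), it can help to repeat the estimate at a few sample sizes. This small experiment is an addition to the original code:

# Pi estimate for several sample sizes; larger n should land closer to pi
estimate.pi <- function(n) {
  xs <- runif(n, min = -5, max = 5)
  ys <- runif(n, min = -5, max = 5)
  4 * mean(xs^2 + ys^2 <= 5^2)
}
sapply(c(1e2, 1e4, 1e6), estimate.pi)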
4. A/B Test & Beta Distribution
It certainly looks like B is the winner, but we'd really like to know how likely that is. We could of course run a one-tailed t-test, but that would require assuming these are Normal distributions (which isn't a terrible approximation in this case). However, we can also solve this with a Monte Carlo simulation: take 100,000 samples from A and 100,000 samples from B and see how often a sample from A ends up being larger than a sample from B.

# Plot the two Beta posteriors: A ~ Beta(25, 75), B ~ Beta(37, 63)
x <- seq(0, 1, length = 100)
plot(x, dbeta(x, 25, 75), type = "l", col = "darkgreen", lwd = 2,
     cex = 2, cex.axis = .8, xlab = "", ylab = "Density",
     ylim = c(0, 12))                      # headroom for the 'B' label
lines(x, dbeta(x, 37, 63), col = "blue", lwd = 2)   # overlay the posterior for B
text(0.34, 11, 'B', col = 'blue')

# Sample from both posteriors and estimate how often A beats B
runs <- 100000
a.samples <- rbeta(runs, 25, 75)
b.samples <- rbeta(runs, 37, 63)
sum(a.samples > b.samples) / runs   # very small, so B beats A in almost every simulation

# Distribution of the relative improvement of B over A
hist(b.samples / a.samples,
     main = 'Ratio of the two simulated distributions,\nshowing the average improvement')
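To put numbers on the improvement ratio shown in the histogram (an extra summary, not in the original code), the mean and a central interval of the simulated ratios can be reported:

# Average relative improvement of B over A and a central 95% interval
improvement <- b.samples / a.samples
mean(improvement)
quantile(improvement, c(0.025, 0.975))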