ANFIS 仿真

ANFIS 仿真。在该程序中,神经模糊分类器参数通过比例共轭梯度法(SCG)进行调整;此外,将功率值应用于模糊集并同样通过 SCG 进行调整。程序会创建训练数据:这里创建了两个关节角的阵列。通过增加作为训练数据的输入关节角度,可以提高训练后 FIS 的分辨率,从而提高准确性;较少的训练数据可加快训练过程,但解的准确性较低。

应用介绍

ANFIS 仿真。在该程序中,神经模糊分类器参数通过比例共轭梯度法(SCG)进行调整;此外,将功率值应用于模糊集并同样通过 SCG 进行调整。程序会创建训练数据:这里创建了两个关节角的阵列。通过增加作为训练数据的输入关节角度,可以提高训练后 FIS 的分辨率,从而提高准确性;较少的训练数据可加快训练过程,但解的准确性较低。


/* Forward pass: evaluate the outputs of nodes 'from' through 'to' in order.
   Node indices below fis->in_n are input nodes, which are assumed to have
   been loaded by the caller before this is invoked. */
static void anfisForward(FIS *fis, int from, int to)
{
	int idx;

	/* both endpoints must lie inside the (non-input) node range */
	if (from < fis->in_n || to >= fis->node_n)
		fisError("Node index out of bound!");

	/* each node function computes that node's output from its fanins;
	   the -1 parameter index is unused in "forward" mode */
	for (idx = from; idx <= to; idx++)
		fis->node[idx]->value =
			(*fis->node[idx]->nodeFcn)(fis, idx, "forward", -1);
}

/* backward pass from node 'from' to node 'to' */
/* the de_do field of output nodes should have been set */
/* Backward pass from node 'from' down to node 'to' (chain rule).
   Precondition: the de_do field of the output node(s) has been set.
   For each node i, de/dO_i is accumulated over its fanout as
   sum_p de/dO_p * dO_p/dO_i, where dO_p/dO_i is supplied by the node
   function of p in "backward" mode for the k-th fanin. */
static void anfisBackward(FIS *fis, int from, int to)
{
	int i;

	if (from < fis->in_n || to >= fis->node_n)
		fisError("Node index out of bound!");

	/* backward calculation */
	for (i = from; i >= to; i--) {
		DOUBLE de_do, do_do;
		FAN *p, *q;
		int k;

		de_do = 0;
		for (p = fis->node[i]->fanout; p != NULL; p = p->next) {
			/* O_i is the k-th fanin of O_{p->index} --> find k.
			   BUGFIX: the search loop must stop at the end of the
			   fanin list; the original dereferenced q->index without
			   a NULL check, so a missing link crashed before the
			   error below could ever fire. */
			for (k = 0, q = fis->node[p->index]->fanin;
			     q != NULL && q->index != i;
			     q = q->next, k++)
				;
			if (q == NULL || k >= fis->node[p->index]->fanin_n)
				fisError("Cannot find k in anfisBackward!");
			/* do_do = dO_{p->index} / dO_i */
			do_do = (*fis->node[p->index]->nodeFcn)(fis, p->index, "backward", k);
			de_do += fis->node[p->index]->de_do * do_do;
		}
		/* update fis->node[i]->de_do */
		fis->node[i]->de_do = de_do;
	}
}

/* update de_dp of parameterized node from 'from' to 'to'. */
/* Accumulate de_dp for the parameterized nodes with indices 'from'..'to'.
   Precondition: de_do of these nodes has been set by the backward pass.
   de_dp is ACCUMULATED (+=) across data pairs; the caller clears it once
   per epoch. do_dp is simply overwritten here. */
static void anfisUpdateDE_DP(FIS *fis, int from, int to)
{
	int idx, p;

	for (idx = from; idx <= to; idx++) {
		for (p = 0; p < fis->node[idx]->para_n; p++) {
			/* do_dp[p] = d(node output)/d(parameter p), from the
			   node function in "parameter" mode */
			fis->node[idx]->do_dp[p] =
				(*fis->node[idx]->nodeFcn)(fis, idx, "parameter", p);
			/* chain rule: de/dp = de/do * do/dp */
			fis->node[idx]->de_dp[p] +=
				fis->node[idx]->de_do * fis->node[idx]->do_dp[p];
		}
	}
}

/* This is good for both on-line and off-line */
/* update parameters of nodes from 'from' to 'to' */
/* Normalized gradient-descent step on the parameters of nodes 'from'..'to'.
   Each parameter moves by ss * de_dp / |gradient|, i.e. a step of fixed
   length fis->ss along the negative gradient direction.
   Works for both on-line and off-line learning. */
static void anfisUpdateParameter(FIS *fis, int from, int to)
{
	int idx, p;
	DOUBLE grad_len = 0;

	/* Euclidean length of the gradient over all affected parameters */
	for (idx = from; idx <= to; idx++)
		for (p = 0; p < fis->node[idx]->para_n; p++)
			grad_len += pow(fis->node[idx]->de_dp[p], 2.0);
	grad_len = sqrt(grad_len);

	/* zero gradient: nothing to move, and avoids division by zero */
	if (grad_len == 0)
		return;

	/* step each parameter along the normalized negative gradient */
	for (idx = from; idx <= to; idx++)
		for (p = 0; p < fis->node[idx]->para_n; p++)
			fis->node[idx]->para[p] -=
				fis->ss * fis->node[idx]->de_dp[p] / grad_len;
}

/* clear de_do */
/* do_dp is overwritten every time, so it needs not to be cleared */
/* Zero the accumulated error derivatives (de_dp) before a new epoch.
   do_dp is overwritten on every use, so it needs no clearing. */
static void anfisClearDerivative(FIS *fis)
{
	int p;

	for (p = 0; p < fis->para_n; p++)
		fis->de_dp[p] = 0;
}

/* compute training error */
/* Root-mean-square error of the current FIS over the training data.
   Each row of trn_data holds in_n inputs followed by the desired output. */
static DOUBLE anfisComputeTrainingError(FIS *fis)
{
	int row, col;
	DOUBLE sum_sq = 0, err;

	for (row = 0; row < fis->trn_data_n; row++) {
		/* load this pair's inputs into the input nodes */
		for (col = 0; col < fis->in_n; col++)
			fis->node[col]->value = fis->trn_data[row][col];

		/* propagate through the whole network */
		anfisForward(fis, fis->in_n, fis->node_n - 1);

		/* desired output minus network output */
		err = fis->trn_data[row][fis->in_n]
			- fis->node[fis->node_n - 1]->value;
		sum_sq += pow(err, 2.0);
	}
	return (sqrt(sum_sq / fis->trn_data_n));
}

/* compute checking error */
/* Root-mean-square error of the current FIS over the checking data.
   Mirrors anfisComputeTrainingError but reads chk_data instead. */
static DOUBLE anfisComputeCheckingError(FIS *fis)
{
	int row, col;
	DOUBLE sum_sq = 0, err;

	for (row = 0; row < fis->chk_data_n; row++) {
		/* load this pair's inputs into the input nodes */
		for (col = 0; col < fis->in_n; col++)
			fis->node[col]->value = fis->chk_data[row][col];

		/* propagate through the whole network */
		anfisForward(fis, fis->in_n, fis->node_n - 1);

		/* desired output minus network output */
		err = fis->chk_data[row][fis->in_n]
			- fis->node[fis->node_n - 1]->value;
		sum_sq += pow(err, 2.0);
	}
	return (sqrt(sum_sq / fis->chk_data_n));
}

/* a single epoch with index i, using GD only */
/* One training epoch (index ep) using gradient descent only.
   Accumulates de_dp over all training pairs and records the epoch's
   training (and checking) RMSE; the actual parameter update is left
   to the caller. */
static void anfisOneEpoch0(FIS *fis, int ep)
{
	int row, col;
	DOUBLE sum_sq = 0, err;

	anfisClearDerivative(fis);
	for (row = 0; row < fis->trn_data_n; row++) {
		/* load this pair's inputs into the input nodes */
		for (col = 0; col < fis->in_n; col++)
			fis->node[col]->value = fis->trn_data[row][col];

		/* forward pass through the entire network */
		anfisForward(fis, fis->in_n, fis->node_n - 1);

		/* error = desired output - network output */
		err = fis->trn_data[row][fis->in_n]
			- fis->node[fis->node_n - 1]->value;
		sum_sq += pow(err, 2.0);

		/* seed de_do at the output node: d(e^2)/d(output) = -2e */
		fis->node[fis->node_n - 1]->de_do = -2 * err;

		/* backward pass, then accumulate parameter derivatives
		   for every parameterized node */
		anfisBackward(fis, fis->node_n - 2, fis->in_n);
		anfisUpdateDE_DP(fis, fis->in_n, fis->node_n - 1);
	}
	fis->trn_error[ep] = sqrt(sum_sq / fis->trn_data_n);
	if (fis->chk_data_n != 0)
		fis->chk_error[ep] = anfisComputeCheckingError(fis);
}

/* a single epoch with index i, using both GD and LSE */
/* A single epoch with index i, using both GD and LSE (hybrid learning).
   Pass 1: forward each training pair up to layer 3, cache those node
   outputs, and feed each pair into the Kalman-filter least-squares
   estimator (anfisGetKalmanDataPair / anfisKalman).
   Pass 2: after anfisPutKalmanParameter installs the LSE solution,
   restore the cached layer 0-3 outputs, finish the forward pass through
   the output, and backpropagate to accumulate de_dp for the premise
   (layer-1) parameters only — the consequents were just set by LSE. */
static void anfisOneEpoch1(FIS *fis, int i)
{
	int j, k;
	DOUBLE squared_error = 0, e;

	anfisClearDerivative(fis);	
	anfisKalman(fis, 1, 1e6);	/* reset matrices used in kalman */
	for (j = 0; j < fis->trn_data_n; j++) {
		/* dispatch inputs */
		for (k = 0; k < fis->in_n; k++)
			fis->node[k]->value = fis->trn_data[j][k];

		/* forward calculation up to (not including) layer 4 */
		anfisForward(fis, fis->in_n, fis->layer[4]->index - 1);

		/* cache node outputs from layer 0 to 3, so pass 2 can skip
		   recomputing them after the consequent parameters change */
		for (k = 0; k < fis->layer[4]->index; k++)
			fis->tmp_node_output[j][k] = fis->node[k]->value;
		anfisGetKalmanDataPair(fis, j);
		anfisKalman(fis, 0, 1e6);	/* normal operation */
	}
	/* install the least-squares solution into the parameters */
	anfisPutKalmanParameter(fis);
	for (j = 0; j < fis->trn_data_n; j++) {
		/* restore node outputs from layer 0 to 3 */
		for (k = 0; k < fis->layer[4]->index; k++)
			fis->node[k]->value = fis->tmp_node_output[j][k];

		fis->skipdatapoint = 0;

		/* forward pass from layer 4 to the output node */
		anfisForward(fis, fis->layer[4]->index, fis->node_n - 1);

		/* skipdatapoint is presumably raised by a node function
		   during the forward pass (TODO confirm); if so, only nudge
		   the output bias toward the target and skip GD for this
		   pair — note it is also excluded from squared_error */
		if (fis->skipdatapoint)
		{
			fis->bias[0] += fis->ss * (fis->trn_data[j][fis->in_n] - fis->bias[0]);
			continue;
		}

		e = fis->trn_data[j][fis->in_n] - fis->node[fis->node_n - 1]->value;

		/* calculate error measure */
		squared_error += pow(e, 2.0);

		/* dispatch de_do at outputs: d(e^2)/d(output) = -2e */
		fis->node[fis->node_n - 1]->de_do = -2 * e;

		/* backward calculation */
		anfisBackward(fis, fis->node_n - 2, fis->in_n);

		/* update de_dp of layer 1 (premise parameters) only */
		anfisUpdateDE_DP(fis, fis->in_n, fis->layer[2]->index - 1);
	}
	fis->trn_error[i] = sqrt(squared_error / fis->trn_data_n);
	if (fis->chk_data_n != 0)
		fis->chk_error[i] = anfisComputeCheckingError(fis);
}

/* main loop for learning */
/* Main training loop: run up to epoch_n epochs, keep the parameter sets
   that achieved the lowest training/checking errors so far, report
   per-epoch errors, and adapt the step size after each epoch.
   Stops early once min_trn_error reaches trn_error_goal. */
static void anfisLearning(FIS *fis)
{
	int ep, p;

	if (fis->display_error)
		PRINTF("\nStart training ANFIS ...\n\n");

	for (ep = 0; ep < fis->epoch_n; ep++) {
		if (fis->method == 0) {
			/* GD only: accumulate derivatives over the epoch,
			   then update every parameterized node at once */
			anfisOneEpoch0(fis, ep);
			anfisUpdateParameter(fis, fis->in_n, fis->node_n - 1);
		} else {
			/* hybrid GD + LSE epoch */
			anfisOneEpoch1(fis, ep);
		}

		/* remember the parameters at the lowest training error */
		if (fis->trn_error[ep] < fis->min_trn_error) {
			fis->min_trn_error = fis->trn_error[ep];
			for (p = 0; p < fis->para_n; p++)
				fis->trn_best_para[p] = fis->para[p];
		}

		/* same bookkeeping for the checking error, when present */
		if (fis->chk_data_n != 0) {
			if (fis->chk_error[ep] < fis->min_chk_error) {
				fis->min_chk_error = fis->chk_error[ep];
				for (p = 0; p < fis->para_n; p++)
					fis->chk_best_para[p] = fis->para[p];
			}
		}

		if (fis->display_error) {
			if (fis->chk_data_n != 0)
				PRINTF("%4d \t %g \t %g\n", ep + 1,
					fis->trn_error[ep], fis->chk_error[ep]);
			else
				PRINTF("%4d \t %g\n", ep + 1, fis->trn_error[ep]);
		}

		/* stop training as soon as the error goal is reached */
		if (fis->min_trn_error <= fis->trn_error_goal) {
			fis->actual_epoch_n = ep + 1;
			if (fis->display_error)
				PRINTF("\nError goal (%g) reached --> ANFIS training completed at epoch %d.\n\n", fis->trn_error_goal, fis->actual_epoch_n);
			return;
		}

		/* hybrid method: only the premise (layer-1) parameters are
		   updated by GD; consequents were handled by LSE in the epoch */
		if (fis->method == 1)
			anfisUpdateParameter(fis, fis->in_n, fis->layer[2]->index - 1);

		/* record the step size used, then adapt it */
		fis->ss_array[ep] = fis->ss;
		anfisUpdateStepSize(fis, ep);
	}
	if (fis->display_error)
		PRINTF("\nDesignated epoch number reached --> ANFIS training completed at epoch %d.\n\n", fis->epoch_n);
}

文件列表(部分)

名称 大小 修改日期
NFA User's Manual v2.1.pdf492.65 KB2012-10-21
Adapt_Equalizer.mdl11.44 KB2012-10-21
ANFIS_ART_MG.mdl9.55 KB2012-10-21
anfis_art_narmax.mdl11.76 KB2012-10-21
ANFIS_Grid_MG.mdl9.45 KB2012-10-21
ANFIS_Scatter_MG.mdl9.51 KB2012-10-21
bb_anfis_grid.mdl11.15 KB2012-10-21
bb_anfis_scatter.mdl11.15 KB2012-10-21
slcp_anfis.mdl11.35 KB2012-10-21
CANFIS_ART_Lorenz.mdl10.78 KB2012-10-21
CANFIS_Grid_Lorenz.mdl10.71 KB2012-10-21
CANFIS_Scatter_Lorenz.mdl10.71 KB2012-10-21
GenLorenzDat4Sim.m0.51 KB2011-12-20
lorenz_data.mat154.55 KB2011-03-15
MG_Check.dat9.31 KB2011-03-15
MG_Check_2000.dat18.02 KB2011-12-18
MG_Train.dat9.35 KB2011-03-15
MG_Train_2000.dat18.10 KB2011-12-18
SimDataGenAnfis1.m0.39 KB2011-12-24
SimDataGenAnfis2.m0.39 KB2011-12-18
NFA_matlab.mdl9.89 KB2012-04-22
anfisim_art.m3.65 KB2012-04-22
g.m0.18 KB2011-08-31
anfisim_grid.m2.08 KB2012-04-22
combinem.c1.19 KB2011-12-12
anfisim_scatter.m2.04 KB2012-04-22
canfisim_art.m3.70 KB2012-04-22
g.m0.18 KB2011-08-31
canfisim_grid.m2.21 KB2012-04-22
canfisim_scatter.m2.19 KB2012-04-22

立即下载

相关下载

[ANFIS 仿真] ANFIS 仿真,在该程序中,神经模糊分类器参数通过比例共轭梯度法进行调整。此外,将功率值应用于模糊集并通过SCG进行调整。创建训练数据。 这里两个关节角的阵列是创建。 通过增加输入关节角度作为训练数据,可以提高训练后的FIS的分辨率,从而提高准确性。 较少的训练数据可加快训练过程,但解决方案的准确性较低

评论列表 共有 0 条评论

暂无评论

微信捐赠

微信扫一扫体验

立即
上传
发表
评论
返回
顶部