function [W, loss_log] = batchDescent(initial_W, dl, nc, svmbuff, ss, dataa, label, num_iters)
% batchDescent  Mini-batch gradient descent on the multiclass SVM (hinge) loss.
%
%   [W, loss_log] = batchDescent(initial_W, dl, nc, svmbuff, ss, dataa, label, num_iters)
%
%   Inputs:
%     initial_W - nc-by-dl initial weight matrix
%     dl        - features per image (e.g. 3072 for CIFAR-10)
%     nc        - number of classes (e.g. 10)
%     svmbuff   - SVM margin (the "delta" in the hinge loss)
%     ss        - step size (learning rate)
%     dataa     - image matrix, one image per row
%     label     - 0-based class label per row of dataa
%     num_iters - number of gradient-descent iterations
%
%   Outputs:
%     W         - weight matrix after num_iters updates
%     loss_log  - 1-by-num_iters vector of per-sample average losses

data_length = dl;
num_classes = nc;
svm_buffer = svmbuff;
step_size = ss;
data = dataa;
labels = label;
num_iterations = num_iters;

W = initial_W;

batch_size = 32;
epochs = 100;               % mini-batches drawn per descent iteration
total_size = length(data);  % number of available images

loss_log = zeros(1, num_iterations);

for j = 1:num_iterations
    total_loss = 0;
    total_gradient = zeros(num_classes, data_length);

    for i = 1:epochs
        % Sample a mini-batch of indices uniformly with replacement.
        % BUG FIX: randi avoids the zero index that ceil(total_size*rand)
        % can produce when rand returns exactly 0.
        img_num = randi(total_size, batch_size, 1);

        for k = 1:length(img_num)
            % BUG FIX: use the sampled index, not the loop counter k —
            % the original ignored img_num and always read rows 1..batch_size.
            idx = img_num(k);
            img = im2double(data(idx, :));
            score_i = W * img';

            % Labels are 0-based; MATLAB indexing is 1-based.
            y = double(labels(idx));

            % Hinge loss terms: max(0, s_j - s_y + delta) for every class j.
            tmp = score_i' - score_i(y + 1) + svm_buffer;
            maxed = max(0, tmp);
            % The correct class always contributes exactly svm_buffer; remove it.
            total_loss = total_loss + sum(maxed) - svm_buffer;

            % Gradient: each violating class row gets +img; the correct-class
            % row gets -(number of violating classes) * img.
            bin_array = zeros(1, num_classes);
            bin_array(maxed > 0) = 1;
            bin_array(y + 1) = 0;
            % BUG FIX: the original omitted the correct-class gradient term.
            bin_array(y + 1) = -sum(bin_array);
            total_gradient = total_gradient + bin_array' * img;
        end
    end

    % BUG FIX: average over every sampled image (epochs mini-batches of
    % batch_size each); the original divided by batch_size alone, inflating
    % the loss and gradient by a factor of `epochs`.
    num_samples = epochs * batch_size;
    average_loss = total_loss / num_samples;
    average_gradient = total_gradient / num_samples;

    W = W - step_size * average_gradient;
    loss_log(j) = average_loss;
end
end