Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

TM activeCells_ is a SDR #442

Open
wants to merge 7 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion API_CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,6 @@ longer accept a synapse permanence threshold argument. PR #305

* SDRClassifier class is replaced by `Classifier` and `Predictor` classes.

* TemporalMemory::getPredictiveCells() now returns a SDR. This ensures more convenient API and that the SDR object has correct
* TemporalMemory::getPredictiveCells(), and getActiveCells() now return a SDR. This ensures more convenient API and that the SDR object has correct
dimensions matching TM. use TM.getPredictiveCells().getSparse() to obtain the sparse vector as before.

Original file line number Diff line number Diff line change
Expand Up @@ -139,9 +139,7 @@ using namespace nupic::algorithms::connections;

py_HTM.def("getActiveCells", [](const HTM_t& self)
{
auto activeCells = self.getActiveCells();

return py::array_t<nupic::UInt32>(activeCells.size(), activeCells.data());
return self.getActiveCells();
});

py_HTM.def("activateDendrites", [](HTM_t &self, bool learn) {
Expand Down
61 changes: 29 additions & 32 deletions src/nupic/algorithms/TemporalMemory.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -141,6 +141,10 @@ void TemporalMemory::initialize(
connections = Connections(static_cast<CellIdx>(numberOfColumns() * cellsPerColumn_), connectedPermanence_);
rng_ = Random(seed);

auto cellsDims = getColumnDimensions(); //nD column dimensions (eg 10x100)
cellsDims.push_back(getCellsPerColumn()); //add n+1-th dimension for cellsPerColumn (eg. 10x100x8)
activeCells_.initialize(cellsDims);

maxSegmentsPerCell_ = maxSegmentsPerCell;
maxSynapsesPerSegment_ = maxSynapsesPerSegment;
iteration_ = 0;
Expand Down Expand Up @@ -298,7 +302,7 @@ static void growSynapses(Connections &connections,
}

static void activatePredictedColumn(
vector<CellIdx> &activeCells,
SDR &activeCellsSDR,
vector<CellIdx> &winnerCells,
Connections &connections,
Random &rng,
Expand All @@ -314,6 +318,7 @@ static void activatePredictedColumn(
const SynapseIdx maxSynapsesPerSegment,
const bool learn) {
auto activeSegment = columnActiveSegmentsBegin;
auto& activeCells = activeCellsSDR.getSparse();
do {
const CellIdx cell = connections.cellForSegment(*activeSegment);
activeCells.push_back(cell);
Expand All @@ -337,6 +342,8 @@ static void activatePredictedColumn(
} while (++activeSegment != columnActiveSegmentsEnd &&
connections.cellForSegment(*activeSegment) == cell);
} while (activeSegment != columnActiveSegmentsEnd);

activeCellsSDR.setSparse(activeCells); //update SDR
}

static Segment createSegment(Connections &connections, //TODO remove, use TM::createSegment
Expand Down Expand Up @@ -365,7 +372,7 @@ static Segment createSegment(Connections &connections, //TODO remove, use TM::c
}

static void
burstColumn(vector<CellIdx> &activeCells,
burstColumn(SDR &activeCellsSDR,
vector<CellIdx> &winnerCells,
Connections &connections,
Random &rng,
Expand All @@ -385,12 +392,17 @@ burstColumn(vector<CellIdx> &activeCells,
const SegmentIdx maxSegmentsPerCell,
const SynapseIdx maxSynapsesPerSegment,
const bool learn) {

{
auto& activeCells = activeCellsSDR.getSparse();
// Calculate the active cells.
const CellIdx start = column * cellsPerColumn;
const CellIdx end = start + cellsPerColumn;
for (CellIdx cell = start; cell < end; cell++) {
activeCells.push_back(cell);
}
activeCellsSDR.setSparse(activeCells);
}

const auto bestMatchingSegment =
std::max_element(columnMatchingSegmentsBegin, columnMatchingSegmentsEnd,
Expand Down Expand Up @@ -474,10 +486,10 @@ void TemporalMemory::activateCells(const size_t activeColumnsSize,
}

vector<bool> prevActiveCellsDense(numberOfCells() + extra_, false);
for (CellIdx cell : activeCells_) {
for (CellIdx cell : activeCells_.getSparse()) {
prevActiveCellsDense[cell] = true;
}
activeCells_.clear();
activeCells_.zero();

const vector<CellIdx> prevWinnerCells = std::move(winnerCells_);

Expand Down Expand Up @@ -567,10 +579,15 @@ void TemporalMemory::activateDendrites(bool learn,
NTA_CHECK( extraWinners.size() != 1 || extraWinners[0] != SENTINEL )
<< "TM.ActivateDendrites() missing argument extraWinners!";

//add extra active
auto& activeVec = activeCells_.getSparse();
for(const auto &active : extraActive) {
NTA_ASSERT( active < extra_ );
activeCells_.push_back( static_cast<CellIdx>(active + numberOfCells()) );
activeVec.push_back( static_cast<CellIdx>(active + numberOfCells()) );
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

continuing the discussion from #437 (comment)

This code breaks because we're adding extraActive indices offsetted by numOfCells() which does not pass a check in the SDR used.
@ctrl-z-9000-times do you have a proposal how to proceed?

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, make two new vectors named something like presynActive & presynWinner. Copy the TM's active and winner cells into it, and concatenate the extra inputs into those vectors too.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The external predictive inputs are not part of the active cells of this TM, and shouldn't be added to the activeCells. This is a hack, really it should concatenate both the activeCells and the externalPredictiveCells into a new vector, instead of repurposing an existing vector.

This is the culprit!

The external predictive inputs are not part of the active cells

ok, let's do it properly. and not add them to activeCells_.
Proposal: Should we use a member SDR extraActivations_ ?

  • would allow to (again) unite the activateDendrites API (no version for extra*)
  • users would call setExtra*Cells(SDR) to set them. After compute, the SDR gets cleared again.

are not part of the active cells of this TM

but they do refer to cells in this TM, right?
Sorry, again, what is the meaning of externalActive/WinnerCells? I thought it's a "deus ex" activation of the TM's cells.

So, if I have a TM with only 3 cells {0,1,2}. we now encode extras with offset, so {0,1,2, ex3, ex4, ex5}.
Assume this state (dense repr): {101 |extra: 111}
Does this translate to:

  • {111} active cells?
  • or only +3 number to pass threshold, so 5 active cells (2 from this TM).

Another question, how should we treat the extra activations in context of anomalies?

  • include in the computation,
  • or ignore?

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

but they do refer to cells in this TM, right?
Sorry, again, what is the meaing of externalActive/WinnerCells? I thought it's an "deus ex" activation of the TM's cells.

No, external here means that it comes from outside of the TM.

For example I could have 2 brain areas with 2 TMs which are looking at different sensors, and one TM can help the other make predictions.

Another Example is to model L4 with a TM and to generate a location signal and feed it to the TM. Numenta did this in their "columns" paper.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Another question, how should we treat the extra activations in context of anomalies?

They should be included if they are given.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Another view is that it's valid and working to have activeCells_, winnerCells_ as vectors due to the extra* functionality, and we cannot do that with an SDR that strictly checks its members are within its bounds. Also, having active/winner cells as SDRs does not give some big advantage. So we could very well just close this PR.

thoughts?

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I can work on this a bit.
I will change the name from extra to something more descriptive.

TMReg default output

The TM has multiple outputs, all of which are valid. IMO its better to not have a default at all and to force the user to specify what they want

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

👍 thank you!

IMO it's better to not have a default at all and to force the user to specify what they want

you're talking about TM, I agree. I was talking about TMRegion where NetworkAPI forces us to take a default output. Is it that winner cells would be generally more useful/"the nicest"?

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is it that winner cells would be generally more useful?

It depends on what you're doing.

Winner cells are the cells which the TM has designated to represent the current state of its world, so they should be used for learning.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[winner cells] represent the current state of its world,

Sounds that should be the default output of TMRegion, I'll update that.
Should anomaly also be made from Winners/Predictive?

}
activeCells_.setSparse(activeVec);

//add extra winners
for(const auto &winner : extraWinners) {
NTA_ASSERT( winner < extra_ );
winnerCells_.push_back( static_cast<CellIdx>(winner + numberOfCells()) );
Expand All @@ -589,7 +606,7 @@ void TemporalMemory::activateDendrites(bool learn,
numActivePotentialSynapsesForSegment_.assign(length, 0);
connections.computeActivity(numActiveConnectedSynapsesForSegment_,
numActivePotentialSynapsesForSegment_,
activeCells_);
activeCells_.getSparse());

// Active segments, connected synapses.
activeSegments_.clear();
Expand Down Expand Up @@ -650,7 +667,7 @@ void TemporalMemory::compute(const SDR &activeColumns, bool learn) {
}

void TemporalMemory::reset(void) {
activeCells_.clear();
activeCells_.zero();
winnerCells_.clear();
activeSegments_.clear();
matchingSegments_.clear();
Expand All @@ -674,10 +691,7 @@ UInt TemporalMemory::columnForCell(const CellIdx cell) const {


SDR TemporalMemory::cellsToColumns(const SDR& cells) const {
auto correctDims = getColumnDimensions(); //nD column dimensions (eg 10x100)
correctDims.push_back(getCellsPerColumn()); //add n+1-th dimension for cellsPerColumn (eg. 10x100x8)

NTA_CHECK(cells.dimensions == correctDims)
NTA_CHECK(cells.dimensions == activeCells_.dimensions)
<< "cells.dimensions must match TM's (column dims x cellsPerColumn) ";

SDR cols(getColumnDimensions());
Expand Down Expand Up @@ -706,24 +720,16 @@ vector<CellIdx> TemporalMemory::cellsForColumn(CellIdx column) {
return cellsInColumn;
}

vector<CellIdx> TemporalMemory::getActiveCells() const { return activeCells_; }

void TemporalMemory::getActiveCells(SDR &activeCells) const
{
NTA_CHECK( activeCells.size == numberOfCells() );
activeCells.setSparse( getActiveCells() );
}
SDR TemporalMemory::getActiveCells() const { return activeCells_; }


SDR TemporalMemory::getPredictiveCells() const {

NTA_CHECK( segmentsValid_ )
<< "Call TM.activateDendrites() before TM.getPredictiveCells()!";

auto correctDims = getColumnDimensions();
correctDims.push_back(getCellsPerColumn());
SDR predictive(correctDims);

SDR predictive(activeCells_.dimensions); //match TM's dimensions, same as active cells
auto& predictiveCells = predictive.getSparse();

for (auto segment = activeSegments_.cbegin(); segment != activeSegments_.cend();
Expand Down Expand Up @@ -881,10 +887,7 @@ void TemporalMemory::save(ostream &outStream) const {
}
outStream << endl;

outStream << activeCells_.size() << " ";
for (CellIdx cell : activeCells_) {
outStream << cell << " ";
}
activeCells_.save(outStream);
outStream << endl;

outStream << winnerCells_.size() << " ";
Expand Down Expand Up @@ -962,13 +965,7 @@ void TemporalMemory::load(istream &inStream) {
inStream >> columnDimensions_[i];
}

UInt numActiveCells;
inStream >> numActiveCells;
for (UInt i = 0; i < numActiveCells; i++) {
CellIdx cell;
inStream >> cell;
activeCells_.push_back(cell);
}
activeCells_.load(inStream);

if (version < 2) {
UInt numPredictiveCells;
Expand Down
9 changes: 3 additions & 6 deletions src/nupic/algorithms/TemporalMemory.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -303,12 +303,9 @@ using namespace nupic::algorithms::connections;
size_t numberOfCells(void) const { return connections.numCells(); }

/**
* Returns the indices of the active cells.
*
* @returns (std::vector<CellIdx>) Vector of indices of active cells.
* @return SDR with indices of active cells.
*/
vector<CellIdx> getActiveCells() const; //TODO remove
void getActiveCells(sdr::SDR &activeCells) const;
sdr::SDR getActiveCells() const;

/**
* @return SDR with indices of the predictive cells.
Expand Down Expand Up @@ -613,7 +610,7 @@ using namespace nupic::algorithms::connections;
Permanence predictedSegmentDecrement_;
UInt extra_;

vector<CellIdx> activeCells_;
sdr::SDR activeCells_;
vector<CellIdx> winnerCells_;
bool segmentsValid_;
vector<Segment> activeSegments_;
Expand Down
11 changes: 7 additions & 4 deletions src/nupic/regions/TMRegion.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -249,17 +249,20 @@ void TMRegion::compute() {
if (out && (out->hasOutgoingLinks() || LogItem::isDebug())) {
SDR& sdr = out->getData().getSDR();
if (args_.orColumnOutputs) { //aggregate to columns
tm_->getActiveCells(sdr);
SDR cols = tm_->cellsToColumns(sdr);
SDR cols = tm_->cellsToColumns(tm_->getActiveCells());
sdr.setSparse(cols.getSparse());
} else { //output as cells
tm_->getActiveCells(sdr);
SDR cells = tm_->getActiveCells();
sdr.setSparse(cells.getSparse());
}
NTA_DEBUG << "compute " << *out << std::endl;
}
out = getOutput("activeCells");
if (out && (out->hasOutgoingLinks() || LogItem::isDebug())) {
tm_->getActiveCells(out->getData().getSDR());
SDR& sdr = out->getData().getSDR();
SDR cells = tm_->getActiveCells();
sdr.setSparse(cells.getSparse());

NTA_DEBUG << "compute " << *out << std::endl;
}
out = getOutput("predictedActiveCells");
Expand Down
14 changes: 7 additions & 7 deletions src/test/unit/algorithms/TemporalMemoryTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,7 @@ TEST(TemporalMemoryTest, ActivateCorrectlyPredictiveCells) {
ASSERT_EQ(expectedActiveCells, tm.getPredictiveCells().getSparse());
tm.compute(numActiveColumns, activeColumns, true);

EXPECT_EQ(expectedActiveCells, tm.getActiveCells());
EXPECT_EQ(expectedActiveCells, tm.getActiveCells().getSparse());
}

/**
Expand All @@ -141,7 +141,7 @@ TEST(TemporalMemoryTest, BurstUnpredictedColumns) {

tm.compute(1, activeColumns, true);

EXPECT_EQ(burstingCells, tm.getActiveCells());
EXPECT_EQ(burstingCells, tm.getActiveCells().getSparse());
}

/**
Expand Down Expand Up @@ -175,13 +175,13 @@ TEST(TemporalMemoryTest, ZeroActiveColumns) {
tm.connections.createSynapse(segment, previousActiveCells[2], 0.5f);
tm.connections.createSynapse(segment, previousActiveCells[3], 0.5f);
tm.compute(1, previousActiveColumns, true);
ASSERT_FALSE(tm.getActiveCells().empty());
ASSERT_FALSE(tm.getActiveCells().getSum() == 0);
ASSERT_FALSE(tm.getWinnerCells().empty());
tm.activateDendrites();
ASSERT_FALSE(tm.getPredictiveCells().getSum() == 0);

EXPECT_NO_THROW(tm.compute(0, nullptr, true)) << "failed with empty compute";
EXPECT_TRUE(tm.getActiveCells().empty());
EXPECT_TRUE(tm.getActiveCells().getSum() == 0);
EXPECT_TRUE(tm.getWinnerCells().empty());
tm.activateDendrites();
EXPECT_TRUE(tm.getPredictiveCells().getSum() == 0);
Expand Down Expand Up @@ -1053,7 +1053,7 @@ TEST(TemporalMemoryTest, AddSegmentToCellWithFewestSegments) {
tm.compute(4, previousActiveColumns, true);
tm.compute(1, activeColumns, true);

ASSERT_EQ(activeCells, tm.getActiveCells());
ASSERT_EQ(activeCells, tm.getActiveCells().getSparse());

EXPECT_EQ(3ul, tm.connections.numSegments());
EXPECT_EQ(1ul, tm.connections.segmentsForCell(0).size());
Expand Down Expand Up @@ -1443,7 +1443,7 @@ void serializationTestVerify(TemporalMemory &tm) {

// Verify the correct cells were activated.
EXPECT_EQ((vector<UInt>{4, 8, 9, 10, 11, 12, 13, 14, 15}),
tm.getActiveCells());
tm.getActiveCells().getSparse());
const vector<UInt> winnerCells = tm.getWinnerCells();
ASSERT_EQ(3ul, winnerCells.size());
EXPECT_EQ(4ul, winnerCells[0]);
Expand Down Expand Up @@ -1615,7 +1615,7 @@ TEST(TemporalMemoryTest, testExtraActive) {
SDR predictedColumns = tm.cellsToColumns(tm.getPredictiveCells());
// Calculate TM output
tm.compute(x, true);
extraActive = tm.getActiveCells();
extraActive = tm.getActiveCells().getSparse();
extraWinners = tm.getWinnerCells();

// Calculate Anomaly of current input based on prior predictions.
Expand Down