I am using a MongoDB aggregate query. My documents look like this:
{
    "_id": ObjectId("5a81636f017e441d609283cc"),
    "userid": "123",
    "page": "A",
    "newpage": ...
}
I want to count how often each value occurs across the "page" and "newpage" fields combined, sorted by count descending.
There you go:
db.Collection.aggregate([
    { $match: { "userid": "123" } }, // filter out what's not of interest
    { $facet: { // run two pipelines in parallel --> this gives us a single result document with the following two fields
        "newpage": [ // "newpage" holding the ids and counts per "newpage" value
            { $group: { "_id": "$newpage", "count": { $sum: 1 } } }
        ],
        "page": [ // and "page" holding the ids and counts per "page" value
            { $group: { "_id": "$page", "count": { $sum: 1 } } }
        ]
    }},
    { $project: { x: { $concatArrays: [ "$newpage", "$page" ] } } }, // merge the two arrays into one
    { $unwind: "$x" }, // flatten the single result document into multiple ones so we do not need $reduce but can simply $group
    { $group: { _id: "$x._id", "count": { $sum: "$x.count" } } }, // perform the final grouping/counting
    { $sort: { count: -1 } } // and, finally, the sort your question asks for
]);
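For illustration (sample data, not from the question), suppose the collection holds these three documents:

{ "userid": "123", "page": "A", "newpage": "B" }
{ "userid": "123", "page": "A", "newpage": "C" }
{ "userid": "123", "page": "B", "newpage": "B" }

The $facet stage then counts A twice and B once under "page", and B twice and C once under "newpage"; after merging, unwinding, and re-grouping, the sorted result would be:

{ "_id" : "B", "count" : 3 }
{ "_id" : "A", "count" : 2 }
{ "_id" : "C", "count" : 1 }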
Use the $facet pipeline stage to process multiple aggregation pipelines within a single stage on the same set of input documents. In your case you need to aggregate the counts separately, then join the two results and calculate the final aggregates.
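To make the join step concrete: assuming, for illustration, documents whose "page" values are A, A, B and whose "newpage" values are B, C, B, the $facet stage alone emits a single document whose two fields each hold one complete sub-pipeline result (field names as used in the pipeline below):

{
    "groupByPage": [
        { "_id": "A", "count": 2 },
        { "_id": "B", "count": 1 }
    ],
    "groupByNewPage": [
        { "_id": "B", "count": 2 },
        { "_id": "C", "count": 1 }
    ]
}

$concatArrays then merges the two arrays so that $unwind and $group can total the counts per _id.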
This can be demonstrated by running the following pipeline:
db.collection.aggregate([
    { "$match": { "userid": "123" } },
    {
        "$facet": {
            "groupByPage": [
                { "$unwind": "$page" },
                {
                    "$group": {
                        "_id": "$page",
                        "count": { "$sum": 1 }
                    }
                }
            ],
            "groupByNewPage": [
                { "$unwind": "$newpage" },
                {
                    "$group": {
                        "_id": "$newpage",
                        "count": { "$sum": 1 }
                    }
                }
            ]
        }
    },
    {
        "$project": {
            "pages": {
                "$concatArrays": ["$groupByPage", "$groupByNewPage"]
            }
        }
    },
    { "$unwind": "$pages" },
    {
        "$group": {
            "_id": "$pages._id",
            "count": { "$sum": "$pages.count" }
        }
    },
    { "$sort": { "count": -1 } }
], function(error, data) {
    if (error) {
        console.log(error);
    } else {
        console.log(data);
    }
});
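A note on the callback form: it targets the legacy Node.js driver API, and recent versions of the MongoDB Node.js driver have removed callback support in favor of promises. A minimal promise-based sketch under that assumption, with an existing db handle and the collection name taken from the question:

const docs = await db.collection('collection')
    .aggregate(pipeline) // pipeline = the array of stages shown above
    .toArray();          // drain the aggregation cursor into an array of result documents
console.log(docs);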