Syntax for creating a collection:

db.createCollection(name, options)

name is the name of the collection; options is optional and is used to configure the collection's parameters.

For example, to create a collection named mycol, the command is as follows:

> db.createCollection("mycol", { capped : true, size : 6142800, max : 10000 } )
{ "ok" : 1 }
>

The command above creates a collection named mycol. The options enable a capped collection, set its maximum size to 6142800 bytes, and limit the number of documents it can hold to 10000.

The configurable collection options are as follows (a quick way to verify them is shown right after this list):

  • capped true/false (optional): if true, creates a capped collection. A capped collection is a fixed-size collection that automatically overwrites its oldest entries once it reaches its maximum size. If you specify true, you must also specify the size option.
  • autoIndexId true/false (optional): if true, automatically creates an index on the _id field. (This option is deprecated in newer MongoDB versions.)
  • size (optional): the maximum size of the capped collection, in bytes. Required when capped is true.
  • max (optional): the maximum number of documents allowed in the capped collection.
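
To verify these options on the mycol collection created above, you can use the isCapped() and stats() helpers. The values below match the stats output shown later in this section (the storage engine rounds the requested size up slightly, which is why maxSize differs from 6142800):

> db.mycol.isCapped()
true
> db.mycol.stats().maxSize
6142976
> db.mycol.stats().max
10000
>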

Some other commonly used MongoDB commands:

The show collections command lists the collections in the current database; show tables does the same thing:

> show tables
mycol
> show collections
mycol
>

The insert command. A collection's document structure is defined when data is inserted:

// If the collection does not exist, inserting data will cause MongoDB to create it automatically
> db.Account.insert({AccountID:1,UserName:"test",password:"123456"})
WriteResult({ "nInserted" : 1 })
> show tables
Account
mycol
> db.mycol.insert({AccountID:1,UserName:"test",password:"123456"})
WriteResult({ "nInserted" : 1 })
>
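
As a side note, insert() also accepts an array of documents, so several documents can be written in a single call. A minimal sketch (not part of the session above; the field values are made up for illustration):

> db.Account.insert([{AccountID:3,UserName:"test3",password:"123456"},{AccountID:4,UserName:"test4",password:"123456"}])
// for an array argument the shell reports a BulkWriteResult instead of a WriteResult, with "nInserted" : 2
>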

The update command:

// $set is an update operator; the statement below adds a key named Age with the value 20 to the matched document
> db.Account.update({AccountID:1},{"$set":{"Age":20}})
WriteResult({ "nMatched" : 1, "nUpserted" : 0, "nModified" : 1 })
>
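
By default, update() modifies only the first document that matches the condition. To change every matching document, pass the multi option; a sketch (not executed in the session above):

> db.Account.update({password:"123456"},{"$set":{"Age":20}},{multi:true})
// without {multi:true}, only the first matching document would receive the Age field
>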

View all documents:

> db.Account.insert({AccountID:2,UserName:"test2",password:"123456"})
WriteResult({ "nInserted" : 1 })
> db.Account.find()  // view all documents in the specified collection
{ "_id" : ObjectId("5a5377cb503451a127782146"), "AccountID" : 1, "UserName" : "test", "password" : "123456", "Age" : 20 }
{ "_id" : ObjectId("5a537949503451a127782149"), "AccountID" : 2, "UserName" : "test2", "password" : "123456" }
>

You can also query by condition, for example, looking up documents by AccountID:

> db.Account.find({AccountID:1})
{ "_id" : ObjectId("5a5377cb503451a127782146"), "AccountID" : 1, "UserName" : "test", "password" : "123456", "Age" : 20 }
> db.Account.find({AccountID:2})
{ "_id" : ObjectId("5a537949503451a127782149"), "AccountID" : 2, "UserName" : "test2", "password" : "123456" }
>
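
find() also accepts comparison operators and an optional projection document that limits which fields are returned. For example (illustrative, based on the two documents inserted above):

> db.Account.find({AccountID:{"$gte":1}},{UserName:1,_id:0})
{ "UserName" : "test" }
{ "UserName" : "test2" }
>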

Delete data by condition:

> db.Account.remove({AccountID:1})
WriteResult({ "nRemoved" : 1 })
> db.Account.find()
{ "_id" : ObjectId("5a537949503451a127782149"), "AccountID" : 2, "UserName" : "test2", "password" : "123456" }
>
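
Passing an empty condition to remove() deletes every document but keeps the collection itself, and a second argument of 1 (justOne) limits the removal to a single matching document. A sketch (not executed here):

> db.Account.remove({})                      // remove all documents; the collection itself remains
> db.Account.remove({password:"123456"}, 1)  // remove at most one matching document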

Drop a collection:

> db.Account.drop()
true
> show tables
mycol
>
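
drop() returns true when the collection existed and was removed, and false otherwise. Collections can also be listed programmatically with db.getCollectionNames():

> db.getCollectionNames()
[ "mycol" ]
>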

View collection statistics:

> db.printCollectionStats()
mycol
{
    "ns" : "db1.mycol",
    "size" : 162,
    "count" : 2,
    "avgObjSize" : 81,
    "storageSize" : 32768,
    "capped" : true,
    "max" : 10000,
    "maxSize" : 6142976,
    "sleepCount" : 0,
    "sleepMS" : 0,
    "wiredTiger" : {
        "metadata" : {
            "formatVersion" : 1
        },
        "creationString" : "access_pattern_hint=none,allocation_size=4KB,app_metadata=(formatVersion=1),assert=(commit_timestamp=none,read_timestamp=none),block_allocation=best,block_compressor=snappy,cache_resident=false,checksum=on,colgroups=,collator=,columns=,dictionary=0,encryption=(keyid=,name=),exclusive=false,extractor=,format=btree,huffman_key=,huffman_value=,ignore_in_memory_cache_size=false,immutable=false,internal_item_max=0,internal_key_max=0,internal_key_truncate=true,internal_page_max=4KB,key_format=q,key_gap=10,leaf_item_max=0,leaf_key_max=0,leaf_page_max=32KB,leaf_value_max=64MB,log=(enabled=true),lsm=(auto_throttle=true,bloom=true,bloom_bit_count=16,bloom_config=,bloom_hash_count=8,bloom_oldest=false,chunk_count_limit=0,chunk_max=5GB,chunk_size=10MB,merge_max=15,merge_min=0),memory_page_max=10m,os_cache_dirty_max=0,os_cache_max=0,prefix_compression=false,prefix_compression_min=4,source=,split_deepen_min_child=0,split_deepen_per_child=0,split_pct=90,type=file,value_format=u",
        "type" : "file",
        "uri" : "statistics:table:collection-0-4593892186656792650",
        "LSM" : {
            "bloom filter false positives" : 0,
            "bloom filter hits" : 0,
            "bloom filter misses" : 0,
            "bloom filter pages evicted from cache" : 0,
            "bloom filter pages read into cache" : 0,
            "bloom filters in the LSM tree" : 0,
            "chunks in the LSM tree" : 0,
            "highest merge generation in the LSM tree" : 0,
            "queries that could have benefited from a Bloom filter that did not exist" : 0,
            "sleep for LSM checkpoint throttle" : 0,
            "sleep for LSM merge throttle" : 0,
            "total size of bloom filters" : 0
        },
        "block-manager" : {
            "allocations requiring file extension" : 7,
            "blocks allocated" : 7,
            "blocks freed" : 1,
            "checkpoint size" : 4096,
            "file allocation unit size" : 4096,
            "file bytes available for reuse" : 12288,
            "file magic number" : 120897,
            "file major version number" : 1,
            "file size in bytes" : 32768,
            "minor version number" : 0
        },
        "btree" : {
            "btree checkpoint generation" : 261,
            "column-store fixed-size leaf pages" : 0,
            "column-store internal pages" : 0,
            "column-store variable-size RLE encoded values" : 0,
            "column-store variable-size deleted values" : 0,
            "column-store variable-size leaf pages" : 0,
            "fixed-record size" : 0,
            "maximum internal page key size" : 368,
            "maximum internal page size" : 4096,
            "maximum leaf page key size" : 2867,
            "maximum leaf page size" : 32768,
            "maximum leaf page value size" : 67108864,
            "maximum tree depth" : 3,
            "number of key/value pairs" : 0,
            "overflow pages" : 0,
            "pages rewritten by compaction" : 0,
            "row-store internal pages" : 0,
            "row-store leaf pages" : 0
        },
        "cache" : {
            "bytes currently in the cache" : 1290,
            "bytes read into cache" : 0,
            "bytes written from cache" : 437,
            "checkpoint blocked page eviction" : 0,
            "data source pages selected for eviction unable to be evicted" : 0,
            "eviction walk passes of a file" : 0,
            "eviction walk target pages histogram - 0-9" : 0,
            "eviction walk target pages histogram - 10-31" : 0,
            "eviction walk target pages histogram - 128 and higher" : 0,
            "eviction walk target pages histogram - 32-63" : 0,
            "eviction walk target pages histogram - 64-128" : 0,
            "eviction walks abandoned" : 0,
            "eviction walks gave up because they restarted their walk twice" : 0,
            "eviction walks gave up because they saw too many pages and found no candidates" : 0,
            "eviction walks gave up because they saw too many pages and found too few candidates" : 0,
            "eviction walks reached end of tree" : 0,
            "eviction walks started from root of tree" : 0,
            "eviction walks started from saved location in tree" : 0,
            "hazard pointer blocked page eviction" : 0,
            "in-memory page passed criteria to be split" : 0,
            "in-memory page splits" : 0,
            "internal pages evicted" : 0,
            "internal pages split during eviction" : 0,
            "leaf pages split during eviction" : 0,
            "modified pages evicted" : 0,
            "overflow pages read into cache" : 0,
            "page split during eviction deepened the tree" : 0,
            "page written requiring lookaside records" : 0,
            "pages read into cache" : 0,
            "pages read into cache requiring lookaside entries" : 0,
            "pages requested from the cache" : 2,
            "pages seen by eviction walk" : 0,
            "pages written from cache" : 4,
            "pages written requiring in-memory restoration" : 0,
            "tracked dirty bytes in the cache" : 0,
            "unmodified pages evicted" : 0
        },
        "cache_walk" : {
            "Average difference between current eviction generation when the page was last considered" : 0,
            "Average on-disk page image size seen" : 0,
            "Average time in cache for pages that have been visited by the eviction server" : 0,
            "Average time in cache for pages that have not been visited by the eviction server" : 0,
            "Clean pages currently in cache" : 0,
            "Current eviction generation" : 0,
            "Dirty pages currently in cache" : 0,
            "Entries in the root page" : 0,
            "Internal pages currently in cache" : 0,
            "Leaf pages currently in cache" : 0,
            "Maximum difference between current eviction generation when the page was last considered" : 0,
            "Maximum page size seen" : 0,
            "Minimum on-disk page image size seen" : 0,
            "Number of pages never visited by eviction server" : 0,
            "On-disk page image sizes smaller than a single allocation unit" : 0,
            "Pages created in memory and never written" : 0,
            "Pages currently queued for eviction" : 0,
            "Pages that could not be queued for eviction" : 0,
            "Refs skipped during cache traversal" : 0,
            "Size of the root page" : 0,
            "Total number of pages currently in cache" : 0
        },
        "compression" : {
            "compressed pages read" : 0,
            "compressed pages written" : 0,
            "page written failed to compress" : 0,
            "page written was too small to compress" : 4,
            "raw compression call failed, additional data available" : 0,
            "raw compression call failed, no additional data available" : 0,
            "raw compression call succeeded" : 0
        },
        "cursor" : {
            "bulk-loaded cursor-insert calls" : 0,
            "create calls" : 1,
            "cursor-insert key and value bytes inserted" : 164,
            "cursor-remove key bytes removed" : 0,
            "cursor-update value bytes updated" : 0,
            "insert calls" : 2,
            "modify calls" : 0,
            "next calls" : 0,
            "prev calls" : 1,
            "remove calls" : 0,
            "reserve calls" : 0,
            "reset calls" : 3,
            "restarted searches" : 0,
            "search calls" : 0,
            "search near calls" : 0,
            "truncate calls" : 0,
            "update calls" : 0
        },
        "reconciliation" : {
            "dictionary matches" : 0,
            "fast-path pages deleted" : 0,
            "internal page key bytes discarded using suffix compression" : 0,
            "internal page multi-block writes" : 0,
            "internal-page overflow keys" : 0,
            "leaf page key bytes discarded using prefix compression" : 0,
            "leaf page multi-block writes" : 0,
            "leaf-page overflow keys" : 0,
            "maximum blocks required for a page" : 1,
            "overflow values written" : 0,
            "page checksum matches" : 0,
            "page reconciliation calls" : 4,
            "page reconciliation calls for eviction" : 0,
            "pages deleted" : 0
        },
        "session" : {
            "object compaction" : 0,
            "open cursor count" : 1
        },
        "transaction" : {
            "update conflicts" : 0
        }
    },
    "nindexes" : 1,
    "totalIndexSize" : 32768,
    "indexSizes" : {
        "_id_" : 32768
    },
    "ok" : 1
}
---
>
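
db.printCollectionStats() walks every collection in the current database. To inspect a single collection, call stats() on it directly; it also accepts an optional scale factor (a sketch, here 1024 so that sizes are reported in KB):

> db.mycol.stats(1024)   // same information as above, with sizes reported in KB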

21.31 PHP's mongodb extension

PHP officially provides two MongoDB extensions: mongodb.so and mongo.so. mongodb.so is the driver for newer PHP versions, while mongo.so is the legacy driver for older PHP versions.

The official reference documentation for both extensions can be found here:

https://docs.mongodb.com/ecosystem/drivers/php/

Since both old and new PHP versions are still in use, we need to know how to install both extensions. Let's look at mongodb.so first.

There are two ways to install mongodb.so. The first is to install it from Git:

[root@localhost ~]# cd /usr/local/src/
[root@localhost /usr/local/src]# git clone https://github.com/mongodb/mongo-php-driver
[root@localhost /usr/local/src/mongo-php-driver]# git submodule update --init
[root@localhost /usr/local/src/mongo-php-driver]# /usr/local/php/bin/phpize
[root@localhost /usr/local/src/mongo-php-driver]# ./configure --with-php-config=/usr/local/php/bin/php-config
[root@localhost /usr/local/src/mongo-php-driver]# make && make install
[root@localhost /usr/local/src/mongo-php-driver]# vim /usr/local/php/etc/php.ini
extension = mongodb.so   // add this line
[root@localhost /usr/local/src/mongo-php-driver]# /usr/local/php/bin/php -m |grep mongodb
mongodb
[root@localhost /usr/local/src/mongo-php-driver]#

Since connections to GitHub can be slow in mainland China, this installation method may take a while.

The second way is to install it from the source tarball:

[root@localhost ~]# cd /usr/local/src/
[root@localhost /usr/local/src]# wget https://pecl.php.net/get/mongodb-1.3.0.tgz
[root@localhost /usr/local/src]# tar zxvf mongodb-1.3.0.tgz
[root@localhost /usr/local/src]# cd mongodb-1.3.0
[root@localhost /usr/local/src/mongodb-1.3.0]# /usr/local/php/bin/phpize
[root@localhost /usr/local/src/mongodb-1.3.0]# ./configure --with-php-config=/usr/local/php/bin/php-config
[root@localhost /usr/local/src/mongodb-1.3.0]# make && make install
[root@localhost /usr/local/src/mongodb-1.3.0]# vim /usr/local/php/etc/php.ini
extension = mongodb.so  // add this line
[root@localhost /usr/local/src/mongodb-1.3.0]# /usr/local/php/bin/php -m |grep mongodb
mongodb
[root@localhost /usr/local/src/mongodb-1.3.0]#

21.32 PHP's mongo extension

The installation process is as follows:

[root@localhost ~]# cd /usr/local/src/
[root@localhost /usr/local/src]# wget https://pecl.php.net/get/mongo-1.6.16.tgz
[root@localhost /usr/local/src]# tar -zxvf mongo-1.6.16.tgz
[root@localhost /usr/local/src]# cd mongo-1.6.16/
[root@localhost /usr/local/src/mongo-1.6.16]# /usr/local/php/bin/phpize
[root@localhost /usr/local/src/mongo-1.6.16]# ./configure --with-php-config=/usr/local/php/bin/php-config
[root@localhost /usr/local/src/mongo-1.6.16]# make && make install
[root@localhost /usr/local/src/mongo-1.6.16]# vim /usr/local/php/etc/php.ini
extension = mongo.so  // add this line
[root@localhost /usr/local/src/mongo-1.6.16]# /usr/local/php/bin/php -m |grep mongo
mongo
mongodb
[root@localhost /usr/local/src/mongo-1.6.16]#

Testing the mongo extension:

1. First disable MongoDB user authentication, then create a test page:

[root@localhost ~]# vim /usr/lib/systemd/system/mongod.service  # remove the --auth option
[root@localhost ~]# systemctl daemon-reload
[root@localhost ~]# systemctl restart mongod.service
[root@localhost ~]# vim /data/wwwroot/abc.com/index.php  # create the test page
<?php
$m = new MongoClient(); # connect to MongoDB
$db = $m->test; # select the database named "test"
$collection = $db->createCollection("runoob");
echo "Collection created successfully";
?>

2. Access the test page:

[root@localhost ~]# curl localhost/index.php
Collection created successfully
[root@localhost ~]#

3. Check in MongoDB whether the collection exists:

[root@localhost ~]# mongo --host 192.168.77.130 --port 27017
> use test
switched to db test
> show tables
runoob  # the collection was created successfully, so the extension works
>

http://blog.51cto.com/zero01/2058637