Do Something With ElasticSearch

elasticsearch.yml (node01, node02)

network.host: $xx
http.port: 9200
transport.tcp.port: 9300
index.number_of_replicas: 1
discovery:
  zen:
    ping:
      multicast.enabled: false
      unicast.hosts: ["$yy"]
      timeout: "3s"
cluster:
  name: es-cluster
node:
  name: $zz
path.repo: ["/mount/backups"]

index.search.slowlog.threshold.query.warn: 10s
index.search.slowlog.threshold.query.info: 5s
index.search.slowlog.threshold.query.debug: 2s
index.search.slowlog.threshold.query.trace: 500ms

index.search.slowlog.threshold.fetch.warn: 1s
index.search.slowlog.threshold.fetch.info: 800ms
index.search.slowlog.threshold.fetch.debug: 500ms
index.search.slowlog.threshold.fetch.trace: 200ms

index.indexing.slowlog.threshold.index.warn: 10s
index.indexing.slowlog.threshold.index.info: 5s
index.indexing.slowlog.threshold.index.debug: 2s
index.indexing.slowlog.threshold.index.trace: 500ms

Replace the variables in node01 with ($xx=node01, $yy=node02, $zz=es-node01).
Replace the variables in node02 with ($xx=node02, $yy=node01, $zz=es-node02).
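
For example, node01's elasticsearch.yml after substitution would contain the following concrete values (a sketch showing only the substituted lines):

network.host: node01
discovery:
  zen:
    ping:
      unicast.hosts: ["node02"]
node:
  name: es-node01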

docker-compose.yml

elasticsearch:
  build: elasticsearch/
  net: "host"
  volumes:
    - ./elasticsearch/config:/opt/elasticsearch/config:ro
    - /var/data:/opt/elasticsearch/data
    - /opt/backups/es_backup:/mount/backups
  expose:
    - "9200"
    - "9300"

Install/Configure NFS server and client

On node02, set up the NFS server:

sudo apt-get install nfs-kernel-server

Edit /etc/exports to add the setting below:
/opt/backups/es_backup node01(rw,sync,no_subtree_check)

Then run exportfs -ra to apply the export.
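
To confirm the export is active, showmount (shipped with the NFS packages) can list it; a quick sanity check:

showmount -e localhost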

On node01, set up the NFS client:

sudo apt-get install nfs-common
sudo mkdir -p /opt/backups/es_backup
sudo mount node02:/opt/backups/es_backup /opt/backups/es_backup

Edit /etc/fstab to add the setting below, so the mount persists across reboots:
10.13.135.38:/opt/backups/es_backup /opt/backups/es_backup nfs rw,hard,intr 0 0
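
To verify the fstab entry without rebooting, remount everything and check the mount point:

sudo mount -a
df -h /opt/backups/es_backup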

Restart Docker

Run docker-compose up -d to build and run ES.
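
Once the containers are up, a quick health check against either node confirms the two-node cluster formed (assuming node01 resolves from your shell):

curl http://node01:9200/_cluster/health?pretty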

Create a snapshot repository in ES with Sense

PUT _snapshot/logstash_repo
{
  "type": "fs",
  "settings": {
    "compress": "true",
    "location": "/mount/backups/logstash_repo",
    "max_snapshot_bytes_per_sec": "50mb",
    "max_restore_bytes_per_sec": "50mb"
  }
}
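
Before taking snapshots, it is worth confirming both nodes can see the repository and write to the shared NFS location; ES has a built-in verify API for this:

GET _snapshot/logstash_repo
POST _snapshot/logstash_repo/_verify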

Backup/Restore indices

PUT _snapshot/logstash_repo/snapshot_20151211
{
  "indices": "logstash-2015.12.10,logstash-2015.12.11",
  "ignore_unavailable": "true",
  "include_global_state": "false"
}

GET /_snapshot/logstash_repo/_all or GET /_snapshot/logstash_repo/snapshot_20151211/_status (Check)

DELETE logstash-2015.12.10,logstash-2015.12.11 (Delete indices)

POST /_snapshot/logstash_repo/snapshot_20151211/_restore?wait_for_completion=true (Restore)

DELETE /_snapshot/logstash_repo (Delete the repository; a single snapshot is deleted with DELETE /_snapshot/logstash_repo/snapshot_20151211)
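
A restore fails if the target indices still exist and are open; either close/delete them first, or restore under new names with the standard rename options (a sketch, using a hypothetical restored- prefix):

POST /_snapshot/logstash_repo/snapshot_20151211/_restore
{
  "indices": "logstash-2015.12.10",
  "rename_pattern": "logstash-(.+)",
  "rename_replacement": "restored-logstash-$1"
}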

The following commands are worth remembering

GET _cluster/health
or GET _cluster/health?level=indices
or GET _cluster/health?level=shards
or GET _cluster/health?wait_for_status=green

GET _cat/indices

GET logstash-2015.12.09/_settings

GET logstash-2015.12.09/_count

GET logstash-2015.12.09/_mapping

GET logstash-2015.12.16/server-SystemOut/_search?q=message:Exception

GET logstash-2015.12.08/server-SystemOut/_search
{
  "query": {
    "match": {
      "message": "Exception"
    }
  }
}

GET logstash-2015.12.15/_search?fields=timestamp,host
{
  "query": {
    "filtered": {
      "query": {
        "match": {
          "message": "Exception"
        }
      },
      "filter": {
        "range": {
          "@timestamp": {
            "gte": 1450212195957,
            "lte": 1450265395957,
            "format": "epoch_millis"
          }
        }
      }
    }
  },
  "sort": [
    {
      "@timestamp": {
        "order": "asc",
        "unmapped_type": "boolean"
      }
    }
  ],
  "size": 10,
  "from": 0,
  "fields": ["host", "message"]
}

GET logstash-2015.12.15/_search?size=20&fields=message
{
  "query": {
    "query_string": {
      "query": "message : Exception AND type : moon01"
    }
  }
}
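
Outside Sense, the same query can be issued with curl (assuming node01:9200 is reachable):

curl -XGET 'http://node01:9200/logstash-2015.12.15/_search?size=20&fields=message' -d '
{
  "query": {
    "query_string": {
      "query": "message : Exception AND type : moon01"
    }
  }
}'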

Client API to search the ES cluster

package dh;

import io.searchbox.client.JestClient;
import io.searchbox.client.JestClientFactory;
import io.searchbox.client.config.HttpClientConfig;
import io.searchbox.core.Search;
import io.searchbox.core.SearchResult;

import java.io.IOException;
import java.util.Calendar;
import java.util.List;

import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;

public class Main {
    public static void main(String[] args) throws IOException {
        // Jest talks to ES over HTTP; point it at any node in the cluster.
        JestClientFactory factory = new JestClientFactory();
        factory.setHttpClientConfig(new HttpClientConfig.Builder("http://10.13.135.37:9200").build());
        JestClient client = factory.getObject();

        SearchSourceBuilder builder = new SearchSourceBuilder();

        // Single and multiple criteria; q1 is shown for reference, q2 is used below.
        QueryBuilder q1 = QueryBuilders.queryStringQuery("message : Exception AND type : moon01");
        QueryBuilder q2 = QueryBuilders.matchQuery("message", "Exception");

        // Time window: from three days ago until now, in epoch milliseconds.
        long now = System.currentTimeMillis();
        Calendar c = Calendar.getInstance();
        c.set(Calendar.DAY_OF_MONTH, c.get(Calendar.DAY_OF_MONTH) - 3);
        long thatDay = c.getTimeInMillis();

        // Build the same filtered query as the Sense example above, then render it to JSON.
        String filtered = builder.query(
                QueryBuilders.filteredQuery(
                        QueryBuilders.queryFilter(q2),
                        QueryBuilders.rangeQuery("@timestamp")
                                .from(Long.toString(thatDay))
                                .to(Long.toString(now))
                                .format("epoch_millis")))
                .sort("@timestamp").size(200).from(0).field("_source").toString();

        System.out.println("DSL Query " + filtered);

        // Search across two daily indices in one request.
        Search search = ((Search.Builder) new Search.Builder(filtered)
                .addIndex("logstash-2015.12.15")
                .addIndex("logstash-2015.12.16")).build();
        SearchResult result = (SearchResult) client.execute(search);

        // Jest maps each hit's _source onto the Log POJO below.
        List<SearchResult.Hit<Log, Void>> hits = result.getHits(Log.class);

        int i = 0;
        for (SearchResult.Hit<Log, Void> hit : hits) {
            i++;
            // Truncate long messages to keep the console output readable.
            if (hit.source.getWas_message() != null && hit.source.getWas_message().length() > 200) {
                System.out.println(i + " " + hit.index + ", " + hit.source.getType() + ", "
                        + hit.source.getWas_message().substring(0, 200));
            } else {
                System.out.println(i + " " + hit.index + ", " + hit.source.getType() + ", "
                        + hit.source.getWas_message());
            }
        }
        System.out.println("Total: " + result.getTotal() + ", Hits size: " + hits.size());
    }
}

class Log {
    private String type;
    private String was_message;

    public String getType() {
        return type;
    }

    public void setType(String type) {
        this.type = type;
    }

    public String getWas_message() {
        return was_message;
    }

    public void setWas_message(String was_message) {
        this.was_message = was_message;
    }
}
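
The example needs the Jest client and the Elasticsearch core jar (for QueryBuilders/SearchSourceBuilder) on the classpath; with Maven, roughly the following (the coordinates are the real ones, but the versions here are assumptions and should be matched to the cluster):

<dependency>
  <groupId>io.searchbox</groupId>
  <artifactId>jest</artifactId>
  <version>2.0.0</version>
</dependency>
<dependency>
  <groupId>org.elasticsearch</groupId>
  <artifactId>elasticsearch</artifactId>
  <version>2.1.0</version>
</dependency>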

Software installed on the server (install commands follow the list)

fun:
cowsay
fortune
sl
jq
banner
toilet
figlet
libaa-bin

utility:
htop
python-pip
atop
sysstat

pip:
paramiko
fabric
elasticsearch

nfs:
nfs-common
nfs-kernel-server
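
Everything above can be installed in two commands (assuming Ubuntu, as in the NFS section; a few package names differ from the command names, e.g. fortune is packaged as fortune-mod and banner as sysvbanner):

sudo apt-get install cowsay fortune-mod sl jq sysvbanner toilet figlet libaa-bin htop python-pip atop sysstat nfs-common nfs-kernel-server
sudo pip install paramiko fabric elasticsearch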

Monitor Docker with the following technologies

Rsyslog
ElasticSearch + Logstash + Kibana
Graphite
Collectd
grafana/grafana
tutum/influxdb
google/cadvisor (see the sketch after this list)
Heka
Fluentd
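
As a lightweight starting point, cAdvisor alone exposes per-container metrics on port 8080 with a single command (a sketch based on its README; flags may vary by version):

docker run -d --name=cadvisor -p 8080:8080 \
  --volume=/:/rootfs:ro \
  --volume=/var/run:/var/run:rw \
  --volume=/sys:/sys:ro \
  --volume=/var/lib/docker/:/var/lib/docker:ro \
  google/cadvisor:latest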

Reference

https://help.ubuntu.com/community/SettingUpNFSHowTo