git
git config --global credential.helper store
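credential.helper store saves credentials in plain text in ~/.git-credentials, so later fetch/push operations reuse them without prompting. A sketch of the cache-based alternative, which holds credentials in memory for a limited time instead of writing them to disk:

# keep credentials in memory for one hour instead of storing them on disk
git config --global credential.helper 'cache --timeout=3600'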
kafkaOffsetMonitor
java -cp kafkaOffsetMonitor-assembly-1.0.jar com.quantifind.kafka.offsetapp.OffsetGetterWeb --offsetStorage trident --stormZKOffsetBase /transactional --zk hyj1:2181 --trident_zk hyj1:2181 --port 7000 --refresh 10.seconds --retain 1.days
shell
Get the IP address
netip=`ifconfig eth0 | grep "inet addr" | cut -d ":" -f 2 | cut -d " " -f 1 `
echo $netip
netip=`ifconfig | awk -F'addr:|Bcast' '/Bcast/{print $2}' `
netip=`hostname -i`
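
The ifconfig snippets above assume the older net-tools output format, and hostname -i can return 127.0.0.1 when the hostname resolves to the loopback entry in /etc/hosts. A minimal sketch using iproute2 instead, assuming the interface is eth0:

# extract the IPv4 address of eth0 from iproute2 output
netip=$(ip -4 addr show eth0 | awk '/inet /{sub(/\/.*/, "", $2); print $2}')
echo $netip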

#!/bin/bash
# Start one LogServer JVM per port; skip ports that already have a running instance.
BASEDIR=/export/App/bmncollector.m.jd.local
FEATURE=com.jd.app.pat.log.server.LogServer
# port1/port2/port3 are expected to be set in the caller's environment
ports=(${port1} ${port2} ${port3})
for port in ${ports[@]}
do
	# look up a PID whose command line contains both the main class and the port
	exist=$(ps -ef | grep $FEATURE | grep $port | grep -v 'grep' | awk '{print $2}')
	if [ "$exist" != "" ];then
		echo "port: $port is already running on pid: $exist; please stop it first!"
		continue
	fi
	# setsid detaches the JVM from this shell's session so it survives logout
	setsid java -cp $CLASSPATH:/export/App/bmncollector.m.jd.local/config -server -Xms4096m -Xmx4096m -XX:MaxPermSize=1024m -XX:+UseParallelGC -XX:+UseParallelOldGC -DinstanceId=$port -Djava.ext.dirs=$BASEDIR/lib $FEATURE $port &
	usleep 100000   # wait 100 ms before checking whether the JVM came up
	exist=$(ps -ef | grep $FEATURE | grep $port | grep -v 'grep' | awk '{print $2}')
	if [ "$exist" != "" ];then
		echo "port: $port startup success on pid: $exist"
	else
		echo "port: $port startup failed."
	fi
done
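port1/port2/port3 are not defined inside the script, so they are assumed to come from the caller's environment. A usage sketch, assuming the script is saved as start_logserver.sh (hypothetical name):

port1=8081 port2=8082 port3=8083 ./start_logserver.sh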
nohup
 nohup $JSTORM_HOME/bin/jstorm $PROCESS >/dev/null 2>&1 &

> is output redirection.
/dev/null is the system's black hole: anything written to it is discarded, and it never fills up.
2 is a file descriptor: 0 is stdin, 1 is stdout, and 2 is stderr.
> is shorthand for 1>, so >/dev/null means 1>/dev/null.
2>&1 redirects stderr to stdout.
The trailing & runs the command in the background; the job can then be managed with jobs, fg, bg, and Ctrl+Z.
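
A minimal sketch putting the pieces together (my_cmd is a placeholder for any command):

# discard stdout, send stderr to the same place, keep running in the background
my_cmd >/dev/null 2>&1 &
jobs      # list background jobs
fg %1     # bring job 1 back to the foreground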

nohup java -Xms512M -Xmx512M -Xmn256M -XX:PermSize=128m -XX:MaxPermSize=256m -cp /public/dcm/server3/DcmServer.jar dcm.DcmSystem >> nohup.out  &

ps -ef|grep monitor_psr.sh|grep iboss2|grep ismp|grep -v grep|awk '{print $2}'|xargs kill -9
Notes:
A.  $2 is the second column, i.e. the PID; awk is very powerful and is not covered in detail here.
B.  grep -v grep excludes the grep process itself from the list; grep iboss2 matches the process keyword.
C.  kill -9 force-kills the processes.
D.  xargs turns the previous command's output into arguments for the next command.
    When the next command already reads text from stdin, xargs is unnecessary and a plain pipe is enough; xargs is only needed when the command takes its input as arguments. For example:
ps -ef|grep mm|xargs wc -l  (WRONG)
ps -ef|grep mm|wc -l        (RIGHT)
E.  grep ismp is an extra safety filter: it confirms the process was started by the UNIX user ismp, so processes belonging to other users are not killed by mistake.
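
A sketch of the same idea with pgrep/pkill, which avoids the grep -v grep dance; -u filters by user and -f matches against the full command line (this matches only the monitor_psr.sh keyword, so extend the pattern if the iboss2 filter is also needed):

# preview the matching PIDs first, then force-kill them
pgrep -u ismp -f monitor_psr.sh
pkill -9 -u ismp -f monitor_psr.sh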

Get the local IP address
#!/usr/bin/python
import socket
myname = socket.getfqdn(socket.gethostname())
myaddr = socket.gethostbyname(myname)
#print myname
print myaddr
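
The script above resolves the hostname through DNS or /etc/hosts, so it can return 127.0.0.1 on machines whose hostname maps to loopback. A shell sketch that instead asks the kernel which source address it would use for the default route (8.8.8.8 is only a routable probe address; no packet is sent):

netip=$(ip route get 8.8.8.8 | awk '{for (i = 1; i <= NF; i++) if ($i == "src") print $(i + 1)}')
echo $netip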
Ambari reinstall cleanup script
#!/bin/bash

rm -rf /etc/hadoop
rm -rf /etc/hbase
rm -rf /etc/zookeeper
rm -rf /var/run/hadoop
rm -rf /var/run/hbase
rm -rf /var/run/zookeeper
rm -rf /var/log/hadoop
rm -rf /var/log/hbase
rm -rf /var/log/zookeeper
rm -rf /usr/lib/flume
rm -rf /usr/lib/storm
rm -rf /var/lib/hadoop-hdfs
rm -rf /var/lib/hadoop-yarn
rm -rf /var/lib/hadoop-mapreduce
rm -rf /hadoop/hbase
rm -rf /hadoop/zookeeper
rm -rf /hadoop/hdfs
rm -rf /etc/hive
rm -rf /etc/storm
rm -rf /etc/hive-hcatalog
rm -rf /etc/tez
rm -rf /etc/hive-webhcat
rm -rf /etc/slider
rm -rf /etc/storm-slider-client
rm -rf /var/run/hive
rm -rf /var/run/storm
rm -rf /var/run/webhcat
rm -rf /var/run/hadoop-yarn
rm -rf /var/run/hadoop-mapreduce
rm -rf /var/log/hive
rm -rf /var/log/storm
rm -rf /var/log/hadoop-yarn
rm -rf /var/log/hadoop-mapreduce
rm -rf /var/lib/hive
rm -rf /hadoop/storm

userdel -rf hive
userdel -rf mapred
userdel -rf hbase
userdel -rf ambari-qa
userdel -rf zookeeper
userdel -rf tez
userdel -rf hdfs
userdel -rf storm
userdel -rf yarn
userdel -rf hcat
userdel -rf ams
Using storm
storm jar storm-starter-topologies-0.10.0.jar storm.starter.WordCountTopology WordCount -c nimbus.host=hyj3
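Related commands for checking on and stopping the topology submitted above, using the standard storm CLI (-w is the number of seconds to wait between deactivation and shutdown):

storm list                  # show running topologies and their status
storm kill WordCount -w 10  # deactivate WordCount, wait 10s, then kill it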
redis
telnet 192.168.144.66 6379

set mykey "this is a test"
+OK
get mykey
$14
this is a test
strlen mykey
:14
append mykey "hello"
:19
get mykey
$19
this is a testhello
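The replies above are the raw RESP protocol: +OK is a status reply, $14/$19 prefix bulk replies with their byte length, and :14/:19 are integer replies. redis-cli hides this framing; the same session looks like:

redis-cli -h 192.168.144.66 -p 6379 set mykey "this is a test"
redis-cli -h 192.168.144.66 -p 6379 get mykey
redis-cli -h 192.168.144.66 -p 6379 strlen mykey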
kafka
bin/kafka-topics.sh --zookeeper hyj1:2181/ --alter --topic newgateway_log_1 --partitions 50
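Note that partition counts can only ever be increased, and adding partitions changes the key-to-partition mapping for keyed topics. To verify the new partition count:

bin/kafka-topics.sh --zookeeper hyj1:2181/ --describe --topic newgateway_log_1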
hive and hbase
// Create an external table in Hive that maps to an existing HBase table
create external table hive_ambarismoketest (key string,family map<string,string>) 
STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' 
WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,family:")  
TBLPROPERTIES  ("hbase.table.name" = "ambarismoketest");
// Query the data in the table
select * from hive_ambarismoketest;
// Query a column's data within the column family
select family['col01'] from hive_ambarismoketest;
// Fetch multiple versions of a single row (HBase shell)
get 'versiona','$HBASE_STATE_GLOBAL$',{COLUMN=>'x:x',TIMERANGE=>[1449993600000,1459997200000],VERSIONS=>1440}
// Create a table (HBase shell)
create 'minute-functionid-client-version', {NAME => 'x', VERSIONS => '525600'}
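The get/create commands above use HBase shell syntax. A sketch for running such commands non-interactively by piping them into hbase shell (assuming the hbase binary is on PATH), which is handy in scripts:

# run an HBase shell command without an interactive session
echo "scan 'minute-functionid-client-version', {LIMIT => 5}" | hbase shell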

The mdata table was loaded with only one day of data, from 2015-12-15 17:01:00 to 2015-12-16 17:00:00.
1. Data for all functions
rowkey: '1functionids'
get 'mdata','1functionids'  -- latest total access count for each function
-------------------------------------------------------
2. Data per client
rowkey: 'functionid0'
get 'mdata','functionid0',{TIMERANGE=>[1449993600000,1459997200000],VERSIONS=>10}    -- total access count per client for a function
--------------------------------------------------------
3. Data per client version
get 'versionb','[order, m]',{TIMERANGE=>[1449993600000,1459997200000],VERSIONS=>10}
  -- total access count per client version for a function
--------------------------------------------------------
--------------------------------------------------------
http proxy
172.17.36.127
std_squid
std_1qaz@WSX