Notes on some mobile development keywords
1. rem: a CSS font-size unit that scales relative to the root element's font size (em is the unit that follows the parent element).
2. PhantomJS: a scriptable headless WebKit browser, commonly driven from Node.js, used for page scraping.
3. ANSI colors: control console output colors with ANSI escape codes (see the Java sketch after this list).
4. adb debugging for Android; on iOS 6.0 and above, debug with Safari's remote web inspector.
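A minimal Java sketch for item 3, printing colored console output with standard ANSI escape sequences (it assumes the terminal understands ANSI colors):

public class AnsiColorDemo {
    // ANSI escape sequences have the form ESC[<code>m
    private static final String RED = "\u001B[31m";
    private static final String GREEN = "\u001B[32m";
    private static final String RESET = "\u001B[0m";

    public static void main(String[] args) {
        System.out.println(RED + "error: something went wrong" + RESET);
        System.out.println(GREEN + "ok: all good" + RESET);
    }
}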
UGC stats (distinct user counts per UGC action):

/**
 *
 */
package cn.focus.dc.hadoop;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.util.Progressable;

/**
 * @author qiaowang
 */
public class PingeUgcGroupStat {

    private static final String PINGE_ACTIVE = "pinge.log";

    private static java.util.Map<String, Set<Integer>> map = new HashMap<String, Set<Integer>>();

    public static class Map extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {

        private Text word = new Text();

        public void map(LongWritable key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            // build the set of UGC actions we want to count
            String[] ugc = { "user.login", "user.register", "pic.fav", "pic.unfav", "user.follow",
                    "user.unfollow", "pic.like", "pic.unlike" };
            Set<String> ugcSet = new HashSet<String>();
            for (int i = 0; i < ugc.length; i++) {
                ugcSet.add(ugc[i]);
            }
            String line = value.toString();
            String[] words = line.split("\\|");
            if (ugcSet.contains(words[3]) && !"".equals(words[4])) {
                // log line without version info
                StringBuilder buf = new StringBuilder();
                buf.append(words[1]).append("\t").append(words[2]).append("\t").append(words[3]);
                word.set(buf.toString());
                int uid = Integer.valueOf(words[4]);
                output.collect(word, new IntWritable(uid));
            } else if (ugcSet.contains(words[4]) && !"".equals(words[5])) {
                // log line with version info
                StringBuilder buf = new StringBuilder();
                buf.append(words[1]).append("\t").append(words[2]).append("\t").append(words[4]);
                word.set(buf.toString());
                int uid = Integer.valueOf(words[5]);
                output.collect(word, new IntWritable(uid));
            }
        }
    }

    public static class Reduce extends MapReduceBase implements Reducer<Text, IntWritable, Text, IntWritable> {

        public void reduce(Text key, Iterator<IntWritable> values, OutputCollector<Text, IntWritable> output,
                Reporter reporter) throws IOException {
            // collect the distinct uids seen for this key and emit their count
            Set<Integer> set = map.get(key.toString());
            if (set == null) {
                set = new HashSet<Integer>();
                map.put(key.toString(), set);
            }
            while (values.hasNext()) {
                set.add(values.next().get());
            }
            output.collect(key, new IntWritable(set.size()));
        }
    }

    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(PingeUgcGroupStat.class);
        conf.setJobName("pingeStat");

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);

        conf.setMapperClass(Map.class);
        conf.setCombinerClass(Reduce.class);
        conf.setReducerClass(Reduce.class);

        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);

        FileInputFormat.setInputPaths(conf, new Path(args[1]));
        FileOutputFormat.setOutputPath(conf, new Path(args[2]));

        Configuration config = new Configuration();
        FileSystem hdfs = FileSystem.get(URI.create(args[1]), config); // HDFS file system handle
        // FileSystem hdfs = DistributedFileSystem.get(conf);
        FileSystem local = FileSystem.getLocal(config); // local file system handle
        Path inputDir = new Path(args[0]); // local input directory
        Path hdfsFile = new Path(args[1]); // merged log file on HDFS
        try {
            FileStatus[] inputFiles = local.listStatus(inputDir); // entries of the local input directory
            FSDataOutputStream out = hdfs.create(hdfsFile, new Progressable() {
                @Override
                public void progress() {
                    System.out.print(".");
                }
            }); // create the merged output file on HDFS
            for (int i = 0; i < inputFiles.length; i++) { // walk the local directory entries
                if (inputFiles[i].isDir()) {
                    // the directory layout puts a pinge.log file in each subdirectory; build its full name
                    String fileName = args[0] + inputFiles[i].getPath().getName() + "/" + PINGE_ACTIVE;
                    Path filePath = new Path(fileName);
                    FSDataInputStream in = null;
                    try {
                        in = local.open(filePath); // open the local file
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                    if (null != in) {
                        byte buffer[] = new byte[256];
                        int bytesRead = 0;
                        while ((bytesRead = in.read(buffer)) > 0) {
                            out.write(buffer, 0, bytesRead); // append to the merged file
                        }
                        in.close();
                    }
                }
            }
            out.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
        // remove the job output directory
        deleteFromHdfs(args[2]);
        // run the job
        JobClient.runJob(conf);
    }

    /** Delete a path from HDFS (deleteOnExit paths are processed when the FileSystem is closed). */
    private static void deleteFromHdfs(String dst) throws FileNotFoundException, IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        fs.deleteOnExit(new Path(dst));
        fs.close();
    }
}
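Both this mapper and the one in the next program assume pipe-delimited log lines, with the action name in field 3 (or field 4 when a version field is present) and the numeric uid in the field right after it. A tiny standalone sketch of that parsing; the sample line is invented, only the field positions match the mappers:

public class LogLineParseDemo {
    public static void main(String[] args) {
        // invented sample line: only the positions matter here
        // (action at index 3, uid at index 4 when there is no version field)
        String line = "x|2013-08-18|iphone|user.active|12345";
        String[] words = line.split("\\|");
        if ("user.active".equals(words[3]) && !"".equals(words[4])) {
            String key = words[1] + "\t" + words[2] + "\t" + words[3];
            int uid = Integer.valueOf(words[4]);
            System.out.println(key + " -> uid " + uid);
        }
    }
}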
Getting active users:
/**
 *
 */
package cn.focus.dc.hadoop;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.util.Progressable;

/**
 * @author qiaowang
 */
public class PingeActiveStat {

    private static final String PINGE_ACTIVE = "pinge.access.log";

    private static java.util.Map<String, Set<Integer>> map = new HashMap<String, Set<Integer>>();

    public static class Map extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {

        private Text word = new Text();

        public void map(LongWritable key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            String line = value.toString();
            String[] words = line.split("\\|");
            StringBuilder buf = new StringBuilder();
            if ("user.active".equals(words[3]) && !"".equals(words[4])) {
                // log line without version info
                buf.append(words[1]).append("\t").append(words[2]).append("\t").append(words[3]);
                int uid = Integer.valueOf(words[4]);
                //int uidEnd = uid%10;
                //word.set(String.valueOf(uidEnd));
                word.set(buf.toString());
                output.collect(word, new IntWritable(uid));
            } else if ("user.active".equals(words[4]) && !"".equals(words[5])) {
                // log line with version info
                buf.append(words[1]).append("\t").append(words[2]).append("\t").append(words[4]);
                int uid = Integer.valueOf(words[5]);
                //int uidEnd = uid%10;
                //word.set(String.valueOf(uidEnd));
                word.set(buf.toString());
                output.collect(word, new IntWritable(uid));
            }
        }
    }

    public static class Reduce extends MapReduceBase implements Reducer<Text, IntWritable, Text, IntWritable> {

        public void reduce(Text key, Iterator<IntWritable> values, OutputCollector<Text, IntWritable> output,
                Reporter reporter) throws IOException {
            // collect the distinct uids seen for this key and emit their count
            Set<Integer> set = map.get(key.toString());
            if (set == null) {
                set = new HashSet<Integer>();
                map.put(key.toString(), set);
            }
            while (values.hasNext()) {
                set.add(values.next().get());
            }
            output.collect(key, new IntWritable(set.size()));
        }
    }

    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(PingeActiveStat.class);
        conf.setJobName("pingeStat");

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);

        conf.setMapperClass(Map.class);
        conf.setCombinerClass(Reduce.class);
        conf.setReducerClass(Reduce.class);

        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);

        FileInputFormat.setInputPaths(conf, new Path(args[1]));
        FileOutputFormat.setOutputPath(conf, new Path(args[2]));

        Configuration config = new Configuration();
        FileSystem hdfs = FileSystem.get(URI.create(args[1]), config); // HDFS file system handle
        // FileSystem hdfs = DistributedFileSystem.get(conf);
        FileSystem local = FileSystem.getLocal(config); // local file system handle
        Path inputDir = new Path(args[0]); // local input directory
        Path hdfsFile = new Path(args[1]); // merged log file on HDFS
        try {
            FileStatus[] inputFiles = local.listStatus(inputDir); // entries of the local input directory
            FSDataOutputStream out = hdfs.create(hdfsFile, new Progressable() {
                @Override
                public void progress() {
                    System.out.print(".");
                }
            }); // create the merged output file on HDFS
            for (int i = 0; i < inputFiles.length; i++) { // walk the local directory entries
                if (inputFiles[i].isDir()) {
                    // the directory layout puts a pinge.access.log file in each subdirectory; build its full name
                    String fileName = args[0] + inputFiles[i].getPath().getName() + "/" + PINGE_ACTIVE;
                    Path filePath = new Path(fileName);
                    FSDataInputStream in = null;
                    try {
                        in = local.open(filePath); // open the local file
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                    if (null != in) {
                        byte buffer[] = new byte[256];
                        int bytesRead = 0;
                        while ((bytesRead = in.read(buffer)) > 0) {
                            out.write(buffer, 0, bytesRead); // append to the merged file
                        }
                        in.close();
                    }
                }
            }
            out.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
        // remove the job output directory
        deleteFromHdfs(args[2]);
        // run the job
        JobClient.runJob(conf);
    }

    /** Delete a path from HDFS (deleteOnExit paths are processed when the FileSystem is closed). */
    private static void deleteFromHdfs(String dst) throws FileNotFoundException, IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(dst), conf);
        fs.deleteOnExit(new Path(dst));
        fs.close();
    }
}
Compile:
/usr/lib/jdk1.6.0_33/bin/javac -classpath /opt/hadoop/hadoop-core-1.1.2.jar -d /home/hadoop/pinge_classes/ /home/hadoop/pinge_classes/PingeActiveStat.java
hadoop@Master:~/pinge_classes$ /usr/lib/jdk1.6.0_33/bin/jar -cvf /home/hadoop/PingeActiveStat.jar -C /home/hadoop/pinge_classes/ .
Run:
/opt/hadoop/bin/hadoop jar /home/hadoop/PingeActiveStat.jar cn.focus.dc.hadoop.PingeActiveStat /opt/tmp_log/pinge-access-2013-08-18.log hdfs://10.1.77.213:54310/user/hadoop/pinge_access/pinge-access-2013-08-18.log hdfs://10.1.77.213:54310/user/hadoop/pinge_access_output/
Set permissions:
/opt/apps/hadoop/bin/hadoop fs -chmod -R 777 /user
/usr/lib/jdk1.6.0_33/bin/javac -classpath /opt/hadoop/hadoop-core-1.1.2.jar -d /home/hadoop/pinge_ugc_classes/ /home/hadoop/pinge_ugc_classes/PingeUgcStat.java
/usr/lib/jdk1.6.0_33/bin/jar -cvf /home/hadoop/PingeUgcStat.jar -C /home/hadoop/pinge_ugc_classes/ .
/opt/hadoop/bin/hadoop jar /home/hadoop/PingeUgcStat.jar cn.focus.dc.hadoop.PingeUgcStat /opt/tmp_log/pinge-2013-08-24.log hdfs://10.1.77.213:54310/user/hadoop/pinge_ugc/pinge-ugc-2013-08-24.log hdfs://10.1.77.213:54310/user/hadoop/pinge_ugc_output/
/opt/apps/hadoop/bin/hadoop jar /opt/stat/PingeUgcStat.jar cn.focus.dc.hadoop.PingeUgcStat /opt/tmp_log/pinge-2013-08-24.log hdfs://localhost:54310/user/hadoop/pinge_ugc/pinge-ugc-2013-08-24.log hdfs://localhost:54310/user/hadoop/pinge_ugc_output/
One TVideoGrabber component (or several of them) used as a regular video source, such as a video capture device or a video clip, can be mixed into and consumed by another TVideoGrabber component. This mixer component is independent of the source components: while the source components keep sending it video frames, it can stop, preview, record, pause, and so on.
Many people are unsure how the mixing is actually done. The source components can be displayed in the mixer component in the following ways:
- as a plain "copy": a second component in mixer mode receives and displays the video coming from the first component;
- by switching several video sources into a single source when needed;
- as a mosaic, e.g. displaying 4 cameras at the same time in a 2x2 layout;
- as an alternating display, e.g. 4 cameras shown one after another;
- as a mosaic/alternating layout, e.g. 16 cameras shown as 4 alternating mosaics of 2x2 cameras.
Combined mosaic/alternating mixing
Mosaic and alternating layouts can be combined, e.g. displaying 16 cameras by alternating 4 groups of 4 cameras, each group shown as a 2x2 video mosaic.
Mixer_AddToMixer parameters (a small mosaic sketch follows the list):
- the 1st parameter is the UniqueID of the source component;
- the 2nd parameter is not used at this time, just set it to 0;
- the 3rd parameter is the mosaic line in which the source will be displayed (set it to 0 for an alternation-only display);
- the 4th parameter is the mosaic column in which the source will be displayed (set it to 0 for an alternation-only display);
- the 5th parameter is the display group number (set it to 0 for a mosaic-only display);
- the 6th parameter is how long the group is displayed, in milliseconds (set it to 0 for a mosaic-only display);
- the 7th parameter should be set to TRUE;
- the 8th parameter should be set to TRUE.
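For illustration, here is a minimal sketch of a 2x2 mosaic, written in the same style as the snippets below. It assumes four capture components (VideoGrabber1 to VideoGrabber4) are already set up and previewing, and it assumes mosaic lines and columns are numbered from 1, with 0 meaning "no mosaic position"; that numbering is not confirmed by this text, so check the TVideoGrabber documentation:

VideoGrabberMixer.VideoSource = vs_Mixer
// 3rd/4th parameters: mosaic line and column; 5th/6th are 0 because this is a mosaic-only display
VideoGrabberMixer.Mixer_AddToMixer (VideoGrabber1.UniqueID, 0, 1, 1, 0, 0, true, true) // top left
VideoGrabberMixer.Mixer_AddToMixer (VideoGrabber2.UniqueID, 0, 1, 2, 0, 0, true, true) // top right
VideoGrabberMixer.Mixer_AddToMixer (VideoGrabber3.UniqueID, 0, 2, 1, 0, 0, true, true) // bottom left
VideoGrabberMixer.Mixer_AddToMixer (VideoGrabber4.UniqueID, 0, 2, 2, 0, 0, true, true) // bottom right
VideoGrabberMixer.StartPreview()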
Basic mixing (the second component receives the video frames of the first one)
In this mode a second component uses the first component as its video source.
For example, the first component just runs a preview, while the second component independently starts/stops/pauses/resumes recording of the first component's video, with the preview running continuously.
VideoGrabber1.VideoSource = vs_VideoCaptureDevice
VideoGrabber1.StartPreview()
VideoGrabber2.VideoSource = vs_Mixer
VideoGrabber2.Mixer_AddToMixer (VideoGrabber1.UniqueID, 0, 0, 0, 0, 0, true, true)
VideoGrabber2.StartPreview()
VideoGrabber2 then receives and displays the video sent by VideoGrabber1, using it as its video source.
Selecting the mixer video size
By default, the mixer component uses the following video size:
- if a source was already started before the mixer component starts, the mixer uses that source's video size;
- if no source has been started yet, the mixer starts at 320x240.
To choose the video size, call UseNearestVideoSize on the mixer component before StartRecording or StartPreview:
VideoGrabberMixer.VideoSource = vs_Mixer
VideoGrabberMixer.UseNearestVideoSize (640, 480, true)
VideoGrabberMixer.StartPreview()
Switching several sources into a single one when needed
This works like basic mixing with more than one video source; the sources are switched by calling Mixer_Activation, e.g.:
1. Start previewing the 1st capture device:
VideoGrabber1.VideoSource = vs_VideoCaptureDevice
VideoGrabber1.VideoDevice = 0
VideoGrabber1.StartPreview()
2. Start previewing the 2nd capture device:
VideoGrabber2.VideoSource = vs_VideoCaptureDevice
VideoGrabber2.VideoDevice = 1
VideoGrabber2.StartPreview()
3. Start the 3rd component, which will preview or record the mix:
VideoGrabber3.VideoSource = vs_Mixer
int MixerId1 = VideoGrabber3.Mixer_AddToMixer (VideoGrabber1.UniqueID, 0, 0, 0, 0, 0, true, true)
int MixerId2 = VideoGrabber3.Mixer_AddToMixer (VideoGrabber2.UniqueID, 0, 0, 0, 0, 0, true, true)
VideoGrabber3.Mixer_Activation (MixerId2, false) // let's start with MixerId1 activated only
VideoGrabber3.StartPreview()
4. Then switch between the inputs, activating one and deactivating the other:
VideoGrabber3.Mixer_Activation (MixerId1, false)
VideoGrabber3.Mixer_Activation (MixerId2, true)

or

VideoGrabber3.Mixer_Activation (MixerId2, false)
VideoGrabber3.Mixer_Activation (MixerId1, true)