Android: getting the rotation angle — how do you compute the angle of a rotate gesture?

Using Matrix to control image rotation, scaling and translation in Android
This article explains how to use Android's Matrix class to rotate, scale and translate an image. The code is as follows:
import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Matrix;
import android.graphics.Paint;
import android.graphics.Point;
import android.graphics.Rect;
import android.graphics.RectF;
import android.util.AttributeSet;
import android.util.Log;
import android.view.MotionEvent;
import android.view.View;

/**
 * Use a Matrix to translate, scale and rotate an image.
 */
public class CommonImgEffectView extends View {

    private Context context;
    private Matrix matrix;
    private Bitmap mainBmp, controlBmp;
    private int mainBmpWidth, mainBmpHeight, controlBmpWidth, controlBmpHeight;
    private float[] srcPs, dstPs;
    private RectF srcRect, dstRect;
    private Paint paint, paintRect, paintFrame;
    private float deltaX = 0, deltaY = 0;           // translation offsets
    private float scaleValue = 1;                   // scale factor
    private Point lastPoint;                        // last touch point
    private Point prePivot, lastPivot;              // current and previous pivot
    private float preDegree, lastDegree;            // current and previous rotation angle
    private short currentSelectedPoint;             // index of the currently touched control point
    private Point symmetricPoint = new Point();     // point symmetric to the control point being dragged

    /** Image operation types */
    public static final int OPER_DEFAULT = -1;
    public static final int OPER_TRANSLATE = 0;
    public static final int OPER_SCALE = 1;
    public static final int OPER_ROTATE = 2;
    public static final int OPER_SELECTED = 3;
    public int lastOper = OPER_DEFAULT;
    /* Control points around the image:
     * 0---1---2
     * |       |
     * 7   8   3
     * |       |
     * 6---5---4
     */
    public static final int CTR_NONE = -1;
    public static final int CTR_LEFT_TOP = 0;
    public static final int CTR_MID_TOP = 1;
    public static final int CTR_RIGHT_TOP = 2;
    public static final int CTR_RIGHT_MID = 3;
    public static final int CTR_RIGHT_BOTTOM = 4;
    public static final int CTR_MID_BOTTOM = 5;
    public static final int CTR_LEFT_BOTTOM = 6;
    public static final int CTR_LEFT_MID = 7;
    public static final int CTR_MID_MID = 8;
    public int current_ctr = CTR_NONE;
    public CommonImgEffectView(Context context) {
        super(context);
        this.context = context;
        initData();
    }

    public CommonImgEffectView(Context context, AttributeSet attrs) {
        super(context, attrs);
        this.context = context;
        initData();
    }
    /**
     * Initialize bitmaps, control points and paints.
     * @author 张进
     */
    private void initData() {
        mainBmp = BitmapFactory.decodeResource(this.context.getResources(), R.drawable.flower);
        controlBmp = BitmapFactory.decodeResource(this.context.getResources(), R.drawable.control);
        mainBmpWidth = mainBmp.getWidth();
        mainBmpHeight = mainBmp.getHeight();
        controlBmpWidth = controlBmp.getWidth();
        controlBmpHeight = controlBmp.getHeight();
        srcPs = new float[]{
                0, 0,                                  // 0: left-top
                mainBmpWidth / 2, 0,                   // 1: mid-top
                mainBmpWidth, 0,                       // 2: right-top
                mainBmpWidth, mainBmpHeight / 2,       // 3: right-mid
                mainBmpWidth, mainBmpHeight,           // 4: right-bottom
                mainBmpWidth / 2, mainBmpHeight,       // 5: mid-bottom
                0, mainBmpHeight,                      // 6: left-bottom
                0, mainBmpHeight / 2,                  // 7: left-mid
                mainBmpWidth / 2, mainBmpHeight / 2    // 8: centre
        };
        dstPs = srcPs.clone();
        srcRect = new RectF(0, 0, mainBmpWidth, mainBmpHeight);
        dstRect = new RectF();
        matrix = new Matrix();
        prePivot = new Point(mainBmpWidth / 2, mainBmpHeight / 2);
        lastPivot = new Point(mainBmpWidth / 2, mainBmpHeight / 2);
        lastPoint = new Point(0, 0);
        paint = new Paint();
        paintRect = new Paint();
        paintRect.setColor(Color.RED);
        paintRect.setAlpha(100);
        paintRect.setAntiAlias(true);
        paintFrame = new Paint();
        paintFrame.setColor(Color.GREEN);
        paintFrame.setAntiAlias(true);
        setMatrix(OPER_DEFAULT);
    }
    /**
     * Apply the matrix transform for the given operation.
     * @author 张进
     */
    private void setMatrix(int operationType) {
        switch (operationType) {
            case OPER_TRANSLATE:
                matrix.postTranslate(deltaX, deltaY);
                break;
            case OPER_SCALE:
                matrix.postScale(scaleValue, scaleValue, symmetricPoint.x, symmetricPoint.y);
                break;
            case OPER_ROTATE:
                matrix.postRotate(preDegree - lastDegree, dstPs[CTR_MID_MID * 2], dstPs[CTR_MID_MID * 2 + 1]);
                break;
        }
        matrix.mapPoints(dstPs, srcPs);     // map the control points
        matrix.mapRect(dstRect, srcRect);   // map the bounding rectangle
    }
    /** Whether (x, y) falls inside the mapped image rectangle. */
    private boolean isOnPic(int x, int y) {
        return dstRect.contains(x, y);
    }
    /** Decide which operation the current touch event corresponds to. */
    private int getOperationType(MotionEvent event) {
        int evX = (int) event.getX();
        int evY = (int) event.getY();
        int curOper = lastOper;
        switch (event.getAction()) {
            case MotionEvent.ACTION_DOWN:
                current_ctr = isOnCP(evX, evY);
                Log.i("img", "current_ctr is " + current_ctr);
                if (current_ctr != CTR_NONE || isOnPic(evX, evY)) {
                    curOper = OPER_SELECTED;
                }
                break;
            case MotionEvent.ACTION_MOVE:
                if (current_ctr > CTR_NONE && current_ctr < CTR_MID_MID) {
                    curOper = OPER_SCALE;        // dragging an edge or corner point scales
                } else if (current_ctr == CTR_MID_MID) {
                    curOper = OPER_ROTATE;       // dragging the centre control point rotates
                } else if (lastOper == OPER_SELECTED) {
                    curOper = OPER_TRANSLATE;    // dragging inside the image translates
                }
                break;
            case MotionEvent.ACTION_UP:
                curOper = OPER_SELECTED;
                break;
        }
        Log.d("img", "curOper is " + curOper);
        return curOper;
    }
    /**
     * Return the index of the control point under (evx, evy), or CTR_NONE.
     * @param evx
     * @param evy
     */
    private int isOnCP(int evx, int evy) {
        Rect rect = new Rect(evx - controlBmpWidth / 2, evy - controlBmpHeight / 2,
                evx + controlBmpWidth / 2, evy + controlBmpHeight / 2);
        int res = 0;
        for (int i = 0; i < dstPs.length; i += 2) {
            if (rect.contains((int) dstPs[i], (int) dstPs[i + 1])) {
                return res;
            }
            ++res;
        }
        return CTR_NONE;
    }
    @Override
    public boolean dispatchTouchEvent(MotionEvent event) {
        int evX = (int) event.getX();
        int evY = (int) event.getY();
        int operType = getOperationType(event);
        switch (operType) {
            case OPER_TRANSLATE:
                translate(evX, evY);
                break;
            case OPER_SCALE:
                scale(event);
                break;
            case OPER_ROTATE:
                rotate(event);
                break;
        }
        lastPoint.x = evX;
        lastPoint.y = evY;
        lastOper = operType;
        invalidate();   // redraw
        return true;
    }
    /**
     * Translate by the finger movement since the last event.
     * @param evx
     * @param evy
     * @author zhang_jin1
     */
    private void translate(int evx, int evy) {
        prePivot.x += evx - lastPoint.x;
        prePivot.y += evy - lastPoint.y;
        deltaX = prePivot.x - lastPivot.x;
        deltaY = prePivot.y - lastPivot.y;
        lastPivot.x = prePivot.x;
        lastPivot.y = prePivot.y;
        setMatrix(OPER_TRANSLATE);   // apply the translation
    }
    /* Scale about the control point opposite the one being dragged.
     * 0---1---2
     * |       |
     * 7   8   3
     * |       |
     * 6---5---4
     */
    private void scale(MotionEvent event) {
        int pointIndex = current_ctr * 2;
        float px = dstPs[pointIndex];
        float py = dstPs[pointIndex + 1];
        float evx = event.getX();
        float evy = event.getY();
        float oppositeX = 0;
        float oppositeY = 0;
        if (current_ctr < 4 && current_ctr >= 0) {
            // points 0-3: the opposite point sits 4 points (8 floats) further on
            oppositeX = dstPs[pointIndex + 8];
            oppositeY = dstPs[pointIndex + 9];
        } else if (current_ctr >= 4) {
            // points 4-7: the opposite point sits 4 points (8 floats) earlier
            oppositeX = dstPs[pointIndex - 8];
            oppositeY = dstPs[pointIndex - 7];
        }
        float temp1 = getDistanceOfTwoPoints(px, py, oppositeX, oppositeY);
        float temp2 = getDistanceOfTwoPoints(evx, evy, oppositeX, oppositeY);
        this.scaleValue = temp2 / temp1;   // scale factor = new distance / old distance
        symmetricPoint.x = (int) oppositeX;
        symmetricPoint.y = (int) oppositeY;
        Log.i("img", "scaleValue is " + scaleValue);
        setMatrix(OPER_SCALE);
    }
    /* Rotate the image.
     * With two pointers the angle is taken between the two touch points; with one pointer
     * it is taken between the touch point and the image centre (dstPs[16], dstPs[17]).
     */
    private void rotate(MotionEvent event) {
        if (event.getPointerCount() == 2) {
            preDegree = computeDegree(new Point((int) event.getX(0), (int) event.getY(0)),
                    new Point((int) event.getX(1), (int) event.getY(1)));
        } else {
            preDegree = computeDegree(new Point((int) event.getX(), (int) event.getY()),
                    new Point((int) dstPs[16], (int) dstPs[17]));
        }
        setMatrix(OPER_ROTATE);   // rotates by (preDegree - lastDegree) about the centre
        lastDegree = preDegree;
    }
    /**
     * Angle between the line p1->p2 and the vertical direction, resolved by quadrant.
     * @param p1
     * @param p2
     */
    public float computeDegree(Point p1, Point p2) {
        float tran_x = p1.x - p2.x;
        float tran_y = p1.y - p2.y;
        float degree = 0.0f;
        float angle = (float) (Math.asin(tran_x / Math.sqrt(tran_x * tran_x + tran_y * tran_y)) * 180 / Math.PI);
        if (!Float.isNaN(angle)) {
            if (tran_x >= 0 && tran_y <= 0) {          // first quadrant
                degree = angle;
            } else if (tran_x <= 0 && tran_y <= 0) {   // second quadrant
                degree = angle;
            } else if (tran_x <= 0 && tran_y >= 0) {   // third quadrant
                degree = -180 - angle;
            } else if (tran_x >= 0 && tran_y >= 0) {   // fourth quadrant
                degree = 180 - angle;
            }
        }
        return degree;
    }
    /** Distance between two points. */
    private float getDistanceOfTwoPoints(Point p1, Point p2) {
        return (float) Math.sqrt((p1.x - p2.x) * (p1.x - p2.x) + (p1.y - p2.y) * (p1.y - p2.y));
    }

    private float getDistanceOfTwoPoints(float x1, float y1, float x2, float y2) {
        return (float) Math.sqrt((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2));
    }
    @Override
    public void onDraw(Canvas canvas) {
        drawBackground(canvas);                      // mapped rectangle, to verify the rect mapping
        canvas.drawBitmap(mainBmp, matrix, paint);   // the main image
        drawFrame(canvas);                           // frame, to verify the point mapping
        drawControlPoints(canvas);                   // control point bitmaps
    }

    private void drawBackground(Canvas canvas) {
        canvas.drawRect(dstRect, paintRect);
    }

    private void drawFrame(Canvas canvas) {
        canvas.drawLine(dstPs[0], dstPs[1], dstPs[4], dstPs[5], paintFrame);
        canvas.drawLine(dstPs[4], dstPs[5], dstPs[8], dstPs[9], paintFrame);
        canvas.drawLine(dstPs[8], dstPs[9], dstPs[12], dstPs[13], paintFrame);
        canvas.drawLine(dstPs[0], dstPs[1], dstPs[12], dstPs[13], paintFrame);
        canvas.drawPoint(dstPs[16], dstPs[17], paintFrame);
    }

    private void drawControlPoints(Canvas canvas) {
        for (int i = 0; i < dstPs.length; i += 2) {
            canvas.drawBitmap(controlBmp, dstPs[i] - controlBmpWidth / 2, dstPs[i + 1] - controlBmpHeight / 2, paint);
        }
    }
}
Demo effect: (the demo screenshot is not preserved in this copy of the post.)
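To try the view out, it can be set directly as an Activity's content view. The sketch below is illustrative and not part of the original post (the Activity name is arbitrary); it only relies on the R.drawable.flower and R.drawable.control resources that initData() already references:

import android.app.Activity;
import android.os.Bundle;

// Minimal host Activity for CommonImgEffectView (illustrative sketch only).
public class ImgEffectDemoActivity extends Activity {
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        // The view handles its own touch input in dispatchTouchEvent(), so no extra wiring is needed.
        setContentView(new CommonImgEffectView(this));
    }
}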
The next example rotates an OpenGL ES cube with drag gestures; the code is as follows:
package com.example.d;

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.FloatBuffer;
import java.nio.IntBuffer;
import javax.microedition.khronos.egl.EGLConfig;
import javax.microedition.khronos.opengles.GL10;
import android.opengl.GLSurfaceView;
import android.opengl.GLSurfaceView.Renderer;
import android.os.Bundle;
import android.app.Activity;
import android.content.Context;
import android.view.GestureDetector;
import android.view.GestureDetector.OnGestureListener;
import android.view.MotionEvent;

public class Activity3D extends Activity implements OnGestureListener {
    private GestureDetector detector;
    private float anglex = 0f;
    private float angley = 0f;
    static final float ROTATE_FACTOR = 60;

    @SuppressWarnings("deprecation")
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        GLSurfaceView glView = new GLSurfaceView(this);
        MyRenderer myRender = new MyRenderer(this);
        glView.setRenderer(myRender);
        setContentView(glView);
        detector = new GestureDetector(this);
    }

    @Override
    public boolean onTouchEvent(MotionEvent me) {
        // Forward all touch events to the gesture detector.
        return detector.onTouchEvent(me);
    }

    @Override
    public boolean onDown(MotionEvent arg0) {
        return false;
    }

    @Override
    public boolean onFling(MotionEvent event1, MotionEvent event2, float velocityX, float velocityY) {
        return false;
    }

    @Override
    public void onLongPress(MotionEvent arg0) {
    }

    @Override
    public boolean onScroll(MotionEvent event1, MotionEvent event2, float velocityX, float velocityY) {
        // Accumulate the scroll distances as rotation angles about the Y and X axes.
        angley += velocityX * ROTATE_FACTOR / 360;
        anglex += velocityY * ROTATE_FACTOR / 360;
        return true;
    }

    @Override
    public void onShowPress(MotionEvent arg0) {
    }

    @Override
    public boolean onSingleTapUp(MotionEvent arg0) {
        return false;
    }
    public class MyRenderer implements Renderer {

        // Eight vertices of a unit cube centred on the origin.
        private float[] cubeVertices = {
                0.5f, 0.5f, 0.5f,
                0.5f, -0.5f, 0.5f,
                -0.5f, -0.5f, 0.5f,
                -0.5f, 0.5f, 0.5f,
                0.5f, 0.5f, -0.5f,
                0.5f, -0.5f, -0.5f,
                -0.5f, -0.5f, -0.5f,
                -0.5f, 0.5f, -0.5f
        };

        // Triangle-strip indices into cubeVertices.
        // Note: the index values did not survive in this copy of the article; only the declaration remains.
        private byte[] cubeFacets = new byte[]{
        };

        // Per-vertex colours in GL_FIXED (16.16) format.
        // Note: only the first row survived in this copy; the remaining entries are missing here.
        int[] taperColors = new int[]{
                0, 0, 65535, 0,
        };

        private FloatBuffer cubeVerticesBuffer;
        private ByteBuffer cubeFacetsBuffer;
        private IntBuffer taperColorsBuffer;

        public MyRenderer(Context main) {
            // Wrap the raw arrays in NIO buffers for the GL calls.
            cubeVerticesBuffer = floatBufferUtil(cubeVertices);
            cubeFacetsBuffer = ByteBuffer.wrap(cubeFacets);
            taperColorsBuffer = intBufferUtil(taperColors);
        }
        private FloatBuffer floatBufferUtil(float[] arr) {
            FloatBuffer mBuffer;
            // Allocate a direct ByteBuffer of arr.length * 4 bytes, since each float takes 4 bytes.
            ByteBuffer qbb = ByteBuffer.allocateDirect(arr.length * 4);
            // Use the platform's native byte order.
            qbb.order(ByteOrder.nativeOrder());
            mBuffer = qbb.asFloatBuffer();
            mBuffer.put(arr);
            mBuffer.position(0);
            return mBuffer;
        }

        private IntBuffer intBufferUtil(int[] arr) {
            IntBuffer mBuffer;
            // Allocate a direct ByteBuffer of arr.length * 4 bytes, since each int takes 4 bytes.
            ByteBuffer qbb = ByteBuffer.allocateDirect(arr.length * 4);
            // Use the platform's native byte order.
            qbb.order(ByteOrder.nativeOrder());
            mBuffer = qbb.asIntBuffer();
            mBuffer.put(arr);
            mBuffer.position(0);
            return mBuffer;
        }
        @Override
        public void onSurfaceCreated(GL10 gl, EGLConfig config) {
            // Disable dithering to improve performance.
            gl.glDisable(GL10.GL_DITHER);
            // Ask for the fastest perspective correction.
            gl.glHint(GL10.GL_PERSPECTIVE_CORRECTION_HINT, GL10.GL_FASTEST);
            // Clear colour: glClearColor(red, green, blue, alpha).
            gl.glClearColor(0, 0, 0, 0);
            // Smooth shading.
            gl.glShadeModel(GL10.GL_SMOOTH);
            /*
             * Enable depth testing so OpenGL ES tracks the depth of every fragment along the Z axis;
             * this keeps geometry behind an object from being drawn over it.
             */
            gl.glEnable(GL10.GL_DEPTH_TEST);
            // Depth test comparison function.
            gl.glDepthFunc(GL10.GL_LEQUAL);
        }
        /** Set up the 3D viewport and projection. */
        @Override
        public void onSurfaceChanged(GL10 gl, int width, int height) {
            // Viewport position and size (first two parameters: position; last two: width and height).
            gl.glViewport(0, 0, width, height);
            // Switch to the projection matrix.
            gl.glMatrixMode(GL10.GL_PROJECTION);
            // Reset it to the identity matrix.
            gl.glLoadIdentity();
            // Aspect ratio of the viewport.
            float ratio = (float) width / height;
            /*
             * Define the viewing frustum: the first two parameters set the minimum and maximum X,
             * the next two the minimum and maximum Y, and the last two the near and far clipping
             * distances along Z.
             */
            gl.glFrustumf(-ratio, ratio, -1, 1, 1, 10);
        }
        @Override
        public void onDrawFrame(GL10 gl) {
            // Clear the colour and depth buffers.
            gl.glClear(GL10.GL_COLOR_BUFFER_BIT | GL10.GL_DEPTH_BUFFER_BIT);
            // Enable the vertex and colour arrays.
            gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
            gl.glEnableClientState(GL10.GL_COLOR_ARRAY);
            // Switch to the model-view matrix and reset it.
            gl.glMatrixMode(GL10.GL_MODELVIEW);
            gl.glLoadIdentity();
            gl.glTranslatef(0f, 0.0f, -2.0f);
            /*
             * Strictly, the rotation direction should depend on which face is currently toward
             * the viewer; this example does not check that.
             */
            gl.glRotatef(angley, 0, -1, 0);
            gl.glRotatef(anglex, 1, 0, 0);
            // Point GL at the vertex and colour data, then draw the indexed triangle strip.
            gl.glVertexPointer(3, GL10.GL_FLOAT, 0, cubeVerticesBuffer);
            gl.glColorPointer(4, GL10.GL_FIXED, 0, taperColorsBuffer);
            gl.glDrawElements(GL10.GL_TRIANGLE_STRIP, cubeFacetsBuffer.remaining(), GL10.GL_UNSIGNED_BYTE, cubeFacetsBuffer);
            gl.glFinish();
            gl.glDisableClientState(GL10.GL_VERTEX_ARRAY);
        }
    }
}
The rotation direction is not corrected for which face is currently frontmost, so rotating up and down may not behave the way you expect — can anyone suggest how to handle this?
Custom ImageView supporting drag, scale and rotate gestures (Ringer, 博客园)
import android.content.Context;
import android.graphics.Matrix;
import android.graphics.PointF;
import android.util.AttributeSet;
import android.util.FloatMath;
import android.view.MotionEvent;
import android.widget.ImageView;

public class ImageTouchView extends ImageView {
    private PointF startPoint = new PointF();
    private Matrix matrix = new Matrix();
    private Matrix currentMaritx = new Matrix();
    PointF mid = new PointF();

    private int mode = 0;                    // current gesture mode
    private static final int DRAG = 1;       // one-finger drag
    private static final int ZOOM = 2;       // two-finger zoom / rotate
    private float startDis = 0;              // distance between the fingers when the gesture started
    private PointF midPoint;                 // midpoint between the two fingers
    float oldRotation = 0;                   // finger-line angle when the second finger went down

    /**
     * Default constructor.
     * @param context
     */
    public ImageTouchView(Context context) {
        super(context);
    }

    @Override
    protected void onLayout(boolean changed, int left, int top, int right, int bottom) {
        super.onLayout(changed, left, top, right, bottom);
    }

    /**
     * This constructor is required when the view is declared in an XML layout.
     * @param context
     * @param paramAttributeSet
     */
    public ImageTouchView(Context context, AttributeSet paramAttributeSet) {
        super(context, paramAttributeSet);
    }
    @Override
    public boolean onTouchEvent(MotionEvent event) {
        switch (event.getAction() & MotionEvent.ACTION_MASK) {
            case MotionEvent.ACTION_DOWN:
                mode = DRAG;
                currentMaritx.set(this.getImageMatrix());    // remember the ImageView's current matrix
                startPoint.set(event.getX(), event.getY());  // starting point of the drag
                break;
            case MotionEvent.ACTION_MOVE:                    // move events
                if (mode == DRAG) {                          // one-finger drag
                    float dx = event.getX() - startPoint.x;  // distance moved on the x axis
                    float dy = event.getY() - startPoint.y;  // distance moved on the y axis
                    matrix.set(currentMaritx);               // start from the matrix captured on ACTION_DOWN
                    matrix.postTranslate(dx, dy);
                } else if (mode == ZOOM) {                   // two-finger zoom / rotate
                    float rotation = rotation(event) - oldRotation;  // change of the finger-line angle
                    float endDis = distance(event);          // current distance between the fingers
                    if (endDis > 10f) {
                        float scale = endDis / startDis;     // scale factor
                        matrix.set(currentMaritx);
                        matrix.postScale(scale, scale, midPoint.x, midPoint.y);
                        matrix.postRotate(rotation, midPoint.x, midPoint.y);  // rotate about the midpoint
                    }
                }
                break;
            case MotionEvent.ACTION_UP:
            // A finger left the screen but other fingers may still be down.
            case MotionEvent.ACTION_POINTER_UP:
                mode = 0;
                break;
            // A second finger touched the screen while one was already down.
            case MotionEvent.ACTION_POINTER_DOWN:
                oldRotation = rotation(event);
                mode = ZOOM;
                startDis = distance(event);
                if (startDis > 10f) {                        // ignore if the two fingers are too close together
                    midPoint = mid(event);
                    currentMaritx.set(this.getImageMatrix()); // remember the current matrix before scaling
                }
                break;
        }
        this.setImageMatrix(matrix);
        return true;
    }
    // Angle (in degrees) of the line through the two pointers.
    private float rotation(MotionEvent event) {
        double delta_x = (event.getX(0) - event.getX(1));
        double delta_y = (event.getY(0) - event.getY(1));
        double radians = Math.atan2(delta_y, delta_x);
        return (float) Math.toDegrees(radians);
    }

    // Midpoint of the two pointers, written into the supplied PointF.
    private void midPoint(PointF point, MotionEvent event) {
        float x = event.getX(0) + event.getX(1);
        float y = event.getY(0) + event.getY(1);
        point.set(x / 2, y / 2);
    }
    /**
     * Distance between the two pointers.
     * @param event
     */
    private static float distance(MotionEvent event) {
        float dx = event.getX(1) - event.getX(0);
        float dy = event.getY(1) - event.getY(0);
        // FloatMath was removed in API 23; (float) Math.sqrt(dx * dx + dy * dy) is the modern equivalent.
        return FloatMath.sqrt(dx * dx + dy * dy);
    }

    /**
     * Midpoint between the two pointers.
     * @param event
     */
    private static PointF mid(MotionEvent event) {
        float midx = event.getX(1) + event.getX(0);
        float midy = event.getY(1) + event.getY(0);
        return new PointF(midx / 2, midy / 2);
    }
}
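As with the first example, a short usage sketch may help; it is not from the original post, and the Activity and drawable names are placeholders. The key point is that the ImageView must use ScaleType.MATRIX, otherwise setImageMatrix() has no visible effect:

import android.app.Activity;
import android.os.Bundle;
import android.widget.ImageView;

// Illustrative host Activity for ImageTouchView (sketch only).
public class TouchImageDemoActivity extends Activity {
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        ImageTouchView view = new ImageTouchView(this);
        view.setScaleType(ImageView.ScaleType.MATRIX);    // required for setImageMatrix() to take effect
        view.setImageResource(R.drawable.sample_photo);   // placeholder drawable name
        setContentView(view);
    }
}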
Q: With Android's rotate gesture, how do you compute the rotation angle? Any help would be appreciated.
A: See "Android单点触控技术,对图片进行平移,缩放,旋转操作" (single-touch translation, scaling and rotation of an image); the Matrix-based examples above take the same approach.
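To answer the question directly: both classes above derive the angle from the line between the two fingers and apply only the change in that angle. A minimal sketch of the idea follows; the field and method names here (startAngle, fingerAngle) are illustrative and not from either post:

// Sketch: compute the rotation angle of a two-finger rotate gesture.
// The angle of the line through the two pointers is taken with Math.atan2; the gesture's
// rotation is the difference between the current angle and the angle when the gesture began.
// (These members live inside a View or touch listener, as in ImageTouchView above.)
private float startAngle;   // set on ACTION_POINTER_DOWN

private float fingerAngle(MotionEvent event) {
    double dx = event.getX(0) - event.getX(1);
    double dy = event.getY(0) - event.getY(1);
    return (float) Math.toDegrees(Math.atan2(dy, dx));
}

// In onTouchEvent():
//   ACTION_POINTER_DOWN:        startAngle = fingerAngle(event);
//   ACTION_MOVE (two pointers): float degrees = fingerAngle(event) - startAngle;
//                               matrix.postRotate(degrees, pivotX, pivotY);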