path: root/drivers/gpu/drm/i915/i915_fixed.h
blob: a327094de2bd7e5d3576d9888cfae8ea40549fdb
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2018 Intel Corporation
 */

#ifndef _I915_FIXED_H_
#define _I915_FIXED_H_

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/types.h>

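/*
 * Unsigned 16.16 fixed-point type: the high 16 bits of .val hold the
 * integer part, the low 16 bits the fractional part.
 */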
typedef struct {
	u32 val;
} uint_fixed_16_16_t;

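/* Largest representable 16.16 value (65535 + 65535/65536) */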
#define FP_16_16_MAX ((uint_fixed_16_16_t){ .val = UINT_MAX })

static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
{
	return val.val == 0;
}

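/*
 * Convert an integer to 16.16 fixed point. Values that do not fit in the
 * 16 integer bits trigger a WARN and are truncated by the shift.
 */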
static inline uint_fixed_16_16_t u32_to_fixed16(u32 val)
{
	uint_fixed_16_16_t fp = { .val = val << 16 };

	WARN_ON(val > U16_MAX);

	return fp;
}

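/* Convert 16.16 fixed point to an integer, rounding any fraction up */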
static inline u32 fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
{
	return DIV_ROUND_UP(fp.val, 1 << 16);
}

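/* Convert 16.16 fixed point to an integer, truncating the fraction */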
static inline u32 fixed16_to_u32(uint_fixed_16_16_t fp)
{
	return fp.val >> 16;
}

static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
					     uint_fixed_16_16_t min2)
{
	uint_fixed_16_16_t min = { .val = min(min1.val, min2.val) };

	return min;
}

static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
					     uint_fixed_16_16_t max2)
{
	uint_fixed_16_16_t max = { .val = max(max1.val, max2.val) };

	return max;
}

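/*
 * Reduce a 64-bit intermediate result to 16.16 fixed point: the low 32 bits
 * are kept and a WARN is raised if the value does not fit.
 */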
static inline uint_fixed_16_16_t clamp_u64_to_fixed16(u64 val)
{
	uint_fixed_16_16_t fp = { .val = (u32)val };

	WARN_ON(val > U32_MAX);

	return fp;
}

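/*
 * Divide two 16.16 values; the scale factors cancel, so the result is a
 * plain integer ratio, rounded up.
 */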
static inline u32 div_round_up_fixed16(uint_fixed_16_16_t val,
				       uint_fixed_16_16_t d)
{
	return DIV_ROUND_UP(val.val, d.val);
}

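/*
 * Multiply an integer by a 16.16 value and round the result up to an
 * integer, WARNing if it overflows 32 bits.
 */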
static inline u32 mul_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
{
	u64 tmp;

	tmp = mul_u32_u32(val, mul.val);
	tmp = DIV_ROUND_UP_ULL(tmp, 1 << 16);
	WARN_ON(tmp > U32_MAX);

	return (u32)tmp;
}

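/* Multiply two 16.16 values, shifting the 64-bit product back down to 16.16 */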
static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
					     uint_fixed_16_16_t mul)
{
	u64 tmp;

	tmp = mul_u32_u32(val.val, mul.val);
	tmp = tmp >> 16;

	return clamp_u64_to_fixed16(tmp);
}

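/* Divide two integers and return the ratio as a 16.16 value, rounded up */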
static inline uint_fixed_16_16_t div_fixed16(u32 val, u32 d)
{
	u64 tmp;

	tmp = (u64)val << 16;
	tmp = DIV_ROUND_UP_ULL(tmp, d);

	return clamp_u64_to_fixed16(tmp);
}

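/*
 * Divide an integer by a 16.16 value and round the result up to an
 * integer, WARNing if it overflows 32 bits.
 */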
static inline u32 div_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t d)
{
	u64 tmp;

	tmp = (u64)val << 16;
	tmp = DIV_ROUND_UP_ULL(tmp, d.val);
	WARN_ON(tmp > U32_MAX);

	return (u32)tmp;
}

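/* Multiply an integer by a 16.16 value, returning a 16.16 result */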
static inline uint_fixed_16_16_t mul_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
{
	u64 tmp;

	tmp = mul_u32_u32(val, mul.val);

	return clamp_u64_to_fixed16(tmp);
}

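/* Add two 16.16 values, checking the 64-bit sum for overflow */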
static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
					     uint_fixed_16_16_t add2)
{
	u64 tmp;

	tmp = (u64)add1.val + add2.val;

	return clamp_u64_to_fixed16(tmp);
}

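/* Add an integer to a 16.16 value, checking the 64-bit sum for overflow */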
static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
						 u32 add2)
{
	uint_fixed_16_16_t tmp_add2 = u32_to_fixed16(add2);
	u64 tmp;

	tmp = (u64)add1.val + tmp_add2.val;

	return clamp_u64_to_fixed16(tmp);
}
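
/*
 * Example usage (hypothetical values, not part of the original header):
 *
 *	uint_fixed_16_16_t ratio = div_fixed16(1920, 1280);
 *	u32 scaled = mul_round_up_u32_fixed16(200, ratio);
 *
 * ratio holds 1.5 in 16.16 form (0x18000) and scaled evaluates to 300.
 */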

#endif /* _I915_FIXED_H_ */